diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 669c9e64d82..4416a152b80 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -272,10 +272,17 @@ Trunk (Unreleased) HADOOP-8918. test-patch.sh is parsing modified files wrong. (Raja Aluri via suresh) + HADOOP-8589 ViewFs tests fail when tests and home dirs are nested. + (sanjay Radia) + + HADOOP-8974. TestDFVariations fails on Windows. (Chris Nauroth via suresh) + OPTIMIZATIONS HADOOP-7761. Improve the performance of raw comparisons. (todd) + HADOOP-8589 ViewFs tests fail when tests and home dirs are nested (sanjay Radia) + Release 2.0.3-alpha - Unreleased INCOMPATIBLE CHANGES @@ -285,6 +292,8 @@ Release 2.0.3-alpha - Unreleased HADOOP-8597. Permit FsShell's text command to read Avro files. (Ivan Vladimirov Ivanov via cutting) + HADOOP-9020. Add a SASL PLAIN server (daryn via bobby) + IMPROVEMENTS HADOOP-8789. Tests setLevel(Level.OFF) should be Level.ERROR. @@ -342,6 +351,16 @@ Release 2.0.3-alpha - Unreleased HADOOP-9010. Map UGI authenticationMethod to RPC authMethod (daryn via bobby) + HADOOP-9013. UGI should not hardcode loginUser's authenticationType (daryn + via bobby) + + HADOOP-9014. Standardize creation of SaslRpcClients (daryn via bobby) + + HADOOP-9015. Standardize creation of SaslRpcServers (daryn via bobby) + + HADOOP-8860. Split MapReduce and YARN sections in documentation navigation. + (tomwhite via tucu) + OPTIMIZATIONS HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang @@ -400,6 +419,8 @@ Release 2.0.3-alpha - Unreleased HADOOP-9012. IPC Client sends wrong connection context (daryn via bobby) + HADOOP-7115. Add a cache for getpwuid_r and getpwgid_r calls (tucu) + Release 2.0.2-alpha - 2012-09-07 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties index 4c49da0ad9f..b92ad27b67a 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties +++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties @@ -99,6 +99,13 @@ log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize} log4j.appender.TLA.layout=org.apache.log4j.PatternLayout log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n +# +# HDFS block state change log from block manager +# +# Uncomment the following to suppress normal block state change +# messages from BlockManager in NameNode. 
+#log4j.logger.BlockStateChange=WARN + # #Security appender # diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java index 28f8dd0532e..a7579a96406 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java @@ -184,5 +184,11 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic { */ public static final String KERBEROS_TICKET_CACHE_PATH = "hadoop.security.kerberos.ticket.cache.path"; -} + public static final String HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_KEY = + "hadoop.security.uid.cache.secs"; + + public static final long HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_DEFAULT = + 4*60*60; // 4 hours + +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java index 962847154ac..1293448eea3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java @@ -125,6 +125,11 @@ public abstract class DelegateToFileSystem extends AbstractFileSystem { public FsServerDefaults getServerDefaults() throws IOException { return fsImpl.getServerDefaults(); } + + @Override + public Path getHomeDirectory() { + return fsImpl.getHomeDirectory(); + } @Override public int getUriDefaultPort() { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java index b3acf506f6a..e4988efeaff 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java @@ -153,12 +153,6 @@ class ChRootedFileSystem extends FilterFileSystem { return makeQualified( new Path(chRootPathPartString + f.toUri().toString())); } - - @Override - public Path getHomeDirectory() { - return new Path("/user/"+System.getProperty("user.name")).makeQualified( - getUri(), null); - } @Override public Path getWorkingDirectory() { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index d4de01efa0e..f4fbc66b530 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -256,8 +256,9 @@ public class ViewFileSystem extends FileSystem { if (base == null) { base = "/user"; } - homeDir = - this.makeQualified(new Path(base + "/" + ugi.getShortUserName())); + homeDir = (base.equals("/") ? 
+ this.makeQualified(new Path(base + ugi.getShortUserName())): + this.makeQualified(new Path(base + "/" + ugi.getShortUserName()))); } return homeDir; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java index a8a77bec5d0..dcfe5f32031 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java @@ -248,8 +248,9 @@ public class ViewFs extends AbstractFileSystem { if (base == null) { base = "/user"; } - homeDir = - this.makeQualified(new Path(base + "/" + ugi.getShortUserName())); + homeDir = (base.equals("/") ? + this.makeQualified(new Path(base + ugi.getShortUserName())): + this.makeQualified(new Path(base + "/" + ugi.getShortUserName()))); } return homeDir; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java index b30c4a4da44..3d01810e712 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SecureIOUtils.java @@ -120,7 +120,7 @@ public class SecureIOUtils { FileInputStream fis = new FileInputStream(f); boolean success = false; try { - Stat stat = NativeIO.fstat(fis.getFD()); + Stat stat = NativeIO.getFstat(fis.getFD()); checkStat(f, stat.getOwner(), stat.getGroup(), expectedOwner, expectedGroup); success = true; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java index 4cfa0761edc..94ff5f6057f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java @@ -19,8 +19,13 @@ package org.apache.hadoop.io.nativeio; import java.io.FileDescriptor; import java.io.IOException; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.util.NativeCodeLoader; import org.apache.commons.logging.Log; @@ -30,6 +35,8 @@ import org.apache.commons.logging.LogFactory; * These functions should generally be used alongside a fallback to another * more portable mechanism. 
*/ +@InterfaceAudience.Private +@InterfaceStability.Unstable public class NativeIO { // Flags for open() call from bits/fcntl.h public static final int O_RDONLY = 00; @@ -86,6 +93,8 @@ public class NativeIO { "hadoop.workaround.non.threadsafe.getpwuid"; static final boolean WORKAROUND_NON_THREADSAFE_CALLS_DEFAULT = false; + private static long cacheTimeout = -1; + static { if (NativeCodeLoader.isNativeCodeLoaded()) { try { @@ -96,6 +105,14 @@ public class NativeIO { initNative(); nativeLoaded = true; + + cacheTimeout = conf.getLong( + CommonConfigurationKeys.HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_KEY, + CommonConfigurationKeys.HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_DEFAULT) * + 1000; + LOG.debug("Initialized cache for IDs to User/Group mapping with a" + + " cache timeout of " + cacheTimeout/1000 + " seconds."); + } catch (Throwable t) { // This can happen if the user has an older version of libhadoop.so // installed - in this case we can continue without native IO @@ -115,7 +132,7 @@ public class NativeIO { /** Wrapper around open(2) */ public static native FileDescriptor open(String path, int flags, int mode) throws IOException; /** Wrapper around fstat(2) */ - public static native Stat fstat(FileDescriptor fd) throws IOException; + private static native Stat fstat(FileDescriptor fd) throws IOException; /** Wrapper around chmod(2) */ public static native void chmod(String path, int mode) throws IOException; @@ -176,6 +193,7 @@ public class NativeIO { * Result type of the fstat call */ public static class Stat { + private int ownerId, groupId; private String owner, group; private int mode; @@ -196,9 +214,9 @@ public class NativeIO { public static final int S_IWUSR = 0000200; /* write permission, owner */ public static final int S_IXUSR = 0000100; /* execute/search permission, owner */ - Stat(String owner, String group, int mode) { - this.owner = owner; - this.group = group; + Stat(int ownerId, int groupId, int mode) { + this.ownerId = ownerId; + this.groupId = groupId; this.mode = mode; } @@ -218,4 +236,61 @@ public class NativeIO { return mode; } } + + static native String getUserName(int uid) throws IOException; + + static native String getGroupName(int uid) throws IOException; + + private static class CachedName { + final long timestamp; + final String name; + + public CachedName(String name, long timestamp) { + this.name = name; + this.timestamp = timestamp; + } + } + + private static final Map USER_ID_NAME_CACHE = + new ConcurrentHashMap(); + + private static final Map GROUP_ID_NAME_CACHE = + new ConcurrentHashMap(); + + private enum IdCache { USER, GROUP } + + private static String getName(IdCache domain, int id) throws IOException { + Map idNameCache = (domain == IdCache.USER) + ? USER_ID_NAME_CACHE : GROUP_ID_NAME_CACHE; + String name; + CachedName cachedName = idNameCache.get(id); + long now = System.currentTimeMillis(); + if (cachedName != null && (cachedName.timestamp + cacheTimeout) > now) { + name = cachedName.name; + } else { + name = (domain == IdCache.USER) ? getUserName(id) : getGroupName(id); + if (LOG.isDebugEnabled()) { + String type = (domain == IdCache.USER) ? "UserName" : "GroupName"; + LOG.debug("Got " + type + " " + name + " for ID " + id + + " from the native implementation"); + } + cachedName = new CachedName(name, now); + idNameCache.put(id, cachedName); + } + return name; + } + + /** + * Returns the file stat for a file descriptor. + * + * @param fd file descriptor. + * @return the file descriptor file stat. 
+ * @throws IOException thrown if there was an IO error while obtaining the file stat. + */ + public static Stat getFstat(FileDescriptor fd) throws IOException { + Stat stat = fstat(fd); + stat.owner = getName(IdCache.USER, stat.ownerId); + stat.group = getName(IdCache.GROUP, stat.groupId); + return stat; + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index efaf6028ea9..60155e71ce8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -57,6 +57,7 @@ import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.LinkedBlockingQueue; +import javax.security.auth.callback.CallbackHandler; import javax.security.sasl.Sasl; import javax.security.sasl.SaslException; import javax.security.sasl.SaslServer; @@ -87,6 +88,7 @@ import org.apache.hadoop.security.SaslRpcServer.SaslDigestCallbackHandler; import org.apache.hadoop.security.SaslRpcServer.SaslGssCallbackHandler; import org.apache.hadoop.security.SaslRpcServer.SaslStatus; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.util.KerberosName; import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.security.authorize.ProxyUsers; @@ -1078,7 +1080,6 @@ public abstract class Server { IpcConnectionContextProto connectionContext; String protocolName; - boolean useSasl; SaslServer saslServer; private AuthMethod authMethod; private boolean saslContextEstablished; @@ -1194,49 +1195,6 @@ public abstract class Server { if (!saslContextEstablished) { byte[] replyToken = null; try { - if (saslServer == null) { - switch (authMethod) { - case DIGEST: - if (secretManager == null) { - throw new AccessControlException( - "Server is not configured to do DIGEST authentication."); - } - secretManager.checkAvailableForRead(); - saslServer = Sasl.createSaslServer(AuthMethod.DIGEST - .getMechanismName(), null, SaslRpcServer.SASL_DEFAULT_REALM, - SaslRpcServer.SASL_PROPS, new SaslDigestCallbackHandler( - secretManager, this)); - break; - default: - UserGroupInformation current = UserGroupInformation - .getCurrentUser(); - String fullName = current.getUserName(); - if (LOG.isDebugEnabled()) - LOG.debug("Kerberos principal name is " + fullName); - final String names[] = SaslRpcServer.splitKerberosName(fullName); - if (names.length != 3) { - throw new AccessControlException( - "Kerberos principal name does NOT have the expected " - + "hostname part: " + fullName); - } - current.doAs(new PrivilegedExceptionAction() { - @Override - public Object run() throws SaslException { - saslServer = Sasl.createSaslServer(AuthMethod.KERBEROS - .getMechanismName(), names[0], names[1], - SaslRpcServer.SASL_PROPS, new SaslGssCallbackHandler()); - return null; - } - }); - } - if (saslServer == null) - throw new AccessControlException( - "Unable to find SASL server implementation for " - + authMethod.getMechanismName()); - if (LOG.isDebugEnabled()) - LOG.debug("Created SASL server with mechanism = " - + authMethod.getMechanismName()); - } if (LOG.isDebugEnabled()) LOG.debug("Have read input token of size " + saslToken.length + " for processing by saslServer.evaluateResponse()"); @@ -1375,38 +1333,27 @@ public 
abstract class Server { dataLengthBuffer.clear(); if (authMethod == null) { throw new IOException("Unable to read authentication method"); - } + } + boolean useSaslServer = isSecurityEnabled; final boolean clientUsingSasl; switch (authMethod) { case SIMPLE: { // no sasl for simple - if (isSecurityEnabled) { - AccessControlException ae = new AccessControlException("Authorization (" - + CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION - + ") is enabled but authentication (" - + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION - + ") is configured as simple. Please configure another method " - + "like kerberos or digest."); - setupResponse(authFailedResponse, authFailedCall, RpcStatusProto.FATAL, - null, ae.getClass().getName(), ae.getMessage()); - responder.doRespond(authFailedCall); - throw ae; - } clientUsingSasl = false; - useSasl = false; break; } - case DIGEST: { + case DIGEST: { // always allow tokens if there's a secret manager + useSaslServer |= (secretManager != null); clientUsingSasl = true; - useSasl = (secretManager != null); break; } default: { clientUsingSasl = true; - useSasl = isSecurityEnabled; break; } - } - if (clientUsingSasl && !useSasl) { + } + if (useSaslServer) { + saslServer = createSaslServer(authMethod); + } else if (clientUsingSasl) { // security is off doSaslReply(SaslStatus.SUCCESS, new IntWritable( SaslRpcServer.SWITCH_TO_SIMPLE_AUTH), null, null); authMethod = AuthMethod.SIMPLE; @@ -1448,7 +1395,7 @@ public abstract class Server { continue; } boolean isHeaderRead = connectionContextRead; - if (useSasl) { + if (saslServer != null) { saslReadAndProcess(data.array()); } else { processOneRpc(data.array()); @@ -1462,6 +1409,84 @@ public abstract class Server { } } + private SaslServer createSaslServer(AuthMethod authMethod) + throws IOException { + try { + return createSaslServerInternal(authMethod); + } catch (IOException ioe) { + final String ioeClass = ioe.getClass().getName(); + final String ioeMessage = ioe.getLocalizedMessage(); + if (authMethod == AuthMethod.SIMPLE) { + setupResponse(authFailedResponse, authFailedCall, + RpcStatusProto.FATAL, null, ioeClass, ioeMessage); + responder.doRespond(authFailedCall); + } else { + doSaslReply(SaslStatus.ERROR, null, ioeClass, ioeMessage); + } + throw ioe; + } + } + + private SaslServer createSaslServerInternal(AuthMethod authMethod) + throws IOException { + SaslServer saslServer = null; + String hostname = null; + String saslProtocol = null; + CallbackHandler saslCallback = null; + + switch (authMethod) { + case SIMPLE: { + throw new AccessControlException("Authorization (" + + CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION + + ") is enabled but authentication (" + + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION + + ") is configured as simple. 
Please configure another method " + + "like kerberos or digest."); + } + case DIGEST: { + if (secretManager == null) { + throw new AccessControlException( + "Server is not configured to do DIGEST authentication."); + } + secretManager.checkAvailableForRead(); + hostname = SaslRpcServer.SASL_DEFAULT_REALM; + saslCallback = new SaslDigestCallbackHandler(secretManager, this); + break; + } + case KERBEROS: { + String fullName = UserGroupInformation.getCurrentUser().getUserName(); + if (LOG.isDebugEnabled()) + LOG.debug("Kerberos principal name is " + fullName); + KerberosName krbName = new KerberosName(fullName); + hostname = krbName.getHostName(); + if (hostname == null) { + throw new AccessControlException( + "Kerberos principal name does NOT have the expected " + + "hostname part: " + fullName); + } + saslProtocol = krbName.getServiceName(); + saslCallback = new SaslGssCallbackHandler(); + break; + } + default: + throw new AccessControlException( + "Server does not support SASL " + authMethod); + } + + String mechanism = authMethod.getMechanismName(); + saslServer = Sasl.createSaslServer( + mechanism, saslProtocol, hostname, + SaslRpcServer.SASL_PROPS, saslCallback); + if (saslServer == null) { + throw new AccessControlException( + "Unable to find SASL server implementation for " + mechanism); + } + if (LOG.isDebugEnabled()) { + LOG.debug("Created SASL server with mechanism = " + mechanism); + } + return saslServer; + } + /** * Try to set up the response to indicate that the client version * is incompatible with the server. This can contain special-case @@ -1523,7 +1548,7 @@ public abstract class Server { .getProtocol() : null; UserGroupInformation protocolUser = ProtoUtil.getUgi(connectionContext); - if (!useSasl) { + if (saslServer == null) { user = protocolUser; if (user != null) { user.setAuthenticationMethod(AuthMethod.SIMPLE); @@ -1999,7 +2024,7 @@ public abstract class Server { private void wrapWithSasl(ByteArrayOutputStream response, Call call) throws IOException { - if (call.connection.useSasl) { + if (call.connection.saslServer != null) { byte[] token = response.toByteArray(); // synchronization may be needed since there can be multiple Handler // threads using saslServer to wrap responses. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPlainServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPlainServer.java new file mode 100644 index 00000000000..7d1b98062b0 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPlainServer.java @@ -0,0 +1,159 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.security; + +import java.security.Provider; +import java.util.Map; + +import javax.security.auth.callback.*; +import javax.security.sasl.AuthorizeCallback; +import javax.security.sasl.Sasl; +import javax.security.sasl.SaslException; +import javax.security.sasl.SaslServer; +import javax.security.sasl.SaslServerFactory; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class SaslPlainServer implements SaslServer { + @SuppressWarnings("serial") + public static class SecurityProvider extends Provider { + public SecurityProvider() { + super("SaslPlainServer", 1.0, "SASL PLAIN Authentication Server"); + put("SaslServerFactory.PLAIN", + SaslPlainServerFactory.class.getName()); + } + } + + public static class SaslPlainServerFactory implements SaslServerFactory { + @Override + public SaslServer createSaslServer(String mechanism, String protocol, + String serverName, Map props, CallbackHandler cbh) + throws SaslException { + return "PLAIN".equals(mechanism) ? new SaslPlainServer(cbh) : null; + } + @Override + public String[] getMechanismNames(Map props){ + return (props == null) || "false".equals(props.get(Sasl.POLICY_NOPLAINTEXT)) + ? new String[]{"PLAIN"} + : new String[0]; + } + } + + private CallbackHandler cbh; + private boolean completed; + private String authz; + + SaslPlainServer(CallbackHandler callback) { + this.cbh = callback; + } + + @Override + public String getMechanismName() { + return "PLAIN"; + } + + @Override + public byte[] evaluateResponse(byte[] response) throws SaslException { + if (completed) { + throw new IllegalStateException("PLAIN authentication has completed"); + } + if (response == null) { + throw new IllegalArgumentException("Received null response"); + } + try { + String payload; + try { + payload = new String(response, "UTF-8"); + } catch (Exception e) { + throw new IllegalArgumentException("Received corrupt response", e); + } + // [ authz, authn, password ] + String[] parts = payload.split("\u0000", 3); + if (parts.length != 3) { + throw new IllegalArgumentException("Received corrupt response"); + } + if (parts[0].isEmpty()) { // authz = authn + parts[0] = parts[1]; + } + + NameCallback nc = new NameCallback("SASL PLAIN"); + nc.setName(parts[1]); + PasswordCallback pc = new PasswordCallback("SASL PLAIN", false); + pc.setPassword(parts[2].toCharArray()); + AuthorizeCallback ac = new AuthorizeCallback(parts[1], parts[0]); + cbh.handle(new Callback[]{nc, pc, ac}); + if (ac.isAuthorized()) { + authz = ac.getAuthorizedID(); + } + } catch (Exception e) { + throw new SaslException("PLAIN auth failed: " + e.getMessage()); + } finally { + completed = true; + } + return null; + } + + private void throwIfNotComplete() { + if (!completed) { + throw new IllegalStateException("PLAIN authentication not completed"); + } + } + + @Override + public boolean isComplete() { + return completed; + } + + @Override + public String getAuthorizationID() { + throwIfNotComplete(); + return authz; + } + + @Override + public Object getNegotiatedProperty(String propName) { + throwIfNotComplete(); + return Sasl.QOP.equals(propName) ? 
"auth" : null; + } + + @Override + public byte[] wrap(byte[] outgoing, int offset, int len) + throws SaslException { + throwIfNotComplete(); + throw new IllegalStateException( + "PLAIN supports neither integrity nor privacy"); + } + + @Override + public byte[] unwrap(byte[] incoming, int offset, int len) + throws SaslException { + throwIfNotComplete(); + throw new IllegalStateException( + "PLAIN supports neither integrity nor privacy"); + } + + @Override + public void dispose() throws SaslException { + cbh = null; + authz = null; + } +} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java index 98b3f5db295..8365fa7ccd0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcClient.java @@ -25,6 +25,7 @@ import java.io.DataOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.util.Map; import javax.security.auth.callback.Callback; import javax.security.auth.callback.CallbackHandler; @@ -45,6 +46,7 @@ import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.SaslRpcServer.AuthMethod; import org.apache.hadoop.security.SaslRpcServer.SaslStatus; +import org.apache.hadoop.security.authentication.util.KerberosName; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; @@ -69,40 +71,48 @@ public class SaslRpcClient { public SaslRpcClient(AuthMethod method, Token token, String serverPrincipal) throws IOException { + String saslUser = null; + String saslProtocol = null; + String saslServerName = null; + Map saslProperties = SaslRpcServer.SASL_PROPS; + CallbackHandler saslCallback = null; + switch (method) { - case DIGEST: - if (LOG.isDebugEnabled()) - LOG.debug("Creating SASL " + AuthMethod.DIGEST.getMechanismName() - + " client to authenticate to service at " + token.getService()); - saslClient = Sasl.createSaslClient(new String[] { AuthMethod.DIGEST - .getMechanismName() }, null, null, SaslRpcServer.SASL_DEFAULT_REALM, - SaslRpcServer.SASL_PROPS, new SaslClientCallbackHandler(token)); - break; - case KERBEROS: - if (LOG.isDebugEnabled()) { - LOG.debug("Creating SASL " + AuthMethod.KERBEROS.getMechanismName() - + " client. 
Server's Kerberos principal name is " - + serverPrincipal); + case DIGEST: { + saslServerName = SaslRpcServer.SASL_DEFAULT_REALM; + saslCallback = new SaslClientCallbackHandler(token); + break; } - if (serverPrincipal == null || serverPrincipal.length() == 0) { - throw new IOException( - "Failed to specify server's Kerberos principal name"); + case KERBEROS: { + if (serverPrincipal == null || serverPrincipal.isEmpty()) { + throw new IOException( + "Failed to specify server's Kerberos principal name"); + } + KerberosName name = new KerberosName(serverPrincipal); + saslProtocol = name.getServiceName(); + saslServerName = name.getHostName(); + if (saslServerName == null) { + throw new IOException( + "Kerberos principal name does NOT have the expected hostname part: " + + serverPrincipal); + } + break; } - String names[] = SaslRpcServer.splitKerberosName(serverPrincipal); - if (names.length != 3) { - throw new IOException( - "Kerberos principal name does NOT have the expected hostname part: " - + serverPrincipal); - } - saslClient = Sasl.createSaslClient(new String[] { AuthMethod.KERBEROS - .getMechanismName() }, null, names[0], names[1], - SaslRpcServer.SASL_PROPS, null); - break; - default: - throw new IOException("Unknown authentication method " + method); + default: + throw new IOException("Unknown authentication method " + method); } - if (saslClient == null) + + String mechanism = method.getMechanismName(); + if (LOG.isDebugEnabled()) { + LOG.debug("Creating SASL " + mechanism + + " client to authenticate to service at " + saslServerName); + } + saslClient = Sasl.createSaslClient( + new String[] { mechanism }, saslUser, saslProtocol, saslServerName, + saslProperties, saslCallback); + if (saslClient == null) { throw new IOException("Unable to find SASL client implementation"); + } } private static void readStatus(DataInputStream inStream) throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java index 31b4c35dae2..33942dc0885 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java @@ -23,6 +23,7 @@ import java.io.DataInput; import java.io.DataInputStream; import java.io.DataOutput; import java.io.IOException; +import java.security.Security; import java.util.Map; import java.util.TreeMap; @@ -89,6 +90,7 @@ public class SaslRpcServer { SASL_PROPS.put(Sasl.QOP, saslQOP.getSaslQop()); SASL_PROPS.put(Sasl.SERVER_AUTH, "true"); + Security.addProvider(new SaslPlainServer.SecurityProvider()); } static String encodeIdentifier(byte[] identifier) { @@ -138,7 +140,8 @@ public class SaslRpcServer { public static enum AuthMethod { SIMPLE((byte) 80, ""), KERBEROS((byte) 81, "GSSAPI"), - DIGEST((byte) 82, "DIGEST-MD5"); + DIGEST((byte) 82, "DIGEST-MD5"), + PLAIN((byte) 83, "PLAIN"); /** The code for this method. 
*/ public final byte code; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index 88f82912025..f630d695a90 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -238,14 +238,17 @@ public class UserGroupInformation { */ private static synchronized void initUGI(Configuration conf) { AuthenticationMethod auth = SecurityUtil.getAuthenticationMethod(conf); - if (auth == AuthenticationMethod.SIMPLE) { - useKerberos = false; - } else if (auth == AuthenticationMethod.KERBEROS) { - useKerberos = true; - } else { - throw new IllegalArgumentException("Invalid attribute value for " + - HADOOP_SECURITY_AUTHENTICATION + - " of " + auth); + switch (auth) { + case SIMPLE: + useKerberos = false; + break; + case KERBEROS: + useKerberos = true; + break; + default: + throw new IllegalArgumentException("Invalid attribute value for " + + HADOOP_SECURITY_AUTHENTICATION + + " of " + auth); } try { kerberosMinSecondsBeforeRelogin = 1000L * conf.getLong( @@ -637,19 +640,20 @@ public class UserGroupInformation { try { Subject subject = new Subject(); LoginContext login; + AuthenticationMethod authenticationMethod; if (isSecurityEnabled()) { + authenticationMethod = AuthenticationMethod.KERBEROS; login = newLoginContext(HadoopConfiguration.USER_KERBEROS_CONFIG_NAME, subject, new HadoopConfiguration()); } else { + authenticationMethod = AuthenticationMethod.SIMPLE; login = newLoginContext(HadoopConfiguration.SIMPLE_CONFIG_NAME, subject, new HadoopConfiguration()); } login.login(); loginUser = new UserGroupInformation(subject); loginUser.setLogin(login); - loginUser.setAuthenticationMethod(isSecurityEnabled() ? 
- AuthenticationMethod.KERBEROS : - AuthenticationMethod.SIMPLE); + loginUser.setAuthenticationMethod(authenticationMethod); loginUser = new UserGroupInformation(login.getSubject()); String fileLocation = System.getenv(HADOOP_TOKEN_FILE_LOCATION); if (fileLocation != null) { diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c index 4a91d0af954..7e82152c1fe 100644 --- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c +++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c @@ -72,16 +72,27 @@ static int workaround_non_threadsafe_calls(JNIEnv *env, jclass clazz) { static void stat_init(JNIEnv *env, jclass nativeio_class) { // Init Stat jclass clazz = (*env)->FindClass(env, "org/apache/hadoop/io/nativeio/NativeIO$Stat"); - PASS_EXCEPTIONS(env); + if (!clazz) { + return; // exception has been raised + } stat_clazz = (*env)->NewGlobalRef(env, clazz); + if (!stat_clazz) { + return; // exception has been raised + } stat_ctor = (*env)->GetMethodID(env, stat_clazz, "", - "(Ljava/lang/String;Ljava/lang/String;I)V"); - + "(III)V"); + if (!stat_ctor) { + return; // exception has been raised + } jclass obj_class = (*env)->FindClass(env, "java/lang/Object"); - assert(obj_class != NULL); + if (!obj_class) { + return; // exception has been raised + } jmethodID obj_ctor = (*env)->GetMethodID(env, obj_class, "", "()V"); - assert(obj_ctor != NULL); + if (!obj_ctor) { + return; // exception has been raised + } if (workaround_non_threadsafe_calls(env, nativeio_class)) { pw_lock_object = (*env)->NewObject(env, obj_class, obj_ctor); @@ -158,8 +169,6 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_fstat( JNIEnv *env, jclass clazz, jobject fd_object) { jobject ret = NULL; - char *pw_buf = NULL; - int pw_lock_locked = 0; int fd = fd_get(env, fd_object); PASS_EXCEPTIONS_GOTO(env, cleanup); @@ -171,71 +180,14 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_fstat( goto cleanup; } - size_t pw_buflen = get_pw_buflen(); - if ((pw_buf = malloc(pw_buflen)) == NULL) { - THROW(env, "java/lang/OutOfMemoryError", "Couldn't allocate memory for pw buffer"); - goto cleanup; - } - - if (pw_lock_object != NULL) { - if ((*env)->MonitorEnter(env, pw_lock_object) != JNI_OK) { - goto cleanup; - } - pw_lock_locked = 1; - } - - // Grab username - struct passwd pwd, *pwdp; - while ((rc = getpwuid_r(s.st_uid, &pwd, pw_buf, pw_buflen, &pwdp)) != 0) { - if (rc != ERANGE) { - throw_ioe(env, rc); - goto cleanup; - } - free(pw_buf); - pw_buflen *= 2; - if ((pw_buf = malloc(pw_buflen)) == NULL) { - THROW(env, "java/lang/OutOfMemoryError", "Couldn't allocate memory for pw buffer"); - goto cleanup; - } - } - assert(pwdp == &pwd); - - jstring jstr_username = (*env)->NewStringUTF(env, pwd.pw_name); - if (jstr_username == NULL) goto cleanup; - - // Grab group - struct group grp, *grpp; - while ((rc = getgrgid_r(s.st_gid, &grp, pw_buf, pw_buflen, &grpp)) != 0) { - if (rc != ERANGE) { - throw_ioe(env, rc); - goto cleanup; - } - free(pw_buf); - pw_buflen *= 2; - if ((pw_buf = malloc(pw_buflen)) == NULL) { - THROW(env, "java/lang/OutOfMemoryError", "Couldn't allocate memory for pw buffer"); - goto cleanup; - } - } - assert(grpp == &grp); - - jstring jstr_groupname = (*env)->NewStringUTF(env, grp.gr_name); - PASS_EXCEPTIONS_GOTO(env, cleanup); - // Construct result ret = (*env)->NewObject(env, stat_clazz, 
stat_ctor, - jstr_username, jstr_groupname, s.st_mode); + (jint)s.st_uid, (jint)s.st_gid, (jint)s.st_mode); cleanup: - if (pw_buf != NULL) free(pw_buf); - if (pw_lock_locked) { - (*env)->MonitorExit(env, pw_lock_object); - } return ret; } - - /** * public static native void posix_fadvise( * FileDescriptor fd, long offset, long len, int flags); @@ -385,6 +337,128 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_chmod( (*env)->ReleaseStringUTFChars(env, j_path, path); } +/* + * static native String getUserName(int uid); + */ +JNIEXPORT jstring JNICALL +Java_org_apache_hadoop_io_nativeio_NativeIO_getUserName(JNIEnv *env, +jclass clazz, jint uid) +{ + int pw_lock_locked = 0; + if (pw_lock_object != NULL) { + if ((*env)->MonitorEnter(env, pw_lock_object) != JNI_OK) { + goto cleanup; + } + pw_lock_locked = 1; + } + + char *pw_buf = NULL; + int rc; + size_t pw_buflen = get_pw_buflen(); + if ((pw_buf = malloc(pw_buflen)) == NULL) { + THROW(env, "java/lang/OutOfMemoryError", "Couldn't allocate memory for pw buffer"); + goto cleanup; + } + + // Grab username + struct passwd pwd, *pwdp; + while ((rc = getpwuid_r((uid_t)uid, &pwd, pw_buf, pw_buflen, &pwdp)) != 0) { + if (rc != ERANGE) { + throw_ioe(env, rc); + goto cleanup; + } + free(pw_buf); + pw_buflen *= 2; + if ((pw_buf = malloc(pw_buflen)) == NULL) { + THROW(env, "java/lang/OutOfMemoryError", "Couldn't allocate memory for pw buffer"); + goto cleanup; + } + } + if (pwdp == NULL) { + char msg[80]; + snprintf(msg, sizeof(msg), "uid not found: %d", uid); + THROW(env, "java/io/IOException", msg); + goto cleanup; + } + if (pwdp != &pwd) { + char msg[80]; + snprintf(msg, sizeof(msg), "pwd pointer inconsistent with reference. uid: %d", uid); + THROW(env, "java/lang/IllegalStateException", msg); + goto cleanup; + } + + jstring jstr_username = (*env)->NewStringUTF(env, pwd.pw_name); + +cleanup: + if (pw_lock_locked) { + (*env)->MonitorExit(env, pw_lock_object); + } + if (pw_buf != NULL) free(pw_buf); + return jstr_username; +} + +/* + * static native String getGroupName(int gid); + */ +JNIEXPORT jstring JNICALL +Java_org_apache_hadoop_io_nativeio_NativeIO_getGroupName(JNIEnv *env, +jclass clazz, jint gid) +{ + int pw_lock_locked = 0; + + if (pw_lock_object != NULL) { + if ((*env)->MonitorEnter(env, pw_lock_object) != JNI_OK) { + goto cleanup; + } + pw_lock_locked = 1; + } + + char *pw_buf = NULL; + int rc; + size_t pw_buflen = get_pw_buflen(); + if ((pw_buf = malloc(pw_buflen)) == NULL) { + THROW(env, "java/lang/OutOfMemoryError", "Couldn't allocate memory for pw buffer"); + goto cleanup; + } + + // Grab group + struct group grp, *grpp; + while ((rc = getgrgid_r((uid_t)gid, &grp, pw_buf, pw_buflen, &grpp)) != 0) { + if (rc != ERANGE) { + throw_ioe(env, rc); + goto cleanup; + } + free(pw_buf); + pw_buflen *= 2; + if ((pw_buf = malloc(pw_buflen)) == NULL) { + THROW(env, "java/lang/OutOfMemoryError", "Couldn't allocate memory for pw buffer"); + goto cleanup; + } + } + if (grpp == NULL) { + char msg[80]; + snprintf(msg, sizeof(msg), "gid not found: %d", gid); + THROW(env, "java/io/IOException", msg); + goto cleanup; + } + if (grpp != &grp) { + char msg[80]; + snprintf(msg, sizeof(msg), "pwd pointer inconsistent with reference. 
gid: %d", gid); + THROW(env, "java/lang/IllegalStateException", msg); + goto cleanup; + } + + jstring jstr_groupname = (*env)->NewStringUTF(env, grp.gr_name); + PASS_EXCEPTIONS_GOTO(env, cleanup); + +cleanup: + if (pw_lock_locked) { + (*env)->MonitorExit(env, pw_lock_object); + } + if (pw_buf != NULL) free(pw_buf); + return jstr_groupname; +} + /* * Throw a java.IO.IOException, generating the message from errno. diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index affcc523520..bd6e9420305 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -214,6 +214,17 @@ + + + hadoop.security.uid.cache.secs + 14400 + + This is the config controlling the validity of the entries in the cache + containing the userId to userName and groupId to groupName used by + NativeIO getFstat(). + + + hadoop.rpc.protection authentication diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CLIMiniCluster.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/CLIMiniCluster.apt.vm similarity index 100% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/CLIMiniCluster.apt.vm rename to hadoop-common-project/hadoop-common/src/site/apt/CLIMiniCluster.apt.vm diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm similarity index 100% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm rename to hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/SingleCluster.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm similarity index 100% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/SingleCluster.apt.vm rename to hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java index 38e07d8aace..2c058ca3098 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java @@ -61,19 +61,28 @@ public final class FileSystemTestHelper { return data; } + + /* + * get testRootPath qualified for fSys + */ public static Path getTestRootPath(FileSystem fSys) { return fSys.makeQualified(new Path(TEST_ROOT_DIR)); } + /* + * get testRootPath + pathString qualified for fSys + */ public static Path getTestRootPath(FileSystem fSys, String pathString) { return fSys.makeQualified(new Path(TEST_ROOT_DIR, pathString)); } // the getAbsolutexxx method is needed because the root test dir - // can be messed up by changing the working dir. + // can be messed up by changing the working dir since the TEST_ROOT_PATH + // is often relative to the working directory of process + // running the unit tests. - public static String getAbsoluteTestRootDir(FileSystem fSys) + static String getAbsoluteTestRootDir(FileSystem fSys) throws IOException { // NOTE: can't cache because of different filesystems! 
//if (absTestRootDir == null) diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java index b5bd65bc1a9..b291dd2200f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java @@ -23,6 +23,8 @@ import java.io.File; import java.io.IOException; import java.util.EnumSet; +import org.apache.hadoop.util.Shell; + public class TestDFVariations extends TestCase { public static class XXDF extends DF { @@ -51,7 +53,9 @@ public class TestDFVariations extends TestCase { public void testOSParsing() throws Exception { for (DF.OSType ost : EnumSet.allOf(DF.OSType.class)) { XXDF df = new XXDF(ost.getId()); - assertEquals(ost.getId() + " mount", "/foo/bar", df.getMount()); + assertEquals(ost.getId() + " mount", + Shell.WINDOWS ? df.getDirPath().substring(0, 2) : "/foo/bar", + df.getMount()); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java index 35e2cb7649c..f2aace9472d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java @@ -73,10 +73,10 @@ public class TestChRootedFileSystem { URI uri = fSys.getUri(); Assert.assertEquals(chrootedTo.toUri(), uri); Assert.assertEquals(fSys.makeQualified( - new Path("/user/" + System.getProperty("user.name"))), + new Path(System.getProperty("user.home"))), fSys.getWorkingDirectory()); Assert.assertEquals(fSys.makeQualified( - new Path("/user/" + System.getProperty("user.name"))), + new Path(System.getProperty("user.home"))), fSys.getHomeDirectory()); /* * ChRootedFs as its uri like file:///chrootRoot. diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFs.java index c47308fbc09..c52280154d6 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFs.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFs.java @@ -70,10 +70,10 @@ public class TestChRootedFs { URI uri = fc.getDefaultFileSystem().getUri(); Assert.assertEquals(chrootedTo.toUri(), uri); Assert.assertEquals(fc.makeQualified( - new Path("/user/" + System.getProperty("user.name"))), + new Path(System.getProperty("user.home"))), fc.getWorkingDirectory()); Assert.assertEquals(fc.makeQualified( - new Path("/user/" + System.getProperty("user.name"))), + new Path(System.getProperty("user.home"))), fc.getHomeDirectory()); /* * ChRootedFs as its uri like file:///chrootRoot. 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcMainOperationsLocalFs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcMainOperationsLocalFs.java index 5641c9d70bf..74f558509f1 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcMainOperationsLocalFs.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestFcMainOperationsLocalFs.java @@ -39,44 +39,7 @@ public class TestFcMainOperationsLocalFs extends @Override @Before public void setUp() throws Exception { - /** - * create the test root on local_fs - the mount table will point here - */ - fclocal = FileContext.getLocalFSFileContext(); - targetOfTests = FileContextTestHelper.getTestRootPath(fclocal); - // In case previous test was killed before cleanup - fclocal.delete(targetOfTests, true); - - fclocal.mkdir(targetOfTests, FileContext.DEFAULT_PERM, true); - - - - - // We create mount table so that the test root on the viewFs points to - // to the test root on the target. - // DOing this helps verify the FileStatus.path. - // - // The test root by default when running eclipse - // is a test dir below the working directory. - // (see FileContextTestHelper). - // Since viewFs has no built-in wd, its wd is /user/. - // If this test launched via ant (build.xml) the test root is absolute path - - String srcTestRoot; - if (FileContextTestHelper.TEST_ROOT_DIR.startsWith("/")) { - srcTestRoot = FileContextTestHelper.TEST_ROOT_DIR; - } else { - srcTestRoot = "/user/" + System.getProperty("user.name") + "/" + - FileContextTestHelper.TEST_ROOT_DIR; - } - - Configuration conf = new Configuration(); - ConfigUtil.addLink(conf, srcTestRoot, - targetOfTests.toUri()); - - fc = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf); - //System.out.println("SRCOfTests = "+ FileContextTestHelper.getTestRootPath(fc, "test")); - //System.out.println("TargetOfTests = "+ targetOfTests.toUri()); + fc = ViewFsTestSetup.setupForViewFsLocalFs(); super.setUp(); } @@ -84,6 +47,6 @@ public class TestFcMainOperationsLocalFs extends @After public void tearDown() throws Exception { super.tearDown(); - fclocal.delete(targetOfTests, true); + ViewFsTestSetup.tearDownForViewFsLocalFs(); } -} \ No newline at end of file +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java index 525f28bea7c..446b38e60b8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java @@ -17,7 +17,10 @@ */ package org.apache.hadoop.fs.viewfs; +import java.net.URI; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystemTestHelper; import org.apache.hadoop.fs.FsConstants; @@ -32,14 +35,19 @@ import org.mortbay.log.Log; * * If tests launched via ant (build.xml) the test root is absolute path * If tests launched via eclipse, the test root is - * is a test dir below the working directory. (see FileSystemTestHelper). - * Since viewFs has no built-in wd, its wd is /user/ - * (or /User/ on mac) + * is a test dir below the working directory. 
(see FileContextTestHelper) * - * We set a viewFileSystems with mount point for - * /" pointing to the target fs's testdir + * We set a viewFileSystems with 3 mount points: + * 1) /" of testdir pointing to same in target fs + * 2) /" of home pointing to same in target fs + * 3) /" of wd pointing to same in target fs + * (note in many cases the link may be the same - viewFileSytem handles this) + * + * We also set the view file system's wd to point to the wd. */ public class ViewFileSystemTestSetup { + + static public String ViewFSTestDir = "/testDir"; /** * @@ -56,24 +64,26 @@ public class ViewFileSystemTestSetup { fsTarget.delete(targetOfTests, true); fsTarget.mkdirs(targetOfTests); - // Setup a link from viewfs to targetfs for the first component of - // path of testdir. + + // Set up viewfs link for test dir as described above String testDir = FileSystemTestHelper.getTestRootPath(fsTarget).toUri() .getPath(); - int indexOf2ndSlash = testDir.indexOf('/', 1); - String testDirFirstComponent = testDir.substring(0, indexOf2ndSlash); - ConfigUtil.addLink(conf, testDirFirstComponent, fsTarget.makeQualified( - new Path(testDirFirstComponent)).toUri()); + linkUpFirstComponents(conf, testDir, fsTarget, "test dir"); + + + // Set up viewfs link for home dir as described above + setUpHomeDir(conf, fsTarget); + + + // the test path may be relative to working dir - we need to make that work: + // Set up viewfs link for wd as described above + String wdDir = fsTarget.getWorkingDirectory().toUri().getPath(); + linkUpFirstComponents(conf, wdDir, fsTarget, "working dir"); - // viewFs://home => fsTarget://home - String homeDirRoot = fsTarget.getHomeDirectory() - .getParent().toUri().getPath(); - ConfigUtil.addLink(conf, homeDirRoot, - fsTarget.makeQualified(new Path(homeDirRoot)).toUri()); - ConfigUtil.setHomeDirConf(conf, homeDirRoot); - Log.info("Home dir base " + homeDirRoot); FileSystem fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf); + fsView.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd. + Log.info("Working dir is: " + fsView.getWorkingDirectory()); return fsView; } @@ -91,4 +101,33 @@ public class ViewFileSystemTestSetup { conf.set("fs.viewfs.impl", ViewFileSystem.class.getName()); return conf; } + + static void setUpHomeDir(Configuration conf, FileSystem fsTarget) { + String homeDir = fsTarget.getHomeDirectory().toUri().getPath(); + int indexOf2ndSlash = homeDir.indexOf('/', 1); + if (indexOf2ndSlash >0) { + linkUpFirstComponents(conf, homeDir, fsTarget, "home dir"); + } else { // home dir is at root. Just link the home dir itse + URI linkTarget = fsTarget.makeQualified(new Path(homeDir)).toUri(); + ConfigUtil.addLink(conf, homeDir, linkTarget); + Log.info("Added link for home dir " + homeDir + "->" + linkTarget); + } + // Now set the root of the home dir for viewfs + String homeDirRoot = fsTarget.getHomeDirectory().getParent().toUri().getPath(); + ConfigUtil.setHomeDirConf(conf, homeDirRoot); + Log.info("Home dir base for viewfs" + homeDirRoot); + } + + /* + * Set up link in config for first component of path to the same + * in the target file system. 
+ */ + static void linkUpFirstComponents(Configuration conf, String path, FileSystem fsTarget, String info) { + int indexOf2ndSlash = path.indexOf('/', 1); + String firstComponent = path.substring(0, indexOf2ndSlash); + URI linkTarget = fsTarget.makeQualified(new Path(firstComponent)).toUri(); + ConfigUtil.addLink(conf, firstComponent, linkTarget); + Log.info("Added link for " + info + " " + + firstComponent + "->" + linkTarget); + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java index 4ab91158778..ac63217fd43 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsTestSetup.java @@ -17,12 +17,15 @@ */ package org.apache.hadoop.fs.viewfs; +import java.net.URI; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileContextTestHelper; import org.apache.hadoop.fs.FsConstants; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.viewfs.ConfigUtil; +import org.mortbay.log.Log; /** @@ -31,13 +34,20 @@ import org.apache.hadoop.fs.viewfs.ConfigUtil; * * If tests launched via ant (build.xml) the test root is absolute path * If tests launched via eclipse, the test root is - * is a test dir below the working directory. (see FileContextTestHelper). - * Since viewFs has no built-in wd, its wd is /user/. + * is a test dir below the working directory. (see FileContextTestHelper) * - * We set up fc to be the viewFs with mount point for - * /" pointing to the local file system's testdir + * We set a viewfs with 3 mount points: + * 1) /" of testdir pointing to same in target fs + * 2) /" of home pointing to same in target fs + * 3) /" of wd pointing to same in target fs + * (note in many cases the link may be the same - viewfs handles this) + * + * We also set the view file system's wd to point to the wd. 
*/ + public class ViewFsTestSetup { + + static public String ViewFSTestDir = "/testDir"; /* @@ -47,30 +57,31 @@ public class ViewFsTestSetup { /** * create the test root on local_fs - the mount table will point here */ - FileContext fclocal = FileContext.getLocalFSFileContext(); - Path targetOfTests = FileContextTestHelper.getTestRootPath(fclocal); + FileContext fsTarget = FileContext.getLocalFSFileContext(); + Path targetOfTests = FileContextTestHelper.getTestRootPath(fsTarget); // In case previous test was killed before cleanup - fclocal.delete(targetOfTests, true); + fsTarget.delete(targetOfTests, true); - fclocal.mkdir(targetOfTests, FileContext.DEFAULT_PERM, true); - - String srcTestFirstDir; - if (FileContextTestHelper.TEST_ROOT_DIR.startsWith("/")) { - int indexOf2ndSlash = FileContextTestHelper.TEST_ROOT_DIR.indexOf('/', 1); - srcTestFirstDir = FileContextTestHelper.TEST_ROOT_DIR.substring(0, indexOf2ndSlash); - } else { - srcTestFirstDir = "/user"; - - } - //System.out.println("srcTestFirstDir=" + srcTestFirstDir); - - // Set up the defaultMT in the config with mount point links - // The test dir is root is below /user/ + fsTarget.mkdir(targetOfTests, FileContext.DEFAULT_PERM, true); Configuration conf = new Configuration(); - ConfigUtil.addLink(conf, srcTestFirstDir, - targetOfTests.toUri()); + + // Set up viewfs link for test dir as described above + String testDir = FileContextTestHelper.getTestRootPath(fsTarget).toUri() + .getPath(); + linkUpFirstComponents(conf, testDir, fsTarget, "test dir"); + + + // Set up viewfs link for home dir as described above + setUpHomeDir(conf, fsTarget); + + // the test path may be relative to working dir - we need to make that work: + // Set up viewfs link for wd as described above + String wdDir = fsTarget.getWorkingDirectory().toUri().getPath(); + linkUpFirstComponents(conf, wdDir, fsTarget, "working dir"); FileContext fc = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf); + fc.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd. + Log.info("Working dir is: " + fc.getWorkingDirectory()); //System.out.println("SRCOfTests = "+ getTestRootPath(fc, "test")); //System.out.println("TargetOfTests = "+ targetOfTests.toUri()); return fc; @@ -85,5 +96,36 @@ public class ViewFsTestSetup { Path targetOfTests = FileContextTestHelper.getTestRootPath(fclocal); fclocal.delete(targetOfTests, true); } + + + static void setUpHomeDir(Configuration conf, FileContext fsTarget) { + String homeDir = fsTarget.getHomeDirectory().toUri().getPath(); + int indexOf2ndSlash = homeDir.indexOf('/', 1); + if (indexOf2ndSlash >0) { + linkUpFirstComponents(conf, homeDir, fsTarget, "home dir"); + } else { // home dir is at root. Just link the home dir itse + URI linkTarget = fsTarget.makeQualified(new Path(homeDir)).toUri(); + ConfigUtil.addLink(conf, homeDir, linkTarget); + Log.info("Added link for home dir " + homeDir + "->" + linkTarget); + } + // Now set the root of the home dir for viewfs + String homeDirRoot = fsTarget.getHomeDirectory().getParent().toUri().getPath(); + ConfigUtil.setHomeDirConf(conf, homeDirRoot); + Log.info("Home dir base for viewfs" + homeDirRoot); + } + + /* + * Set up link in config for first component of path to the same + * in the target file system. 
+ */ + static void linkUpFirstComponents(Configuration conf, String path, + FileContext fsTarget, String info) { + int indexOf2ndSlash = path.indexOf('/', 1); + String firstComponent = path.substring(0, indexOf2ndSlash); + URI linkTarget = fsTarget.makeQualified(new Path(firstComponent)).toUri(); + ConfigUtil.addLink(conf, firstComponent, linkTarget); + Log.info("Added link for " + info + " " + + firstComponent + "->" + linkTarget); + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java index f947e02efb4..9ee1516863b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/nativeio/TestNativeIO.java @@ -61,7 +61,7 @@ public class TestNativeIO { public void testFstat() throws Exception { FileOutputStream fos = new FileOutputStream( new File(TEST_DIR, "testfstat")); - NativeIO.Stat stat = NativeIO.fstat(fos.getFD()); + NativeIO.Stat stat = NativeIO.getFstat(fos.getFD()); fos.close(); LOG.info("Stat: " + String.valueOf(stat)); @@ -93,7 +93,7 @@ public class TestNativeIO { long et = Time.now() + 5000; while (Time.now() < et) { try { - NativeIO.Stat stat = NativeIO.fstat(fos.getFD()); + NativeIO.Stat stat = NativeIO.getFstat(fos.getFD()); assertEquals(System.getProperty("user.name"), stat.getOwner()); assertNotNull(stat.getGroup()); assertTrue(!stat.getGroup().isEmpty()); @@ -125,7 +125,7 @@ public class TestNativeIO { new File(TEST_DIR, "testfstat2")); fos.close(); try { - NativeIO.Stat stat = NativeIO.fstat(fos.getFD()); + NativeIO.Stat stat = NativeIO.getFstat(fos.getFD()); } catch (NativeIOException nioe) { LOG.info("Got expected exception", nioe); assertEquals(Errno.EBADF, nioe.getErrno()); @@ -283,4 +283,14 @@ public class TestNativeIO { assertEquals(expected, perms.toShort()); } + @Test + public void testGetUserName() throws IOException { + assertFalse(NativeIO.getUserName(0).isEmpty()); + } + + @Test + public void testGetGroupName() throws IOException { + assertFalse(NativeIO.getGroupName(0).isEmpty()); + } + } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java index 7abd6e9dacb..be3064d20b3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java @@ -27,12 +27,13 @@ import java.io.IOException; import java.lang.annotation.Annotation; import java.net.InetSocketAddress; import java.security.PrivilegedExceptionAction; +import java.security.Security; import java.util.Collection; import java.util.Set; import java.util.regex.Pattern; -import javax.security.sasl.Sasl; - +import javax.security.auth.callback.*; +import javax.security.sasl.*; import junit.framework.Assert; import org.apache.commons.logging.Log; @@ -43,14 +44,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.Client.ConnectionId; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.KerberosInfo; -import org.apache.hadoop.security.SaslInputStream; -import org.apache.hadoop.security.SaslRpcClient; -import org.apache.hadoop.security.SaslRpcServer; -import 
org.apache.hadoop.security.SecurityInfo; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.TestUserGroupInformation; -import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.*; +import org.apache.hadoop.security.SaslRpcServer.AuthMethod; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.security.token.Token; @@ -58,8 +53,10 @@ import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.TokenInfo; import org.apache.hadoop.security.token.TokenSelector; import org.apache.hadoop.security.token.SecretManager.InvalidToken; + import org.apache.log4j.Level; import org.junit.Before; +import org.junit.BeforeClass; import org.junit.Test; /** Unit tests for using Sasl over RPC. */ @@ -74,14 +71,22 @@ public class TestSaslRPC { static final String SERVER_KEYTAB_KEY = "test.ipc.server.keytab"; static final String SERVER_PRINCIPAL_1 = "p1/foo@BAR"; static final String SERVER_PRINCIPAL_2 = "p2/foo@BAR"; - private static Configuration conf; + static Boolean forceSecretManager = null; + + @BeforeClass + public static void setupKerb() { + System.setProperty("java.security.krb5.kdc", ""); + System.setProperty("java.security.krb5.realm", "NONE"); + Security.addProvider(new SaslPlainServer.SecurityProvider()); + } @Before public void setup() { conf = new Configuration(); SecurityUtil.setAuthenticationMethod(KERBEROS, conf); UserGroupInformation.setConfiguration(conf); + forceSecretManager = null; } static { @@ -265,16 +270,6 @@ public class TestSaslRPC { } } - @Test - public void testSecureToInsecureRpc() throws Exception { - SecurityUtil.setAuthenticationMethod(AuthenticationMethod.SIMPLE, conf); - Server server = new RPC.Builder(conf).setProtocol(TestSaslProtocol.class) - .setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0) - .setNumHandlers(5).setVerbose(true).build(); - TestTokenSecretManager sm = new TestTokenSecretManager(); - doDigestRpc(server, sm); - } - @Test public void testErrorMessage() throws Exception { BadTokenSecretManager sm = new BadTokenSecretManager(); @@ -455,6 +450,120 @@ public class TestSaslRPC { System.out.println("Test is successful."); } + @Test + public void testSaslPlainServer() throws IOException { + runNegotiation( + new TestPlainCallbacks.Client("user", "pass"), + new TestPlainCallbacks.Server("user", "pass")); + } + + @Test + public void testSaslPlainServerBadPassword() throws IOException { + SaslException e = null; + try { + runNegotiation( + new TestPlainCallbacks.Client("user", "pass1"), + new TestPlainCallbacks.Server("user", "pass2")); + } catch (SaslException se) { + e = se; + } + assertNotNull(e); + assertEquals("PLAIN auth failed: wrong password", e.getMessage()); + } + + + private void runNegotiation(CallbackHandler clientCbh, + CallbackHandler serverCbh) + throws SaslException { + String mechanism = AuthMethod.PLAIN.getMechanismName(); + + SaslClient saslClient = Sasl.createSaslClient( + new String[]{ mechanism }, null, null, null, null, clientCbh); + assertNotNull(saslClient); + + SaslServer saslServer = Sasl.createSaslServer( + mechanism, null, "localhost", null, serverCbh); + assertNotNull("failed to find PLAIN server", saslServer); + + byte[] response = saslClient.evaluateChallenge(new byte[0]); + assertNotNull(response); + assertTrue(saslClient.isComplete()); + + response = saslServer.evaluateResponse(response); + assertNull(response); + 
assertTrue(saslServer.isComplete()); + assertNotNull(saslServer.getAuthorizationID()); + } + + static class TestPlainCallbacks { + public static class Client implements CallbackHandler { + String user = null; + String password = null; + + Client(String user, String password) { + this.user = user; + this.password = password; + } + + @Override + public void handle(Callback[] callbacks) + throws UnsupportedCallbackException { + for (Callback callback : callbacks) { + if (callback instanceof NameCallback) { + ((NameCallback) callback).setName(user); + } else if (callback instanceof PasswordCallback) { + ((PasswordCallback) callback).setPassword(password.toCharArray()); + } else { + throw new UnsupportedCallbackException(callback, + "Unrecognized SASL PLAIN Callback"); + } + } + } + } + + public static class Server implements CallbackHandler { + String user = null; + String password = null; + + Server(String user, String password) { + this.user = user; + this.password = password; + } + + @Override + public void handle(Callback[] callbacks) + throws UnsupportedCallbackException, SaslException { + NameCallback nc = null; + PasswordCallback pc = null; + AuthorizeCallback ac = null; + + for (Callback callback : callbacks) { + if (callback instanceof NameCallback) { + nc = (NameCallback)callback; + assertEquals(user, nc.getName()); + } else if (callback instanceof PasswordCallback) { + pc = (PasswordCallback)callback; + if (!password.equals(new String(pc.getPassword()))) { + throw new IllegalArgumentException("wrong password"); + } + } else if (callback instanceof AuthorizeCallback) { + ac = (AuthorizeCallback)callback; + assertEquals(user, ac.getAuthorizationID()); + assertEquals(user, ac.getAuthenticationID()); + ac.setAuthorized(true); + ac.setAuthorizedID(ac.getAuthenticationID()); + } else { + throw new UnsupportedCallbackException(callback, + "Unsupported SASL PLAIN Callback"); + } + } + assertNotNull(nc); + assertNotNull(pc); + assertNotNull(ac); + } + } + } + private static Pattern BadToken = Pattern.compile(".*DIGEST-MD5: digest response format violation.*"); private static Pattern KrbFailed = @@ -462,6 +571,8 @@ public class TestSaslRPC { "Failed to specify server's Kerberos principal name.*"); private static Pattern Denied = Pattern.compile(".*Authorization .* is enabled .*"); + private static Pattern NoDigest = + Pattern.compile(".*Server is not configured to do DIGEST auth.*"); /* * simple server @@ -478,6 +589,9 @@ public class TestSaslRPC { // Tokens are ignored because client is reverted to simple assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE, true)); assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, true)); + forceSecretManager = true; + assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE, true)); + assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, true)); } @Test @@ -485,6 +599,9 @@ public class TestSaslRPC { // Tokens are ignored because client is reverted to simple assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE, false)); assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, false)); + forceSecretManager = true; + assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE, false)); + assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, false)); } /* @@ -501,12 +618,19 @@ public class TestSaslRPC { // can use tokens regardless of auth assertAuthEquals(TOKEN, getAuthMethod(SIMPLE, KERBEROS, true)); assertAuthEquals(TOKEN, getAuthMethod(KERBEROS, KERBEROS, true)); + // can't fallback to simple when using kerberos w/o tokens + forceSecretManager = 
false; + assertAuthEquals(NoDigest, getAuthMethod(SIMPLE, KERBEROS, true)); + assertAuthEquals(NoDigest, getAuthMethod(KERBEROS, KERBEROS, true)); } @Test public void testKerberosServerWithInvalidTokens() throws Exception { assertAuthEquals(BadToken, getAuthMethod(SIMPLE, KERBEROS, false)); assertAuthEquals(BadToken, getAuthMethod(KERBEROS, KERBEROS, false)); + forceSecretManager = false; + assertAuthEquals(NoDigest, getAuthMethod(SIMPLE, KERBEROS, true)); + assertAuthEquals(NoDigest, getAuthMethod(KERBEROS, KERBEROS, true)); } @@ -539,21 +663,45 @@ public class TestSaslRPC { final boolean useToken, final boolean useValidToken) throws Exception { - Configuration serverConf = new Configuration(conf); + String currentUser = UserGroupInformation.getCurrentUser().getUserName(); + + final Configuration serverConf = new Configuration(conf); SecurityUtil.setAuthenticationMethod(serverAuth, serverConf); UserGroupInformation.setConfiguration(serverConf); - TestTokenSecretManager sm = new TestTokenSecretManager(); - Server server = new RPC.Builder(serverConf).setProtocol(TestSaslProtocol.class) + final UserGroupInformation serverUgi = + UserGroupInformation.createRemoteUser(currentUser + "-SERVER"); + serverUgi.setAuthenticationMethod(serverAuth); + + final TestTokenSecretManager sm = new TestTokenSecretManager(); + boolean useSecretManager = (serverAuth != SIMPLE); + if (forceSecretManager != null) { + useSecretManager &= forceSecretManager.booleanValue(); + } + final SecretManager serverSm = useSecretManager ? sm : null; + + Server server = serverUgi.doAs(new PrivilegedExceptionAction() { + @Override + public Server run() throws IOException { + Server server = new RPC.Builder(serverConf) + .setProtocol(TestSaslProtocol.class) .setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0) .setNumHandlers(5).setVerbose(true) - .setSecretManager((serverAuth != SIMPLE) ? sm : null) + .setSecretManager(serverSm) .build(); - server.start(); + server.start(); + return server; + } + }); + final Configuration clientConf = new Configuration(conf); + SecurityUtil.setAuthenticationMethod(clientAuth, clientConf); + UserGroupInformation.setConfiguration(clientConf); + final UserGroupInformation clientUgi = - UserGroupInformation.createRemoteUser( - UserGroupInformation.getCurrentUser().getUserName()+"-CLIENT"); + UserGroupInformation.createRemoteUser(currentUser + "-CLIENT"); + clientUgi.setAuthenticationMethod(clientAuth); + final InetSocketAddress addr = NetUtils.getConnectAddress(server); if (useToken) { TestTokenIdentifier tokenId = new TestTokenIdentifier( @@ -568,10 +716,6 @@ public class TestSaslRPC { clientUgi.addToken(token); } - final Configuration clientConf = new Configuration(conf); - SecurityUtil.setAuthenticationMethod(clientAuth, clientConf); - UserGroupInformation.setConfiguration(clientConf); - try { return clientUgi.doAs(new PrivilegedExceptionAction() { @Override @@ -581,6 +725,12 @@ public class TestSaslRPC { proxy = (TestSaslProtocol) RPC.getProxy(TestSaslProtocol.class, TestSaslProtocol.versionID, addr, clientConf); + proxy.ping(); + // verify sasl completed + if (serverAuth != SIMPLE) { + assertEquals(SaslRpcServer.SASL_PROPS.get(Sasl.QOP), "auth"); + } + // make sure the other side thinks we are who we said we are!!! 
assertEquals(clientUgi.getUserName(), proxy.getAuthUser()); return proxy.getAuthMethod().toString(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index b2766bd3b0c..2e407129b86 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -157,6 +157,8 @@ Trunk (Unreleased) HDFS-4152. Add a new class BlocksMapUpdateInfo for the parameter in INode.collectSubtreeBlocksAndClear(..). (Jing Zhao via szetszwo) + HDFS-4153. Add START_MSG/SHUTDOWN_MSG for JournalNode. (liang xie via atm) + OPTIMIZATIONS BUG FIXES @@ -223,9 +225,6 @@ Trunk (Unreleased) HDFS-3614. Revert unused MiniDFSCluster constructor from HDFS-3049. (acmurthy via eli) - HDFS-3625. Fix TestBackupNode by properly initializing edit log during - startup. (Junping Du via todd) - HDFS-3792. Fix two findbugs introduced by HDFS-3695 (todd) HDFS-3827. TestHASafeMode#assertSafemode method should be made static. @@ -249,6 +248,9 @@ Trunk (Unreleased) HDFS-4106. BPServiceActor#lastHeartbeat, lastBlockReport and lastDeletedReport should be volatile. (Jing Zhao via suresh) + HDFS-4165. Faulty sanity check in FsDirectory.unprotectedSetQuota. + (Binglin Chang via suresh) + BREAKDOWN OF HDFS-3077 SUBTASKS HDFS-3077. Quorum-based protocol for reading and writing edit logs. @@ -459,6 +461,9 @@ Release 2.0.3-alpha - Unreleased HDFS-4046. Rename ChecksumTypeProto enum NULL since it is illegal in C/C++. (Binglin Chang via suresh) + HDFS-4048. Use ERROR instead of INFO for volume failure logs. + (Stephen Chu via eli) + OPTIMIZATIONS BUG FIXES @@ -561,6 +566,17 @@ Release 2.0.3-alpha - Unreleased HDFS-3979. For hsync, datanode should wait for the local sync to complete before sending ack. (Lars Hofhansl via szetszwo) + HDFS-3625. Fix TestBackupNode by properly initializing edit log during + startup. (Junping Du via todd) + + HDFS-4138. BackupNode startup fails due to uninitialized edit log. + (Kihwal Lee via shv) + + HDFS-3810. Implement format() for BKJM (Ivan Kelly via umamahesh) + + HDFS-4162. Some malformed and unquoted HTML strings are returned from + datanode web ui. (Darek Dagit via suresh) + Release 2.0.2-alpha - 2012-09-07 INCOMPATIBLE CHANGES @@ -1942,6 +1958,9 @@ Release 0.23.5 - UNRELEASED INCOMPATIBLE CHANGES + HDFS-4080. Add a separate logger for block state change logs to enable turning + off those logs. (Kihwal Lee via suresh) + NEW FEATURES IMPROVEMENTS @@ -1950,6 +1969,8 @@ Release 0.23.5 - UNRELEASED HDFS-4075. Reduce recommissioning overhead (Kihwal Lee via daryn) + HDFS-3990. NN's health report has severe performance problems (daryn) + BUG FIXES HDFS-3829. 
TestHftpURLTimeouts fails intermittently with JDK7 (Trevor diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java index 62dbbd031f4..67f0b4b2293 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java @@ -39,6 +39,7 @@ import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.ZooDefs.Ids; import org.apache.zookeeper.AsyncCallback.StringCallback; +import org.apache.zookeeper.ZKUtil; import java.util.Collection; import java.util.Collections; @@ -46,6 +47,7 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.io.IOException; import java.net.URI; @@ -142,13 +144,16 @@ public class BookKeeperJournalManager implements JournalManager { private final Configuration conf; private final BookKeeper bkc; private final CurrentInprogress ci; + private final String basePath; private final String ledgerPath; + private final String versionPath; private final MaxTxId maxTxId; private final int ensembleSize; private final int quorumSize; private final String digestpw; private final CountDownLatch zkConnectLatch; private final NamespaceInfo nsInfo; + private boolean initialized = false; private LedgerHandle currentLedger = null; /** @@ -160,16 +165,16 @@ public class BookKeeperJournalManager implements JournalManager { this.nsInfo = nsInfo; String zkConnect = uri.getAuthority().replace(";", ","); - String zkPath = uri.getPath(); + basePath = uri.getPath(); ensembleSize = conf.getInt(BKJM_BOOKKEEPER_ENSEMBLE_SIZE, BKJM_BOOKKEEPER_ENSEMBLE_SIZE_DEFAULT); quorumSize = conf.getInt(BKJM_BOOKKEEPER_QUORUM_SIZE, BKJM_BOOKKEEPER_QUORUM_SIZE_DEFAULT); - ledgerPath = zkPath + "/ledgers"; - String maxTxIdPath = zkPath + "/maxtxid"; - String currentInprogressNodePath = zkPath + "/CurrentInprogress"; - String versionPath = zkPath + "/version"; + ledgerPath = basePath + "/ledgers"; + String maxTxIdPath = basePath + "/maxtxid"; + String currentInprogressNodePath = basePath + "/CurrentInprogress"; + versionPath = basePath + "/version"; digestpw = conf.get(BKJM_BOOKKEEPER_DIGEST_PW, BKJM_BOOKKEEPER_DIGEST_PW_DEFAULT); @@ -180,47 +185,7 @@ public class BookKeeperJournalManager implements JournalManager { if (!zkConnectLatch.await(6000, TimeUnit.MILLISECONDS)) { throw new IOException("Error connecting to zookeeper"); } - if (zkc.exists(zkPath, false) == null) { - zkc.create(zkPath, new byte[] {'0'}, - Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); - } - Stat versionStat = zkc.exists(versionPath, false); - if (versionStat != null) { - byte[] d = zkc.getData(versionPath, false, versionStat); - VersionProto.Builder builder = VersionProto.newBuilder(); - TextFormat.merge(new String(d, UTF_8), builder); - if (!builder.isInitialized()) { - throw new IOException("Invalid/Incomplete data in znode"); - } - VersionProto vp = builder.build(); - - // There's only one version at the moment - assert vp.getLayoutVersion() == BKJM_LAYOUT_VERSION; - - NamespaceInfo readns = 
PBHelper.convert(vp.getNamespaceInfo()); - - if (nsInfo.getNamespaceID() != readns.getNamespaceID() || - !nsInfo.clusterID.equals(readns.getClusterID()) || - !nsInfo.getBlockPoolID().equals(readns.getBlockPoolID())) { - String err = String.format("Environment mismatch. Running process %s" - +", stored in ZK %s", nsInfo, readns); - LOG.error(err); - throw new IOException(err); - } - } else if (nsInfo.getNamespaceID() > 0) { - VersionProto.Builder builder = VersionProto.newBuilder(); - builder.setNamespaceInfo(PBHelper.convert(nsInfo)) - .setLayoutVersion(BKJM_LAYOUT_VERSION); - byte[] data = TextFormat.printToString(builder.build()).getBytes(UTF_8); - zkc.create(versionPath, data, - Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); - } - - if (zkc.exists(ledgerPath, false) == null) { - zkc.create(ledgerPath, new byte[] {'0'}, - Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); - } prepareBookKeeperEnv(); bkc = new BookKeeper(new ClientConfiguration(), zkc); } catch (KeeperException e) { @@ -244,6 +209,7 @@ public class BookKeeperJournalManager implements JournalManager { BKJM_ZK_LEDGERS_AVAILABLE_PATH_DEFAULT); final CountDownLatch zkPathLatch = new CountDownLatch(1); + final AtomicBoolean success = new AtomicBoolean(false); StringCallback callback = new StringCallback() { @Override public void processResult(int rc, String path, Object ctx, String name) { @@ -251,22 +217,23 @@ public class BookKeeperJournalManager implements JournalManager { || KeeperException.Code.NODEEXISTS.intValue() == rc) { LOG.info("Successfully created bookie available path : " + zkAvailablePath); - zkPathLatch.countDown(); + success.set(true); } else { KeeperException.Code code = KeeperException.Code.get(rc); - LOG - .error("Error : " + LOG.error("Error : " + KeeperException.create(code, path).getMessage() + ", failed to create bookie available path : " + zkAvailablePath); } + zkPathLatch.countDown(); } }; ZkUtils.createFullPathOptimistic(zkc, zkAvailablePath, new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, callback, null); try { - if (!zkPathLatch.await(zkc.getSessionTimeout(), TimeUnit.MILLISECONDS)) { + if (!zkPathLatch.await(zkc.getSessionTimeout(), TimeUnit.MILLISECONDS) + || !success.get()) { throw new IOException("Couldn't create bookie available path :" + zkAvailablePath + ", timed out " + zkc.getSessionTimeout() + " millis"); @@ -281,19 +248,101 @@ public class BookKeeperJournalManager implements JournalManager { @Override public void format(NamespaceInfo ns) throws IOException { - // Currently, BKJM automatically formats itself when first accessed. - // TODO: change over to explicit formatting so that the admin can - // clear out the BK storage when reformatting a cluster. - LOG.info("Not formatting " + this + " - BKJM does not currently " + - "support reformatting. If it has not been used before, it will" + - "be formatted automatically upon first use."); + try { + // delete old info + Stat baseStat = null; + Stat ledgerStat = null; + if ((baseStat = zkc.exists(basePath, false)) != null) { + if ((ledgerStat = zkc.exists(ledgerPath, false)) != null) { + for (EditLogLedgerMetadata l : getLedgerList(true)) { + try { + bkc.deleteLedger(l.getLedgerId()); + } catch (BKException.BKNoSuchLedgerExistsException bke) { + LOG.warn("Ledger " + l.getLedgerId() + " does not exist;" + + " Cannot delete."); + } + } + } + ZKUtil.deleteRecursive(zkc, basePath); + } + + // should be clean now. 
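
Since BKJM no longer initializes its znodes lazily, callers must format before writing, which is why the tests later in this patch gain bkjm.format(nsi) calls. A minimal usage sketch under that assumption (the class and method names below are illustrative; the calls mirror the populate step of the concurrent-format test):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.contrib.bkjournal.BookKeeperJournalManager;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;

public class BkjmFormatSketch {
  static void writeOneSegment(Configuration conf, URI journalUri,
      NamespaceInfo nsInfo) throws Exception {
    BookKeeperJournalManager bkjm =
        new BookKeeperJournalManager(conf, journalUri, nsInfo);
    try {
      // format() wipes any previous ledgers/znodes and writes the version
      // znode; without it, startLogSegment() now fails checkEnv() with
      // "Have you forgotten to format?".
      bkjm.format(nsInfo);
      bkjm.startLogSegment(1);
      bkjm.finalizeLogSegment(1, 2);
    } finally {
      bkjm.close();
    }
  }
}
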
+ zkc.create(basePath, new byte[] {'0'}, + Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); + + VersionProto.Builder builder = VersionProto.newBuilder(); + builder.setNamespaceInfo(PBHelper.convert(ns)) + .setLayoutVersion(BKJM_LAYOUT_VERSION); + + byte[] data = TextFormat.printToString(builder.build()).getBytes(UTF_8); + zkc.create(versionPath, data, + Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); + + zkc.create(ledgerPath, new byte[] {'0'}, + Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); + } catch (KeeperException ke) { + LOG.error("Error accessing zookeeper to format", ke); + throw new IOException("Error accessing zookeeper to format", ke); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + throw new IOException("Interrupted during format", ie); + } catch (BKException bke) { + throw new IOException("Error cleaning up ledgers during format", bke); + } } @Override public boolean hasSomeData() throws IOException { - // Don't confirm format on BKJM, since format() is currently a - // no-op anyway - return false; + try { + return zkc.exists(basePath, false) != null; + } catch (KeeperException ke) { + throw new IOException("Couldn't contact zookeeper", ke); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + throw new IOException("Interrupted while checking for data", ie); + } + } + + synchronized private void checkEnv() throws IOException { + if (!initialized) { + try { + Stat versionStat = zkc.exists(versionPath, false); + if (versionStat == null) { + throw new IOException("Environment not initialized. " + +"Have you forgotten to format?"); + } + byte[] d = zkc.getData(versionPath, false, versionStat); + + VersionProto.Builder builder = VersionProto.newBuilder(); + TextFormat.merge(new String(d, UTF_8), builder); + if (!builder.isInitialized()) { + throw new IOException("Invalid/Incomplete data in znode"); + } + VersionProto vp = builder.build(); + + // There's only one version at the moment + assert vp.getLayoutVersion() == BKJM_LAYOUT_VERSION; + + NamespaceInfo readns = PBHelper.convert(vp.getNamespaceInfo()); + + if (nsInfo.getNamespaceID() != readns.getNamespaceID() || + !nsInfo.clusterID.equals(readns.getClusterID()) || + !nsInfo.getBlockPoolID().equals(readns.getBlockPoolID())) { + String err = String.format("Environment mismatch. Running process %s" + +", stored in ZK %s", nsInfo, readns); + LOG.error(err); + throw new IOException(err); + } + + ci.init(); + initialized = true; + } catch (KeeperException ke) { + throw new IOException("Cannot access ZooKeeper", ke); + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + throw new IOException("Interrupted while checking environment", ie); + } + } } /** @@ -307,6 +356,8 @@ public class BookKeeperJournalManager implements JournalManager { */ @Override public EditLogOutputStream startLogSegment(long txId) throws IOException { + checkEnv(); + if (txId <= maxTxId.get()) { throw new IOException("We've already seen " + txId + ". 
A new stream cannot be created with it"); @@ -384,6 +435,8 @@ public class BookKeeperJournalManager implements JournalManager { @Override public void finalizeLogSegment(long firstTxId, long lastTxId) throws IOException { + checkEnv(); + String inprogressPath = inprogressZNode(firstTxId); try { Stat inprogressStat = zkc.exists(inprogressPath, false); @@ -537,6 +590,8 @@ public class BookKeeperJournalManager implements JournalManager { @Override public void recoverUnfinalizedSegments() throws IOException { + checkEnv(); + synchronized (this) { try { List children = zkc.getChildren(ledgerPath, false); @@ -589,6 +644,8 @@ public class BookKeeperJournalManager implements JournalManager { @Override public void purgeLogsOlderThan(long minTxIdToKeep) throws IOException { + checkEnv(); + for (EditLogLedgerMetadata l : getLedgerList(false)) { if (l.getLastTxId() < minTxIdToKeep) { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java index 8477f7c4e56..32d65cbf8d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/CurrentInprogress.java @@ -56,6 +56,9 @@ class CurrentInprogress { CurrentInprogress(ZooKeeper zkc, String lockpath) throws IOException { this.currentInprogressNode = lockpath; this.zkc = zkc; + } + + void init() throws IOException { try { Stat isCurrentInprogressNodeExists = zkc.exists(currentInprogressNode, false); @@ -96,15 +99,14 @@ class CurrentInprogress { this.versionNumberForPermission); } catch (KeeperException e) { throw new IOException("Exception when setting the data " - + "[layout version number,hostname,inprogressNode path]= [" + content - + "] to CurrentInprogress. ", e); + + "[" + content + "] to CurrentInprogress. 
", e); } catch (InterruptedException e) { throw new IOException("Interrupted while setting the data " - + "[layout version number,hostname,inprogressNode path]= [" + content - + "] to CurrentInprogress", e); + + "[" + content + "] to CurrentInprogress", e); + } + if (LOG.isDebugEnabled()) { + LOG.debug("Updated data[" + content + "] to CurrentInprogress"); } - LOG.info("Updated data[layout version number,hostname,inprogressNode path]" - + "= [" + content + "] to CurrentInprogress"); } /** @@ -136,7 +138,7 @@ class CurrentInprogress { } return builder.build().getPath(); } else { - LOG.info("No data available in CurrentInprogress"); + LOG.debug("No data available in CurrentInprogress"); } return null; } @@ -152,7 +154,7 @@ class CurrentInprogress { throw new IOException( "Interrupted when setting the data to CurrentInprogress node", e); } - LOG.info("Cleared the data from CurrentInprogress"); + LOG.debug("Cleared the data from CurrentInprogress"); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperConfiguration.java index 4ea5074092c..f3f6ce5674f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperConfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperConfiguration.java @@ -149,6 +149,7 @@ public class TestBookKeeperConfiguration { bkjm = new BookKeeperJournalManager(conf, URI.create("bookkeeper://" + HOSTPORT + "/hdfsjournal-WithBKPath"), nsi); + bkjm.format(nsi); Assert.assertNotNull("Bookie available path : " + bkAvailablePath + " doesn't exists", zkc.exists(bkAvailablePath, false)); } @@ -166,6 +167,7 @@ public class TestBookKeeperConfiguration { bkjm = new BookKeeperJournalManager(conf, URI.create("bookkeeper://" + HOSTPORT + "/hdfsjournal-DefaultBKPath"), nsi); + bkjm.format(nsi); Assert.assertNotNull("Bookie available path : " + BK_ROOT_PATH + " doesn't exists", zkc.exists(BK_ROOT_PATH, false)); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java index 954f2a54098..9da904007d3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperJournalManager.java @@ -29,8 +29,16 @@ import org.mockito.Mockito; import java.io.IOException; import java.net.URI; import java.util.List; +import java.util.ArrayList; import java.util.Random; +import java.util.concurrent.Executors; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Callable; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream; @@ -90,6 +98,7 @@ public class TestBookKeeperJournalManager { NamespaceInfo nsi = newNSInfo(); BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, 
BKJMUtil.createJournalURI("/hdfsjournal-simplewrite"), nsi); + bkjm.format(nsi); EditLogOutputStream out = bkjm.startLogSegment(1); for (long i = 1 ; i <= 100; i++) { @@ -112,6 +121,8 @@ public class TestBookKeeperJournalManager { BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, BKJMUtil.createJournalURI("/hdfsjournal-txncount"), nsi); + bkjm.format(nsi); + EditLogOutputStream out = bkjm.startLogSegment(1); for (long i = 1 ; i <= 100; i++) { FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance(); @@ -130,6 +141,7 @@ public class TestBookKeeperJournalManager { NamespaceInfo nsi = newNSInfo(); BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, BKJMUtil.createJournalURI("/hdfsjournal-gaps"), nsi); + bkjm.format(nsi); long txid = 1; for (long i = 0; i < 3; i++) { @@ -167,6 +179,7 @@ public class TestBookKeeperJournalManager { NamespaceInfo nsi = newNSInfo(); BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, BKJMUtil.createJournalURI("/hdfsjournal-inprogressAtEnd"), nsi); + bkjm.format(nsi); long txid = 1; for (long i = 0; i < 3; i++) { @@ -208,6 +221,7 @@ public class TestBookKeeperJournalManager { NamespaceInfo nsi = newNSInfo(); BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, BKJMUtil.createJournalURI("/hdfsjournal-restartFrom1"), nsi); + bkjm.format(nsi); long txid = 1; long start = txid; @@ -266,6 +280,7 @@ public class TestBookKeeperJournalManager { BookKeeperJournalManager bkjm1 = new BookKeeperJournalManager(conf, BKJMUtil.createJournalURI("/hdfsjournal-dualWriter"), nsi); + bkjm1.format(nsi); BookKeeperJournalManager bkjm2 = new BookKeeperJournalManager(conf, BKJMUtil.createJournalURI("/hdfsjournal-dualWriter"), nsi); @@ -288,6 +303,7 @@ public class TestBookKeeperJournalManager { BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, BKJMUtil.createJournalURI("/hdfsjournal-simpleread"), nsi); + bkjm.format(nsi); final long numTransactions = 10000; EditLogOutputStream out = bkjm.startLogSegment(1); @@ -315,6 +331,7 @@ public class TestBookKeeperJournalManager { BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, BKJMUtil.createJournalURI("/hdfsjournal-simplerecovery"), nsi); + bkjm.format(nsi); EditLogOutputStream out = bkjm.startLogSegment(1); for (long i = 1 ; i <= 100; i++) { @@ -365,6 +382,7 @@ public class TestBookKeeperJournalManager { BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, BKJMUtil.createJournalURI("/hdfsjournal-allbookiefailure"), nsi); + bkjm.format(nsi); EditLogOutputStream out = bkjm.startLogSegment(txid); for (long i = 1 ; i <= 3; i++) { @@ -450,6 +468,7 @@ public class TestBookKeeperJournalManager { BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, BKJMUtil.createJournalURI("/hdfsjournal-onebookiefailure"), nsi); + bkjm.format(nsi); EditLogOutputStream out = bkjm.startLogSegment(txid); for (long i = 1 ; i <= 3; i++) { @@ -500,6 +519,7 @@ public class TestBookKeeperJournalManager { NamespaceInfo nsi = newNSInfo(); BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri, nsi); + bkjm.format(nsi); EditLogOutputStream out = bkjm.startLogSegment(1); for (long i = 1; i <= 100; i++) { @@ -541,6 +561,7 @@ public class TestBookKeeperJournalManager { NamespaceInfo nsi = newNSInfo(); BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri, nsi); + bkjm.format(nsi); EditLogOutputStream out = bkjm.startLogSegment(1); for (long i = 1; i <= 100; i++) { @@ -583,6 +604,7 @@ public class TestBookKeeperJournalManager { 
NamespaceInfo nsi = newNSInfo(); BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri, nsi); + bkjm.format(nsi); EditLogOutputStream out = bkjm.startLogSegment(1); for (long i = 1; i <= 100; i++) { @@ -622,6 +644,7 @@ public class TestBookKeeperJournalManager { NamespaceInfo nsi = newNSInfo(); BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri, nsi); + bkjm.format(nsi); EditLogOutputStream out = bkjm.startLogSegment(1); for (long i = 1; i <= 100; i++) { @@ -669,6 +692,7 @@ public class TestBookKeeperJournalManager { NamespaceInfo nsi = newNSInfo(); BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri, nsi); + bkjm.format(nsi); try { // start new inprogress log segment with txid=1 @@ -697,6 +721,81 @@ public class TestBookKeeperJournalManager { } } + private enum ThreadStatus { + COMPLETED, GOODEXCEPTION, BADEXCEPTION; + }; + + /** + * Tests that concurrent calls to format will still allow one to succeed. + */ + @Test + public void testConcurrentFormat() throws Exception { + final URI uri = BKJMUtil.createJournalURI("/hdfsjournal-concurrentformat"); + final NamespaceInfo nsi = newNSInfo(); + + // populate with data first + BookKeeperJournalManager bkjm + = new BookKeeperJournalManager(conf, uri, nsi); + bkjm.format(nsi); + for (int i = 1; i < 100*2; i += 2) { + bkjm.startLogSegment(i); + bkjm.finalizeLogSegment(i, i+1); + } + bkjm.close(); + + final int numThreads = 40; + List> threads + = new ArrayList>(); + final CyclicBarrier barrier = new CyclicBarrier(numThreads); + + for (int i = 0; i < numThreads; i++) { + threads.add(new Callable() { + public ThreadStatus call() { + BookKeeperJournalManager bkjm = null; + try { + bkjm = new BookKeeperJournalManager(conf, uri, nsi); + barrier.await(); + bkjm.format(nsi); + return ThreadStatus.COMPLETED; + } catch (IOException ioe) { + LOG.info("Exception formatting ", ioe); + return ThreadStatus.GOODEXCEPTION; + } catch (InterruptedException ie) { + LOG.error("Interrupted. 
Something is broken", ie); + Thread.currentThread().interrupt(); + return ThreadStatus.BADEXCEPTION; + } catch (Exception e) { + LOG.error("Some other bad exception", e); + return ThreadStatus.BADEXCEPTION; + } finally { + if (bkjm != null) { + try { + bkjm.close(); + } catch (IOException ioe) { + LOG.error("Error closing journal manager", ioe); + } + } + } + } + }); + } + ExecutorService service = Executors.newFixedThreadPool(numThreads); + List> statuses = service.invokeAll(threads, 60, + TimeUnit.SECONDS); + int numCompleted = 0; + for (Future s : statuses) { + assertTrue(s.isDone()); + assertTrue("Thread threw invalid exception", + s.get() == ThreadStatus.COMPLETED + || s.get() == ThreadStatus.GOODEXCEPTION); + if (s.get() == ThreadStatus.COMPLETED) { + numCompleted++; + } + } + LOG.info("Completed " + numCompleted + " formats"); + assertTrue("No thread managed to complete formatting", numCompleted > 0); + } + private String startAndFinalizeLogSegment(BookKeeperJournalManager bkjm, int startTxid, int endTxid) throws IOException, KeeperException, InterruptedException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestCurrentInprogress.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestCurrentInprogress.java index 00497b7798f..169a8a8f691 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestCurrentInprogress.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestCurrentInprogress.java @@ -118,6 +118,7 @@ public class TestCurrentInprogress { public void testReadShouldReturnTheZnodePathAfterUpdate() throws Exception { String data = "inprogressNode"; CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH); + ci.init(); ci.update(data); String inprogressNodePath = ci.read(); assertEquals("Not returning inprogressZnode", "inprogressNode", @@ -131,6 +132,7 @@ public class TestCurrentInprogress { @Test public void testReadShouldReturnNullAfterClear() throws Exception { CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH); + ci.init(); ci.update("myInprogressZnode"); ci.read(); ci.clear(); @@ -146,6 +148,7 @@ public class TestCurrentInprogress { public void testUpdateShouldFailWithIOEIfVersionNumberChangedAfterRead() throws Exception { CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH); + ci.init(); ci.update("myInprogressZnode"); assertEquals("Not returning myInprogressZnode", "myInprogressZnode", ci .read()); @@ -154,4 +157,4 @@ public class TestCurrentInprogress { ci.update("myInprogressZnode"); } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java index ad1567fd39b..2a0578ca93f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java @@ -38,7 +38,8 @@ public class DatanodeID implements Comparable { public static final DatanodeID[] EMPTY_ARRAY = {}; private String ipAddr; // IP address - private String hostName; // hostname + private String hostName; // hostname claimed by datanode + private String peerHostName; // hostname from the actual connection 
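
The peerHostName field feeds the host include/exclude matching reworked later in this patch in DatanodeManager: an entry can now match the hostname observed on the registration connection as well as the name the datanode reported. A self-contained sketch of that matching rule, with hypothetical helper and variable names:

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

public class HostFileMatchSketch {
  // A host file entry matches if it equals any of these candidate names:
  // the IP, the hostname the datanode registered with, or the hostname seen
  // on the connection (peer hostname), each with and without the xfer port.
  static boolean matches(Set<String> hostsFile, String ip, String regHostName,
      String peerHostName, int xferPort) {
    List<String> names = new ArrayList<String>();
    names.add(ip);
    names.add(ip + ":" + xferPort);
    names.add(regHostName);
    names.add(regHostName + ":" + xferPort);
    if (peerHostName != null) {
      names.add(peerHostName);
      names.add(peerHostName + ":" + xferPort);
    }
    for (String name : names) {
      if (hostsFile.contains(name)) {
        return true;
      }
    }
    return false;
  }
}
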
private String storageID; // unique per cluster storageID private int xferPort; // data streaming port private int infoPort; // info server port @@ -51,6 +52,7 @@ public class DatanodeID implements Comparable { from.getXferPort(), from.getInfoPort(), from.getIpcPort()); + this.peerHostName = from.getPeerHostName(); } /** @@ -76,6 +78,10 @@ public class DatanodeID implements Comparable { this.ipAddr = ipAddr; } + public void setPeerHostName(String peerHostName) { + this.peerHostName = peerHostName; + } + public void setStorageID(String storageID) { this.storageID = storageID; } @@ -94,6 +100,13 @@ public class DatanodeID implements Comparable { return hostName; } + /** + * @return hostname from the actual connection + */ + public String getPeerHostName() { + return peerHostName; + } + /** * @return IP:xferPort string */ @@ -202,6 +215,7 @@ public class DatanodeID implements Comparable { public void updateRegInfo(DatanodeID nodeReg) { ipAddr = nodeReg.getIpAddr(); hostName = nodeReg.getHostName(); + peerHostName = nodeReg.getPeerHostName(); xferPort = nodeReg.getXferPort(); infoPort = nodeReg.getInfoPort(); ipcPort = nodeReg.getIpcPort(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java index e70f4690efc..91629d930c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java @@ -35,6 +35,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.source.JvmMetrics; import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -230,6 +231,7 @@ public class JournalNode implements Tool, Configurable { } public static void main(String[] args) throws Exception { + StringUtils.startupShutdownMessage(JournalNode.class, args, LOG); System.exit(ToolRunner.run(new JournalNode(), args)); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java index 5c0db1bb9b4..36b3598b2c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java @@ -233,7 +233,7 @@ public class BlockInfoUnderConstruction extends BlockInfo { setBlockUCState(BlockUCState.UNDER_RECOVERY); blockRecoveryId = recoveryId; if (replicas.size() == 0) { - NameNode.stateChangeLog.warn("BLOCK*" + NameNode.blockStateChangeLog.warn("BLOCK*" + " BlockInfoUnderConstruction.initLeaseRecovery:" + " No blocks found, lease removed."); } @@ -245,7 +245,7 @@ public class BlockInfoUnderConstruction extends BlockInfo { primaryNodeIndex = j; DatanodeDescriptor primary = replicas.get(j).getExpectedLocation(); primary.addBlockToBeRecovered(this); - NameNode.stateChangeLog.info("BLOCK* " + this + NameNode.blockStateChangeLog.info("BLOCK* " + this + " recovery started, primary=" + primary); return; } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index b542c176ebb..6b30e55dac3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -83,6 +83,7 @@ import com.google.common.collect.Sets; public class BlockManager { static final Log LOG = LogFactory.getLog(BlockManager.class); + static final Log blockLog = NameNode.blockStateChangeLog; /** Default load factor of map */ public static final float DEFAULT_MAP_LOAD_FACTOR = 0.75f; @@ -872,7 +873,7 @@ public class BlockManager { final long size) throws UnregisteredNodeException { final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode); if (node == null) { - NameNode.stateChangeLog.warn("BLOCK* getBlocks: " + blockLog.warn("BLOCK* getBlocks: " + "Asking for blocks from an unrecorded node " + datanode); throw new HadoopIllegalArgumentException( "Datanode " + datanode + " not found."); @@ -950,7 +951,7 @@ public class BlockManager { datanodes.append(node).append(" "); } if (datanodes.length() != 0) { - NameNode.stateChangeLog.info("BLOCK* addToInvalidates: " + b + " " + blockLog.info("BLOCK* addToInvalidates: " + b + " " + datanodes); } } @@ -971,7 +972,7 @@ public class BlockManager { // ignore the request for now. This could happen when BlockScanner // thread of Datanode reports bad block before Block reports are sent // by the Datanode on startup - NameNode.stateChangeLog.info("BLOCK* findAndMarkBlockAsCorrupt: " + blockLog.info("BLOCK* findAndMarkBlockAsCorrupt: " + blk + " not found"); return; } @@ -988,7 +989,7 @@ public class BlockManager { BlockCollection bc = b.corrupted.getBlockCollection(); if (bc == null) { - NameNode.stateChangeLog.info("BLOCK markBlockAsCorrupt: " + b + blockLog.info("BLOCK markBlockAsCorrupt: " + b + " cannot be marked as corrupt as it does not belong to any file"); addToInvalidates(b.corrupted, node); return; @@ -1013,7 +1014,7 @@ public class BlockManager { */ private void invalidateBlock(BlockToMarkCorrupt b, DatanodeInfo dn ) throws IOException { - NameNode.stateChangeLog.info("BLOCK* invalidateBlock: " + b + " on " + dn); + blockLog.info("BLOCK* invalidateBlock: " + b + " on " + dn); DatanodeDescriptor node = getDatanodeManager().getDatanode(dn); if (node == null) { throw new IOException("Cannot invalidate " + b @@ -1023,7 +1024,7 @@ public class BlockManager { // Check how many copies we have of the block NumberReplicas nr = countNodes(b.stored); if (nr.replicasOnStaleNodes() > 0) { - NameNode.stateChangeLog.info("BLOCK* invalidateBlocks: postponing " + + blockLog.info("BLOCK* invalidateBlocks: postponing " + "invalidation of " + b + " on " + dn + " because " + nr.replicasOnStaleNodes() + " replica(s) are located on nodes " + "with potentially out-of-date block reports"); @@ -1033,12 +1034,12 @@ public class BlockManager { // If we have at least one copy on a live node, then we can delete it. 
addToInvalidates(b.corrupted, dn); removeStoredBlock(b.stored, node); - if(NameNode.stateChangeLog.isDebugEnabled()) { - NameNode.stateChangeLog.debug("BLOCK* invalidateBlocks: " + if(blockLog.isDebugEnabled()) { + blockLog.debug("BLOCK* invalidateBlocks: " + b + " on " + dn + " listed for deletion."); } } else { - NameNode.stateChangeLog.info("BLOCK* invalidateBlocks: " + b + blockLog.info("BLOCK* invalidateBlocks: " + b + " on " + dn + " is the only copy and was not deleted"); } } @@ -1160,7 +1161,7 @@ public class BlockManager { (blockHasEnoughRacks(block)) ) { neededReplications.remove(block, priority); // remove from neededReplications neededReplications.decrementReplicationIndex(priority); - NameNode.stateChangeLog.info("BLOCK* Removing " + block + blockLog.info("BLOCK* Removing " + block + " from neededReplications as it has enough replicas"); continue; } @@ -1235,7 +1236,7 @@ public class BlockManager { neededReplications.remove(block, priority); // remove from neededReplications neededReplications.decrementReplicationIndex(priority); rw.targets = null; - NameNode.stateChangeLog.info("BLOCK* Removing " + block + blockLog.info("BLOCK* Removing " + block + " from neededReplications as it has enough replicas"); continue; } @@ -1261,8 +1262,8 @@ public class BlockManager { // The reason we use 'pending' is so we can retry // replications that fail after an appropriate amount of time. pendingReplications.increment(block, targets.length); - if(NameNode.stateChangeLog.isDebugEnabled()) { - NameNode.stateChangeLog.debug( + if(blockLog.isDebugEnabled()) { + blockLog.debug( "BLOCK* block " + block + " is moved from neededReplications to pendingReplications"); } @@ -1278,7 +1279,7 @@ public class BlockManager { namesystem.writeUnlock(); } - if (NameNode.stateChangeLog.isInfoEnabled()) { + if (blockLog.isInfoEnabled()) { // log which blocks have been scheduled for replication for(ReplicationWork rw : work){ DatanodeDescriptor[] targets = rw.targets; @@ -1288,13 +1289,13 @@ public class BlockManager { targetList.append(' '); targetList.append(targets[k]); } - NameNode.stateChangeLog.info("BLOCK* ask " + rw.srcNode + blockLog.info("BLOCK* ask " + rw.srcNode + " to replicate " + rw.block + " to " + targetList); } } } - if(NameNode.stateChangeLog.isDebugEnabled()) { - NameNode.stateChangeLog.debug( + if(blockLog.isDebugEnabled()) { + blockLog.debug( "BLOCK* neededReplications = " + neededReplications.size() + " pendingReplications = " + pendingReplications.size()); } @@ -1504,7 +1505,7 @@ public class BlockManager { // To minimize startup time, we discard any second (or later) block reports // that we receive while still in startup phase. 
if (namesystem.isInStartupSafeMode() && !node.isFirstBlockReport()) { - NameNode.stateChangeLog.info("BLOCK* processReport: " + blockLog.info("BLOCK* processReport: " + "discarded non-initial block report from " + nodeID + " because namenode still in startup phase"); return; @@ -1536,7 +1537,7 @@ public class BlockManager { // Log the block report processing stats from Namenode perspective NameNode.getNameNodeMetrics().addBlockReport((int) (endTime - startTime)); - NameNode.stateChangeLog.info("BLOCK* processReport: from " + blockLog.info("BLOCK* processReport: from " + nodeID + ", blocks: " + newReport.getNumberOfBlocks() + ", processing time: " + (endTime - startTime) + " msecs"); } @@ -1596,7 +1597,7 @@ public class BlockManager { addStoredBlock(b, node, null, true); } for (Block b : toInvalidate) { - NameNode.stateChangeLog.info("BLOCK* processReport: " + blockLog.info("BLOCK* processReport: " + b + " on " + node + " size " + b.getNumBytes() + " does not belong to any file"); addToInvalidates(b, node); @@ -2034,7 +2035,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block } if (storedBlock == null || storedBlock.getBlockCollection() == null) { // If this block does not belong to anyfile, then we are done. - NameNode.stateChangeLog.info("BLOCK* addStoredBlock: " + block + " on " + blockLog.info("BLOCK* addStoredBlock: " + block + " on " + node + " size " + block.getNumBytes() + " but it does not belong to any file"); // we could add this block to invalidate set of this datanode. @@ -2056,7 +2057,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block } } else { curReplicaDelta = 0; - NameNode.stateChangeLog.warn("BLOCK* addStoredBlock: " + blockLog.warn("BLOCK* addStoredBlock: " + "Redundant addStoredBlock request received for " + storedBlock + " on " + node + " size " + storedBlock.getNumBytes()); } @@ -2115,7 +2116,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block } private void logAddStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) { - if (!NameNode.stateChangeLog.isInfoEnabled()) { + if (!blockLog.isInfoEnabled()) { return; } @@ -2126,7 +2127,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block storedBlock.appendStringTo(sb); sb.append(" size " ) .append(storedBlock.getNumBytes()); - NameNode.stateChangeLog.info(sb); + blockLog.info(sb); } /** * Invalidate corrupt replicas. @@ -2153,7 +2154,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block try { invalidateBlock(new BlockToMarkCorrupt(blk, null), node); } catch (IOException e) { - NameNode.stateChangeLog.info("invalidateCorruptReplicas " + blockLog.info("invalidateCorruptReplicas " + "error in deleting bad block " + blk + " on " + node, e); gotException = true; } @@ -2391,7 +2392,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block // upon giving instructions to the namenode. 
// addToInvalidates(b, cur); - NameNode.stateChangeLog.info("BLOCK* chooseExcessReplicates: " + blockLog.info("BLOCK* chooseExcessReplicates: " +"("+cur+", "+b+") is added to invalidated blocks set"); } } @@ -2405,8 +2406,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block } if (excessBlocks.add(block)) { excessBlocksCount++; - if(NameNode.stateChangeLog.isDebugEnabled()) { - NameNode.stateChangeLog.debug("BLOCK* addToExcessReplicate:" + if(blockLog.isDebugEnabled()) { + blockLog.debug("BLOCK* addToExcessReplicate:" + " (" + dn + ", " + block + ") is added to excessReplicateMap"); } @@ -2418,15 +2419,15 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block * removed block is still valid. */ public void removeStoredBlock(Block block, DatanodeDescriptor node) { - if(NameNode.stateChangeLog.isDebugEnabled()) { - NameNode.stateChangeLog.debug("BLOCK* removeStoredBlock: " + if(blockLog.isDebugEnabled()) { + blockLog.debug("BLOCK* removeStoredBlock: " + block + " from " + node); } assert (namesystem.hasWriteLock()); { if (!blocksMap.removeNode(block, node)) { - if(NameNode.stateChangeLog.isDebugEnabled()) { - NameNode.stateChangeLog.debug("BLOCK* removeStoredBlock: " + if(blockLog.isDebugEnabled()) { + blockLog.debug("BLOCK* removeStoredBlock: " + block + " has already been removed from node " + node); } return; @@ -2453,8 +2454,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block if (excessBlocks != null) { if (excessBlocks.remove(block)) { excessBlocksCount--; - if(NameNode.stateChangeLog.isDebugEnabled()) { - NameNode.stateChangeLog.debug("BLOCK* removeStoredBlock: " + if(blockLog.isDebugEnabled()) { + blockLog.debug("BLOCK* removeStoredBlock: " + block + " is removed from excessBlocks"); } if (excessBlocks.size() == 0) { @@ -2497,7 +2498,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block if (delHint != null && delHint.length() != 0) { delHintNode = datanodeManager.getDatanode(delHint); if (delHintNode == null) { - NameNode.stateChangeLog.warn("BLOCK* blockReceived: " + block + blockLog.warn("BLOCK* blockReceived: " + block + " is expected to be removed from an unrecorded node " + delHint); } } @@ -2532,7 +2533,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block addStoredBlock(b, node, delHintNode, true); } for (Block b : toInvalidate) { - NameNode.stateChangeLog.info("BLOCK* addBlock: block " + blockLog.info("BLOCK* addBlock: block " + b + " on " + node + " size " + b.getNumBytes() + " does not belong to any file"); addToInvalidates(b, node); @@ -2558,7 +2559,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block try { final DatanodeDescriptor node = datanodeManager.getDatanode(nodeID); if (node == null || !node.isAlive) { - NameNode.stateChangeLog + blockLog .warn("BLOCK* processIncrementalBlockReport" + " is received from dead or unregistered node " + nodeID); @@ -2585,19 +2586,19 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block String msg = "Unknown block status code reported by " + nodeID + ": " + rdbi; - NameNode.stateChangeLog.warn(msg); + blockLog.warn(msg); assert false : msg; // if assertions are enabled, throw. 
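
NameNode.blockStateChangeLog itself is declared outside these hunks; a plausible shape for it, consistent with how blockLog is used above, is a name-keyed commons-logging logger whose level can be tuned independently of the general state change log. The declaration below is an assumption for illustration, not the actual NameNode code:

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class BlockStateLoggerSketch {
  // Assumed shape of the dedicated logger: keyed by a plain name rather
  // than a class, so its level can be configured on its own.
  public static final Log blockStateChangeLog =
      LogFactory.getLog("BlockStateChange");

  // Call sites guard message construction the same way the hunks above do.
  static void logRemoval(Object block, Object node) {
    if (blockStateChangeLog.isDebugEnabled()) {
      blockStateChangeLog.debug("BLOCK* removeStoredBlock: " + block
          + " from " + node);
    }
  }
}
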
break; } - if (NameNode.stateChangeLog.isDebugEnabled()) { - NameNode.stateChangeLog.debug("BLOCK* block " + if (blockLog.isDebugEnabled()) { + blockLog.debug("BLOCK* block " + (rdbi.getStatus()) + ": " + rdbi.getBlock() + " is received from " + nodeID); } } } finally { namesystem.writeUnlock(); - NameNode.stateChangeLog + blockLog .debug("*BLOCK* NameNode.processIncrementalBlockReport: " + "from " + nodeID + " receiving: " + receiving + ", " @@ -2890,8 +2891,8 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block } finally { namesystem.writeUnlock(); } - if (NameNode.stateChangeLog.isInfoEnabled()) { - NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName() + if (blockLog.isInfoEnabled()) { + blockLog.info("BLOCK* " + getClass().getSimpleName() + ": ask " + dn + " to delete " + toInvalidate); } return toInvalidate.size(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java index 440e3d40569..4613199ee6e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java @@ -63,13 +63,13 @@ public class CorruptReplicasMap{ if (!nodes.contains(dn)) { nodes.add(dn); - NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+ + NameNode.blockStateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+ blk.getBlockName() + " added as corrupt on " + dn + " by " + Server.getRemoteIp() + reasonText); } else { - NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+ + NameNode.blockStateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+ "duplicate requested for " + blk.getBlockName() + " to add as corrupt " + "on " + dn + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 23013d7d911..804fdf21105 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -540,28 +540,16 @@ public class DatanodeManager { private static boolean checkInList(final DatanodeID node, final Set hostsList, final boolean isExcludeList) { - final InetAddress iaddr; - - try { - iaddr = InetAddress.getByName(node.getIpAddr()); - } catch (UnknownHostException e) { - LOG.warn("Unknown IP: " + node.getIpAddr(), e); - return isExcludeList; - } - // if include list is empty, host is in include list if ( (!isExcludeList) && (hostsList.isEmpty()) ){ return true; } - return // compare ipaddress(:port) - (hostsList.contains(iaddr.getHostAddress().toString())) - || (hostsList.contains(iaddr.getHostAddress().toString() + ":" - + node.getXferPort())) - // compare hostname(:port) - || (hostsList.contains(iaddr.getHostName())) - || (hostsList.contains(iaddr.getHostName() + ":" + node.getXferPort())) - || ((node instanceof DatanodeInfo) && hostsList - .contains(((DatanodeInfo) node).getHostName())); + for (String name : getNodeNamesForHostFiltering(node)) { + if (hostsList.contains(name)) { + return true; + } + } + return 
false; } /** @@ -644,16 +632,20 @@ public class DatanodeManager { */ public void registerDatanode(DatanodeRegistration nodeReg) throws DisallowedDatanodeException { - String dnAddress = Server.getRemoteAddress(); - if (dnAddress == null) { - // Mostly called inside an RPC. - // But if not, use address passed by the data-node. - dnAddress = nodeReg.getIpAddr(); + InetAddress dnAddress = Server.getRemoteIp(); + if (dnAddress != null) { + // Mostly called inside an RPC, update ip and peer hostname + String hostname = dnAddress.getHostName(); + String ip = dnAddress.getHostAddress(); + if (hostname.equals(ip)) { + LOG.warn("Unresolved datanode registration from " + ip); + throw new DisallowedDatanodeException(nodeReg); + } + // update node registration with the ip and hostname from rpc request + nodeReg.setIpAddr(ip); + nodeReg.setPeerHostName(hostname); } - // Update the IP to the address of the RPC request that is - // registering this datanode. - nodeReg.setIpAddr(dnAddress); nodeReg.setExportedKeys(blockManager.getBlockKeys()); // Checks if the node is not on the hosts list. If it is not, then @@ -1033,19 +1025,8 @@ public class DatanodeManager { if ( (isDead && listDeadNodes) || (!isDead && listLiveNodes) ) { nodes.add(dn); } - // Remove any nodes we know about from the map - try { - InetAddress inet = InetAddress.getByName(dn.getIpAddr()); - // compare hostname(:port) - mustList.remove(inet.getHostName()); - mustList.remove(inet.getHostName()+":"+dn.getXferPort()); - // compare ipaddress(:port) - mustList.remove(inet.getHostAddress().toString()); - mustList.remove(inet.getHostAddress().toString()+ ":" +dn.getXferPort()); - } catch (UnknownHostException e) { - mustList.remove(dn.getName()); - mustList.remove(dn.getIpAddr()); - LOG.warn(e); + for (String name : getNodeNamesForHostFiltering(dn)) { + mustList.remove(name); } } } @@ -1066,6 +1047,25 @@ public class DatanodeManager { return nodes; } + private static List getNodeNamesForHostFiltering(DatanodeID node) { + String ip = node.getIpAddr(); + String regHostName = node.getHostName(); + int xferPort = node.getXferPort(); + + List names = new ArrayList(); + names.add(ip); + names.add(ip + ":" + xferPort); + names.add(regHostName); + names.add(regHostName + ":" + xferPort); + + String peerHostName = node.getPeerHostName(); + if (peerHostName != null) { + names.add(peerHostName); + names.add(peerHostName + ":" + xferPort); + } + return names; + } + private void setDatanodeDead(DatanodeDescriptor node) { node.setLastUpdate(0); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java index 2a10ee2253c..841ca41755f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java @@ -86,7 +86,7 @@ class InvalidateBlocks { if (set.add(block)) { numBlocks++; if (log) { - NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName() + NameNode.blockStateChangeLog.info("BLOCK* " + getClass().getSimpleName() + ": add " + block + " to " + datanode); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java index 0759d533633..779b445cece 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/UnderReplicatedBlocks.java @@ -190,8 +190,8 @@ class UnderReplicatedBlocks implements Iterable { int priLevel = getPriority(block, curReplicas, decomissionedReplicas, expectedReplicas); if(priorityQueues.get(priLevel).add(block)) { - if(NameNode.stateChangeLog.isDebugEnabled()) { - NameNode.stateChangeLog.debug( + if(NameNode.blockStateChangeLog.isDebugEnabled()) { + NameNode.blockStateChangeLog.debug( "BLOCK* NameSystem.UnderReplicationBlock.add:" + block + " has only " + curReplicas @@ -233,8 +233,8 @@ class UnderReplicatedBlocks implements Iterable { boolean remove(Block block, int priLevel) { if(priLevel >= 0 && priLevel < LEVEL && priorityQueues.get(priLevel).remove(block)) { - if(NameNode.stateChangeLog.isDebugEnabled()) { - NameNode.stateChangeLog.debug( + if(NameNode.blockStateChangeLog.isDebugEnabled()) { + NameNode.blockStateChangeLog.debug( "BLOCK* NameSystem.UnderReplicationBlock.remove: " + "Removing block " + block + " from priority queue "+ priLevel); @@ -245,8 +245,8 @@ class UnderReplicatedBlocks implements Iterable { // not found in the queue for the given priority level. for (int i = 0; i < LEVEL; i++) { if (priorityQueues.get(i).remove(block)) { - if(NameNode.stateChangeLog.isDebugEnabled()) { - NameNode.stateChangeLog.debug( + if(NameNode.blockStateChangeLog.isDebugEnabled()) { + NameNode.blockStateChangeLog.debug( "BLOCK* NameSystem.UnderReplicationBlock.remove: " + "Removing block " + block + " from priority queue "+ i); @@ -296,8 +296,8 @@ class UnderReplicatedBlocks implements Iterable { remove(block, oldPri); } if(priorityQueues.get(curPri).add(block)) { - if(NameNode.stateChangeLog.isDebugEnabled()) { - NameNode.stateChangeLog.debug( + if(NameNode.blockStateChangeLog.isDebugEnabled()) { + NameNode.blockStateChangeLog.debug( "BLOCK* NameSystem.UnderReplicationBlock.update:" + block + " has only "+ curReplicas diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java index a2e0f501bde..3d03447a6c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java @@ -408,15 +408,15 @@ public class JspHelper { if (!parts[i].equals("")) { tempPath.append(parts[i]); out.print("" + parts[i] + "" + Path.SEPARATOR); + out.print("\">" + HtmlQuoting.quoteHtmlChars(parts[i]) + "" + Path.SEPARATOR); tempPath.append(Path.SEPARATOR); } } if(parts.length > 0) { - out.print(parts[parts.length-1]); + out.print(HtmlQuoting.quoteHtmlChars(parts[parts.length-1])); } } catch (UnsupportedEncodingException ex) { @@ -431,16 +431,16 @@ public class JspHelper { String nnAddress) throws IOException { out.print("
"); out.print("Goto : "); - out.print(""); - out.print(""); + out.print(""); + out.print(""); out.print(""); + + "value=\"" + namenodeInfoPort + "\"/>"); if (UserGroupInformation.isSecurityEnabled()) { out.print(""); + + "\" type=\"hidden\" value=\"" + tokenString + "\"/>"); } out.print(""); + + "value=\"" + nnAddress + "\"/>"); out.print("
"); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java index 4a019b73fbe..cef7d237c24 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java @@ -433,7 +433,7 @@ public abstract class Storage extends StorageInfo { if (!root.exists()) { // storage directory does not exist if (startOpt != StartupOption.FORMAT) { - LOG.info("Storage directory " + rootPath + " does not exist"); + LOG.warn("Storage directory " + rootPath + " does not exist"); return StorageState.NON_EXISTENT; } LOG.info(rootPath + " does not exist. Creating ..."); @@ -442,15 +442,15 @@ public abstract class Storage extends StorageInfo { } // or is inaccessible if (!root.isDirectory()) { - LOG.info(rootPath + "is not a directory"); + LOG.warn(rootPath + "is not a directory"); return StorageState.NON_EXISTENT; } if (!root.canWrite()) { - LOG.info("Cannot access storage directory " + rootPath); + LOG.warn("Cannot access storage directory " + rootPath); return StorageState.NON_EXISTENT; } } catch(SecurityException ex) { - LOG.info("Cannot access storage directory " + rootPath, ex); + LOG.warn("Cannot access storage directory " + rootPath, ex); return StorageState.NON_EXISTENT; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java index d19e54e1122..98dceceb5d3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.server.common.JspHelper; +import org.apache.hadoop.http.HtmlQuoting; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -119,7 +120,7 @@ public class DatanodeJspHelper { String target = dir; final HdfsFileStatus targetStatus = dfs.getFileInfo(target); if (targetStatus == null) { // not exists - out.print("

File or directory : " + target + " does not exist

"); + out.print("

File or directory : " + StringEscapeUtils.escapeHtml(target) + " does not exist

"); JspHelper.printGotoForm(out, namenodeInfoPort, tokenString, target, nnAddr); } else { @@ -203,7 +204,7 @@ public class DatanodeJspHelper { + JspHelper.getDelegationTokenUrlParam(tokenString) + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr); cols[0] = "" - + localFileName + ""; + + HtmlQuoting.quoteHtmlChars(localFileName) + ""; cols[5] = lsDateFormat.format(new Date((files[i] .getModificationTime()))); cols[6] = files[i].getPermission().toString(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java index 107ce5d2f8c..66ecdbc8c1d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java @@ -137,7 +137,7 @@ class FsVolumeList { if (removedVols != null && removedVols.size() > 0) { // Replace volume list volumes = Collections.unmodifiableList(volumeList); - FsDatasetImpl.LOG.info("Completed checkDirs. Removed " + removedVols.size() + FsDatasetImpl.LOG.warn("Completed checkDirs. Removed " + removedVols.size() + " volumes. Current volumes: " + this); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java index cfc841902c2..0d93227d70c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java @@ -78,10 +78,6 @@ public class BackupNode extends NameNode { String nnHttpAddress; /** Checkpoint manager */ Checkpointer checkpointManager; - /** ClusterID to which BackupNode belongs to */ - String clusterId; - /** Block pool Id of the peer namenode of this BackupNode */ - String blockPoolId; BackupNode(Configuration conf, NamenodeRole role) throws IOException { super(conf, role); @@ -145,6 +141,7 @@ public class BackupNode extends NameNode { CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT); NamespaceInfo nsInfo = handshake(conf); super.initialize(conf); + namesystem.setBlockPoolId(nsInfo.getBlockPoolID()); if (false == namesystem.isInSafeMode()) { namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER); @@ -154,9 +151,6 @@ public class BackupNode extends NameNode { // therefore lease hard limit should never expire. 
namesystem.leaseManager.setLeasePeriod( HdfsConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE); - - clusterId = nsInfo.getClusterID(); - blockPoolId = nsInfo.getBlockPoolID(); // register with the active name-node registerWith(nsInfo); @@ -219,7 +213,7 @@ public class BackupNode extends NameNode { } /* @Override */// NameNode - public boolean setSafeMode(@SuppressWarnings("unused") SafeModeAction action) + public boolean setSafeMode(SafeModeAction action) throws IOException { throw new UnsupportedActionException("setSafeMode"); } @@ -415,14 +409,6 @@ public class BackupNode extends NameNode { return nsInfo; } - String getBlockPoolId() { - return blockPoolId; - } - - String getClusterId() { - return clusterId; - } - @Override protected NameNodeHAContext createHAContext() { return new BNHAContext(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 36fbaad24e2..af5831ce7b5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -1938,9 +1938,9 @@ public class FSDirectory implements Closeable { assert hasWriteLock(); // sanity check if ((nsQuota < 0 && nsQuota != HdfsConstants.QUOTA_DONT_SET && - nsQuota < HdfsConstants.QUOTA_RESET) || + nsQuota != HdfsConstants.QUOTA_RESET) || (dsQuota < 0 && dsQuota != HdfsConstants.QUOTA_DONT_SET && - dsQuota < HdfsConstants.QUOTA_RESET)) { + dsQuota != HdfsConstants.QUOTA_RESET)) { throw new IllegalArgumentException("Illegal value for nsQuota or " + "dsQuota : " + nsQuota + " and " + dsQuota); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 9f25ab92e2a..d86e5c7276e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -3550,7 +3550,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, @Metric({"TransactionsSinceLastLogRoll", "Number of transactions since last edit log roll"}) public long getTransactionsSinceLastLogRoll() { - if (isInStandbyState()) { + if (isInStandbyState() || !getEditLog().isSegmentOpen()) { return 0; } else { return getEditLog().getLastWrittenTxId() - diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index fb5c88d8bbc..309811c242f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -227,6 +227,7 @@ public class NameNode { public static final int DEFAULT_PORT = 8020; public static final Log LOG = LogFactory.getLog(NameNode.class.getName()); public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.hdfs.StateChange"); + public static final Log blockStateChangeLog = LogFactory.getLog("BlockStateChange"); public static final HAState ACTIVE_STATE = new ActiveState(); public static final HAState 
STANDBY_STATE = new StandbyState(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index d72d4823aa1..8e013004e75 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -132,6 +132,7 @@ class NameNodeRpcServer implements NamenodeProtocols { private static final Log LOG = NameNode.LOG; private static final Log stateChangeLog = NameNode.stateChangeLog; + private static final Log blockStateChangeLog = NameNode.blockStateChangeLog; // Dependencies from other parts of NN. protected final FSNamesystem namesystem; @@ -889,8 +890,8 @@ class NameNodeRpcServer implements NamenodeProtocols { String poolId, StorageBlockReport[] reports) throws IOException { verifyRequest(nodeReg); BlockListAsLongs blist = new BlockListAsLongs(reports[0].getBlocks()); - if(stateChangeLog.isDebugEnabled()) { - stateChangeLog.debug("*BLOCK* NameNode.blockReport: " + if(blockStateChangeLog.isDebugEnabled()) { + blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: " + "from " + nodeReg + " " + blist.getNumberOfBlocks() + " blocks"); } @@ -905,8 +906,8 @@ class NameNodeRpcServer implements NamenodeProtocols { public void blockReceivedAndDeleted(DatanodeRegistration nodeReg, String poolId, StorageReceivedDeletedBlocks[] receivedAndDeletedBlocks) throws IOException { verifyRequest(nodeReg); - if(stateChangeLog.isDebugEnabled()) { - stateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: " + if(blockStateChangeLog.isDebugEnabled()) { + blockStateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: " +"from "+nodeReg+" "+receivedAndDeletedBlocks.length +" blocks."); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/Federation.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm similarity index 100% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/Federation.apt.vm rename to hadoop-hdfs-project/hadoop-hdfs/src/site/apt/Federation.apt.vm diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HDFSHighAvailabilityWithNFS.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithNFS.apt.vm similarity index 100% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HDFSHighAvailabilityWithNFS.apt.vm rename to hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithNFS.apt.vm diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm similarity index 100% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm rename to hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HDFSHighAvailabilityWithQJM.apt.vm diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/WebHDFS.apt.vm b/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm similarity index 100% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/WebHDFS.apt.vm rename to hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java index a6625431fdc..c0b3994aed9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java @@ -46,6 +46,7 @@ import org.junit.Test; public class TestDatanodeDeath { { ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL); + ((Log4JLogger)NameNode.blockStateChangeLog).getLogger().setLevel(Level.ALL); ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL); ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL); ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java index 63371ec4741..934342eea9f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java @@ -17,12 +17,12 @@ */ package org.apache.hadoop.hdfs; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; +import static org.junit.Assert.*; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import java.net.InetSocketAddress; +import java.security.Permission; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; import org.apache.hadoop.hdfs.server.common.IncorrectVersionException; import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; @@ -46,6 +47,64 @@ public class TestDatanodeRegistration { public static final Log LOG = LogFactory.getLog(TestDatanodeRegistration.class); + private static class MonitorDNS extends SecurityManager { + int lookups = 0; + @Override + public void checkPermission(Permission perm) {} + @Override + public void checkConnect(String host, int port) { + if (port == -1) { + lookups++; + } + } + } + + /** + * Ensure the datanode manager does not do host lookup after registration, + * especially for node reports. 
+ * @throws Exception + */ + @Test + public void testDNSLookups() throws Exception { + MonitorDNS sm = new MonitorDNS(); + System.setSecurityManager(sm); + + MiniDFSCluster cluster = null; + try { + HdfsConfiguration conf = new HdfsConfiguration(); + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(8).build(); + cluster.waitActive(); + + int initialLookups = sm.lookups; + assertTrue("dns security manager is active", initialLookups != 0); + + DatanodeManager dm = + cluster.getNamesystem().getBlockManager().getDatanodeManager(); + + // make sure no lookups occur + dm.refreshNodes(conf); + assertEquals(initialLookups, sm.lookups); + + dm.refreshNodes(conf); + assertEquals(initialLookups, sm.lookups); + + // ensure none of the reports trigger lookups + dm.getDatanodeListForReport(DatanodeReportType.ALL); + assertEquals(initialLookups, sm.lookups); + + dm.getDatanodeListForReport(DatanodeReportType.LIVE); + assertEquals(initialLookups, sm.lookups); + + dm.getDatanodeListForReport(DatanodeReportType.DEAD); + assertEquals(initialLookups, sm.lookups); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + System.setSecurityManager(null); + } + } + /** * Regression test for HDFS-894 ensures that, when datanodes * are restarted, the new IPC port is registered with the diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java index f488040c492..d33052e4c6b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java @@ -51,6 +51,7 @@ public class TestFileAppend2 { { ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL); + ((Log4JLogger)NameNode.blockStateChangeLog).getLogger().setLevel(Level.ALL); ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL); ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL); ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java index bad1fff3ef2..ab6ed12492e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java @@ -19,13 +19,20 @@ package org.apache.hadoop.hdfs.server.common; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.doAnswer; import java.io.IOException; +import java.io.StringReader; import java.net.InetSocketAddress; import java.util.ArrayList; import javax.servlet.ServletContext; import javax.servlet.http.HttpServletRequest; +import javax.servlet.jsp.JspWriter; +import javax.xml.parsers.DocumentBuilder; +import javax.xml.parsers.DocumentBuilderFactory; +import javax.xml.parsers.ParserConfigurationException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -46,10 +53,17 @@ import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager; import org.junit.Assert; import org.junit.Test; +import org.mockito.ArgumentCaptor; +import 
org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; +import org.xml.sax.InputSource; +import org.xml.sax.SAXException; + public class TestJspHelper { private Configuration conf = new HdfsConfiguration(); + private String jspWriterOutput = ""; public static class DummySecretManager extends AbstractDelegationTokenSecretManager { @@ -368,7 +382,33 @@ public class TestJspHelper { ae.getMessage()); } } - + + @Test + public void testPrintGotoFormWritesValidXML() throws IOException, + ParserConfigurationException, SAXException { + JspWriter mockJspWriter = mock(JspWriter.class); + ArgumentCaptor arg = ArgumentCaptor.forClass(String.class); + doAnswer(new Answer() { + @Override + public Object answer(InvocationOnMock invok) { + Object[] args = invok.getArguments(); + jspWriterOutput += (String) args[0]; + return null; + } + }).when(mockJspWriter).print(arg.capture()); + + jspWriterOutput = ""; + + JspHelper.printGotoForm(mockJspWriter, 424242, "a token string", + "foobar/file", "0.0.0.0"); + + DocumentBuilder parser = + DocumentBuilderFactory.newInstance().newDocumentBuilder(); + InputSource is = new InputSource(); + is.setCharacterStream(new StringReader(jspWriterOutput)); + parser.parse(is); + } + private HttpServletRequest getMockRequest(String remoteUser, String user, String doAs) { HttpServletRequest request = mock(HttpServletRequest.class); when(request.getParameter(UserParam.NAME)).thenReturn(user); diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 644796d4bd8..985cd28a6f6 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -194,6 +194,9 @@ Release 2.0.3-alpha - Unreleased MAPREDUCE-1806. CombineFileInputFormat does not work with paths not on default FS. (Gera Shegalov via tucu) + MAPREDUCE-4777. In TestIFile, testIFileReaderWithCodec relies on + testIFileWriterWithCodec. (Sandy Ryza via tomwhite) + Release 2.0.2-alpha - 2012-09-07 INCOMPATIBLE CHANGES @@ -584,6 +587,10 @@ Release 0.23.5 - UNRELEASED MAPREDUCE-4752. Reduce MR AM memory usage through String Interning (Robert Evans via tgraves) + MAPREDUCE-4266. remove Ant remnants from MR (tgraves via bobby) + + MAPREDUCE-4666. JVM metrics for history server (jlowe via jeagles) + OPTIMIZATIONS BUG FIXES @@ -634,6 +641,15 @@ Release 0.23.5 - UNRELEASED MAPREDUCE-4771. KeyFieldBasedPartitioner not partitioning properly when configured (jlowe via bobby) + + MAPREDUCE-4772. Fetch failures can take way too long for a map to be + restarted (bobby) + + MAPREDUCE-4782. NLineInputFormat skips first line of last InputSplit + (Mark Fuhs via bobby) + + MAPREDUCE-4774. 
JobImpl does not handle asynchronous task events in FAILED + state (jlowe via bobby) Release 0.23.4 - UNRELEASED diff --git a/hadoop-mapreduce-project/build-utils.xml b/hadoop-mapreduce-project/build-utils.xml deleted file mode 100644 index 9f19d761d32..00000000000 --- a/hadoop-mapreduce-project/build-utils.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - - - - - - - - - - - - diff --git a/hadoop-mapreduce-project/build.xml b/hadoop-mapreduce-project/build.xml deleted file mode 100644 index 286af16530b..00000000000 --- a/hadoop-mapreduce-project/build.xml +++ /dev/null @@ -1,1912 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
[deleted build.xml content: ~1,900 lines of Ant XML (including an embedded CheckStyle report stylesheet) whose markup was stripped during extraction; only bare '-' deletion markers and stray report headings such as "CheckStyle Audit", "Designed for use with CheckStyle and Ant.", "Files", "Name / Errors", "Error Description / Line", "Back to top", and "Summary" survived]
- - - - a - b - - - - - diff --git a/hadoop-mapreduce-project/src/test/checkstyle.xml b/hadoop-mapreduce-project/src/test/checkstyle.xml deleted file mode 100644 index 5fb0c472707..00000000000 --- a/hadoop-mapreduce-project/src/test/checkstyle.xml +++ /dev/null @@ -1,187 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/hadoop-mapreduce-project/src/test/commit-tests b/hadoop-mapreduce-project/src/test/commit-tests deleted file mode 100644 index 277cb7d1779..00000000000 --- a/hadoop-mapreduce-project/src/test/commit-tests +++ /dev/null @@ -1,45 +0,0 @@ -**/TestCollect.java -**/TestCommandLineJobSubmission.java -**/TestComparators.java -**/TestCounters.java -**/TestFileInputFormat.java -**/TestFileInputFormatPathFilter.java -**/TestFileOutputCommitter.java -**/TestFileOutputFormat.java -**/TestGetSplitHosts.java -**/TestIFileStreams.java -**/TestIndexCache.java -**/TestJavaSerialization.java -**/TestJobHistoryParsing.java -**/TestJobHistoryVersion.java -**/TestJobInProgress.java -**/TestJobQueueTaskScheduler.java -**/TestKillCompletedJob.java -**/TestLostTracker.java -**/TestMapCollection.java -**/TestMapOutputType.java -**/TestMapRed.java -**/TestMapReduceLocal.java -**/TestMiniMRDFSCaching.java -**/TestQueueAclsForCurrentUser.java -**/TestRackAwareTaskPlacement.java -**/TestReduceFetchFromPartialMem.java -**/TestReduceTask.java -**/TestSequenceFileAsBinaryInputFormat.java -**/TestSequenceFileAsBinaryOutputFormat.java -**/TestSequenceFileInputFormat.java -**/TestSeveral.java -**/TestSpilledRecordsCounter.java -**/TestSpeculativeExecution.java -**/TestTaskLimits.java -**/TestTextInputFormat.java -**/TestTextOutputFormat.java -**/TestTrackerBlacklistAcrossJobs.java -**/TestTaskTrackerBlacklisting.java -**/TestTaskTrackerLocalization -**/TestTrackerDistributedCacheManager -**/TestQueueManager -**/TestContainerQueue -**/TestCapacityScheduler -**/TestRefreshOfQueues -**/TestQueueManagerRefresh \ No newline at end of file diff --git a/hadoop-mapreduce-project/src/test/empty-file b/hadoop-mapreduce-project/src/test/empty-file deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hadoop-mapreduce-project/src/test/fi-site.xml b/hadoop-mapreduce-project/src/test/fi-site.xml deleted file mode 100644 index 42bae52195e..00000000000 --- a/hadoop-mapreduce-project/src/test/fi-site.xml +++ /dev/null @@ -1,31 +0,0 @@ - - - - - - - - - fi.* - 0.00 - - Default probability level for all injected faults specified - as a floating number between 0 and 1.00 - - - diff --git a/hadoop-mapreduce-project/src/test/findbugsExcludeFile.xml b/hadoop-mapreduce-project/src/test/findbugsExcludeFile.xml deleted file mode 100644 index 28edfbfe363..00000000000 --- a/hadoop-mapreduce-project/src/test/findbugsExcludeFile.xml +++ /dev/null @@ -1,391 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/hadoop-mapreduce-project/src/test/hadoop-policy.xml b/hadoop-mapreduce-project/src/test/hadoop-policy.xml deleted file mode 100644 index 09352749b73..00000000000 --- a/hadoop-mapreduce-project/src/test/hadoop-policy.xml +++ /dev/null @@ -1,114 +0,0 @@ - - - - - - - - - - security.client.protocol.acl - * - ACL for ClientProtocol, which is used by user code - via the DistributedFileSystem. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.client.datanode.protocol.acl - * - ACL for ClientDatanodeProtocol, the client-to-datanode protocol - for block recovery. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.datanode.protocol.acl - * - ACL for DatanodeProtocol, which is used by datanodes to - communicate with the namenode. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.inter.datanode.protocol.acl - * - ACL for InterDatanodeProtocol, the inter-datanode protocol - for updating generation timestamp. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.namenode.protocol.acl - * - ACL for NamenodeProtocol, the protocol used by the secondary - namenode to communicate with the namenode. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.inter.tracker.protocol.acl - * - ACL for InterTrackerProtocol, used by the tasktrackers to - communicate with the jobtracker. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.job.submission.protocol.acl - * - ACL for JobSubmissionProtocol, used by job clients to - communciate with the jobtracker for job submission, querying job status etc. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.task.umbilical.protocol.acl - * - ACL for TaskUmbilicalProtocol, used by the map and reduce - tasks to communicate with the parent tasktracker. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". - A special value of "*" means all users are allowed. - - - - security.refresh.policy.protocol.acl - ${user.name} - ACL for RefreshAuthorizationPolicyProtocol, used by the - dfsadmin and mradmin commands to refresh the security policy in-effect. - The ACL is a comma-separated list of user and group names. The user and - group list is separated by a blank. For e.g. "alice,bob users,wheel". 
- A special value of "*" means all users are allowed. - - - diff --git a/hadoop-mapreduce-project/src/test/krb5.conf b/hadoop-mapreduce-project/src/test/krb5.conf deleted file mode 100644 index 9f4b9ad7cd2..00000000000 --- a/hadoop-mapreduce-project/src/test/krb5.conf +++ /dev/null @@ -1,28 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -[libdefaults] - default_realm = APACHE.ORG - udp_preference_limit = 1 - extra_addresses = 127.0.0.1 -[realms] - APACHE.ORG = { - admin_server = localhost:88 - kdc = localhost:88 - } -[domain_realm] - localhost = APACHE.ORG diff --git a/hadoop-mapreduce-project/src/test/log4j.properties b/hadoop-mapreduce-project/src/test/log4j.properties deleted file mode 100644 index 531b68b5a9f..00000000000 --- a/hadoop-mapreduce-project/src/test/log4j.properties +++ /dev/null @@ -1,19 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# log4j configuration used during build and unit tests - -log4j.rootLogger=info,stdout -log4j.threshhold=ALL -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2} (%F:%M(%L)) - %m%n diff --git a/hadoop-mapreduce-project/src/test/mapred-site.xml b/hadoop-mapreduce-project/src/test/mapred-site.xml deleted file mode 100644 index 4874e61be54..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred-site.xml +++ /dev/null @@ -1,55 +0,0 @@ - - - - - - - - - - - mapreduce.task.io.sort.mb - 10 - - - hadoop.security.authentication - simple - - - mapreduce.jobtracker.hosts.exclude.filename - hosts.exclude - - - - mapreduce.jobtracker.retirejobs - false - - - - mapred.child.java.opts - -Xmx200m -Djava.net.preferIPv4Stack=true - - - mapreduce.jobtracker.persist.jobstatus.active - false - - - mapreduce.task.local.output.class - org.apache.hadoop.mapred.MROutputFiles - - diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/cli/TestMRCLI.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/cli/TestMRCLI.java deleted file mode 100644 index 2d443821566..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/cli/TestMRCLI.java +++ /dev/null @@ -1,148 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.cli; - -import org.apache.hadoop.cli.util.*; -import org.apache.hadoop.cli.util.CommandExecutor.Result; -import org.apache.hadoop.tools.HadoopArchives; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.MiniMRCluster; -import org.apache.hadoop.mapred.tools.MRAdmin; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.security.authorize.HadoopPolicyProvider; -import org.apache.hadoop.security.authorize.PolicyProvider; -import org.apache.hadoop.util.ToolRunner; -import org.junit.After; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; -import org.xml.sax.SAXException; - -public class TestMRCLI extends TestHDFSCLI { - - protected MiniMRCluster mrCluster = null; - protected String jobtracker = null; - private JobConf mrConf; - - @Before - public void setUp() throws Exception { - super.setUp(); - conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, - HadoopPolicyProvider.class, PolicyProvider.class); - mrConf = new JobConf(conf); - mrCluster = new MiniMRCluster(1, dfsCluster.getFileSystem().getUri().toString(), 1, - null, null, mrConf); - jobtracker = mrCluster.createJobConf().get(JTConfig.JT_IPC_ADDRESS, "local"); - } - - @After - public void tearDown() throws Exception { - mrCluster.shutdown(); - super.tearDown(); - } - - @Override - protected TestConfigFileParser getConfigParser() { - return new TestConfigFileParserMR(); - } - - protected String getTestFile() { - return "testMRConf.xml"; - } - - @Override - protected String expandCommand(final String cmd) { - String expCmd = cmd; - expCmd = expCmd.replaceAll("JOBTRACKER", jobtracker); - expCmd = super.expandCommand(expCmd); - return expCmd; - } - - @Override - protected Result execute(CLICommand cmd) throws Exception { - if (cmd.getType() instanceof CLICommandMRAdmin) - return new TestMRCLI.MRCmdExecutor(jobtracker).executeCommand(cmd.getCmd()); - else if (cmd.getType() instanceof CLICommandArchive) - return new TestMRCLI.ArchiveCmdExecutor(namenode, mrConf).executeCommand(cmd.getCmd()); - else - return super.execute(cmd); - } - - public static class MRCmdExecutor extends CommandExecutor { - private String jobtracker = null; - public MRCmdExecutor(String jobtracker) { - this.jobtracker = jobtracker; - } - @Override - protected void execute(final String cmd) throws Exception{ - MRAdmin mradmin = new MRAdmin(); - String[] args = getCommandAsArgs(cmd, "JOBTRACKER", jobtracker); - ToolRunner.run(mradmin, args); - } - - } - - public static class ArchiveCmdExecutor extends CommandExecutor { - private String namenode = null; - private JobConf jobConf = null; - public ArchiveCmdExecutor(String namenode, JobConf jobConf) { - this.namenode = namenode; - this.jobConf = jobConf; - } - @Override - protected void execute(final String cmd) throws Exception { - HadoopArchives archive = new HadoopArchives(jobConf); - String[] args = getCommandAsArgs(cmd, "NAMENODE", namenode); - ToolRunner.run(archive, args); - } - } - - @Test - @Ignore - @Override - public void testAll () { - super.testAll(); - } - - class TestConfigFileParserMR extends CLITestHelper.TestConfigFileParser { - @Override - public void endElement(String uri, String localName, String qName) - throws SAXException { - if (qName.equals("mr-admin-command")) { - if (testCommands != null) { - testCommands.add(new CLITestCmdMR(charString, - new CLICommandMRAdmin())); - } else if (cleanupCommands != null) { - cleanupCommands.add(new CLITestCmdMR(charString, - new 
CLICommandMRAdmin())); - } - } else if (qName.equals("archive-command")) { - if (testCommands != null) { - testCommands.add(new CLITestCmdMR(charString, - new CLICommandArchive())); - } else if (cleanupCommands != null) { - cleanupCommands.add(new CLITestCmdMR(charString, - new CLICommandArchive())); - } - } else { - super.endElement(uri, localName, qName); - } - } - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/ControlledMapReduceJob.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/ControlledMapReduceJob.java deleted file mode 100644 index 13f831c3364..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/ControlledMapReduceJob.java +++ /dev/null @@ -1,578 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.Random; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.PathFilter; -import org.apache.hadoop.io.BytesWritable; -import org.apache.hadoop.io.IntWritable; -import org.apache.hadoop.io.NullWritable; -import org.apache.hadoop.io.SequenceFile; -import org.apache.hadoop.io.SequenceFile.CompressionType; -import org.apache.hadoop.mapred.lib.NullOutputFormat; -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Tool; -import org.apache.hadoop.util.ToolRunner; - -/** - * A Controlled Map/Reduce Job. The tasks are controlled by the presence of - * particularly named files in the directory signalFileDir on the file-system - * that the job is configured to work with. Tasks get scheduled by the - * scheduler, occupy the slots on the TaskTrackers and keep running till the - * user gives a signal via files whose names are of the form MAPS_[0-9]* and - * REDUCES_[0-9]*. For e.g., whenever the map tasks see that a file name MAPS_5 - * is created in the singalFileDir, all the maps whose TaskAttemptIDs are below - * 4 get finished. At any time, there should be only one MAPS_[0-9]* file and - * only one REDUCES_[0-9]* file in the singnalFileDir. In the beginning MAPS_0 - * and REDUCE_0 files are present, and further signals are given by renaming - * these files. 
- * - */ -class ControlledMapReduceJob extends Configured implements Tool, - Mapper, - Reducer, - Partitioner, - InputFormat { - - static final Log LOG = LogFactory.getLog(ControlledMapReduceJob.class); - - private FileSystem fs = null; - private int taskNumber; - - private static ArrayList signalFileDirCache = new ArrayList(); - - private Path signalFileDir; - { - Random random = new Random(); - signalFileDir = new Path("signalFileDir-" + random.nextLong()); - while (signalFileDirCache.contains(signalFileDir)) { - signalFileDir = new Path("signalFileDir-" + random.nextLong()); - } - signalFileDirCache.add(signalFileDir); - } - - private long mapsFinished = 0; - private long reducesFinished = 0; - - private RunningJob rJob = null; - - private int numMappers; - private int numReducers; - - private final String MAP_SIGFILE_PREFIX = "MAPS_"; - private final String REDUCE_SIGFILE_PREFIX = "REDUCES_"; - - private void initialize() - throws IOException { - fs = FileSystem.get(getConf()); - fs.mkdirs(signalFileDir); - writeFile(new Path(signalFileDir, MAP_SIGFILE_PREFIX + mapsFinished)); - writeFile(new Path(signalFileDir, REDUCE_SIGFILE_PREFIX + reducesFinished)); - } - - /** - * Finish N number of maps/reduces. - * - * @param isMap - * @param noOfTasksToFinish - * @throws IOException - */ - public void finishNTasks(boolean isMap, int noOfTasksToFinish) - throws IOException { - if (noOfTasksToFinish < 0) { - throw new IOException( - "Negative values for noOfTasksToFinish not acceptable"); - } - - if (noOfTasksToFinish == 0) { - return; - } - - LOG.info("Going to finish off " + noOfTasksToFinish); - String PREFIX = isMap ? MAP_SIGFILE_PREFIX : REDUCE_SIGFILE_PREFIX; - long tasksFinished = isMap ? mapsFinished : reducesFinished; - Path oldSignalFile = - new Path(signalFileDir, PREFIX + String.valueOf(tasksFinished)); - Path newSignalFile = - new Path(signalFileDir, PREFIX - + String.valueOf(tasksFinished + noOfTasksToFinish)); - fs.rename(oldSignalFile, newSignalFile); - if (isMap) { - mapsFinished += noOfTasksToFinish; - } else { - reducesFinished += noOfTasksToFinish; - } - LOG.info("Successfully sent signal to finish off " + noOfTasksToFinish); - } - - /** - * Finished all tasks of type determined by isMap - * - * @param isMap - * @throws IOException - */ - public void finishAllTasks(boolean isMap) - throws IOException { - finishNTasks(isMap, (isMap ? numMappers : numReducers)); - } - - /** - * Finish the job - * - * @throws IOException - */ - public void finishJob() - throws IOException { - finishAllTasks(true); - finishAllTasks(false); - } - - /** - * Wait till noOfTasksToBeRunning number of tasks of type specified by isMap - * started running. This currently uses a jip object and directly uses its api - * to determine the number of tasks running. - * - *

- * - * TODO: It should eventually use a JobID and then get the information from - * the JT to check the number of running tasks. - * - * @param jip - * @param isMap - * @param noOfTasksToBeRunning - */ - static void waitTillNTasksStartRunning(JobInProgress jip, boolean isMap, - int noOfTasksToBeRunning) - throws InterruptedException { - int numTasks = 0; - while (numTasks != noOfTasksToBeRunning) { - Thread.sleep(1000); - numTasks = isMap ? jip.runningMaps() : jip.runningReduces(); - LOG.info("Waiting till " + noOfTasksToBeRunning - + (isMap ? " map" : " reduce") + " tasks of the job " - + jip.getJobID() + " start running. " + numTasks - + " tasks already started running."); - } - } - - /** - * Make sure that the number of tasks of type specified by isMap running in - * the given job is the same as noOfTasksToBeRunning - * - *

- * - * TODO: It should eventually use a JobID and then get the information from - * the JT to check the number of running tasks. - * - * @param jip - * @param isMap - * @param noOfTasksToBeRunning - */ - static void assertNumTasksRunning(JobInProgress jip, boolean isMap, - int noOfTasksToBeRunning) - throws Exception { - if ((isMap ? jip.runningMaps() : jip.runningReduces()) != noOfTasksToBeRunning) { - throw new Exception("Number of tasks running is not " - + noOfTasksToBeRunning); - } - } - - /** - * Wait till noOfTasksToFinish number of tasks of type specified by isMap - * are finished. This currently uses a jip object and directly uses its api to - * determine the number of tasks finished. - * - *

- * - * TODO: It should eventually use a JobID and then get the information from - * the JT to check the number of finished tasks. - * - * @param jip - * @param isMap - * @param noOfTasksToFinish - * @throws InterruptedException - */ - static void waitTillNTotalTasksFinish(JobInProgress jip, boolean isMap, - int noOfTasksToFinish) - throws InterruptedException { - int noOfTasksAlreadyFinished = 0; - while (noOfTasksAlreadyFinished < noOfTasksToFinish) { - Thread.sleep(1000); - noOfTasksAlreadyFinished = - (isMap ? jip.finishedMaps() : jip.finishedReduces()); - LOG.info("Waiting till " + noOfTasksToFinish - + (isMap ? " map" : " reduce") + " tasks of the job " - + jip.getJobID() + " finish. " + noOfTasksAlreadyFinished - + " tasks already got finished."); - } - } - - /** - * Have all the tasks of type specified by isMap finished in this job? - * - * @param jip - * @param isMap - * @return true if finished, false otherwise - */ - static boolean haveAllTasksFinished(JobInProgress jip, boolean isMap) { - return ((isMap ? jip.runningMaps() : jip.runningReduces()) == 0); - } - - private void writeFile(Path name) - throws IOException { - Configuration conf = new Configuration(false); - SequenceFile.Writer writer = - SequenceFile.createWriter(fs, conf, name, BytesWritable.class, - BytesWritable.class, CompressionType.NONE); - writer.append(new BytesWritable(), new BytesWritable()); - writer.close(); - } - - @Override - public void configure(JobConf conf) { - try { - signalFileDir = new Path(conf.get("signal.dir.path")); - numReducers = conf.getNumReduceTasks(); - fs = FileSystem.get(conf); - String taskAttemptId = conf.get(JobContext.TASK_ATTEMPT_ID); - if (taskAttemptId != null) { - TaskAttemptID taskAttemptID = TaskAttemptID.forName(taskAttemptId); - taskNumber = taskAttemptID.getTaskID().getId(); - } - } catch (IOException ioe) { - LOG.warn("Caught exception " + ioe); - } - } - - private FileStatus[] listSignalFiles(FileSystem fileSys, final boolean isMap) - throws IOException { - return fileSys.globStatus(new Path(signalFileDir.toString() + "/*"), - new PathFilter() { - @Override - public boolean accept(Path path) { - if (isMap && path.getName().startsWith(MAP_SIGFILE_PREFIX)) { - LOG.debug("Found signal file : " + path.getName()); - return true; - } else if (!isMap - && path.getName().startsWith(REDUCE_SIGFILE_PREFIX)) { - LOG.debug("Found signal file : " + path.getName()); - return true; - } - LOG.info("Didn't find any relevant signal files."); - return false; - } - }); - } - - @Override - public void map(NullWritable key, NullWritable value, - OutputCollector output, Reporter reporter) - throws IOException { - LOG.info(taskNumber + " has started."); - FileStatus[] files = listSignalFiles(fs, true); - String[] sigFileComps = files[0].getPath().getName().split("_"); - String signalType = sigFileComps[0]; - int noOfTasks = Integer.parseInt(sigFileComps[1]); - - while (!signalType.equals("MAPS") || taskNumber + 1 > noOfTasks) { - LOG.info("Signal type found : " + signalType - + " .Number of tasks to be finished by this signal : " + noOfTasks - + " . 
My id : " + taskNumber); - LOG.info(taskNumber + " is still alive."); - try { - reporter.progress(); - Thread.sleep(1000); - } catch (InterruptedException ie) { - LOG.info(taskNumber + " is still alive."); - break; - } - files = listSignalFiles(fs, true); - sigFileComps = files[0].getPath().getName().split("_"); - signalType = sigFileComps[0]; - noOfTasks = Integer.parseInt(sigFileComps[1]); - } - LOG.info("Signal type found : " + signalType - + " .Number of tasks to be finished by this signal : " + noOfTasks - + " . My id : " + taskNumber); - // output numReduce number of random values, so that - // each reducer will get one key each. - for (int i = 0; i < numReducers; i++) { - output.collect(new IntWritable(i), NullWritable.get()); - } - - LOG.info(taskNumber + " is finished."); - } - - @Override - public void reduce(IntWritable key, Iterator values, - OutputCollector output, Reporter reporter) - throws IOException { - LOG.info(taskNumber + " has started."); - FileStatus[] files = listSignalFiles(fs, false); - String[] sigFileComps = files[0].getPath().getName().split("_"); - String signalType = sigFileComps[0]; - int noOfTasks = Integer.parseInt(sigFileComps[1]); - - while (!signalType.equals("REDUCES") || taskNumber + 1 > noOfTasks) { - LOG.info("Signal type found : " + signalType - + " .Number of tasks to be finished by this signal : " + noOfTasks - + " . My id : " + taskNumber); - LOG.info(taskNumber + " is still alive."); - try { - reporter.progress(); - Thread.sleep(1000); - } catch (InterruptedException ie) { - LOG.info(taskNumber + " is still alive."); - break; - } - files = listSignalFiles(fs, false); - sigFileComps = files[0].getPath().getName().split("_"); - signalType = sigFileComps[0]; - noOfTasks = Integer.parseInt(sigFileComps[1]); - } - LOG.info("Signal type found : " + signalType - + " .Number of tasks to be finished by this signal : " + noOfTasks - + " . 
My id : " + taskNumber); - LOG.info(taskNumber + " is finished."); - } - - @Override - public void close() - throws IOException { - // nothing - } - - public JobID getJobId() { - if (rJob == null) { - return null; - } - return rJob.getID(); - } - - public int run(int numMapper, int numReducer) - throws IOException { - JobConf conf = - getControlledMapReduceJobConf(getConf(), numMapper, numReducer); - JobClient client = new JobClient(conf); - rJob = client.submitJob(conf); - while (!rJob.isComplete()) { - try { - Thread.sleep(1000); - } catch (InterruptedException ie) { - break; - } - } - if (rJob.isSuccessful()) { - return 0; - } - return 1; - } - - private JobConf getControlledMapReduceJobConf(Configuration clusterConf, - int numMapper, int numReducer) - throws IOException { - setConf(clusterConf); - initialize(); - JobConf conf = new JobConf(getConf(), ControlledMapReduceJob.class); - conf.setJobName("ControlledJob"); - conf.set("signal.dir.path", signalFileDir.toString()); - conf.setNumMapTasks(numMapper); - conf.setNumReduceTasks(numReducer); - conf.setMapperClass(ControlledMapReduceJob.class); - conf.setMapOutputKeyClass(IntWritable.class); - conf.setMapOutputValueClass(NullWritable.class); - conf.setReducerClass(ControlledMapReduceJob.class); - conf.setOutputKeyClass(NullWritable.class); - conf.setOutputValueClass(NullWritable.class); - conf.setInputFormat(ControlledMapReduceJob.class); - FileInputFormat.addInputPath(conf, new Path("ignored")); - conf.setOutputFormat(NullOutputFormat.class); - conf.setMapSpeculativeExecution(false); - conf.setReduceSpeculativeExecution(false); - - // Set the following for reduce tasks to be able to be started running - // immediately along with maps. - conf.set(JobContext.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, String.valueOf(0)); - - return conf; - } - - @Override - public int run(String[] args) - throws Exception { - numMappers = Integer.parseInt(args[0]); - numReducers = Integer.parseInt(args[1]); - return run(numMappers, numReducers); - } - - @Override - public int getPartition(IntWritable k, NullWritable v, int numPartitions) { - return k.get() % numPartitions; - } - - @Override - public RecordReader getRecordReader( - InputSplit split, JobConf job, Reporter reporter) { - LOG.debug("Inside RecordReader.getRecordReader"); - return new RecordReader() { - private int pos = 0; - - public void close() { - // nothing - } - - public NullWritable createKey() { - return NullWritable.get(); - } - - public NullWritable createValue() { - return NullWritable.get(); - } - - public long getPos() { - return pos; - } - - public float getProgress() { - return pos * 100; - } - - public boolean next(NullWritable key, NullWritable value) { - if (pos++ == 0) { - LOG.debug("Returning the next record"); - return true; - } - LOG.debug("No more records. 
Returning none."); - return false; - } - }; - } - - @Override - public InputSplit[] getSplits(JobConf job, int numSplits) { - LOG.debug("Inside InputSplit.getSplits"); - InputSplit[] ret = new InputSplit[numSplits]; - for (int i = 0; i < numSplits; ++i) { - ret[i] = new EmptySplit(); - } - return ret; - } - - public static class EmptySplit implements InputSplit { - public void write(DataOutput out) - throws IOException { - } - - public void readFields(DataInput in) - throws IOException { - } - - public long getLength() { - return 0L; - } - - public String[] getLocations() { - return new String[0]; - } - } - - static class ControlledMapReduceJobRunner extends Thread { - private JobConf conf; - private ControlledMapReduceJob job; - private JobID jobID; - - private int numMappers; - private int numReducers; - - public ControlledMapReduceJobRunner() { - this(new JobConf(), 5, 5); - } - - public ControlledMapReduceJobRunner(JobConf cnf, int numMap, int numRed) { - this.conf = cnf; - this.numMappers = numMap; - this.numReducers = numRed; - } - - public ControlledMapReduceJob getJob() { - while (job == null) { - try { - Thread.sleep(1000); - } catch (InterruptedException ie) { - LOG.info(ControlledMapReduceJobRunner.class.getName() - + " is interrupted."); - break; - } - } - return job; - } - - public JobID getJobID() - throws IOException { - ControlledMapReduceJob job = getJob(); - JobID id = job.getJobId(); - while (id == null) { - id = job.getJobId(); - try { - Thread.sleep(1000); - } catch (InterruptedException ie) { - LOG.info(ControlledMapReduceJobRunner.class.getName() - + " is interrupted."); - break; - } - } - return id; - } - - @Override - public void run() { - if (job != null) { - LOG.warn("Job is already running."); - return; - } - try { - job = new ControlledMapReduceJob(); - int ret = - ToolRunner.run(this.conf, job, new String[] { - String.valueOf(numMappers), String.valueOf(numReducers) }); - LOG.info("Return value for the job : " + ret); - } catch (Exception e) { - LOG.warn("Caught exception : " + StringUtils.stringifyException(e)); - } - } - - static ControlledMapReduceJobRunner getControlledMapReduceJobRunner( - JobConf conf, int numMappers, int numReducers) { - return new ControlledMapReduceJobRunner(conf, numMappers, numReducers); - } - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/DummyMemoryCalculatorPlugin.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/DummyMemoryCalculatorPlugin.java deleted file mode 100644 index 6c16e564faa..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/DummyMemoryCalculatorPlugin.java +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.mapred; - -import org.apache.hadoop.util.MemoryCalculatorPlugin; - -/** - * Plugin class to test virtual and physical memories reported by TT. Use - * configuration items {@link #MAXVMEM_TESTING_PROPERTY} and - * {@link #MAXPMEM_TESTING_PROPERTY} to tell TT the total vmem and the total - * pmem. - */ -public class DummyMemoryCalculatorPlugin extends MemoryCalculatorPlugin { - - /** max vmem on the TT */ - public static final String MAXVMEM_TESTING_PROPERTY = - "mapred.tasktracker.maxvmem.testing"; - /** max pmem on the TT */ - public static final String MAXPMEM_TESTING_PROPERTY = - "mapred.tasktracker.maxpmem.testing"; - - /** {@inheritDoc} */ - @Override - public long getVirtualMemorySize() { - return getConf().getLong(MAXVMEM_TESTING_PROPERTY, -1); - } - - /** {@inheritDoc} */ - @Override - public long getPhysicalMemorySize() { - return getConf().getLong(MAXPMEM_TESTING_PROPERTY, -1); - } -} \ No newline at end of file diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/DummyTaskTrackerInstrumentation.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/DummyTaskTrackerInstrumentation.java deleted file mode 100644 index 82d4b83a34c..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/DummyTaskTrackerInstrumentation.java +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.File; - -/** - * Mock instrumentation class used in TaskTrackerInstrumentation tests. - * This class just records whether each instrumentation method was called. 
- */ -public class DummyTaskTrackerInstrumentation - extends TaskTrackerInstrumentation -{ - boolean completeTaskCalled = false; - boolean timedoutTaskCalled = false; - boolean taskFailedPingCalled = false; - boolean reportTaskLaunchCalled = false; - boolean reportTaskEndCalled = false; - boolean statusUpdateCalled = false; - - public DummyTaskTrackerInstrumentation(TaskTracker tt) { - super(tt); - } - - @Override - public void completeTask(TaskAttemptID t) { - completeTaskCalled = true; - } - - @Override - public void timedoutTask(TaskAttemptID t) { - timedoutTaskCalled = true; - } - - @Override - public void taskFailedPing(TaskAttemptID t) { - taskFailedPingCalled = true; - } - - @Override - public void reportTaskLaunch(TaskAttemptID t, File stdout, File stderr) { - reportTaskLaunchCalled = true; - } - - @Override - public void reportTaskEnd(TaskAttemptID t) { - reportTaskEndCalled = true; - } - - @Override - public void statusUpdate(Task t, TaskStatus s) { - statusUpdateCalled = true; - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/FakeObjectUtilities.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/FakeObjectUtilities.java deleted file mode 100644 index 545482c32f8..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/FakeObjectUtilities.java +++ /dev/null @@ -1,622 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.IOException; -import java.util.Collection; -import java.util.Iterator; - -import javax.security.auth.login.LoginException; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapred.TaskStatus.Phase; -import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.mapreduce.jobhistory.HistoryEvent; -import org.apache.hadoop.mapreduce.jobhistory.JobHistory; -import org.apache.hadoop.mapreduce.split.JobSplit; -import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo; - -/** - * Utilities used in unit test. 
- * - */ -public class FakeObjectUtilities { - - static final Log LOG = LogFactory.getLog(FakeObjectUtilities.class); - - private static String jtIdentifier = "test"; - private static int jobCounter; - - /** - * A Fake JobTracker class for use in Unit Tests - */ - static class FakeJobTracker extends JobTracker { - - int totalSlots; - private String[] trackers; - - FakeJobTracker(JobConf conf, Clock clock, String[] tts) throws IOException, - InterruptedException, LoginException { - super(conf, clock); - this.trackers = tts; - //initialize max{Map/Reduce} task capacities to twice the clustersize - totalSlots = trackers.length * 4; - } - @Override - public ClusterStatus getClusterStatus(boolean detailed) { - return new ClusterStatus( - taskTrackers().size() - getBlacklistedTrackerCount(), - getBlacklistedTrackerCount(), 0, 0, 0, totalSlots/2, totalSlots/2, - JobTrackerStatus.RUNNING, 0); - } - - public void setNumSlots(int totalSlots) { - this.totalSlots = totalSlots; - } - } - - static class FakeJobInProgress extends JobInProgress { - @SuppressWarnings("deprecation") - FakeJobInProgress(JobConf jobConf, JobTracker tracker) throws IOException { - super(new JobID(jtIdentifier, ++jobCounter), jobConf, tracker); - Path jobFile = new Path("Dummy"); - this.profile = new JobProfile(jobConf.getUser(), getJobID(), - jobFile.toString(), null, jobConf.getJobName(), - jobConf.getQueueName()); - this.jobHistory = new FakeJobHistory(); - } - - @Override - public synchronized void initTasks() throws IOException { - - TaskSplitMetaInfo[] taskSplitMetaInfo = createSplits(jobId); - numMapTasks = taskSplitMetaInfo.length; - createMapTasks(null, taskSplitMetaInfo); - nonRunningMapCache = createCache(taskSplitMetaInfo, maxLevel); - createReduceTasks(null); - tasksInited.set(true); - this.status.setRunState(JobStatus.RUNNING); - } - - @Override - TaskSplitMetaInfo [] createSplits(org.apache.hadoop.mapreduce.JobID jobId){ - TaskSplitMetaInfo[] splits = - new TaskSplitMetaInfo[numMapTasks]; - for (int i = 0; i < numMapTasks; i++) { - splits[i] = JobSplit.EMPTY_TASK_SPLIT; - } - return splits; - } - - @Override - protected void createMapTasks(String ignored, TaskSplitMetaInfo[] splits) { - maps = new TaskInProgress[numMapTasks]; - for (int i = 0; i < numMapTasks; i++) { - maps[i] = new TaskInProgress(getJobID(), "test", - splits[i], jobtracker, getJobConf(), this, i, 1); - } - } - - @Override - protected void createReduceTasks(String ignored) { - reduces = new TaskInProgress[numReduceTasks]; - for (int i = 0; i < numReduceTasks; i++) { - reduces[i] = new TaskInProgress(getJobID(), "test", - numMapTasks, i, - jobtracker, getJobConf(), this, 1); - nonRunningReduces.add(reduces[i]); - } - } - - private TaskAttemptID findTask(String trackerName, String trackerHost, - Collection nonRunningTasks, - Collection runningTasks, TaskType taskType) - throws IOException { - TaskInProgress tip = null; - Iterator iter = nonRunningTasks.iterator(); - //look for a non-running task first - while (iter.hasNext()) { - TaskInProgress t = iter.next(); - if (t.isRunnable() && !t.isRunning()) { - runningTasks.add(t); - iter.remove(); - tip = t; - break; - } - } - if (tip == null) { - if (getJobConf().getSpeculativeExecution()) { - tip = findSpeculativeTask(runningTasks, trackerName, trackerHost, - taskType); - } - } - if (tip != null) { - TaskAttemptID tId = tip.getTaskToRun(trackerName).getTaskID(); - if (tip.isMapTask()) { - scheduleMap(tip); - } else { - scheduleReduce(tip); - } - //Set it to RUNNING - makeRunning(tId, tip, trackerName); 
- return tId; - } - return null; - } - - public TaskAttemptID findMapTask(String trackerName) - throws IOException { - return findTask(trackerName, - JobInProgress.convertTrackerNameToHostName(trackerName), - nonLocalMaps, nonLocalRunningMaps, TaskType.MAP); - } - - public TaskAttemptID findReduceTask(String trackerName) - throws IOException { - return findTask(trackerName, - JobInProgress.convertTrackerNameToHostName(trackerName), - nonRunningReduces, runningReduces, TaskType.REDUCE); - } - - public void finishTask(TaskAttemptID taskId) { - TaskInProgress tip = jobtracker.taskidToTIPMap.get(taskId); - TaskStatus status = TaskStatus.createTaskStatus(tip.isMapTask(), taskId, - 1.0f, 1, TaskStatus.State.SUCCEEDED, "", "", - tip.machineWhereTaskRan(taskId), - tip.isMapTask() ? Phase.MAP : Phase.REDUCE, new Counters()); - updateTaskStatus(tip, status); - } - - private void makeRunning(TaskAttemptID taskId, TaskInProgress tip, - String taskTracker) { - addRunningTaskToTIP(tip, taskId, new TaskTrackerStatus(taskTracker, - JobInProgress.convertTrackerNameToHostName(taskTracker)), true); - - TaskStatus status = TaskStatus.createTaskStatus(tip.isMapTask(), taskId, - 0.0f, 1, TaskStatus.State.RUNNING, "", "", taskTracker, - tip.isMapTask() ? Phase.MAP : Phase.REDUCE, new Counters()); - updateTaskStatus(tip, status); - } - - public void progressMade(TaskAttemptID taskId, float progress) { - TaskInProgress tip = jobtracker.taskidToTIPMap.get(taskId); - TaskStatus status = TaskStatus.createTaskStatus(tip.isMapTask(), taskId, - progress, 1, TaskStatus.State.RUNNING, "", "", - tip.machineWhereTaskRan(taskId), - tip.isMapTask() ? Phase.MAP : Phase.REDUCE, new Counters()); - updateTaskStatus(tip, status); - } - - public void failTask(TaskAttemptID taskId) { - TaskInProgress tip = jobtracker.taskidToTIPMap.get(taskId); - TaskStatus status = TaskStatus.createTaskStatus(tip.isMapTask(), taskId, - 1.0f, 1, TaskStatus.State.FAILED, "", "", tip - .machineWhereTaskRan(taskId), tip.isMapTask() ? Phase.MAP - : Phase.REDUCE, new Counters()); - updateTaskStatus(tip, status); - } - - public void killTask(TaskAttemptID taskId) { - TaskInProgress tip = jobtracker.taskidToTIPMap.get(taskId); - TaskStatus status = TaskStatus.createTaskStatus(tip.isMapTask(), taskId, - 1.0f, 1, TaskStatus.State.KILLED, "", "", tip - .machineWhereTaskRan(taskId), tip.isMapTask() ? 
Phase.MAP - : Phase.REDUCE, new Counters()); - updateTaskStatus(tip, status); - } - - public void cleanUpMetrics() { - } - - public void setClusterSize(int clusterSize) { - super.setClusterSize(clusterSize); - } - } - - static short sendHeartBeat(JobTracker jt, TaskTrackerStatus status, - boolean initialContact, boolean acceptNewTasks, - String tracker, short responseId) - throws IOException { - if (status == null) { - status = new TaskTrackerStatus(tracker, - JobInProgress.convertTrackerNameToHostName(tracker)); - - } - jt.heartbeat(status, false, initialContact, acceptNewTasks, responseId); - return ++responseId ; - } - - static void establishFirstContact(JobTracker jt, String tracker) - throws IOException { - sendHeartBeat(jt, null, true, false, tracker, (short) 0); - } - - static class FakeTaskInProgress extends TaskInProgress { - - public FakeTaskInProgress(JobID jobId, String jobFile, int numMaps, - int partition, JobTracker jobTracker, JobConf conf, JobInProgress job, - int numSlotsRequired) { - super(jobId, jobFile, numMaps, partition, jobTracker, conf, job, - numSlotsRequired); - } - - public FakeTaskInProgress(JobID jobId, String jobFile, TaskSplitMetaInfo emptySplit, - JobTracker jobTracker, JobConf jobConf, - JobInProgress job, int partition, int numSlotsRequired) { - super(jobId, jobFile, emptySplit, jobTracker, jobConf, job, - partition, numSlotsRequired); - } - - @Override - synchronized boolean updateStatus(TaskStatus status) { - TaskAttemptID taskid = status.getTaskID(); - taskStatuses.put(taskid, status); - return false; - } - } - - static class FakeJobHistory extends JobHistory { - @Override - public void init(JobTracker jt, - JobConf conf, - String hostname, - long jobTrackerStartTime) throws IOException { } - - @Override - public void initDone(JobConf conf, FileSystem fs) throws IOException { } - - @Override - public void markCompleted(org.apache.hadoop.mapreduce.JobID id) - throws IOException { } - - @Override - public void shutDown() { } - - @Override - public void - logEvent(HistoryEvent event, org.apache.hadoop.mapreduce.JobID id) { } - - @Override - public void closeWriter(org.apache.hadoop.mapreduce.JobID id) { } - } - - static class FakeJobTrackerMetricsInst extends JobTrackerInstrumentation { - public FakeJobTrackerMetricsInst(JobTracker tracker, JobConf conf) { - super(tracker, conf); - } - - int numMapTasksLaunched = 0; - int numMapTasksCompleted = 0; - int numMapTasksFailed = 0; - int numReduceTasksLaunched = 0; - int numReduceTasksCompleted = 0; - int numReduceTasksFailed = 0; - int numJobsSubmitted = 0; - int numJobsCompleted = 0; - int numWaitingMaps = 0; - int numWaitingReduces = 0; - int numSpeculativeMaps = 0; - int numSpeculativeReduces = 0; - int numDataLocalMaps = 0; - int numRackLocalMaps = 0; - - //Cluster status fields. 
- volatile int numMapSlots = 0; - volatile int numReduceSlots = 0; - int numBlackListedMapSlots = 0; - int numBlackListedReduceSlots = 0; - - int numReservedMapSlots = 0; - int numReservedReduceSlots = 0; - int numOccupiedMapSlots = 0; - int numOccupiedReduceSlots = 0; - - int numJobsFailed = 0; - int numJobsKilled = 0; - - int numJobsPreparing = 0; - int numJobsRunning = 0; - - int numRunningMaps = 0; - int numRunningReduces = 0; - - int numMapTasksKilled = 0; - int numReduceTasksKilled = 0; - - int numTrackers = 0; - int numTrackersBlackListed = 0; - - int numTrackersDecommissioned = 0; - - long numHeartbeats = 0; - - @Override - public synchronized void launchMap(TaskAttemptID taskAttemptID) { - ++numMapTasksLaunched; - decWaitingMaps(taskAttemptID.getJobID(), 1); - } - - @Override - public synchronized void completeMap(TaskAttemptID taskAttemptID) { - ++numMapTasksCompleted; - } - - @Override - public synchronized void failedMap(TaskAttemptID taskAttemptID) { - ++numMapTasksFailed; - addWaitingMaps(taskAttemptID.getJobID(), 1); - } - - @Override - public synchronized void launchReduce(TaskAttemptID taskAttemptID) { - ++numReduceTasksLaunched; - decWaitingReduces(taskAttemptID.getJobID(), 1); - } - - @Override - public synchronized void completeReduce(TaskAttemptID taskAttemptID) { - ++numReduceTasksCompleted; - } - - @Override - public synchronized void failedReduce(TaskAttemptID taskAttemptID) { - ++numReduceTasksFailed; - addWaitingReduces(taskAttemptID.getJobID(), 1); - } - - @Override - public synchronized void submitJob(JobConf conf, JobID id) { - ++numJobsSubmitted; - } - - @Override - public synchronized void completeJob(JobConf conf, JobID id) { - ++numJobsCompleted; - } - - @Override - public synchronized void addWaitingMaps(JobID id, int task) { - numWaitingMaps += task; - } - - @Override - public synchronized void decWaitingMaps(JobID id, int task) { - numWaitingMaps -= task; - } - - @Override - public synchronized void addWaitingReduces(JobID id, int task) { - numWaitingReduces += task; - } - - @Override - public synchronized void decWaitingReduces(JobID id, int task){ - numWaitingReduces -= task; - } - - @Override - public void setMapSlots(int slots) { - numMapSlots = slots; - } - - @Override - public void setReduceSlots(int slots) { - numReduceSlots = slots; - } - - @Override - public synchronized void addBlackListedMapSlots(int slots){ - numBlackListedMapSlots += slots; - } - - @Override - public synchronized void decBlackListedMapSlots(int slots){ - numBlackListedMapSlots -= slots; - } - - @Override - public synchronized void addBlackListedReduceSlots(int slots){ - numBlackListedReduceSlots += slots; - } - - @Override - public synchronized void decBlackListedReduceSlots(int slots){ - numBlackListedReduceSlots -= slots; - } - - @Override - public synchronized void addReservedMapSlots(int slots) - { - numReservedMapSlots += slots; - } - - @Override - public synchronized void decReservedMapSlots(int slots) - { - numReservedMapSlots -= slots; - } - - @Override - public synchronized void addReservedReduceSlots(int slots) - { - numReservedReduceSlots += slots; - } - - @Override - public synchronized void decReservedReduceSlots(int slots) - { - numReservedReduceSlots -= slots; - } - - @Override - public synchronized void addOccupiedMapSlots(int slots) - { - numOccupiedMapSlots += slots; - } - - @Override - public synchronized void decOccupiedMapSlots(int slots) - { - numOccupiedMapSlots -= slots; - } - - @Override - public synchronized void addOccupiedReduceSlots(int slots) - 
{ - numOccupiedReduceSlots += slots; - } - - @Override - public synchronized void decOccupiedReduceSlots(int slots) - { - numOccupiedReduceSlots -= slots; - } - - @Override - public synchronized void failedJob(JobConf conf, JobID id) - { - numJobsFailed++; - } - - @Override - public synchronized void killedJob(JobConf conf, JobID id) - { - numJobsKilled++; - } - - @Override - public synchronized void addPrepJob(JobConf conf, JobID id) - { - numJobsPreparing++; - } - - @Override - public synchronized void decPrepJob(JobConf conf, JobID id) - { - numJobsPreparing--; - } - - @Override - public synchronized void addRunningJob(JobConf conf, JobID id) - { - numJobsRunning++; - } - - @Override - public synchronized void decRunningJob(JobConf conf, JobID id) - { - numJobsRunning--; - } - - @Override - public synchronized void addRunningMaps(int task) - { - numRunningMaps += task; - } - - @Override - public synchronized void decRunningMaps(int task) - { - numRunningMaps -= task; - } - - @Override - public synchronized void addRunningReduces(int task) - { - numRunningReduces += task; - } - - @Override - public synchronized void decRunningReduces(int task) - { - numRunningReduces -= task; - } - - @Override - public synchronized void killedMap(TaskAttemptID taskAttemptID) - { - numMapTasksKilled++; - } - - @Override - public synchronized void killedReduce(TaskAttemptID taskAttemptID) - { - numReduceTasksKilled++; - } - - @Override - public synchronized void addTrackers(int trackers) - { - numTrackers += trackers; - } - - @Override - public synchronized void decTrackers(int trackers) - { - numTrackers -= trackers; - } - - @Override - public synchronized void addBlackListedTrackers(int trackers) - { - numTrackersBlackListed += trackers; - } - - @Override - public synchronized void decBlackListedTrackers(int trackers) - { - numTrackersBlackListed -= trackers; - } - - @Override - public synchronized void setDecommissionedTrackers(int trackers) - { - numTrackersDecommissioned = trackers; - } - - @Override - public synchronized void heartbeat() { - ++numHeartbeats; - } - - @Override - public synchronized void speculateReduce(TaskAttemptID taskAttemptID) { - ++numSpeculativeReduces; - } - - @Override - public synchronized void speculateMap(TaskAttemptID taskAttemptID) { - ++numSpeculativeMaps; - } - - @Override - public synchronized void launchDataLocalMap(TaskAttemptID taskAttemptID) { - ++numDataLocalMaps; - } - - @Override - public synchronized void launchRackLocalMap(TaskAttemptID taskAttemptID) { - ++numRackLocalMaps; - } - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/HadoopTestCase.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/HadoopTestCase.java deleted file mode 100644 index c102e8f8626..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/HadoopTestCase.java +++ /dev/null @@ -1,214 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import junit.framework.TestCase; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapreduce.MRConfig; - -import java.io.File; -import java.io.IOException; - -/** - * Abstract Test case class to run MR in local or cluster mode and in local FS - * or DFS. - * - * The Hadoop instance is started and stopped on each test method. - * - * If using DFS the filesystem is reformated at each start (test method). - * - * Job Configurations should be created using a configuration returned by the - * 'createJobConf()' method. - */ -public abstract class HadoopTestCase extends TestCase { - public static final int LOCAL_MR = 1; - public static final int CLUSTER_MR = 2; - public static final int LOCAL_FS = 4; - public static final int DFS_FS = 8; - - private boolean localMR; - private boolean localFS; - - private int taskTrackers; - private int dataNodes; - - /** - * Creates a testcase for local or cluster MR using DFS. - * - * The DFS will be formatted regardless if there was one or not before in the - * given location. - * - * @param mrMode indicates if the MR should be local (LOCAL_MR) or cluster - * (CLUSTER_MR) - * @param fsMode indicates if the FS should be local (LOCAL_FS) or DFS (DFS_FS) - * - * local FS when using relative PATHs) - * - * @param taskTrackers number of task trackers to start when using cluster - * - * @param dataNodes number of data nodes to start when using DFS - * - * @throws IOException thrown if the base directory cannot be set. - */ - public HadoopTestCase(int mrMode, int fsMode, int taskTrackers, int dataNodes) - throws IOException { - if (mrMode != LOCAL_MR && mrMode != CLUSTER_MR) { - throw new IllegalArgumentException( - "Invalid MapRed mode, must be LOCAL_MR or CLUSTER_MR"); - } - if (fsMode != LOCAL_FS && fsMode != DFS_FS) { - throw new IllegalArgumentException( - "Invalid FileSystem mode, must be LOCAL_FS or DFS_FS"); - } - if (taskTrackers < 1) { - throw new IllegalArgumentException( - "Invalid taskTrackers value, must be greater than 0"); - } - if (dataNodes < 1) { - throw new IllegalArgumentException( - "Invalid dataNodes value, must be greater than 0"); - } - localMR = (mrMode == LOCAL_MR); - localFS = (fsMode == LOCAL_FS); - /* - JobConf conf = new JobConf(); - fsRoot = conf.get("hadoop.tmp.dir"); - - if (fsRoot == null) { - throw new IllegalArgumentException( - "hadoop.tmp.dir is not defined"); - } - - fsRoot = fsRoot.replace(' ', '+') + "/fs"; - - File file = new File(fsRoot); - if (!file.exists()) { - if (!file.mkdirs()) { - throw new RuntimeException("Could not create FS base path: " + file); - } - } - */ - this.taskTrackers = taskTrackers; - this.dataNodes = dataNodes; - } - - /** - * Indicates if the MR is running in local or cluster mode. - * - * @return returns TRUE if the MR is running locally, FALSE if running in - * cluster mode. - */ - public boolean isLocalMR() { - return localMR; - } - - /** - * Indicates if the filesystem is local or DFS. 
- * - * @return returns TRUE if the filesystem is local, FALSE if it is DFS. - */ - public boolean isLocalFS() { - return localFS; - } - - - private MiniDFSCluster dfsCluster = null; - private MiniMRCluster mrCluster = null; - private FileSystem fileSystem = null; - - /** - * Creates Hadoop instance based on constructor configuration before - * a test case is run. - * - * @throws Exception - */ - protected void setUp() throws Exception { - super.setUp(); - if (localFS) { - fileSystem = FileSystem.getLocal(new JobConf()); - } - else { - dfsCluster = new MiniDFSCluster(new JobConf(), dataNodes, true, null); - fileSystem = dfsCluster.getFileSystem(); - } - if (localMR) { - } - else { - //noinspection deprecation - mrCluster = new MiniMRCluster(taskTrackers, fileSystem.getUri().toString(), 1); - } - } - - /** - * Destroys Hadoop instance based on constructor configuration after - * a test case is run. - * - * @throws Exception - */ - protected void tearDown() throws Exception { - try { - if (mrCluster != null) { - mrCluster.shutdown(); - } - } - catch (Exception ex) { - System.out.println(ex); - } - try { - if (dfsCluster != null) { - dfsCluster.shutdown(); - } - } - catch (Exception ex) { - System.out.println(ex); - } - super.tearDown(); - } - - /** - * Returns the Filesystem in use. - * - * TestCases should use this Filesystem as it - * is properly configured with the workingDir for relative PATHs. - * - * @return the filesystem used by Hadoop. - */ - protected FileSystem getFileSystem() { - return fileSystem; - } - - /** - * Returns a job configuration preconfigured to run against the Hadoop - * managed by the testcase. - * @return configuration that works on the testcase Hadoop instance - */ - protected JobConf createJobConf() { - if (localMR) { - JobConf conf = new JobConf(); - conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME); - return conf; - } - else { - return mrCluster.createJobConf(); - } - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/MiniMRCluster.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/MiniMRCluster.java deleted file mode 100644 index 86980bb73d6..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/MiniMRCluster.java +++ /dev/null @@ -1,778 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import java.io.File; -import java.io.IOException; -import java.security.PrivilegedExceptionAction; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Date; -import java.util.Iterator; -import java.util.List; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; -import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; -import org.apache.hadoop.net.DNSToSwitchMapping; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.net.NetworkTopology; -import org.apache.hadoop.net.StaticMapping; -import org.apache.hadoop.security.AccessControlException; -import org.apache.hadoop.security.UserGroupInformation; - -/** - * This class creates a single-process Map-Reduce cluster for junit testing. - * One thread is created for each server. - */ -public class MiniMRCluster { - private static final Log LOG = LogFactory.getLog(MiniMRCluster.class); - - private Thread jobTrackerThread; - private JobTrackerRunner jobTracker; - - private int jobTrackerPort = 0; - private int taskTrackerPort = 0; - private int jobTrackerInfoPort = 0; - private int numTaskTrackers; - - private List taskTrackerList = new ArrayList(); - private List taskTrackerThreadList = new ArrayList(); - - private String namenode; - private UserGroupInformation ugi = null; - private JobConf conf; - private int numTrackerToExclude; - - private JobConf job; - private Clock clock; - - /** - * An inner class that runs a job tracker. - */ - public class JobTrackerRunner implements Runnable { - private JobTracker tracker = null; - private volatile boolean isActive = true; - - JobConf jc = null; - Clock clock = JobTracker.DEFAULT_CLOCK; - - public JobTrackerRunner(JobConf conf) { - jc = conf; - } - - public JobTrackerRunner(JobConf conf, Clock clock) { - jc = conf; - this.clock = clock; - } - - public boolean isUp() { - return (tracker != null); - } - - public boolean isActive() { - return isActive; - } - - public int getJobTrackerPort() { - return tracker.getTrackerPort(); - } - - public int getJobTrackerInfoPort() { - return tracker.getInfoPort(); - } - - public JobTracker getJobTracker() { - return tracker; - } - - /** - * Create the job tracker and run it. - */ - public void run() { - try { - jc = (jc == null) ? createJobConf() : createJobConf(jc); - File f = new File("build/test/mapred/local").getAbsoluteFile(); - jc.set(MRConfig.LOCAL_DIR, f.getAbsolutePath()); - jc.setClass("topology.node.switch.mapping.impl", - StaticMapping.class, DNSToSwitchMapping.class); - final String id = - new SimpleDateFormat("yyyyMMddHHmmssSSS").format(new Date()); - if (ugi == null) { - ugi = UserGroupInformation.getLoginUser(); - } - tracker = ugi.doAs(new PrivilegedExceptionAction() { - public JobTracker run() throws InterruptedException, IOException { - return JobTracker.startTracker(jc, clock, id); - } - }); - tracker.offerService(); - } catch (Throwable e) { - LOG.error("Job tracker crashed", e); - isActive = false; - } - } - - /** - * Shutdown the job tracker and wait for it to finish. 
- */ - public void shutdown() { - try { - if (tracker != null) { - tracker.stopTracker(); - } - } catch (Throwable e) { - LOG.error("Problem shutting down job tracker", e); - } - isActive = false; - } - } - - /** - * An inner class to run the task tracker. - */ - class TaskTrackerRunner implements Runnable { - volatile TaskTracker tt; - int trackerId; - // the localDirs for this taskTracker - String[] localDirs; - volatile boolean isInitialized = false; - volatile boolean isDead = false; - int numDir; - - TaskTrackerRunner(int trackerId, int numDir, String hostname, - JobConf cfg) - throws IOException { - this.trackerId = trackerId; - this.numDir = numDir; - localDirs = new String[numDir]; - final JobConf conf; - if (cfg == null) { - conf = createJobConf(); - } else { - conf = createJobConf(cfg); - } - if (hostname != null) { - conf.set(TTConfig.TT_HOST_NAME, hostname); - } - conf.set(TTConfig.TT_HTTP_ADDRESS, "0.0.0.0:0"); - conf.set(TTConfig.TT_REPORT_ADDRESS, - "127.0.0.1:" + taskTrackerPort); - File localDirBase = - new File(conf.get(MRConfig.LOCAL_DIR)).getAbsoluteFile(); - localDirBase.mkdirs(); - StringBuffer localPath = new StringBuffer(); - for(int i=0; i < numDir; ++i) { - File ttDir = new File(localDirBase, - Integer.toString(trackerId) + "_" + i); - if (!ttDir.mkdirs()) { - if (!ttDir.isDirectory()) { - throw new IOException("Mkdirs failed to create " + ttDir); - } - } - localDirs[i] = ttDir.toString(); - if (i != 0) { - localPath.append(","); - } - localPath.append(localDirs[i]); - } - conf.set(MRConfig.LOCAL_DIR, localPath.toString()); - LOG.info(MRConfig.LOCAL_DIR + " is " + localPath); - try { - tt = ugi.doAs(new PrivilegedExceptionAction() { - public TaskTracker run() throws InterruptedException, IOException { - return createTaskTracker(conf); - } - }); - isInitialized = true; - } catch (Throwable e) { - isDead = true; - tt = null; - LOG.error("task tracker " + trackerId + " crashed", e); - } - } - - /** - * Creates a default {@link TaskTracker} using the conf passed. - */ - TaskTracker createTaskTracker(JobConf conf) - throws IOException, InterruptedException { - return new TaskTracker(conf); - } - - /** - * Create and run the task tracker. - */ - public void run() { - try { - if (tt != null) { - tt.run(); - } - } catch (Throwable e) { - isDead = true; - tt = null; - LOG.error("task tracker " + trackerId + " crashed", e); - } - } - - /** - * Get the local dir for this TaskTracker. - * This is there so that we do not break - * previous tests. - * @return the absolute pathname - */ - public String getLocalDir() { - return localDirs[0]; - } - - public String[] getLocalDirs(){ - return localDirs; - } - - public TaskTracker getTaskTracker() { - return tt; - } - - /** - * Shut down the server and wait for it to finish. 
- */ - public void shutdown() { - if (tt != null) { - try { - tt.shutdown(); - } catch (Throwable e) { - LOG.error("task tracker " + trackerId + " could not shut down", - e); - } - } - } - } - - /** - * Get the local directory for the Nth task tracker - * @param taskTracker the index of the task tracker to check - * @return the absolute pathname of the local dir - */ - public String getTaskTrackerLocalDir(int taskTracker) { - return (taskTrackerList.get(taskTracker)).getLocalDir(); - } - - /** - * Get all the local directories for the Nth task tracker - * @param taskTracker the index of the task tracker to check - * @return array of local dirs - */ - public String[] getTaskTrackerLocalDirs(int taskTracker) { - return (taskTrackerList.get(taskTracker)).getLocalDirs(); - } - - public JobTrackerRunner getJobTrackerRunner() { - return jobTracker; - } - - TaskTrackerRunner getTaskTrackerRunner(int id) { - return taskTrackerList.get(id); - } - /** - * Get the number of task trackers in the cluster - */ - public int getNumTaskTrackers() { - return taskTrackerList.size(); - } - - /** - * Sets inline cleanup threads to all task trackers sothat deletion of - * temporary files/dirs happen inline - */ - public void setInlineCleanupThreads() { - for (int i = 0; i < getNumTaskTrackers(); i++) { - getTaskTrackerRunner(i).getTaskTracker().setCleanupThread( - new UtilsForTests.InlineCleanupQueue()); - } - } - - /** - * Wait until the system is idle. - */ - public void waitUntilIdle() { - waitTaskTrackers(); - - JobClient client; - try { - client = new JobClient(job); - ClusterStatus status = client.getClusterStatus(); - while(status.getTaskTrackers() + numTrackerToExclude - < taskTrackerList.size()) { - for(TaskTrackerRunner runner : taskTrackerList) { - if(runner.isDead) { - throw new RuntimeException("TaskTracker is dead"); - } - } - Thread.sleep(1000); - status = client.getClusterStatus(); - } - } - catch (IOException ex) { - throw new RuntimeException(ex); - } - catch (InterruptedException ex) { - throw new RuntimeException(ex); - } - - } - - private void waitTaskTrackers() { - for(Iterator itr= taskTrackerList.iterator(); itr.hasNext();) { - TaskTrackerRunner runner = itr.next(); - while (!runner.isDead && (!runner.isInitialized || !runner.tt.isIdle())) { - if (!runner.isInitialized) { - LOG.info("Waiting for task tracker to start."); - } else { - LOG.info("Waiting for task tracker " + runner.tt.getName() + - " to be idle."); - } - try { - Thread.sleep(1000); - } catch (InterruptedException ie) {} - } - } - } - - /** - * Get the actual rpc port used. 
- */ - public int getJobTrackerPort() { - return jobTrackerPort; - } - - public JobConf createJobConf() { - return createJobConf(new JobConf()); - } - - public JobConf createJobConf(JobConf conf) { - if(conf == null) { - conf = new JobConf(); - } - return configureJobConf(conf, namenode, jobTrackerPort, jobTrackerInfoPort, - ugi); - } - - static JobConf configureJobConf(JobConf conf, String namenode, - int jobTrackerPort, int jobTrackerInfoPort, - UserGroupInformation ugi) { - JobConf result = new JobConf(conf); - FileSystem.setDefaultUri(result, namenode); - result.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME); - result.set(JTConfig.JT_IPC_ADDRESS, "localhost:"+jobTrackerPort); - result.set(JTConfig.JT_HTTP_ADDRESS, - "127.0.0.1:" + jobTrackerInfoPort); - // for debugging have all task output sent to the test output - JobClient.setTaskOutputFilter(result, JobClient.TaskStatusFilter.ALL); - return result; - } - - /** - * Create the config and the cluster. - * @param numTaskTrackers no. of tasktrackers in the cluster - * @param namenode the namenode - * @param numDir no. of directories - * @throws IOException - */ - public MiniMRCluster(int numTaskTrackers, String namenode, int numDir, - String[] racks, String[] hosts) throws IOException { - this(0, 0, numTaskTrackers, namenode, numDir, racks, hosts); - } - - /** - * Create the config and the cluster. - * @param numTaskTrackers no. of tasktrackers in the cluster - * @param namenode the namenode - * @param numDir no. of directories - * @param racks Array of racks - * @param hosts Array of hosts in the corresponding racks - * @param conf Default conf for the jobtracker - * @throws IOException - */ - public MiniMRCluster(int numTaskTrackers, String namenode, int numDir, - String[] racks, String[] hosts, JobConf conf) - throws IOException { - this(0, 0, numTaskTrackers, namenode, numDir, racks, hosts, null, conf); - } - - /** - * Create the config and the cluster. - * @param numTaskTrackers no. of tasktrackers in the cluster - * @param namenode the namenode - * @param numDir no. 
of directories - * @throws IOException - */ - public MiniMRCluster(int numTaskTrackers, String namenode, int numDir) - throws IOException { - this(0, 0, numTaskTrackers, namenode, numDir); - } - - public MiniMRCluster(int jobTrackerPort, - int taskTrackerPort, - int numTaskTrackers, - String namenode, - int numDir) - throws IOException { - this(jobTrackerPort, taskTrackerPort, numTaskTrackers, namenode, - numDir, null); - } - - public MiniMRCluster(int jobTrackerPort, - int taskTrackerPort, - int numTaskTrackers, - String namenode, - int numDir, - String[] racks) throws IOException { - this(jobTrackerPort, taskTrackerPort, numTaskTrackers, namenode, - numDir, racks, null); - } - - public MiniMRCluster(int jobTrackerPort, - int taskTrackerPort, - int numTaskTrackers, - String namenode, - int numDir, - String[] racks, String[] hosts) throws IOException { - this(jobTrackerPort, taskTrackerPort, numTaskTrackers, namenode, - numDir, racks, hosts, null); - } - - public MiniMRCluster(int jobTrackerPort, int taskTrackerPort, - int numTaskTrackers, String namenode, - int numDir, String[] racks, String[] hosts, UserGroupInformation ugi - ) throws IOException { - this(jobTrackerPort, taskTrackerPort, numTaskTrackers, namenode, - numDir, racks, hosts, ugi, null); - } - - public MiniMRCluster(int jobTrackerPort, int taskTrackerPort, - int numTaskTrackers, String namenode, - int numDir, String[] racks, String[] hosts, UserGroupInformation ugi, - JobConf conf) throws IOException { - this(jobTrackerPort, taskTrackerPort, numTaskTrackers, namenode, numDir, - racks, hosts, ugi, conf, 0); - } - - public MiniMRCluster(int jobTrackerPort, int taskTrackerPort, - int numTaskTrackers, String namenode, - int numDir, String[] racks, String[] hosts, UserGroupInformation ugi, - JobConf conf, int numTrackerToExclude) throws IOException { - this(jobTrackerPort, taskTrackerPort, numTaskTrackers, namenode, numDir, - racks, hosts, ugi, conf, numTrackerToExclude, new Clock()); - } - - public MiniMRCluster(int jobTrackerPort, int taskTrackerPort, - int numTaskTrackers, String namenode, - int numDir, String[] racks, String[] hosts, UserGroupInformation ugi, - JobConf conf, int numTrackerToExclude, Clock clock) throws IOException { - if (racks != null && racks.length < numTaskTrackers) { - LOG.error("Invalid number of racks specified. 
It should be at least " + - "equal to the number of tasktrackers"); - shutdown(); - } - if (hosts != null && numTaskTrackers > hosts.length ) { - throw new IllegalArgumentException( "The length of hosts [" + hosts.length - + "] is less than the number of tasktrackers [" + numTaskTrackers + "]."); - } - - //Generate rack names if required - if (racks == null) { - System.out.println("Generating rack names for tasktrackers"); - racks = new String[numTaskTrackers]; - for (int i=0; i < racks.length; ++i) { - racks[i] = NetworkTopology.DEFAULT_RACK; - } - } - - //Generate some hostnames if required - if (hosts == null) { - System.out.println("Generating host names for tasktrackers"); - hosts = new String[numTaskTrackers]; - for (int i = 0; i < numTaskTrackers; i++) { - hosts[i] = "host" + i + ".foo.com"; - } - } - this.jobTrackerPort = jobTrackerPort; - this.taskTrackerPort = taskTrackerPort; - this.jobTrackerInfoPort = 0; - this.numTaskTrackers = 0; - this.namenode = namenode; - this.ugi = ugi; - this.conf = conf; // this is the conf the mr starts with - this.numTrackerToExclude = numTrackerToExclude; - this.clock = clock; - - // start the jobtracker - startJobTracker(); - - // Create the TaskTrackers - for (int idx = 0; idx < numTaskTrackers; idx++) { - String rack = null; - String host = null; - if (racks != null) { - rack = racks[idx]; - } - if (hosts != null) { - host = hosts[idx]; - } - - startTaskTracker(host, rack, idx, numDir); - } - - this.job = createJobConf(conf); - waitUntilIdle(); - } - - public UserGroupInformation getUgi() { - return ugi; - } - - /** - * Get the task completion events - */ - public TaskCompletionEvent[] getTaskCompletionEvents(JobID id, int from, - int max) - throws IOException { - return jobTracker.getJobTracker().getTaskCompletionEvents(id, from, max); - } - - /** - * Change the job's priority - * - * @throws IOException - * @throws AccessControlException - */ - public void setJobPriority(JobID jobId, JobPriority priority) - throws AccessControlException, IOException { - jobTracker.getJobTracker().setJobPriority(jobId, priority); - } - - /** - * Get the job's priority - */ - public JobPriority getJobPriority(JobID jobId) { - return jobTracker.getJobTracker().getJob(jobId).getPriority(); - } - - /** - * Get the job finish time - */ - public long getJobFinishTime(JobID jobId) { - return jobTracker.getJobTracker().getJob(jobId).getFinishTime(); - } - - /** - * Init the job - */ - public void initializeJob(JobID jobId) throws IOException { - JobInProgress job = jobTracker.getJobTracker().getJob(jobId); - jobTracker.getJobTracker().initJob(job); - } - - /** - * Get the events list at the tasktracker - */ - public MapTaskCompletionEventsUpdate - getMapTaskCompletionEventsUpdates(int index, JobID jobId, int max) - throws IOException { - String jtId = jobTracker.getJobTracker().getTrackerIdentifier(); - TaskAttemptID dummy = - new TaskAttemptID(jtId, jobId.getId(), TaskType.REDUCE, 0, 0); - return taskTrackerList.get(index).getTaskTracker() - .getMapCompletionEvents(jobId, 0, max, - dummy); - } - - /** - * Get jobtracker conf - */ - public JobConf getJobTrackerConf() { - return this.conf; - } - - - public int getFaultCount(String hostName) { - return jobTracker.getJobTracker().getFaultCount(hostName); - } - - /** - * Start the jobtracker. 
- */ - public void startJobTracker() { - startJobTracker(true); - } - - public void startJobTracker(boolean wait) { - // Create the JobTracker - jobTracker = new JobTrackerRunner(conf, clock); - jobTrackerThread = new Thread(jobTracker); - - jobTrackerThread.start(); - - if (!wait) { - return; - } - - while (jobTracker.isActive() && !jobTracker.isUp()) { - try { // let daemons get started - Thread.sleep(1000); - } catch(InterruptedException e) { - } - } - - // is the jobtracker has started then wait for it to init - ClusterStatus status = null; - if (jobTracker.isUp()) { - status = jobTracker.getJobTracker().getClusterStatus(false); - while (jobTracker.isActive() && status.getJobTrackerStatus() - == JobTrackerStatus.INITIALIZING) { - try { - LOG.info("JobTracker still initializing. Waiting."); - Thread.sleep(1000); - } catch(InterruptedException e) {} - status = jobTracker.getJobTracker().getClusterStatus(false); - } - } - - if (!jobTracker.isActive()) { - // return if jobtracker has crashed - return; - } - - // Set the configuration for the task-trackers - this.jobTrackerPort = jobTracker.getJobTrackerPort(); - this.jobTrackerInfoPort = jobTracker.getJobTrackerInfoPort(); - } - - /** - * Kill the jobtracker. - */ - public void stopJobTracker() { - //jobTracker.exit(-1); - jobTracker.shutdown(); - - jobTrackerThread.interrupt(); - try { - jobTrackerThread.join(); - } catch (InterruptedException ex) { - LOG.error("Problem waiting for job tracker to finish", ex); - } - } - - /** - * Kill the tasktracker. - */ - public void stopTaskTracker(int id) { - TaskTrackerRunner tracker = taskTrackerList.remove(id); - tracker.shutdown(); - - Thread thread = taskTrackerThreadList.remove(id); - - try { - thread.join(); - // This will break the wait until idle loop - tracker.isDead = true; - --numTaskTrackers; - } catch (InterruptedException ex) { - LOG.error("Problem waiting for task tracker to finish", ex); - } - } - - /** - * Start the tasktracker. - */ - public void startTaskTracker(String host, String rack, int idx, int numDir) - throws IOException { - if (rack != null) { - StaticMapping.addNodeToRack(host, rack); - } - if (host != null) { - NetUtils.addStaticResolution(host, "localhost"); - } - TaskTrackerRunner taskTracker; - taskTracker = new TaskTrackerRunner(idx, numDir, host, conf); - - addTaskTracker(taskTracker); - } - - /** - * Add a task-tracker to the Mini-MR cluster. - */ - void addTaskTracker(TaskTrackerRunner taskTracker) { - Thread taskTrackerThread = new Thread(taskTracker); - taskTrackerList.add(taskTracker); - taskTrackerThreadList.add(taskTrackerThread); - taskTrackerThread.start(); - ++numTaskTrackers; - } - - /** - * Get the tasktrackerID in MiniMRCluster with given trackerName. - */ - int getTaskTrackerID(String trackerName) { - for (int id=0; id < numTaskTrackers; id++) { - if (taskTrackerList.get(id).getTaskTracker().getName().equals( - trackerName)) { - return id; - } - } - return -1; - } - - /** - * Shut down the servers. 
- */ - public void shutdown() { - try { - waitTaskTrackers(); - for (int idx = 0; idx < numTaskTrackers; idx++) { - TaskTrackerRunner taskTracker = taskTrackerList.get(idx); - Thread taskTrackerThread = taskTrackerThreadList.get(idx); - taskTracker.shutdown(); - try { - taskTrackerThread.join(); - } catch (InterruptedException ex) { - LOG.error("Problem shutting down task tracker", ex); - } - } - stopJobTracker(); - } finally { - File configDir = new File("build", "minimr"); - File siteFile = new File(configDir, "mapred-site.xml"); - siteFile.delete(); - } - } - - public static void main(String[] args) throws IOException { - LOG.info("Bringing up Jobtracker and tasktrackers."); - MiniMRCluster mr = new MiniMRCluster(4, "file:///", 1); - LOG.info("JobTracker and TaskTrackers are up."); - mr.shutdown(); - LOG.info("JobTracker and TaskTrackers brought down."); - } -} - diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/NotificationTestCase.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/NotificationTestCase.java deleted file mode 100644 index 026edfbddb0..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/NotificationTestCase.java +++ /dev/null @@ -1,224 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import org.mortbay.jetty.Server; -import org.mortbay.jetty.servlet.Context; -import org.mortbay.jetty.servlet.ServletHolder; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.IntWritable; -import org.apache.hadoop.mapreduce.MapReduceTestUtil; - -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import javax.servlet.http.HttpServlet; -import javax.servlet.ServletException; -import java.io.IOException; -import java.io.DataOutputStream; - -/** - * Base class to test Job end notification in local and cluster mode. - * - * Starts up hadoop on Local or Cluster mode (by extending of the - * HadoopTestCase class) and it starts a servlet engine that hosts - * a servlet that will receive the notification of job finalization. - * - * The notification servlet returns a HTTP 400 the first time is called - * and a HTTP 200 the second time, thus testing retry. 
- *
- * In both cases local file system is used (this is irrelevant for
- *  the tested functionality)
- *
- *
- */
-public abstract class NotificationTestCase extends HadoopTestCase {
-
-  protected NotificationTestCase(int mode) throws IOException {
-    super(mode, HadoopTestCase.LOCAL_FS, 1, 1);
-  }
-
-  private int port;
-  private String contextPath = "/notification";
-  private String servletPath = "/mapred";
-  private Server webServer;
-
-  private void startHttpServer() throws Exception {
-
-    // Create the webServer
-    if (webServer != null) {
-      webServer.stop();
-      webServer = null;
-    }
-    webServer = new Server(0);
-
-    Context context = new Context(webServer, contextPath);
-
-    // create servlet handler
-    context.addServlet(new ServletHolder(new NotificationServlet()),
-                       servletPath);
-
-    // Start webServer
-    webServer.start();
-    port = webServer.getConnectors()[0].getLocalPort();
-
-  }
-
-  private void stopHttpServer() throws Exception {
-    if (webServer != null) {
-      webServer.stop();
-      webServer.destroy();
-      webServer = null;
-    }
-  }
-
-  public static class NotificationServlet extends HttpServlet {
-    public static int counter = 0;
-    private static final long serialVersionUID = 1L;
-
-    protected void doGet(HttpServletRequest req, HttpServletResponse res)
-      throws ServletException, IOException {
-      switch (counter) {
-        case 0:
-        {
-          assertTrue(req.getQueryString().contains("SUCCEEDED"));
-        }
-        break;
-        case 2:
-        {
-          assertTrue(req.getQueryString().contains("KILLED"));
-        }
-        break;
-        case 4:
-        {
-          assertTrue(req.getQueryString().contains("FAILED"));
-        }
-        break;
-      }
-      if (counter % 2 == 0) {
-        res.sendError(HttpServletResponse.SC_BAD_REQUEST, "forcing error");
-      }
-      else {
-        res.setStatus(HttpServletResponse.SC_OK);
-      }
-      counter++;
-    }
-  }
-
-  private String getNotificationUrlTemplate() {
-    return "http://localhost:" + port + contextPath + servletPath +
-      "?jobId=$jobId&jobStatus=$jobStatus";
-  }
-
-  protected JobConf createJobConf() {
-    JobConf conf = super.createJobConf();
-    conf.setJobEndNotificationURI(getNotificationUrlTemplate());
-    conf.setInt(JobContext.MR_JOB_END_RETRY_ATTEMPTS, 3);
-    conf.setInt(JobContext.MR_JOB_END_RETRY_INTERVAL, 200);
-    return conf;
-  }
-
-
-  protected void setUp() throws Exception {
-    super.setUp();
-    startHttpServer();
-  }
-
-  protected void tearDown() throws Exception {
-    stopHttpServer();
-    super.tearDown();
-  }
-
-  public void testMR() throws Exception {
-    System.out.println(launchWordCount(this.createJobConf(),
-                                       "a b c d e f g h", 1, 1));
-    Thread.sleep(2000);
-    assertEquals(2, NotificationServlet.counter);
-
-    Path inDir = new Path("notificationjob/input");
-    Path outDir = new Path("notificationjob/output");
-
-    // Hack for local FS that does not have the concept of a 'mounting point'
-    if (isLocalFS()) {
-      String localPathRoot = System.getProperty("test.build.data","/tmp")
-        .toString().replace(' ', '+');;
-      inDir = new Path(localPathRoot, inDir);
-      outDir = new Path(localPathRoot, outDir);
-    }
-
-    // run a job with KILLED status
-    System.out.println(UtilsForTests.runJobKill(this.createJobConf(), inDir,
-                                                outDir).getID());
-    Thread.sleep(2000);
-    assertEquals(4, NotificationServlet.counter);
-
-    // run a job with FAILED status
-    System.out.println(UtilsForTests.runJobFail(this.createJobConf(), inDir,
-                                                outDir).getID());
-    Thread.sleep(2000);
-    assertEquals(6, NotificationServlet.counter);
-  }
-
-  private String launchWordCount(JobConf conf,
-                                 String input,
-                                 int numMaps,
-                                 int numReduces) throws IOException {
-    Path inDir = new Path("testing/wc/input");
-    Path outDir = new Path("testing/wc/output");
-
-    // Hack for local FS that does not have the concept of a 'mounting point'
-    if (isLocalFS()) {
-      String localPathRoot = System.getProperty("test.build.data","/tmp")
-        .toString().replace(' ', '+');;
-      inDir = new Path(localPathRoot, inDir);
-      outDir = new Path(localPathRoot, outDir);
-    }
-
-    FileSystem fs = FileSystem.get(conf);
-    fs.delete(outDir, true);
-    if (!fs.mkdirs(inDir)) {
-      throw new IOException("Mkdirs failed to create " + inDir.toString());
-    }
-    {
-      DataOutputStream file = fs.create(new Path(inDir, "part-0"));
-      file.writeBytes(input);
-      file.close();
-    }
-    conf.setJobName("wordcount");
-    conf.setInputFormat(TextInputFormat.class);
-
-    // the keys are words (strings)
-    conf.setOutputKeyClass(Text.class);
-    // the values are counts (ints)
-    conf.setOutputValueClass(IntWritable.class);
-
-    conf.setMapperClass(WordCount.MapClass.class);
-    conf.setCombinerClass(WordCount.Reduce.class);
-    conf.setReducerClass(WordCount.Reduce.class);
-
-    FileInputFormat.setInputPaths(conf, inDir);
-    FileOutputFormat.setOutputPath(conf, outDir);
-    conf.setNumMapTasks(numMaps);
-    conf.setNumReduceTasks(numReduces);
-    JobClient.runJob(conf);
-    return MapReduceTestUtil.readOutput(outDir, conf);
-  }
-
-}
diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/QueueManagerTestUtils.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/QueueManagerTestUtils.java
deleted file mode 100644
index 4cb0fee616c..00000000000
--- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/QueueManagerTestUtils.java
+++ /dev/null
@@ -1,358 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred;
-
-//import org.apache.hadoop.classification.InterfaceAudience.Private;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.Cluster;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.MRConfig;
-import org.apache.hadoop.mapreduce.QueueState;
-import org.apache.hadoop.mapreduce.SleepJob;
-import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.w3c.dom.Document;
-import org.w3c.dom.Element;
-import javax.xml.parsers.DocumentBuilderFactory;
-import javax.xml.transform.TransformerException;
-import javax.xml.transform.Transformer;
-import javax.xml.transform.TransformerFactory;
-import javax.xml.transform.OutputKeys;
-import javax.xml.transform.stream.StreamResult;
-import javax.xml.transform.dom.DOMSource;
-
-import java.security.PrivilegedExceptionAction;
-import java.util.Properties;
-import java.util.Set;
-import java.io.File;
-import java.io.IOException;
-
-//@Private
-public class QueueManagerTestUtils {
-  /**
-   * Queue-configuration file for tests that start a cluster and wish to modify
-   * the queue configuration. This file is always in the unit tests classpath,
-   * so QueueManager started through JobTracker will automatically pick this up.
-   */
-  public static final String QUEUES_CONFIG_FILE_PATH = new File(System
-      .getProperty("test.build.extraconf", "build/test/extraconf"),
-      QueueManager.QUEUE_CONF_FILE_NAME).getAbsolutePath();
-
-  private static final Log LOG = LogFactory.getLog(QueueManagerTestUtils.class);
-
-  /**
-   * Create and return a new instance of a DOM Document object to build a queue
-   * tree with.
-   *
-   * @return the created {@link Document}
-   * @throws Exception
-   */
-  public static Document createDocument() throws Exception {
-    Document doc = DocumentBuilderFactory.newInstance().newDocumentBuilder()
-        .newDocument();
-    return doc;
-  }
-
-  public static void createSimpleDocument(Document doc) throws Exception {
-    Element queues = createQueuesNode(doc);
-
-    // Create parent level queue q1.
-    Element q1 = createQueue(doc, "q1");
-    Properties props = new Properties();
-    props.setProperty("capacity", "10");
-    props.setProperty("maxCapacity", "35");
-    q1.appendChild(createProperties(doc, props));
-    queues.appendChild(q1);
-
-    // Create another parent level p1
-    Element p1 = createQueue(doc, "p1");
-
-    // append child p11 to p1
-    p1.appendChild(createQueue(doc, "p11"));
-
-    Element p12 = createQueue(doc, "p12");
-
-    p12.appendChild(createState(doc, QueueState.STOPPED.getStateName()));
-    p12.appendChild(createAcls(doc,
-        QueueConfigurationParser.ACL_SUBMIT_JOB_TAG, "u1"));
-    p12.appendChild(createAcls(doc,
-        QueueConfigurationParser.ACL_ADMINISTER_JOB_TAG, "u2"));
-
-    // append p12 to p1.
-    p1.appendChild(p12);
-
-    queues.appendChild(p1);
-  }
-
-  static void createSimpleDocumentWithAcls(Document doc) {
-    Element queues = createQueuesNode(doc);
-
-    // Create parent level queue q1.
- Element q1 = createQueue(doc, "q1"); - Properties props = new Properties(); - props.setProperty("capacity", "10"); - props.setProperty("maxCapacity", "35"); - q1.appendChild(createProperties(doc, props)); - queues.appendChild(q1); - - // Create another parent level p1 - Element p1 = createQueue(doc, "p1"); - - // append child p11 to p1 - Element p11 = createQueue(doc, "p11"); - p11.appendChild(createAcls(doc, - QueueConfigurationParser.ACL_SUBMIT_JOB_TAG, "u1")); - p11.appendChild(createAcls(doc, - QueueConfigurationParser.ACL_ADMINISTER_JOB_TAG, "u2")); - p1.appendChild(p11); - - // append child p12 to p1 - Element p12 = createQueue(doc, "p12"); - p12.appendChild(createState(doc, QueueState.RUNNING.getStateName())); - p12.appendChild(createAcls(doc, - QueueConfigurationParser.ACL_SUBMIT_JOB_TAG, "*")); - p12.appendChild(createAcls(doc, - QueueConfigurationParser.ACL_ADMINISTER_JOB_TAG, "*")); - p1.appendChild(p12); - - // append child p13 to p1 - Element p13 = createQueue(doc, "p13"); - p13.appendChild(createState(doc, QueueState.RUNNING.getStateName())); - p1.appendChild(p13); - - // append child p14 to p1 - Element p14 = createQueue(doc, "p14"); - p14.appendChild(createState(doc, QueueState.STOPPED.getStateName())); - p1.appendChild(p14); - - queues.appendChild(p1); - } - - /** - * Creates all given queues as 1st level queues(no nesting) - * @param doc the queues config document - * @param queueNames the queues to be added to the queues config document - * @param submitAcls acl-submit-job acls for each of the queues - * @param adminsAcls acl-administer-jobs acls for each of the queues - * @throws Exception - */ - public static void createSimpleDocument(Document doc, String[] queueNames, - String[] submitAcls, String[] adminsAcls) throws Exception { - - Element queues = createQueuesNode(doc); - - // Create all queues as 1st level queues(no nesting) - for (int i = 0; i < queueNames.length; i++) { - Element q = createQueue(doc, queueNames[i]); - - q.appendChild(createState(doc, QueueState.RUNNING.getStateName())); - q.appendChild(createAcls(doc, - QueueConfigurationParser.ACL_SUBMIT_JOB_TAG, submitAcls[i])); - q.appendChild(createAcls(doc, - QueueConfigurationParser.ACL_ADMINISTER_JOB_TAG, adminsAcls[i])); - queues.appendChild(q); - } - } - - /** - * Creates queues configuration file with given queues at 1st level(i.e. - * no nesting of queues) and with the given queue acls. - * @param queueNames queue names which are to be configured - * @param submitAclStrings acl-submit-job acls for each of the queues - * @param adminsAclStrings acl-administer-jobs acls for each of the queues - * @return Configuration the queues configuration - * @throws Exception - */ - public static void createQueuesConfigFile(String[] queueNames, - String[] submitAclStrings, String[] adminsAclStrings) - throws Exception { - if (queueNames.length > submitAclStrings.length || - queueNames.length > adminsAclStrings.length) { - LOG.error("Number of queues is more than acls given."); - return; - } - Document doc = createDocument(); - createSimpleDocument(doc, queueNames, submitAclStrings, adminsAclStrings); - writeToFile(doc, QUEUES_CONFIG_FILE_PATH); - } - - public static void refreshSimpleDocument(Document doc) throws Exception { - Element queues = createQueuesNode(doc); - - // Create parent level queue q1. 
- Element q1 = createQueue(doc, "q1"); - Properties props = new Properties(); - props.setProperty("capacity", "70"); - props.setProperty("maxCapacity", "35"); - q1.appendChild(createProperties(doc, props)); - queues.appendChild(q1); - - // Create another parent level p1 - Element p1 = createQueue(doc, "p1"); - - // append child p11 to p1 - Element p11 = createQueue(doc, "p11"); - p11.appendChild(createState(doc, QueueState.STOPPED.getStateName())); - p1.appendChild(p11); - - Element p12 = createQueue(doc, "p12"); - - p12.appendChild(createState(doc, QueueState.RUNNING.getStateName())); - p12.appendChild(createAcls(doc, "acl-submit-job", "u3")); - p12.appendChild(createAcls(doc, "acl-administer-jobs", "u4")); - - // append p12 to p1. - p1.appendChild(p12); - - queues.appendChild(p1); - } - - /** - * Create the root element along with the - * element. - * - * @param doc - * @param enable - * @return the created element. - */ - public static Element createQueuesNode(Document doc) { - Element queues = doc.createElement("queues"); - doc.appendChild(queues); - return queues; - } - - public static void writeToFile(Document doc, String filePath) - throws TransformerException { - Transformer trans = TransformerFactory.newInstance().newTransformer(); - trans.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes"); - trans.setOutputProperty(OutputKeys.INDENT, "yes"); - DOMSource source = new DOMSource(doc); - trans.transform(source, new StreamResult(new File(filePath))); - } - - public static Element createQueue(Document doc, String name) { - Element queue = doc.createElement("queue"); - Element nameNode = doc.createElement("name"); - nameNode.setTextContent(name); - queue.appendChild(nameNode); - return queue; - } - - public static Element createAcls(Document doc, String aclName, - String listNames) { - Element acls = doc.createElement(aclName); - acls.setTextContent(listNames); - return acls; - } - - public static Element createState(Document doc, String state) { - Element stateElement = doc.createElement("state"); - stateElement.setTextContent(state); - return stateElement; - } - - public static Element createProperties(Document doc, Properties props) { - Element propsElement = doc.createElement("properties"); - if (props != null) { - Set propList = props.stringPropertyNames(); - for (String prop : propList) { - Element property = doc.createElement("property"); - property.setAttribute("key", prop); - property.setAttribute("value", (String) props.get(prop)); - propsElement.appendChild(property); - } - } - return propsElement; - } - - /** - * Delete queues configuration file if exists - */ - public static void deleteQueuesConfigFile() { - if (new File(QUEUES_CONFIG_FILE_PATH).exists()) { - new File(QUEUES_CONFIG_FILE_PATH).delete(); - } - } - - /** - * Write the given queueHierarchy to the given file. 
- * - * @param filePath - * - * @param rootQueues - * @throws Exception - */ - public static void writeQueueConfigurationFile(String filePath, - JobQueueInfo[] rootQueues) throws Exception { - Document doc = createDocument(); - Element queueElements = createQueuesNode(doc); - for (JobQueueInfo rootQ : rootQueues) { - queueElements.appendChild(QueueConfigurationParser.getQueueElement(doc, - rootQ)); - } - writeToFile(doc, filePath); - } - - static Job submitSleepJob(final int numMappers, final int numReducers, final long mapSleepTime, - final long reduceSleepTime, boolean shouldComplete, String userInfo, - String queueName, Configuration clientConf) throws IOException, - InterruptedException, ClassNotFoundException { - clientConf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME); - clientConf.set(JTConfig.JT_IPC_ADDRESS, "localhost:" - + miniMRCluster.getJobTrackerPort()); - UserGroupInformation ugi; - if (userInfo != null) { - String[] splits = userInfo.split(","); - String[] groups = new String[splits.length - 1]; - System.arraycopy(splits, 1, groups, 0, splits.length - 1); - ugi = UserGroupInformation.createUserForTesting(splits[0], groups); - } else { - ugi = UserGroupInformation.getCurrentUser(); - } - if (queueName != null) { - clientConf.set(JobContext.QUEUE_NAME, queueName); - } - final SleepJob sleep = new SleepJob(); - sleep.setConf(clientConf); - - Job job = ugi.doAs(new PrivilegedExceptionAction() { - public Job run() throws IOException { - return sleep.createJob(numMappers, numReducers, mapSleepTime, - (int) mapSleepTime, reduceSleepTime, (int) reduceSleepTime); - }}); - if (shouldComplete) { - job.waitForCompletion(false); - } else { - job.submit(); - // miniMRCluster.getJobTrackerRunner().getJobTracker().jobsToComplete()[] - Cluster cluster = new Cluster(miniMRCluster.createJobConf()); - JobStatus[] status = miniMRCluster.getJobTrackerRunner().getJobTracker() - .jobsToComplete(); - JobID id = status[status.length -1].getJobID(); - Job newJob = cluster.getJob(id); - cluster.close(); - return newJob; - } - return job; - } - - static MiniMRCluster miniMRCluster; -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/SortValidator.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/SortValidator.java deleted file mode 100644 index 6cfcabc9b71..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/SortValidator.java +++ /dev/null @@ -1,597 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.mapred; - -import java.io.*; -import java.net.URI; -import java.util.*; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.io.BytesWritable; -import org.apache.hadoop.io.IntWritable; -import org.apache.hadoop.io.SequenceFile; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.io.WritableComparable; -import org.apache.hadoop.io.WritableComparator; -import org.apache.hadoop.io.WritableUtils; -import org.apache.hadoop.mapred.lib.HashPartitioner; -import org.apache.hadoop.util.Tool; -import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.fs.*; - -/** - * A set of utilities to validate the sort of the map-reduce framework. - * This utility program has 2 main parts: - * 1. Checking the records' statistics - * a) Validates the no. of bytes and records in sort's input & output. - * b) Validates the xor of the md5's of each key/value pair. - * c) Ensures same key/value is present in both input and output. - * 2. Check individual records to ensure each record is present in both - * the input and the output of the sort (expensive on large data-sets). - * - * To run: bin/hadoop jar build/hadoop-examples.jar sortvalidate - * [-m maps] [-r reduces] [-deep] - * -sortInput sort-in-dir -sortOutput sort-out-dir - */ -public class SortValidator extends Configured implements Tool { - - static private final IntWritable sortInput = new IntWritable(1); - static private final IntWritable sortOutput = new IntWritable(2); - static public String SORT_REDUCES = - "mapreduce.sortvalidator.sort.reduce.tasks"; - static public String MAPS_PER_HOST = "mapreduce.sortvalidator.mapsperhost"; - static public String REDUCES_PER_HOST = - "mapreduce.sortvalidator.reducesperhost"; - static void printUsage() { - System.err.println("sortvalidate [-m ] [-r ] [-deep] " + - "-sortInput -sortOutput "); - System.exit(1); - } - - static private IntWritable deduceInputFile(JobConf job) { - Path[] inputPaths = FileInputFormat.getInputPaths(job); - Path inputFile = new Path(job.get(JobContext.MAP_INPUT_FILE)); - - // value == one for sort-input; value == two for sort-output - return (inputFile.getParent().equals(inputPaths[0])) ? - sortInput : sortOutput; - } - - static private byte[] pair(BytesWritable a, BytesWritable b) { - byte[] pairData = new byte[a.getLength()+ b.getLength()]; - System.arraycopy(a.getBytes(), 0, pairData, 0, a.getLength()); - System.arraycopy(b.getBytes(), 0, pairData, a.getLength(), b.getLength()); - return pairData; - } - - private static final PathFilter sortPathsFilter = new PathFilter() { - public boolean accept(Path path) { - return (path.getName().startsWith("part-")); - } - }; - - /** - * A simple map-reduce job which checks consistency of the - * MapReduce framework's sort by checking: - * a) Records are sorted correctly - * b) Keys are partitioned correctly - * c) The input and output have same no. of bytes and records. - * d) The input and output have the correct 'checksum' by xor'ing - * the md5 of each record. - * - */ - public static class RecordStatsChecker { - - /** - * Generic way to get raw data from a {@link Writable}. 
- */ - static class Raw { - /** - * Get raw data bytes from a {@link Writable} - * @param writable {@link Writable} object from whom to get the raw data - * @return raw data of the writable - */ - public byte[] getRawBytes(Writable writable) { - return writable.toString().getBytes(); - } - - /** - * Get number of raw data bytes of the {@link Writable} - * @param writable {@link Writable} object from whom to get the raw data - * length - * @return number of raw data bytes - */ - public int getRawBytesLength(Writable writable) { - return writable.toString().getBytes().length; - } - } - - /** - * Specialization of {@link Raw} for {@link BytesWritable}. - */ - static class RawBytesWritable extends Raw { - public byte[] getRawBytes(Writable bw) { - return ((BytesWritable)bw).getBytes(); - } - public int getRawBytesLength(Writable bw) { - return ((BytesWritable)bw).getLength(); - } - } - - /** - * Specialization of {@link Raw} for {@link Text}. - */ - static class RawText extends Raw { - public byte[] getRawBytes(Writable text) { - return ((Text)text).getBytes(); - } - public int getRawBytesLength(Writable text) { - return ((Text)text).getLength(); - } - } - - private static Raw createRaw(Class rawClass) { - if (rawClass == Text.class) { - return new RawText(); - } else if (rawClass == BytesWritable.class) { - System.err.println("Returning " + RawBytesWritable.class); - return new RawBytesWritable(); - } - return new Raw(); - } - - public static class RecordStatsWritable implements Writable { - private long bytes = 0; - private long records = 0; - private int checksum = 0; - - public RecordStatsWritable() {} - - public RecordStatsWritable(long bytes, long records, int checksum) { - this.bytes = bytes; - this.records = records; - this.checksum = checksum; - } - - public void write(DataOutput out) throws IOException { - WritableUtils.writeVLong(out, bytes); - WritableUtils.writeVLong(out, records); - WritableUtils.writeVInt(out, checksum); - } - - public void readFields(DataInput in) throws IOException { - bytes = WritableUtils.readVLong(in); - records = WritableUtils.readVLong(in); - checksum = WritableUtils.readVInt(in); - } - - public long getBytes() { return bytes; } - public long getRecords() { return records; } - public int getChecksum() { return checksum; } - } - - public static class Map extends MapReduceBase - implements Mapper { - - private IntWritable key = null; - private WritableComparable prevKey = null; - private Class keyClass; - private Partitioner partitioner = null; - private int partition = -1; - private int noSortReducers = -1; - private long recordId = -1; - - private Raw rawKey; - private Raw rawValue; - - public void configure(JobConf job) { - // 'key' == sortInput for sort-input; key == sortOutput for sort-output - key = deduceInputFile(job); - - if (key == sortOutput) { - partitioner = new HashPartitioner(); - - // Figure the 'current' partition and no. 
of reduces of the 'sort' - try { - URI inputURI = new URI(job.get(JobContext.MAP_INPUT_FILE)); - String inputFile = inputURI.getPath(); - // part file is of the form part-r-xxxxx - partition = Integer.valueOf(inputFile.substring( - inputFile.lastIndexOf("part") + 7)).intValue(); - noSortReducers = job.getInt(SORT_REDUCES, -1); - } catch (Exception e) { - System.err.println("Caught: " + e); - System.exit(-1); - } - } - } - - @SuppressWarnings("unchecked") - public void map(WritableComparable key, Writable value, - OutputCollector output, - Reporter reporter) throws IOException { - // Set up rawKey and rawValue on the first call to 'map' - if (recordId == -1) { - rawKey = createRaw(key.getClass()); - rawValue = createRaw(value.getClass()); - } - ++recordId; - - if (this.key == sortOutput) { - // Check if keys are 'sorted' if this - // record is from sort's output - if (prevKey == null) { - prevKey = key; - keyClass = prevKey.getClass(); - } else { - // Sanity check - if (keyClass != key.getClass()) { - throw new IOException("Type mismatch in key: expected " + - keyClass.getName() + ", received " + - key.getClass().getName()); - } - - // Check if they were sorted correctly - if (prevKey.compareTo(key) > 0) { - throw new IOException("The 'map-reduce' framework wrongly" + - " classifed (" + prevKey + ") > (" + - key + ") "+ "for record# " + recordId); - } - prevKey = key; - } - - // Check if the sorted output is 'partitioned' right - int keyPartition = - partitioner.getPartition(key, value, noSortReducers); - if (partition != keyPartition) { - throw new IOException("Partitions do not match for record# " + - recordId + " ! - '" + partition + "' v/s '" + - keyPartition + "'"); - } - } - - // Construct the record-stats and output (this.key, record-stats) - byte[] keyBytes = rawKey.getRawBytes(key); - int keyBytesLen = rawKey.getRawBytesLength(key); - byte[] valueBytes = rawValue.getRawBytes(value); - int valueBytesLen = rawValue.getRawBytesLength(value); - - int keyValueChecksum = - (WritableComparator.hashBytes(keyBytes, keyBytesLen) ^ - WritableComparator.hashBytes(valueBytes, valueBytesLen)); - - output.collect(this.key, - new RecordStatsWritable((keyBytesLen+valueBytesLen), - 1, keyValueChecksum) - ); - } - - } - - public static class Reduce extends MapReduceBase - implements Reducer { - - public void reduce(IntWritable key, Iterator values, - OutputCollector output, - Reporter reporter) throws IOException { - long bytes = 0; - long records = 0; - int xor = 0; - while (values.hasNext()) { - RecordStatsWritable stats = values.next(); - bytes += stats.getBytes(); - records += stats.getRecords(); - xor ^= stats.getChecksum(); - } - - output.collect(key, new RecordStatsWritable(bytes, records, xor)); - } - } - - public static class NonSplitableSequenceFileInputFormat - extends SequenceFileInputFormat { - protected boolean isSplitable(FileSystem fs, Path filename) { - return false; - } - } - - static void checkRecords(Configuration defaults, - Path sortInput, Path sortOutput) throws IOException { - FileSystem inputfs = sortInput.getFileSystem(defaults); - FileSystem outputfs = sortOutput.getFileSystem(defaults); - FileSystem defaultfs = FileSystem.get(defaults); - JobConf jobConf = new JobConf(defaults, RecordStatsChecker.class); - jobConf.setJobName("sortvalidate-recordstats-checker"); - - int noSortReduceTasks = - outputfs.listStatus(sortOutput, sortPathsFilter).length; - jobConf.setInt(SORT_REDUCES, noSortReduceTasks); - int noSortInputpaths = inputfs.listStatus(sortInput).length; - - 
jobConf.setInputFormat(NonSplitableSequenceFileInputFormat.class); - jobConf.setOutputFormat(SequenceFileOutputFormat.class); - - jobConf.setOutputKeyClass(IntWritable.class); - jobConf.setOutputValueClass(RecordStatsChecker.RecordStatsWritable.class); - - jobConf.setMapperClass(Map.class); - jobConf.setCombinerClass(Reduce.class); - jobConf.setReducerClass(Reduce.class); - - jobConf.setNumMapTasks(noSortReduceTasks); - jobConf.setNumReduceTasks(1); - - FileInputFormat.setInputPaths(jobConf, sortInput); - FileInputFormat.addInputPath(jobConf, sortOutput); - Path outputPath = new Path(new Path("/tmp", - "sortvalidate"), UUID.randomUUID().toString()); - if (defaultfs.exists(outputPath)) { - defaultfs.delete(outputPath, true); - } - FileOutputFormat.setOutputPath(jobConf, outputPath); - - // Uncomment to run locally in a single process - //job_conf.set(JTConfig.JT, "local"); - Path[] inputPaths = FileInputFormat.getInputPaths(jobConf); - System.out.println("\nSortValidator.RecordStatsChecker: Validate sort " + - "from " + inputPaths[0] + " (" + - noSortInputpaths + " files), " + - inputPaths[1] + " (" + - noSortReduceTasks + - " files) into " + - FileOutputFormat.getOutputPath(jobConf) + - " with 1 reducer."); - Date startTime = new Date(); - System.out.println("Job started: " + startTime); - JobClient.runJob(jobConf); - try { - Date end_time = new Date(); - System.out.println("Job ended: " + end_time); - System.out.println("The job took " + - (end_time.getTime() - startTime.getTime()) /1000 + " seconds."); - - // Check to ensure that the statistics of the - // framework's sort-input and sort-output match - SequenceFile.Reader stats = new SequenceFile.Reader(defaultfs, - new Path(outputPath, "part-00000"), defaults); - try { - IntWritable k1 = new IntWritable(); - IntWritable k2 = new IntWritable(); - RecordStatsWritable v1 = new RecordStatsWritable(); - RecordStatsWritable v2 = new RecordStatsWritable(); - if (!stats.next(k1, v1)) { - throw new IOException( - "Failed to read record #1 from reduce's output"); - } - if (!stats.next(k2, v2)) { - throw new IOException( - "Failed to read record #2 from reduce's output"); - } - - if ((v1.getBytes() != v2.getBytes()) || - (v1.getRecords() != v2.getRecords()) || - v1.getChecksum() != v2.getChecksum()) { - throw new IOException("(" + - v1.getBytes() + ", " + v1.getRecords() + ", " + v1.getChecksum() - + ") v/s (" + - v2.getBytes() + ", " + v2.getRecords() + ", " + v2.getChecksum() - + ")"); - } - } finally { - stats.close(); - } - } finally { - defaultfs.delete(outputPath, true); - } - } - - } - - /** - * A simple map-reduce task to check if the input and the output - * of the framework's sort is consistent by ensuring each record - * is present in both the input and the output. 
- * - */ - public static class RecordChecker { - - public static class Map extends MapReduceBase - implements Mapper { - - private IntWritable value = null; - - public void configure(JobConf job) { - // value == one for sort-input; value == two for sort-output - value = deduceInputFile(job); - } - - public void map(BytesWritable key, - BytesWritable value, - OutputCollector output, - Reporter reporter) throws IOException { - // newKey = (key, value) - BytesWritable keyValue = new BytesWritable(pair(key, value)); - - // output (newKey, value) - output.collect(keyValue, this.value); - } - } - - public static class Reduce extends MapReduceBase - implements Reducer { - - public void reduce(BytesWritable key, Iterator values, - OutputCollector output, - Reporter reporter) throws IOException { - int ones = 0; - int twos = 0; - while (values.hasNext()) { - IntWritable count = values.next(); - if (count.equals(sortInput)) { - ++ones; - } else if (count.equals(sortOutput)) { - ++twos; - } else { - throw new IOException("Invalid 'value' of " + count.get() + - " for (key,value): " + key.toString()); - } - } - - // Check to ensure there are equal no. of ones and twos - if (ones != twos) { - throw new IOException("Illegal ('one', 'two'): (" + ones + ", " + twos + - ") for (key, value): " + key.toString()); - } - } - } - - static void checkRecords(Configuration defaults, int noMaps, int noReduces, - Path sortInput, Path sortOutput) throws IOException { - JobConf jobConf = new JobConf(defaults, RecordChecker.class); - jobConf.setJobName("sortvalidate-record-checker"); - - jobConf.setInputFormat(SequenceFileInputFormat.class); - jobConf.setOutputFormat(SequenceFileOutputFormat.class); - - jobConf.setOutputKeyClass(BytesWritable.class); - jobConf.setOutputValueClass(IntWritable.class); - - jobConf.setMapperClass(Map.class); - jobConf.setReducerClass(Reduce.class); - - JobClient client = new JobClient(jobConf); - ClusterStatus cluster = client.getClusterStatus(); - if (noMaps == -1) { - noMaps = cluster.getTaskTrackers() * - jobConf.getInt(MAPS_PER_HOST, 10); - } - if (noReduces == -1) { - noReduces = (int) (cluster.getMaxReduceTasks() * 0.9); - String sortReduces = jobConf.get(REDUCES_PER_HOST); - if (sortReduces != null) { - noReduces = cluster.getTaskTrackers() * - Integer.parseInt(sortReduces); - } - } - jobConf.setNumMapTasks(noMaps); - jobConf.setNumReduceTasks(noReduces); - - FileInputFormat.setInputPaths(jobConf, sortInput); - FileInputFormat.addInputPath(jobConf, sortOutput); - Path outputPath = new Path("/tmp/sortvalidate/recordchecker"); - FileSystem fs = FileSystem.get(defaults); - if (fs.exists(outputPath)) { - fs.delete(outputPath, true); - } - FileOutputFormat.setOutputPath(jobConf, outputPath); - - // Uncomment to run locally in a single process - //job_conf.set(JTConfig.JT, "local"); - Path[] inputPaths = FileInputFormat.getInputPaths(jobConf); - System.out.println("\nSortValidator.RecordChecker: Running on " + - cluster.getTaskTrackers() + - " nodes to validate sort from " + - inputPaths[0] + ", " + - inputPaths[1] + " into " + - FileOutputFormat.getOutputPath(jobConf) + - " with " + noReduces + " reduces."); - Date startTime = new Date(); - System.out.println("Job started: " + startTime); - JobClient.runJob(jobConf); - Date end_time = new Date(); - System.out.println("Job ended: " + end_time); - System.out.println("The job took " + - (end_time.getTime() - startTime.getTime()) /1000 + " seconds."); - } - } - - - /** - * The main driver for sort-validator program. 
- * Invoke this method to submit the map/reduce job. - * @throws IOException When there is communication problems with the - * job tracker. - */ - public int run(String[] args) throws Exception { - Configuration defaults = getConf(); - - int noMaps = -1, noReduces = -1; - Path sortInput = null, sortOutput = null; - boolean deepTest = false; - for(int i=0; i < args.length; ++i) { - try { - if ("-m".equals(args[i])) { - noMaps = Integer.parseInt(args[++i]); - } else if ("-r".equals(args[i])) { - noReduces = Integer.parseInt(args[++i]); - } else if ("-sortInput".equals(args[i])){ - sortInput = new Path(args[++i]); - } else if ("-sortOutput".equals(args[i])){ - sortOutput = new Path(args[++i]); - } else if ("-deep".equals(args[i])) { - deepTest = true; - } else { - printUsage(); - return -1; - } - } catch (NumberFormatException except) { - System.err.println("ERROR: Integer expected instead of " + args[i]); - printUsage(); - return -1; - } catch (ArrayIndexOutOfBoundsException except) { - System.err.println("ERROR: Required parameter missing from " + - args[i-1]); - printUsage(); - return -1; - } - } - - // Sanity check - if (sortInput == null || sortOutput == null) { - printUsage(); - return -2; - } - - // Check if the records are consistent and sorted correctly - RecordStatsChecker.checkRecords(defaults, sortInput, sortOutput); - - // Check if the same records are present in sort's inputs & outputs - if (deepTest) { - RecordChecker.checkRecords(defaults, noMaps, noReduces, sortInput, - sortOutput); - } - - System.out.println("\nSUCCESS! Validated the MapReduce framework's 'sort'" + - " successfully."); - - return 0; - } - - public static void main(String[] args) throws Exception { - int res = ToolRunner.run(new Configuration(), new SortValidator(), args); - System.exit(res); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestAdminOperationsProtocolWithServiceAuthorization.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestAdminOperationsProtocolWithServiceAuthorization.java deleted file mode 100644 index 92ce67bd3dd..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestAdminOperationsProtocolWithServiceAuthorization.java +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import org.apache.hadoop.mapred.tools.MRAdmin; -import org.apache.hadoop.security.authorize.PolicyProvider; -import org.apache.hadoop.security.authorize.ServiceAuthorizationManager; - -import junit.framework.TestCase; - -/** - * Test case to check if {@link AdminOperationsProtocol#refreshNodes()} and - * {@link AdminOperationsProtocol#refreshQueueAcls()} works with service-level - * authorization enabled i.e 'hadoop.security.authorization' set to true. - */ -public class TestAdminOperationsProtocolWithServiceAuthorization -extends TestCase { - public void testServiceLevelAuthorization() throws Exception { - MiniMRCluster mr = null; - try { - // Turn on service-level authorization - final JobConf conf = new JobConf(); - conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, - MapReducePolicyProvider.class, PolicyProvider.class); - conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, - true); - - // Start the mini mr cluster - mr = new MiniMRCluster(1, "file:///", 1, null, null, conf); - - // Invoke MRAdmin commands - MRAdmin mrAdmin = new MRAdmin(mr.createJobConf()); - assertEquals(0, mrAdmin.run(new String[] { "-refreshQueues" })); - assertEquals(0, mrAdmin.run(new String[] { "-refreshNodes" })); - } finally { - if (mr != null) { - mr.shutdown(); - } - } - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestClusterStatus.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestClusterStatus.java deleted file mode 100644 index eb0f77c586a..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestClusterStatus.java +++ /dev/null @@ -1,270 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobInProgress; -import org.apache.hadoop.mapreduce.Cluster; -import org.apache.hadoop.mapreduce.ClusterMetrics; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; - -import junit.extensions.TestSetup; -import junit.framework.Test; -import junit.framework.TestCase; -import junit.framework.TestSuite; - -public class TestClusterStatus extends TestCase { - - private static String[] trackers = new String[] { "tracker_tracker1:1000", - "tracker_tracker2:1000", "tracker_tracker3:1000" }; - private static JobTracker jobTracker; - private static int mapSlotsPerTracker = 4; - private static int reduceSlotsPerTracker = 2; - private static MiniMRCluster mr; - private static FakeJobInProgress fakeJob = null; - private static Cluster cluster; - // heartbeat responseId. increment this after sending a heartbeat - private static short responseId = 1; - - public static Test suite() { - TestSetup setup = new TestSetup(new TestSuite(TestClusterStatus.class)) { - protected void setUp() throws Exception { - Configuration conf = new Configuration(); - conf.setClass(JTConfig.JT_TASK_SCHEDULER, FakeTaskScheduler.class, - TaskScheduler.class); - mr = new MiniMRCluster(0, "file:///", 1, null, null, new JobConf(conf)); - jobTracker = mr.getJobTrackerRunner().getJobTracker(); - for (String tracker : trackers) { - FakeObjectUtilities.establishFirstContact(jobTracker, tracker); - } - cluster = new Cluster(mr.createJobConf()); - } - - protected void tearDown() throws Exception { - cluster.close(); - mr.shutdown(); - } - }; - return setup; - } - - /** - * Fake scheduler to test reservations. - * - * The reservations are updated incrementally in each - * heartbeat to pass through the re-reservation logic. - */ - static class FakeTaskScheduler extends JobQueueTaskScheduler { - - private Map reservedCounts - = new HashMap(); - - public FakeTaskScheduler() { - super(); - } - - public List assignTasks(TaskTracker tt) { - int currCount = 1; - if (reservedCounts.containsKey(tt)) { - currCount = reservedCounts.get(tt) + 1; - } - reservedCounts.put(tt, currCount); - tt.reserveSlots(TaskType.MAP, fakeJob, currCount); - tt.reserveSlots(TaskType.REDUCE, fakeJob, currCount); - return new ArrayList(); - } - } - - private TaskTrackerStatus getTTStatus(String trackerName, - List taskStatuses) { - return new TaskTrackerStatus(trackerName, - JobInProgress.convertTrackerNameToHostName(trackerName), 0, - taskStatuses, 0, mapSlotsPerTracker, reduceSlotsPerTracker); - } - - public void testClusterMetrics() throws IOException, InterruptedException { - assertEquals("tasktracker count doesn't match", trackers.length, - cluster.getClusterStatus().getTaskTrackerCount()); - - List list = new ArrayList(); - - // create a map task status, which uses 2 slots. - int mapSlotsPerTask = 2; - addMapTaskAttemptToList(list, mapSlotsPerTask, TaskStatus.State.RUNNING); - - // create a reduce task status, which uses 1 slot. 
- int reduceSlotsPerTask = 1; - addReduceTaskAttemptToList(list, - reduceSlotsPerTask, TaskStatus.State.RUNNING); - - // create TaskTrackerStatus and send heartbeats - sendHeartbeats(list); - - // assert ClusterMetrics - ClusterMetrics metrics = cluster.getClusterStatus(); - assertEquals("occupied map slots do not match", mapSlotsPerTask, - metrics.getOccupiedMapSlots()); - assertEquals("occupied reduce slots do not match", reduceSlotsPerTask, - metrics.getOccupiedReduceSlots()); - assertEquals("map slot capacities do not match", - mapSlotsPerTracker * trackers.length, - metrics.getMapSlotCapacity()); - assertEquals("reduce slot capacities do not match", - reduceSlotsPerTracker * trackers.length, - metrics.getReduceSlotCapacity()); - assertEquals("running map tasks do not match", 1, - metrics.getRunningMaps()); - assertEquals("running reduce tasks do not match", 1, - metrics.getRunningReduces()); - - // assert the values in ClusterStatus also - assertEquals("running map tasks do not match", 1, - jobTracker.getClusterStatus().getMapTasks()); - assertEquals("running reduce tasks do not match", 1, - jobTracker.getClusterStatus().getReduceTasks()); - assertEquals("map slot capacities do not match", - mapSlotsPerTracker * trackers.length, - jobTracker.getClusterStatus().getMaxMapTasks()); - assertEquals("reduce slot capacities do not match", - reduceSlotsPerTracker * trackers.length, - jobTracker.getClusterStatus().getMaxReduceTasks()); - - // send a heartbeat finishing only a map and check - // counts are updated. - list.clear(); - addMapTaskAttemptToList(list, mapSlotsPerTask, TaskStatus.State.SUCCEEDED); - addReduceTaskAttemptToList(list, - reduceSlotsPerTask, TaskStatus.State.RUNNING); - sendHeartbeats(list); - metrics = jobTracker.getClusterMetrics(); - assertEquals(0, metrics.getOccupiedMapSlots()); - assertEquals(reduceSlotsPerTask, metrics.getOccupiedReduceSlots()); - - // send a heartbeat finishing the reduce task also. 
- list.clear(); - addReduceTaskAttemptToList(list, - reduceSlotsPerTask, TaskStatus.State.SUCCEEDED); - sendHeartbeats(list); - metrics = jobTracker.getClusterMetrics(); - assertEquals(0, metrics.getOccupiedReduceSlots()); - } - - private void sendHeartbeats(List list) throws IOException { - TaskTrackerStatus[] status = new TaskTrackerStatus[trackers.length]; - status[0] = getTTStatus(trackers[0], list); - status[1] = getTTStatus(trackers[1], new ArrayList()); - status[2] = getTTStatus(trackers[2], new ArrayList()); - for (int i = 0; i< trackers.length; i++) { - FakeObjectUtilities.sendHeartBeat(jobTracker, status[i], false, false, - trackers[i], responseId); - } - responseId++; - } - - private void addReduceTaskAttemptToList(List list, - int reduceSlotsPerTask, TaskStatus.State state) { - TaskStatus ts = TaskStatus.createTaskStatus(false, - new TaskAttemptID("jt", 1, TaskType.REDUCE, 0, 0), 0.0f, - reduceSlotsPerTask, - state, "", "", trackers[0], - TaskStatus.Phase.REDUCE, null); - list.add(ts); - } - - private void addMapTaskAttemptToList(List list, - int mapSlotsPerTask, TaskStatus.State state) { - TaskStatus ts = TaskStatus.createTaskStatus(true, - new TaskAttemptID("jt", 1, TaskType.MAP, 0, 0), 0.0f, mapSlotsPerTask, - state, "", "", trackers[0], - TaskStatus.Phase.MAP, null); - list.add(ts); - } - - public void testReservedSlots() throws Exception { - Configuration conf = mr.createJobConf(); - conf.setInt(JobContext.NUM_MAPS, 1); - - Job job = Job.getInstance(conf); - job.setNumReduceTasks(1); - job.setSpeculativeExecution(false); - job.setJobSetupCleanupNeeded(false); - - //Set task tracker objects for reservation. - TaskTracker tt1 = jobTracker.getTaskTracker(trackers[0]); - TaskTracker tt2 = jobTracker.getTaskTracker(trackers[1]); - TaskTrackerStatus status1 = new TaskTrackerStatus( - trackers[0],JobInProgress.convertTrackerNameToHostName( - trackers[0]),0,new ArrayList(), 0, 2, 2); - TaskTrackerStatus status2 = new TaskTrackerStatus( - trackers[1],JobInProgress.convertTrackerNameToHostName( - trackers[1]),0,new ArrayList(), 0, 2, 2); - tt1.setStatus(status1); - tt2.setStatus(status2); - - fakeJob = new FakeJobInProgress(new JobConf(job.getConfiguration()), - jobTracker); - fakeJob.setClusterSize(3); - fakeJob.initTasks(); - - FakeObjectUtilities.sendHeartBeat(jobTracker, status1, false, - true, trackers[0], responseId); - FakeObjectUtilities.sendHeartBeat(jobTracker, status2, false, - true, trackers[1], responseId); - responseId++; - ClusterMetrics metrics = cluster.getClusterStatus(); - assertEquals("reserved map slots do not match", - 2, metrics.getReservedMapSlots()); - assertEquals("reserved reduce slots do not match", - 2, metrics.getReservedReduceSlots()); - - // redo to test re-reservations. 
- FakeObjectUtilities.sendHeartBeat(jobTracker, status1, false, - true, trackers[0], responseId); - FakeObjectUtilities.sendHeartBeat(jobTracker, status2, false, - true, trackers[1], responseId); - responseId++; - metrics = cluster.getClusterStatus(); - assertEquals("reserved map slots do not match", - 4, metrics.getReservedMapSlots()); - assertEquals("reserved reduce slots do not match", - 4, metrics.getReservedReduceSlots()); - - TaskAttemptID mTid = fakeJob.findMapTask(trackers[1]); - TaskAttemptID rTid = fakeJob.findReduceTask(trackers[1]); - - fakeJob.finishTask(mTid); - fakeJob.finishTask(rTid); - - assertEquals("Job didnt complete successfully complete", - fakeJob.getStatus().getRunState(), JobStatus.SUCCEEDED); - metrics = cluster.getClusterStatus(); - assertEquals("reserved map slots do not match", - 0, metrics.getReservedMapSlots()); - assertEquals("reserved reduce slots do not match", - 0, metrics.getReservedReduceSlots()); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestCompositeTaskTrackerInstrumentation.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestCompositeTaskTrackerInstrumentation.java deleted file mode 100644 index bbe1c6f7313..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestCompositeTaskTrackerInstrumentation.java +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.junit.Test; -import static org.junit.Assert.*; - -public class TestCompositeTaskTrackerInstrumentation { - private static final Log LOG = LogFactory.getLog( - TestCompositeTaskTrackerInstrumentation.class); - - @Test - public void testCompositeInstrumentation() throws IOException { - // Create two instrumentation instances - TaskTracker tt = new TaskTracker(); - DummyTaskTrackerInstrumentation inst1 = - new DummyTaskTrackerInstrumentation(tt); - DummyTaskTrackerInstrumentation inst2 = - new DummyTaskTrackerInstrumentation(tt); - - // Add them to a composite object - ArrayList insts = - new ArrayList(); - insts.add(inst1); - insts.add(inst2); - CompositeTaskTrackerInstrumentation comp = - new CompositeTaskTrackerInstrumentation(tt, insts); - - // Create some dummy objects to pass to instrumentation methods - TaskAttemptID tid = new TaskAttemptID(); - File file = new File("file"); - Task task = new MapTask(); - TaskStatus status = new MapTaskStatus(); - - // Test that completeTask propagates to listeners - assertFalse(inst1.completeTaskCalled); - assertFalse(inst2.completeTaskCalled); - comp.completeTask(tid); - assertTrue(inst1.completeTaskCalled); - assertTrue(inst2.completeTaskCalled); - - // Test that timedoutTask propagates to listeners - assertFalse(inst1.timedoutTaskCalled); - assertFalse(inst2.timedoutTaskCalled); - comp.timedoutTask(tid); - assertTrue(inst1.timedoutTaskCalled); - assertTrue(inst2.timedoutTaskCalled); - - // Test that taskFailedPing propagates to listeners - assertFalse(inst1.taskFailedPingCalled); - assertFalse(inst2.taskFailedPingCalled); - comp.taskFailedPing(tid); - assertTrue(inst1.taskFailedPingCalled); - assertTrue(inst2.taskFailedPingCalled); - - // Test that reportTaskLaunch propagates to listeners - assertFalse(inst1.reportTaskLaunchCalled); - assertFalse(inst2.reportTaskLaunchCalled); - comp.reportTaskLaunch(tid, file, file); - assertTrue(inst1.reportTaskLaunchCalled); - assertTrue(inst2.reportTaskLaunchCalled); - - // Test that reportTaskEnd propagates to listeners - assertFalse(inst1.reportTaskEndCalled); - assertFalse(inst2.reportTaskEndCalled); - comp.reportTaskEnd(tid); - assertTrue(inst1.reportTaskEndCalled); - assertTrue(inst2.reportTaskEndCalled); - - // Test that statusUpdate propagates to listeners - assertFalse(inst1.statusUpdateCalled); - assertFalse(inst2.statusUpdateCalled); - comp.statusUpdate(task, status); - assertTrue(inst1.statusUpdateCalled); - assertTrue(inst2.statusUpdateCalled); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestCompressedEmptyMapOutputs.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestCompressedEmptyMapOutputs.java deleted file mode 100644 index acf7242bd99..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestCompressedEmptyMapOutputs.java +++ /dev/null @@ -1,123 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.examples.RandomWriter; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.io.BytesWritable; -import org.apache.hadoop.mapred.lib.IdentityReducer; -import org.apache.hadoop.util.ToolRunner; - -import junit.framework.TestCase; - -/** - * A JUnit test to test the Map-Reduce framework's sort in presence of - * null intermediate map-outputs, when compression is enabled for intermediate - * map-outputs. - */ -public class TestCompressedEmptyMapOutputs extends TestCase { - // Input/Output paths for sort - private static final Path SORT_INPUT_PATH = new Path("/sort/input"); - private static final Path SORT_OUTPUT_PATH = new Path("/sort/output"); - - // Knobs to control randomwriter; and hence sort - private static final int NUM_HADOOP_SLAVES = 3; - private static final int RW_BYTES_PER_MAP = 50000; - private static final int RW_MAPS_PER_HOST = 5; - - private static void runRandomWriter(JobConf job, Path sortInput) - throws Exception { - // Scale down the default settings for RandomWriter for the test-case - // Generates NUM_HADOOP_SLAVES * RW_MAPS_PER_HOST * RW_BYTES_PER_MAP -> 1MB - job.setInt(RandomWriter.BYTES_PER_MAP, RW_BYTES_PER_MAP); - job.setInt(RandomWriter.MAPS_PER_HOST, RW_MAPS_PER_HOST); - String[] rwArgs = {sortInput.toString()}; - - // Run RandomWriter - assertEquals(ToolRunner.run(job, new RandomWriter(), rwArgs), 0); - } - - - static class SinkMapper - extends MapReduceBase implements Mapper { - - public void map(K key, V val, - OutputCollector output, Reporter reporter) - throws IOException { - // Don't output anything! - if (false) output.collect(key, val); - } - } - - private static void runSort(JobConf jobConf, Path sortInput, Path sortOutput) - throws Exception { - // Set up the job - jobConf.setJobName("null-sorter"); - - jobConf.setMapperClass(SinkMapper.class); - jobConf.setReducerClass(IdentityReducer.class); - - jobConf.setNumReduceTasks(2); - - jobConf.setInputFormat(SequenceFileInputFormat.class); - jobConf.setOutputFormat(SequenceFileOutputFormat.class); - - jobConf.setOutputKeyClass(BytesWritable.class); - jobConf.setOutputValueClass(BytesWritable.class); - - FileInputFormat.setInputPaths(jobConf, sortInput); - FileOutputFormat.setOutputPath(jobConf, sortOutput); - - // Compress the intermediate map-outputs! 
- jobConf.setCompressMapOutput(true); - - // Run the job - JobClient.runJob(jobConf); - } - - public void testMapReduceSortWithCompressedEmptyMapOutputs() - throws Exception { - MiniDFSCluster dfs = null; - MiniMRCluster mr = null; - FileSystem fileSys = null; - try { - Configuration conf = new Configuration(); - - // Start the mini-MR and mini-DFS clusters - dfs = new MiniDFSCluster(conf, NUM_HADOOP_SLAVES, true, null); - fileSys = dfs.getFileSystem(); - mr = new MiniMRCluster(NUM_HADOOP_SLAVES, fileSys.getUri().toString(), 1); - - // Run randomwriter to generate input for 'sort' - runRandomWriter(mr.createJobConf(), SORT_INPUT_PATH); - - // Run sort - runSort(mr.createJobConf(), SORT_INPUT_PATH, SORT_OUTPUT_PATH); - } finally { - if (dfs != null) { dfs.shutdown(); } - if (mr != null) { mr.shutdown(); - } - } - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestControlledMapReduceJob.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestControlledMapReduceJob.java deleted file mode 100644 index f71a9a20829..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestControlledMapReduceJob.java +++ /dev/null @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.util.Properties; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.ControlledMapReduceJob.ControlledMapReduceJobRunner; -import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; - -/** - * Test to verify the controlled behavior of a ControlledMapReduceJob. - * - */ -public class TestControlledMapReduceJob extends ClusterMapReduceTestCase { - static final Log LOG = LogFactory.getLog(TestControlledMapReduceJob.class); - - /** - * Starts a job with 5 maps and 5 reduces. Then controls the finishing of - * tasks. Signals finishing tasks in batches and then verifies their - * completion. 
- * - * @throws Exception - */ - public void testControlledMapReduceJob() - throws Exception { - - Properties props = new Properties(); - props.setProperty(TTConfig.TT_MAP_SLOTS, "2"); - props.setProperty(TTConfig.TT_REDUCE_SLOTS, "2"); - startCluster(true, props); - LOG.info("Started the cluster"); - - ControlledMapReduceJobRunner jobRunner = - ControlledMapReduceJobRunner - .getControlledMapReduceJobRunner(createJobConf(), 7, 6); - jobRunner.start(); - ControlledMapReduceJob controlledJob = jobRunner.getJob(); - JobInProgress jip = - getMRCluster().getJobTrackerRunner().getJobTracker().getJob( - jobRunner.getJobID()); - - ControlledMapReduceJob.waitTillNTasksStartRunning(jip, true, 4); - LOG.info("Finishing 3 maps"); - controlledJob.finishNTasks(true, 3); - ControlledMapReduceJob.waitTillNTotalTasksFinish(jip, true, 3); - - ControlledMapReduceJob.waitTillNTasksStartRunning(jip, true, 4); - LOG.info("Finishing 4 more maps"); - controlledJob.finishNTasks(true, 4); - ControlledMapReduceJob.waitTillNTotalTasksFinish(jip, true, 7); - - ControlledMapReduceJob.waitTillNTasksStartRunning(jip, false, 4); - LOG.info("Finishing 2 reduces"); - controlledJob.finishNTasks(false, 2); - ControlledMapReduceJob.waitTillNTotalTasksFinish(jip, false, 2); - - ControlledMapReduceJob.waitTillNTasksStartRunning(jip, false, 4); - LOG.info("Finishing 4 more reduces"); - controlledJob.finishNTasks(false, 4); - ControlledMapReduceJob.waitTillNTotalTasksFinish(jip, false, 6); - - jobRunner.join(); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestDebugScript.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestDebugScript.java deleted file mode 100644 index 6c17da5b987..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestDebugScript.java +++ /dev/null @@ -1,188 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import java.io.BufferedReader; -import java.io.File; -import java.io.FileReader; -import java.io.IOException; -import java.io.PrintWriter; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; -import org.apache.hadoop.security.Groups; -import org.apache.hadoop.security.ShellBasedUnixGroupsMapping; -import org.apache.hadoop.security.UserGroupInformation; -import static org.junit.Assert.*; - -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -public class TestDebugScript { - - // base directory which is used by the debug script - private static final String BASE_DIR = new File(System.getProperty( - "test.build.data", "/tmp")).getAbsolutePath(); - - // script directory which is passed as dummy input + where debugscript - // is written. - private static final String SCRIPT_DIR = new File(BASE_DIR, "debugscript") - .getAbsolutePath(); - - // script which is used as debug script. - private static final String SCRIPT_FILE = new File(SCRIPT_DIR, - "debugscript.sh").getAbsolutePath(); - - // making an assumption we have bash in path. Same as TaskLog. - // The debug script just accesses the stderr file of the task - // and does a 'cat' on it - private static final String SCRIPT_CONTENT = "cat $2"; - - @Before - public void setup() throws Exception { - setupDebugScriptDirs(); - } - - @After - public void tearDown() throws Exception { - cleanupDebugScriptDirs(); - } - - /** - * Cleanup method which is used to delete the files folder which are generated - * by the testcase. - * - */ - static void cleanupDebugScriptDirs() { - File scriptFile = new File(SCRIPT_FILE); - scriptFile.delete(); - File scriptDir = new File(SCRIPT_DIR); - scriptDir.delete(); - } - - /** - * Setup method which is used to create necessary files and folder for the - * testcase. - * - * @throws Exception - */ - static void setupDebugScriptDirs() throws Exception { - File scriptDir = new File(SCRIPT_DIR); - if (!scriptDir.exists()) { - scriptDir.mkdirs(); - } - scriptDir.setExecutable(true, false); - scriptDir.setReadable(true, false); - scriptDir.setWritable(true, false); - File scriptFile = new File(SCRIPT_FILE); - PrintWriter writer = new PrintWriter(scriptFile); - writer.println(SCRIPT_CONTENT); - writer.flush(); - writer.close(); - scriptFile.setExecutable(true, false); - scriptFile.setReadable(true, false); - } - - /** - * Main test case which checks proper execution of the testcase. - * - * @throws Exception - */ - @Test - public void testDebugScript() throws Exception { - JobConf conf = new JobConf(); - conf.setLong(TTConfig.TT_SLEEP_TIME_BEFORE_SIG_KILL, 0L); - MiniMRCluster mrCluster = new MiniMRCluster(1, "file:///", 1, null, null, conf); - Path inputPath = new Path(SCRIPT_DIR); - Path outputPath = new Path(SCRIPT_DIR, "task_output"); - - // Run a failing mapper so debug script is launched. - JobID jobId = runFailingMapJob(mrCluster.createJobConf(), inputPath, - outputPath); - // construct the task id of first map task of failmap - TaskAttemptID taskId = new TaskAttemptID( - new TaskID(jobId,TaskType.MAP, 0), 0); - // verify if debug script was launched correctly and ran correctly. - verifyDebugScriptOutput(taskId); - } - - /** - * Method which verifies if debug script ran and ran correctly. 
- * - * @param taskId - * @param expectedUser - * expected user id from debug script - * @throws Exception - */ - static void verifyDebugScriptOutput(TaskAttemptID taskId) throws Exception { - verifyDebugScriptOutput(taskId, null, null, null); - } - /** - * Method which verifies if debug script ran and ran correctly. - * - * @param taskId - * @param expectedUser - * expected user id from debug script - * @param expectedPerms the expected permissions on the debugout file - * @throws Exception - */ - static void verifyDebugScriptOutput(TaskAttemptID taskId, String expectedUser, - String expectedGroup, String expectedPerms) throws Exception { - File output = TaskLog.getRealTaskLogFileLocation(taskId, false, - TaskLog.LogName.DEBUGOUT); - // Check the presence of the output file if the script is to be run. - assertTrue("Output file does not exists. DebugScript has not been run", - output.exists()); - // slurp the output from file, which is one line - BufferedReader reader = new BufferedReader(new FileReader(output)); - String out = reader.readLine(); - // close the file. - reader.close(); - // Check if there is any output - assertNotNull("DebugScript didn't generate output.", out); - assertTrue(out.contains("failing map")); - if (expectedPerms != null && expectedUser != null) { - //check whether the debugout file ownership/permissions are as expected - TestTaskTrackerLocalization.checkFilePermissions(output.getAbsolutePath(), - expectedPerms, expectedUser, expectedGroup); - } - } - - /** - * Method to run a failing mapper on a given Cluster. - * - * @param conf - * the JobConf for the job - * @param inputPath - * input path for the job. - * @param outputDir - * output directory for job. - * @throws IOException - */ - static JobID runFailingMapJob(JobConf conf, Path inputPath, Path outputDir) - throws IOException { - conf.setMapDebugScript(SCRIPT_FILE); - conf.setMaxMapAttempts(0); - conf.set("mapred.committer.job.setup.cleanup.needed", "false"); - RunningJob rJob = UtilsForTests.runJobFail(conf, inputPath, outputDir); - return rJob.getID(); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestEmptyJob.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestEmptyJob.java deleted file mode 100644 index 81d22afa435..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestEmptyJob.java +++ /dev/null @@ -1,255 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
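The failing map task that TestDebugScript relies on comes from UtilsForTests.runJobFail, which is outside this hunk. A hypothetical mapper of that kind, written against the old org.apache.hadoop.mapred API, only needs to put the marker string on stderr and then fail, since the debug script ("cat $2") simply echoes the task's stderr into the debugout file that verifyDebugScriptOutput inspects:

// Hypothetical failing mapper; the real one lives in UtilsForTests, which is
// not part of this hunk.
import java.io.IOException;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class FailingMapper
    implements Mapper<LongWritable, Text, Text, Text> {

  @Override
  public void configure(JobConf job) { }

  @Override
  public void map(LongWritable key, Text value,
                  OutputCollector<Text, Text> out, Reporter reporter)
      throws IOException {
    System.err.println("failing map");        // lands in the task's stderr
    throw new RuntimeException("failing map"); // forces the attempt to fail
  }

  @Override
  public void close() { }
}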
- */ - -package org.apache.hadoop.mapred; - -import java.io.File; -import java.io.IOException; -import java.net.URI; -import java.net.InetAddress; - -import junit.framework.TestCase; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.IntWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapred.lib.IdentityMapper; -import org.apache.hadoop.mapred.lib.IdentityReducer; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; - -/** - * A JUnit test to test Map-Reduce empty jobs. - */ -public class TestEmptyJob extends TestCase { - private static final Log LOG = - LogFactory.getLog(TestEmptyJob.class.getName()); - - private static String TEST_ROOT_DIR = - new File(System.getProperty("test.build.data", "/tmp")).toURI() - .toString().replace(' ', '+'); - - MiniMRCluster mr = null; - - /** Committer with commit waiting on a signal - */ - static class CommitterWithDelayCommit extends FileOutputCommitter { - @Override - public void commitJob(JobContext context) throws IOException { - Configuration conf = context.getConfiguration(); - Path share = new Path(conf.get("share")); - FileSystem fs = FileSystem.get(conf); - - - while (true) { - if (fs.exists(share)) { - break; - } - UtilsForTests.waitFor(100); - } - super.commitJob(context); - } - } - - /** - * Simple method running a MapReduce job with no input data. Used to test that - * such a job is successful. - * - * @param fileSys - * @param numMaps - * @param numReduces - * @return true if the MR job is successful, otherwise false - * @throws IOException - */ - private boolean launchEmptyJob(URI fileSys, int numMaps, int numReduces) - throws IOException { - // create an empty input dir - final Path inDir = new Path(TEST_ROOT_DIR, "testing/empty/input"); - final Path outDir = new Path(TEST_ROOT_DIR, "testing/empty/output"); - final Path inDir2 = new Path(TEST_ROOT_DIR, "testing/dummy/input"); - final Path outDir2 = new Path(TEST_ROOT_DIR, "testing/dummy/output"); - final Path share = new Path(TEST_ROOT_DIR, "share"); - - JobConf conf = mr.createJobConf(); - FileSystem fs = FileSystem.get(fileSys, conf); - fs.delete(new Path(TEST_ROOT_DIR), true); - fs.delete(outDir, true); - if (!fs.mkdirs(inDir)) { - LOG.warn("Can't create " + inDir); - return false; - } - - // use WordCount example - FileSystem.setDefaultUri(conf, fileSys); - conf.setJobName("empty"); - // use an InputFormat which returns no split - conf.setInputFormat(EmptyInputFormat.class); - conf.setOutputCommitter(CommitterWithDelayCommit.class); - conf.setOutputKeyClass(Text.class); - conf.setOutputValueClass(IntWritable.class); - conf.setMapperClass(IdentityMapper.class); - conf.setReducerClass(IdentityReducer.class); - FileInputFormat.setInputPaths(conf, inDir); - FileOutputFormat.setOutputPath(conf, outDir); - conf.setNumMapTasks(numMaps); - conf.setNumReduceTasks(numReduces); - conf.set("share", share.toString()); - - // run job and wait for completion - JobClient jc = new JobClient(conf); - RunningJob runningJob = jc.submitJob(conf); - JobInProgress job = mr.getJobTrackerRunner().getJobTracker().getJob(runningJob.getID()); - - InetAddress ip = InetAddress.getLocalHost(); - if (ip != null) { - assertTrue(job.getJobSubmitHostAddress().equalsIgnoreCase( - 
ip.getHostAddress())); - assertTrue(job.getJobSubmitHostName().equalsIgnoreCase(ip.getHostName())); - } - - while (true) { - if (job.isCleanupLaunched()) { - LOG.info("Waiting for cleanup to be launched for job " - + runningJob.getID()); - break; - } - UtilsForTests.waitFor(100); - } - - // submit another job so that the map load increases and scheduling happens - LOG.info("Launching dummy job "); - RunningJob dJob = null; - try { - JobConf dConf = new JobConf(conf); - dConf.setOutputCommitter(FileOutputCommitter.class); - dJob = UtilsForTests.runJob(dConf, inDir2, outDir2, 2, 0); - } catch (Exception e) { - LOG.info("Exception ", e); - throw new IOException(e); - } - - while (true) { - LOG.info("Waiting for job " + dJob.getID() + " to complete"); - try { - Thread.sleep(100); - } catch (InterruptedException e) { - } - if (dJob.isComplete()) { - break; - } - } - - // check if the second job is successful - assertTrue(dJob.isSuccessful()); - - // signal the cleanup - fs.create(share).close(); - - while (true) { - LOG.info("Waiting for job " + runningJob.getID() + " to complete"); - try { - Thread.sleep(100); - } catch (InterruptedException e) { - } - if (runningJob.isComplete()) { - break; - } - } - - assertTrue(runningJob.isComplete()); - assertTrue(runningJob.isSuccessful()); - JobID jobID = runningJob.getID(); - - TaskReport[] jobSetupTasks = jc.getSetupTaskReports(jobID); - assertTrue("Number of job-setup tips is not 2!", jobSetupTasks.length == 2); - assertTrue("Setup progress is " + runningJob.setupProgress() - + " and not 1.0", runningJob.setupProgress() == 1.0); - assertTrue("Setup task is not finished!", mr.getJobTrackerRunner() - .getJobTracker().getJob(jobID).isSetupFinished()); - - assertTrue("Number of maps is not zero!", jc.getMapTaskReports(runningJob - .getID()).length == 0); - assertTrue( - "Map progress is " + runningJob.mapProgress() + " and not 1.0!", - runningJob.mapProgress() == 1.0); - - assertTrue("Reduce progress is " + runningJob.reduceProgress() - + " and not 1.0!", runningJob.reduceProgress() == 1.0); - assertTrue("Number of reduces is not " + numReduces, jc - .getReduceTaskReports(runningJob.getID()).length == numReduces); - - TaskReport[] jobCleanupTasks = jc.getCleanupTaskReports(jobID); - assertTrue("Number of job-cleanup tips is not 2!", - jobCleanupTasks.length == 2); - assertTrue("Cleanup progress is " + runningJob.cleanupProgress() - + " and not 1.0", runningJob.cleanupProgress() == 1.0); - - assertTrue("Job output directory doesn't exit!", fs.exists(outDir)); - FileStatus[] list = fs.listStatus(outDir, - new Utils.OutputFileUtils.OutputFilesFilter()); - assertTrue("Number of part-files is " + list.length + " and not " - + numReduces, list.length == numReduces); - - // cleanup - fs.delete(outDir, true); - - // return job result - LOG.info("job is complete: " + runningJob.isSuccessful()); - return (runningJob.isSuccessful()); - } - - /** - * Test that a job with no input data (and thus with no input split and no map - * task to execute) is successful. 
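launchEmptyJob above configures EmptyInputFormat, "an InputFormat which returns no split"; that class is referenced but not shown in this diff. A minimal stand-in with the same behaviour, under the old org.apache.hadoop.mapred API, could look like the following (the class name here is illustrative):

// Minimal sketch of an InputFormat that produces no splits; an illustrative
// stand-in for the EmptyInputFormat the deleted test configures.
import java.io.IOException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.InputFormat;
import org.apache.hadoop.mapred.InputSplit;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;

public class NoSplitInputFormat implements InputFormat<Text, Text> {

  @Override
  public InputSplit[] getSplits(JobConf job, int numSplits) {
    return new InputSplit[0];                 // no splits => no map tasks
  }

  @Override
  public RecordReader<Text, Text> getRecordReader(
      InputSplit split, JobConf job, Reporter reporter) throws IOException {
    // Never invoked when there are no splits, but must still be well formed.
    return new RecordReader<Text, Text>() {
      public boolean next(Text key, Text value) { return false; }
      public Text createKey() { return new Text(); }
      public Text createValue() { return new Text(); }
      public long getPos() { return 0; }
      public void close() { }
      public float getProgress() { return 1.0f; }
    };
  }
}

With zero splits the framework schedules no map tasks at all, which is exactly the condition the empty-job assertions (map progress 1.0, zero map task reports) depend on.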
- * - * @throws IOException - */ - public void testEmptyJob() - throws IOException { - FileSystem fileSys = null; - try { - final int taskTrackers = 2; - JobConf conf = new JobConf(); - fileSys = FileSystem.get(conf); - - conf.set(JTConfig.JT_IPC_HANDLER_COUNT, "1"); - conf.set(JTConfig.JT_IPC_ADDRESS, "127.0.0.1:0"); - conf.set(JTConfig.JT_HTTP_ADDRESS, "127.0.0.1:0"); - conf.set(TTConfig.TT_HTTP_ADDRESS, "127.0.0.1:0"); - - mr = - new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1, - null, null, conf); - - assertTrue(launchEmptyJob(fileSys.getUri(), 3, 1)); - assertTrue(launchEmptyJob(fileSys.getUri(), 0, 0)); - } finally { - if (fileSys != null) { - fileSys.close(); - } - if (mr != null) { - mr.shutdown(); - } - } - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobDirCleanup.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobDirCleanup.java deleted file mode 100644 index cdbd96f5e59..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobDirCleanup.java +++ /dev/null @@ -1,113 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.File; -import java.io.IOException; - -import junit.framework.TestCase; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.JobID; -import org.apache.hadoop.mapreduce.SleepJob; -import org.apache.hadoop.security.UserGroupInformation; - -public class TestJobDirCleanup extends TestCase { - //The testcase brings up a cluster with many trackers, and - //runs a job with a single map and many reduces. The check is - //to see whether the job directories are cleaned up at the - //end of the job (indirectly testing whether all tasktrackers - //got a KillJobAction). 
- private JobID runSleepJob(JobConf conf) throws Exception { - SleepJob sleep = new SleepJob(); - sleep.setConf(conf); - Job job = sleep.createJob(1, 10, 1000, 1, 10000, 1); - job.waitForCompletion(true); - return job.getJobID(); - } - - public void testJobDirCleanup() throws Exception { - String namenode = null; - MiniDFSCluster dfs = null; - MiniMRCluster mr = null; - FileSystem fileSys = null; - try { - final int taskTrackers = 10; - Configuration conf = new Configuration(); - JobConf mrConf = new JobConf(); - mrConf.set(TTConfig.TT_REDUCE_SLOTS, "1"); - dfs = new MiniDFSCluster(conf, 1, true, null); - fileSys = dfs.getFileSystem(); - namenode = fileSys.getUri().toString(); - mr = new MiniMRCluster(10, namenode, 3, - null, null, mrConf); - // make cleanup inline sothat validation of existence of these directories - // can be done - mr.setInlineCleanupThreads(); - - // run the sleep job - JobConf jobConf = mr.createJobConf(); - JobID jobid = runSleepJob(jobConf); - - // verify the job directories are cleaned up. - verifyJobDirCleanup(mr, taskTrackers, jobid); - } finally { - if (fileSys != null) { fileSys.close(); } - if (dfs != null) { dfs.shutdown(); } - if (mr != null) { mr.shutdown(); } - } - } - - static void verifyJobDirCleanup(MiniMRCluster mr, int numTT, JobID jobid) - throws IOException { - // wait till killJobAction is sent to all trackers. - // this loops waits atmost for 10 seconds - boolean sent = true; - for (int i = 0; i < 100; i++) { - sent = true; - for (int j = 0; j < numTT; j++) { - if (mr.getTaskTrackerRunner(j).getTaskTracker().getRunningJob( - org.apache.hadoop.mapred.JobID.downgrade(jobid)) != null) { - sent = false; - break; - } - } - if (!sent) { - UtilsForTests.waitFor(100); - } else { - break; - } - } - - assertTrue("KillJobAction not sent for all trackers", sent); - String user = UserGroupInformation.getCurrentUser().getShortUserName(); - String jobDirStr = TaskTracker.getLocalJobDir(user, jobid.toString()); - for(int i=0; i < numTT; ++i) { - for (String localDir : mr.getTaskTrackerLocalDirs(i)) { - File jobDir = new File(localDir, jobDirStr); - assertFalse(jobDir + " is not cleaned up.", jobDir.exists()); - } - } - } -} - - diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobHistory.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobHistory.java deleted file mode 100644 index 6efe0b6f5e7..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobHistory.java +++ /dev/null @@ -1,1047 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
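The TestJobHistory file removed below validates history contents against regular expressions such as hostNamePattern and trackerNamePattern. A quick, self-contained illustration of what those patterns accept (the sample host and tracker strings are made up; the pattern strings are copied from the deleted test):

// Demonstrates the hostname and tracker-name regexes used by the deleted
// TestJobHistory; all three checks print "true".
import java.util.regex.Pattern;

public class HistoryPatternDemo {
  private static final String HOST = "(/(([\\w\\-\\.]+)/)+)?([\\w\\-\\.]+)";
  private static final String IP =
      "\\d\\d?\\d?\\.\\d\\d?\\d?\\.\\d\\d?\\d?\\.\\d\\d?\\d?";
  private static final Pattern HOST_P = Pattern.compile(HOST);
  private static final Pattern TRACKER_P =
      Pattern.compile("tracker_" + HOST + ":([\\w\\-\\.]+)/" + IP + ":[0-9]+");

  public static void main(String[] args) {
    // plain and rack-qualified host names are both accepted
    System.out.println(HOST_P.matcher("host1.foo.com").matches());
    System.out.println(HOST_P.matcher("/default-rack/host1.foo.com").matches());
    // tracker names carry a host, a local name, an IP and a port
    System.out.println(TRACKER_P
        .matcher("tracker_host1.foo.com:localhost/127.0.0.1:50060").matches());
  }
}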
- */ - -package org.apache.hadoop.mapred; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import junit.framework.TestCase; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.RawLocalFileSystem; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.*; -import org.apache.hadoop.mapreduce.Cluster; -import org.apache.hadoop.mapreduce.Counters; -import org.apache.hadoop.mapreduce.JobACL; -import org.apache.hadoop.mapreduce.JobID; -import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.mapreduce.TaskAttemptID; -import org.apache.hadoop.mapreduce.TaskID; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.mapreduce.jobhistory.JobHistory; -import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; -import org.apache.hadoop.mapreduce.jobhistory.JobSubmittedEvent; -import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo; -import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo; -import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo; -import org.apache.hadoop.net.Node; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authorize.AccessControlList; - -/** - * - * testJobHistoryFile - * Run a job that will be succeeded and validate its history file format and - * content. - * - * testJobHistoryJobStatus - * Run jobs that will be (1) succeeded (2) failed (3) killed. - * Validate job status read from history file in each case. 
- * - */ -public class TestJobHistory extends TestCase { - private static final Log LOG = LogFactory.getLog(TestJobHistory.class); - - private static String TEST_ROOT_DIR = new File(System.getProperty( - "test.build.data", "/tmp")).toURI().toString().replace(' ', '+'); - - private static final String LOG_DIR = System.getProperty("hadoop.log.dir"); - - private static final String LOCAL_LOG_DIR_URI = new File(LOG_DIR).toURI() - .toString().replace(' ', '+') + "/history"; - - private static final String DIGITS = "[0-9]+"; - - // hostname like /default-rack/host1.foo.com OR host1.foo.com - private static final Pattern hostNamePattern = Pattern.compile( - "(/(([\\w\\-\\.]+)/)+)?([\\w\\-\\.]+)"); - - private static final String IP_ADDR = - "\\d\\d?\\d?\\.\\d\\d?\\d?\\.\\d\\d?\\d?\\.\\d\\d?\\d?"; - - private static final Pattern trackerNamePattern = Pattern.compile( - "tracker_" + hostNamePattern + ":([\\w\\-\\.]+)/" + - IP_ADDR + ":" + DIGITS); - - - private static final Pattern splitsPattern = Pattern.compile( - hostNamePattern + "(," + hostNamePattern + ")*"); - - private static Map> taskIDsToAttemptIDs = - new HashMap>(); - - //Each Task End seen from history file is added here - private static List taskEnds = new ArrayList(); - - - // Validate Format of Job Level Keys, Values read from history file - private static void validateJobLevelKeyValuesFormat(JobInfo jobInfo, - String status) { - long submitTime = jobInfo.getSubmitTime(); - long launchTime = jobInfo.getLaunchTime(); - long finishTime = jobInfo.getFinishTime(); - - assertTrue("Invalid submit time", submitTime > 0); - assertTrue("SubmitTime > LaunchTime", submitTime <= launchTime); - assertTrue("LaunchTime > FinishTime", launchTime <= finishTime); - - String stat = jobInfo.getJobStatus(); - - assertTrue("Unexpected JOB_STATUS \"" + stat + "\" is seen in" + - " history file", (status.equals(stat))); - String priority = jobInfo.getPriority(); - - assertNotNull(priority); - assertTrue("Unknown priority for the job in history file", - (priority.equals("HIGH") || - priority.equals("LOW") || priority.equals("NORMAL") || - priority.equals("VERY_HIGH") || priority.equals("VERY_LOW"))); - } - - // Validate Format of Task Level Keys, Values read from history file - private static void validateTaskLevelKeyValuesFormat(JobInfo job, - boolean splitsCanBeEmpty) { - Map tasks = job.getAllTasks(); - - // validate info of each task - for (TaskInfo task : tasks.values()) { - - TaskID tid = task.getTaskId(); - long startTime = task.getStartTime(); - assertTrue("Invalid Start time", startTime > 0); - - long finishTime = task.getFinishTime(); - assertTrue("Task FINISH_TIME is < START_TIME in history file", - startTime < finishTime); - - // Make sure that the Task type exists and it is valid - TaskType type = task.getTaskType(); - assertTrue("Unknown Task type \"" + type + "\" is seen in " + - "history file for task " + tid, - (type.equals(TaskType.MAP) || - type.equals(TaskType.REDUCE) || - type.equals(TaskType.JOB_CLEANUP) || - type.equals(TaskType.JOB_SETUP))); - - if (type.equals(TaskType.MAP)) { - String splits = task.getSplitLocations(); - //order in the condition OR check is important here - if (!splitsCanBeEmpty || splits.length() != 0) { - Matcher m = splitsPattern.matcher(splits); - assertTrue("Unexpected format of SPLITS \"" + splits + "\" is seen" + - " in history file for task " + tid, m.matches()); - } - } - - // Validate task status - String status = task.getTaskStatus(); - assertTrue("Unexpected TASK_STATUS \"" + status + "\" is seen in" + 
- " history file for task " + tid, (status.equals("SUCCEEDED") || - status.equals("FAILED") || status.equals("KILLED"))); - } - } - - // Validate foramt of Task Attempt Level Keys, Values read from history file - private static void validateTaskAttemptLevelKeyValuesFormat(JobInfo job) { - Map tasks = job.getAllTasks(); - - // For each task - for (TaskInfo task : tasks.values()) { - // validate info of each attempt - for (TaskAttemptInfo attempt : task.getAllTaskAttempts().values()) { - - TaskAttemptID id = attempt.getAttemptId(); - assertNotNull(id); - - long startTime = attempt.getStartTime(); - assertTrue("Invalid Start time", startTime > 0); - - long finishTime = attempt.getFinishTime(); - assertTrue("Task FINISH_TIME is < START_TIME in history file", - startTime < finishTime); - - // Make sure that the Task type exists and it is valid - TaskType type = attempt.getTaskType(); - assertTrue("Unknown Task type \"" + type + "\" is seen in " + - "history file for task attempt " + id, - (type.equals(TaskType.MAP) || type.equals(TaskType.REDUCE) || - type.equals(TaskType.JOB_CLEANUP) || - type.equals(TaskType.JOB_SETUP))); - - // Validate task status - String status = attempt.getTaskStatus(); - assertTrue("Unexpected TASK_STATUS \"" + status + "\" is seen in" + - " history file for task attempt " + id, - (status.equals(TaskStatus.State.SUCCEEDED.toString()) || - status.equals(TaskStatus.State.FAILED.toString()) || - status.equals(TaskStatus.State.KILLED.toString()))); - - // Successful Reduce Task Attempts should have valid SHUFFLE_FINISHED - // time and SORT_FINISHED time - if (type.equals(TaskType.REDUCE) && - status.equals(TaskStatus.State.SUCCEEDED.toString())) { - long shuffleFinishTime = attempt.getShuffleFinishTime(); - assertTrue(startTime < shuffleFinishTime); - - long sortFinishTime = attempt.getSortFinishTime(); - assertTrue(shuffleFinishTime < sortFinishTime); - } - else if (type.equals(TaskType.MAP) && - status.equals(TaskStatus.State.SUCCEEDED.toString())) { - // Successful MAP Task Attempts should have valid MAP_FINISHED time - long mapFinishTime = attempt.getMapFinishTime(); - assertTrue(startTime < mapFinishTime); - } - - // check if hostname is valid - String hostname = attempt.getHostname(); - Matcher m = hostNamePattern.matcher(hostname); - assertTrue("Unexpected Host name of task attempt " + id, m.matches()); - - // check if trackername is valid - String trackerName = attempt.getTrackerName(); - m = trackerNamePattern.matcher(trackerName); - assertTrue("Unexpected tracker name of task attempt " + id, - m.matches()); - - if (!status.equals("KILLED")) { - // check if http port is valid - int httpPort = attempt.getHttpPort(); - assertTrue(httpPort > 0); - } - - // check if counters are parsable - Counters counters = attempt.getCounters(); - assertNotNull(counters); - } - } - } - - /** - * Returns the conf file name in the same - * @param path path of the jobhistory file - * @param running whether the job is running or completed - */ - private static Path getPathForConf(Path path, Path dir) { - String parts[] = path.getName().split("_"); - //TODO this is a hack :( - // jobtracker-hostname_jobtracker-identifier_ - String id = parts[0] + "_" + parts[1] + "_" + parts[2]; - return new Path(dir, id + "_conf.xml"); - } - - /** - * Validates the format of contents of history file - * (1) history file exists and in correct location - * (2) Verify if the history file is parsable - * (3) Validate the contents of history file - * (a) Format of all TIMEs are checked against a regex - * 
(b) validate legality/format of job level key, values - * (c) validate legality/format of task level key, values - * (d) validate legality/format of attempt level key, values - * (e) check if all the TaskAttempts, Tasks started are finished. - * Check finish of each TaskAttemptID against its start to make sure - * that all TaskAttempts, Tasks started are indeed finished and the - * history log lines are in the proper order. - * We want to catch ordering of history lines like - * Task START - * Attempt START - * Task FINISH - * Attempt FINISH - * (speculative execution is turned off for this). - * @param id job id - * @param conf job conf - */ - public static void validateJobHistoryFileFormat(JobHistory jobHistory, - JobID id, JobConf conf, - String status, boolean splitsCanBeEmpty) throws IOException { - - // Get the history file name - Path dir = jobHistory.getCompletedJobHistoryLocation(); - String logFileName = getDoneFile(jobHistory, conf, id, dir); - - // Framework history log file location - Path logFile = new Path(dir, logFileName); - FileSystem fileSys = logFile.getFileSystem(conf); - - // Check if the history file exists - assertTrue("History file does not exist", fileSys.exists(logFile)); - - JobHistoryParser parser = new JobHistoryParser(fileSys, - logFile.toUri().getPath()); - JobHistoryParser.JobInfo jobInfo = parser.parse(); - - // validate format of job level key, values - validateJobLevelKeyValuesFormat(jobInfo, status); - - // validate format of task level key, values - validateTaskLevelKeyValuesFormat(jobInfo, splitsCanBeEmpty); - - // validate format of attempt level key, values - validateTaskAttemptLevelKeyValuesFormat(jobInfo); - - // check if all the TaskAttempts, Tasks started are finished for - // successful jobs - if (status.equals("SUCCEEDED")) { - // Make sure that the lists in taskIDsToAttemptIDs are empty. - for(Iterator it = - taskIDsToAttemptIDs.keySet().iterator();it.hasNext();) { - String taskid = it.next(); - assertTrue("There are some Tasks which are not finished in history " + - "file.", taskEnds.contains(taskid)); - List attemptIDs = taskIDsToAttemptIDs.get(taskid); - if(attemptIDs != null) { - assertTrue("Unexpected. TaskID " + taskid + " has task attempt(s)" + - " that are not finished.", (attemptIDs.size() == 1)); - } - } - } - } - - // Validate Job Level Keys, Values read from history file by - // comparing them with the actual values from JT. 
- private static void validateJobLevelKeyValues(MiniMRCluster mr, - RunningJob job, JobInfo jobInfo, JobConf conf) throws IOException { - - JobTracker jt = mr.getJobTrackerRunner().getJobTracker(); - JobInProgress jip = jt.getJob(job.getID()); - - assertTrue("SUBMIT_TIME of job obtained from history file did not " + - "match the expected value", jip.getStartTime() == - jobInfo.getSubmitTime()); - - assertTrue("LAUNCH_TIME of job obtained from history file did not " + - "match the expected value", jip.getLaunchTime() == - jobInfo.getLaunchTime()); - - assertTrue("FINISH_TIME of job obtained from history file did not " + - "match the expected value", jip.getFinishTime() == - jobInfo.getFinishTime()); - - assertTrue("Job Status of job obtained from history file did not " + - "match the expected value", - jobInfo.getJobStatus().equals("SUCCEEDED")); - - assertTrue("Job Priority of job obtained from history file did not " + - "match the expected value", jip.getPriority().toString().equals( - jobInfo.getPriority())); - - assertTrue("Job Name of job obtained from history file did not " + - "match the expected value", - conf.getJobName().equals( - jobInfo.getJobname())); - String user = UserGroupInformation.getCurrentUser().getUserName(); - assertTrue("User Name of job obtained from history file did not " + - "match the expected value", - user.equals( - jobInfo.getUsername())); - - // Validate job counters - Counters c = new Counters(jip.getCounters()); - Counters jiCounters = jobInfo.getTotalCounters(); - assertTrue("Counters of job obtained from history file did not " + - "match the expected value", - c.equals(jiCounters)); - - // Validate number of total maps, total reduces, finished maps, - // finished reduces, failed maps, failed recudes - assertTrue("Unexpected number of total maps in history file", - jobInfo.getTotalMaps() == jip.desiredMaps()); - - assertTrue("Unexpected number of total reduces in history file", - jobInfo.getTotalReduces() == jip.desiredReduces()); - - assertTrue("Unexpected number of finished maps in history file", - jobInfo.getFinishedMaps() == jip.finishedMaps()); - - assertTrue("Unexpected number of finished reduces in history file", - jobInfo.getFinishedReduces() == jip.finishedReduces()); - - assertTrue("Unexpected number of failed maps in history file", - jobInfo.getFailedMaps() == jip.failedMapTasks); - - assertTrue("Unexpected number of failed reduces in history file", - jobInfo.getFailedReduces() == jip.failedReduceTasks); - } - - // Validate Task Level Keys, Values read from history file by - // comparing them with the actual values from JT. 
- private static void validateTaskLevelKeyValues(MiniMRCluster mr, - RunningJob job, JobInfo jobInfo) throws IOException { - - JobTracker jt = mr.getJobTrackerRunner().getJobTracker(); - JobInProgress jip = jt.getJob(job.getID()); - - // Get the 1st map, 1st reduce, cleanup & setup taskIDs and - // validate their history info - TaskID mapTaskId = new TaskID(job.getID(), TaskType.MAP, 0); - TaskID reduceTaskId = new TaskID(job.getID(), TaskType.REDUCE, 0); - - TaskInProgress cleanups[] = jip.cleanup; - TaskID cleanupTaskId; - if (cleanups[0].isComplete()) { - cleanupTaskId = cleanups[0].getTIPId(); - } - else { - cleanupTaskId = cleanups[1].getTIPId(); - } - - TaskInProgress setups[] = jip.setup; - TaskID setupTaskId; - if (setups[0].isComplete()) { - setupTaskId = setups[0].getTIPId(); - } - else { - setupTaskId = setups[1].getTIPId(); - } - - Map tasks = jobInfo.getAllTasks(); - - // validate info of the 4 tasks(cleanup, setup, 1st map, 1st reduce) - - for (TaskInfo task : tasks.values()) { - TaskID tid = task.getTaskId(); - - if (tid.equals(mapTaskId) || - tid.equals(reduceTaskId) || - tid.equals(cleanupTaskId) || - tid.equals(setupTaskId)) { - - TaskInProgress tip = jip.getTaskInProgress - (org.apache.hadoop.mapred.TaskID.downgrade(tid)); - assertTrue("START_TIME of Task " + tid + " obtained from history " + - "file did not match the expected value", - tip.getExecStartTime() == - task.getStartTime()); - - assertTrue("FINISH_TIME of Task " + tid + " obtained from history " + - "file did not match the expected value", - tip.getExecFinishTime() == - task.getFinishTime()); - - if (tid == mapTaskId) {//check splits only for map task - assertTrue("Splits of Task " + tid + " obtained from history file " + - " did not match the expected value", - tip.getSplitNodes().equals(task.getSplitLocations())); - } - - TaskAttemptID attemptId = tip.getSuccessfulTaskid(); - TaskStatus ts = tip.getTaskStatus( - org.apache.hadoop.mapred.TaskAttemptID.downgrade(attemptId)); - - // Validate task counters - Counters c = new Counters(ts.getCounters()); - assertTrue("Counters of Task " + tid + " obtained from history file " + - " did not match the expected value", - c.equals(task.getCounters())); - } - } - } - - // Validate Task Attempt Level Keys, Values read from history file by - // comparing them with the actual values from JT. 
- private static void validateTaskAttemptLevelKeyValues(MiniMRCluster mr, - RunningJob job, JobInfo jobInfo) throws IOException { - - JobTracker jt = mr.getJobTrackerRunner().getJobTracker(); - JobInProgress jip = jt.getJob(job.getID()); - - Map tasks = jobInfo.getAllTasks(); - - // For each task - for (TaskInfo task : tasks.values()) { - // validate info of each attempt - for (TaskAttemptInfo attempt : task.getAllTaskAttempts().values()) { - - TaskAttemptID attemptId = attempt.getAttemptId(); - TaskID tid = attemptId.getTaskID(); - - TaskInProgress tip = jip.getTaskInProgress - (org.apache.hadoop.mapred.TaskID.downgrade(tid)); - - TaskStatus ts = tip.getTaskStatus( - org.apache.hadoop.mapred.TaskAttemptID.downgrade(attemptId)); - - // Validate task attempt start time - assertTrue("START_TIME of Task attempt " + attemptId + - " obtained from " + - "history file did not match the expected value", - ts.getStartTime() == attempt.getStartTime()); - - // Validate task attempt finish time - assertTrue("FINISH_TIME of Task attempt " + attemptId + - " obtained from " + - "history file " + ts.getFinishTime() + - " did not match the expected value, " + - attempt.getFinishTime(), - ts.getFinishTime() == attempt.getFinishTime()); - - - TaskTrackerStatus ttStatus = - jt.getTaskTrackerStatus(ts.getTaskTracker()); - - if (ttStatus != null) { - assertTrue("http port of task attempt " + attemptId + - " obtained from " + - "history file did not match the expected value", - ttStatus.getHttpPort() == - attempt.getHttpPort()); - - if (attempt.getTaskStatus().equals("SUCCEEDED")) { - Node node = jt.getNode(ttStatus.getHost()); - String ttHostname = node.getName(); - - // check if hostname is valid - assertTrue("Host name : " + attempt.getHostname() + " of task attempt " + attemptId + - " obtained from" + - " history file did not match the expected value " + ttHostname, - ttHostname.equals(attempt.getHostname())); - } - } - if (attempt.getTaskStatus().equals("SUCCEEDED")) { - // Validate SHUFFLE_FINISHED time and SORT_FINISHED time of - // Reduce Task Attempts - if (attempt.getTaskType().equals("REDUCE")) { - assertTrue("SHUFFLE_FINISHED time of task attempt " + attemptId + - " obtained from history file did not match the expected" + - " value", ts.getShuffleFinishTime() == - attempt.getShuffleFinishTime()); - assertTrue("SORT_FINISHED time of task attempt " + attemptId + - " obtained from history file did not match the expected" + - " value", ts.getSortFinishTime() == - attempt.getSortFinishTime()); - } - - //Validate task counters - Counters c = new Counters(ts.getCounters()); - assertTrue("Counters of Task Attempt " + attemptId + " obtained from " + - "history file did not match the expected value", - c.equals(attempt.getCounters())); - } - - // check if tracker name is valid - assertTrue("Tracker name of task attempt " + attemptId + - " obtained from " + - "history file did not match the expected value", - ts.getTaskTracker().equals(attempt.getTrackerName())); - } - } - } - - /** - * Checks if the history file content is as expected comparing with the - * actual values obtained from JT. - * Job Level, Task Level and Task Attempt Level Keys, Values are validated. 
- * @param job RunningJob object of the job whose history is to be validated - * @param conf job conf - */ - public static void validateJobHistoryFileContent(MiniMRCluster mr, - RunningJob job, JobConf conf) throws IOException { - - JobID id = job.getID(); - JobHistory jobHistory = - mr.getJobTrackerRunner().getJobTracker().getJobHistory(); - Path doneDir = jobHistory.getCompletedJobHistoryLocation(); - // Get the history file name - String logFileName = getDoneFile(jobHistory, conf, id, doneDir); - - // Framework history log file location - Path logFile = new Path(doneDir, logFileName); - FileSystem fileSys = logFile.getFileSystem(conf); - - // Check if the history file exists - assertTrue("History file does not exist", fileSys.exists(logFile)); - - JobHistoryParser parser = new JobHistoryParser(fileSys, - logFile.toUri().getPath()); - - JobHistoryParser.JobInfo jobInfo = parser.parse(); - // Now the history file contents are available in jobInfo. Let us compare - // them with the actual values from JT. - validateJobLevelKeyValues(mr, job, jobInfo, conf); - validateTaskLevelKeyValues(mr, job, jobInfo); - validateTaskAttemptLevelKeyValues(mr, job, jobInfo); - - // Also JobACLs should be correct - if (mr.getJobTrackerRunner().getJobTracker() - .areACLsEnabled()) { - AccessControlList acl = new AccessControlList( - conf.get(JobACL.VIEW_JOB.getAclName(), " ")); - assertTrue("VIEW_JOB ACL is not properly logged to history file.", - acl.toString().equals( - jobInfo.getJobACLs().get(JobACL.VIEW_JOB).toString())); - acl = new AccessControlList( - conf.get(JobACL.MODIFY_JOB.getAclName(), " ")); - assertTrue("MODIFY_JOB ACL is not properly logged to history file.", - acl.toString().equals( - jobInfo.getJobACLs().get(JobACL.MODIFY_JOB).toString())); - } - - // Validate the job queue name - assertTrue(jobInfo.getJobQueueName().equals(conf.getQueueName())); - } - - /** - * Tests the case where the log directory is on local disk, the done folder is on HDFS, - * and the default FS is local. - */ - public void testDoneFolderOnHDFS() throws IOException, InterruptedException { - runDoneFolderTest("history_done", LOCAL_LOG_DIR_URI); - } - - /** - * Tests the case where the log directory and done folder is on local disk - * and the default FS is local. - */ - public void testDoneFolderNotOnDefaultFileSystem() throws IOException, InterruptedException { - runDoneFolderTest(TEST_ROOT_DIR + "/history_done", LOCAL_LOG_DIR_URI); - } - - /** - * Tests the case where the log directory is on HDFS and done folder is on local disk - * and the default FS is local. 
- */ - public void testHistoryFolderOnHDFS() throws IOException, InterruptedException { - String logDir = "hdfs://localhost:%d/history"; - runDoneFolderTest(TEST_ROOT_DIR + "/done", logDir); - } - - private void runDoneFolderTest(String doneFolder, String historyFolder) throws IOException, InterruptedException { - MiniMRCluster mr = null; - MiniDFSCluster dfsCluster = null; - try { - JobConf conf = new JobConf(); - // keep for less time - conf.setLong("mapred.jobtracker.retirejob.check", 1000); - conf.setLong("mapred.jobtracker.retirejob.interval", 1000); - - //set the done folder location - conf.set(JTConfig.JT_JOBHISTORY_COMPLETED_LOCATION, doneFolder); - - dfsCluster = new MiniDFSCluster(conf, 2, true, null); - String logDir = String.format(historyFolder, dfsCluster.getNameNodePort()); - - //set the history folder location - conf.set(JTConfig.JT_JOBHISTORY_LOCATION, logDir); - - Path logDirPath = new Path(logDir); - FileSystem logDirFs = logDirPath.getFileSystem(conf); - //there may be some stale files, clean them - if (logDirFs.exists(logDirPath)) { - boolean deleted = logDirFs.delete(logDirPath, true); - LOG.info(logDirPath + " deleted " + deleted); - } - - logDirFs.mkdirs(logDirPath); - assertEquals("No of file in logDir not correct", 0, - logDirFs.listStatus(logDirPath).length); - logDirFs.create(new Path(logDirPath, "f1")); - logDirFs.create(new Path(logDirPath, "f2")); - assertEquals("No of file in logDir not correct", 2, - logDirFs.listStatus(logDirPath).length); - - mr = new MiniMRCluster(2, dfsCluster.getFileSystem().getUri().toString(), - 3, null, null, conf); - - assertEquals("Files in logDir did not move to DONE folder", - 0, logDirFs.listStatus(logDirPath).length); - - JobHistory jobHistory = - mr.getJobTrackerRunner().getJobTracker().getJobHistory(); - Path doneDir = jobHistory.getCompletedJobHistoryLocation(); - - assertEquals("Files in DONE dir not correct", - 2, doneDir.getFileSystem(conf).listStatus(doneDir).length); - - // run the TCs - conf = mr.createJobConf(); - - FileSystem fs = FileSystem.get(conf); - // clean up - fs.delete(new Path("succeed"), true); - - Path inDir = new Path("succeed/input"); - Path outDir = new Path("succeed/output"); - - //Disable speculative execution - conf.setSpeculativeExecution(false); - - // Make sure that the job is not removed from memory until we do finish - // the validation of history file content - conf.setInt("mapred.jobtracker.completeuserjobs.maximum", 10); - - // Run a job that will be succeeded and validate its history file - RunningJob job = UtilsForTests.runJobSucceed(conf, inDir, outDir); - - assertEquals("History DONE folder not correct", - new Path(doneFolder).getName(), doneDir.getName()); - JobID id = job.getID(); - String logFileName = getDoneFile(jobHistory, conf, id, doneDir); - - // Framework history log file location - Path logFile = new Path(doneDir, logFileName); - FileSystem fileSys = logFile.getFileSystem(conf); - - Cluster cluster = new Cluster(conf); - assertEquals("Client returned wrong history url", logFile.toString(), - cluster.getJobHistoryUrl(id)); - - // Check if the history file exists - assertTrue("History file does not exist", fileSys.exists(logFile)); - - // check if the corresponding conf file exists - Path confFile = getPathForConf(logFile, doneDir); - assertTrue("Config for completed jobs doesnt exist", - fileSys.exists(confFile)); - - // check if the file exists in a done folder - assertTrue("Completed job config doesnt exist in the done folder", - 
doneDir.getName().equals(confFile.getParent().getName())); - - // check if the file exists in a done folder - assertTrue("Completed jobs doesnt exist in the done folder", - doneDir.getName().equals(logFile.getParent().getName())); - - - // check if the job file is removed from the history location - Path runningJobsHistoryFolder = logFile.getParent().getParent(); - Path runningJobHistoryFilename = - new Path(runningJobsHistoryFolder, logFile.getName()); - Path runningJobConfFilename = - new Path(runningJobsHistoryFolder, confFile.getName()); - assertFalse("History file not deleted from the running folder", - fileSys.exists(runningJobHistoryFilename)); - assertFalse("Config for completed jobs not deleted from running folder", - fileSys.exists(runningJobConfFilename)); - - validateJobHistoryFileFormat(jobHistory, - job.getID(), conf, "SUCCEEDED", false); - validateJobHistoryFileContent(mr, job, conf); - - // get the job conf filename - } finally { - if (mr != null) { - cleanupLocalFiles(mr); - mr.shutdown(); - } - if (dfsCluster != null) { - dfsCluster.shutdown(); - } - } - } - - /** Run a job that will be succeeded and validate its history file format - * and its content. - */ - public void testJobHistoryFile() throws Exception { - MiniMRCluster mr = null; - try { - JobConf conf = new JobConf(); - - // keep for less time - conf.setLong("mapred.jobtracker.retirejob.check", 1000); - conf.setLong("mapred.jobtracker.retirejob.interval", 1000); - - //set the done folder location - String doneFolder = TEST_ROOT_DIR + "history_done"; - conf.set(JTConfig.JT_JOBHISTORY_COMPLETED_LOCATION, doneFolder); - - // Enable ACLs so that they are logged to history - conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true); - - mr = new MiniMRCluster(2, "file:///", 3, null, null, conf); - - // run the TCs - conf = mr.createJobConf(); - - FileSystem fs = FileSystem.get(conf); - // clean up - fs.delete(new Path(TEST_ROOT_DIR + "/succeed"), true); - - Path inDir = new Path(TEST_ROOT_DIR + "/succeed/input"); - Path outDir = new Path(TEST_ROOT_DIR + "/succeed/output"); - - //Disable speculative execution - conf.setSpeculativeExecution(false); - - // set the job acls - conf.set(JobACL.VIEW_JOB.getAclName(), "user1,user2 group1,group2"); - conf.set(JobACL.MODIFY_JOB.getAclName(), "user3,user4 group3,group4"); - - // Make sure that the job is not removed from memory until we do finish - // the validation of history file content - conf.setInt("mapred.jobtracker.completeuserjobs.maximum", 10); - - // Run a job that will be succeeded and validate its history file - RunningJob job = UtilsForTests.runJobSucceed(conf, inDir, outDir); - JobHistory jobHistory = - mr.getJobTrackerRunner().getJobTracker().getJobHistory(); - Path doneDir = jobHistory.getCompletedJobHistoryLocation(); - assertEquals("History DONE folder not correct", - doneFolder, doneDir.toString()); - JobID id = job.getID(); - String logFileName = getDoneFile(jobHistory, conf, id, doneDir); - - // Framework history log file location - Path logFile = new Path(doneDir, logFileName); - FileSystem fileSys = logFile.getFileSystem(conf); - - // Check if the history file exists - assertTrue("History file does not exist", fileSys.exists(logFile)); - - // check if the corresponding conf file exists - Path confFile = getPathForConf(logFile, doneDir); - assertTrue("Config for completed jobs doesnt exist", - fileSys.exists(confFile)); - - // check if the conf file exists in a done folder - assertTrue("Completed job config doesnt exist in the done folder", - 
doneDir.getName().equals(confFile.getParent().getName())); - - // check if the file exists in a done folder - assertTrue("Completed jobs doesnt exist in the done folder", - doneDir.getName().equals(logFile.getParent().getName())); - - // check if the job file is removed from the history location - Path runningJobsHistoryFolder = logFile.getParent().getParent(); - Path runningJobHistoryFilename = - new Path(runningJobsHistoryFolder, logFile.getName()); - Path runningJobConfFilename = - new Path(runningJobsHistoryFolder, confFile.getName()); - assertFalse("History file not deleted from the running folder", - fileSys.exists(runningJobHistoryFilename)); - assertFalse("Config for completed jobs not deleted from running folder", - fileSys.exists(runningJobConfFilename)); - - validateJobHistoryFileFormat(jobHistory, job.getID(), conf, - "SUCCEEDED", false); - validateJobHistoryFileContent(mr, job, conf); - - // get the job conf filename - JobTracker jt = mr.getJobTrackerRunner().getJobTracker(); - String name = jt.getLocalJobFilePath(job.getID()); - File file = new File(name); - - // check if the file get deleted - while (file.exists()) { - LOG.info("Waiting for " + file + " to be deleted"); - UtilsForTests.waitFor(100); - } - } finally { - if (mr != null) { - cleanupLocalFiles(mr); - mr.shutdown(); - } - } - } - - //Returns the file in the done folder - //Waits for sometime to get the file moved to done - private static String getDoneFile(JobHistory jobHistory, - JobConf conf, JobID id, - Path doneDir) throws IOException { - String name = null; - String user = UserGroupInformation.getCurrentUser().getUserName(); - for (int i = 0; name == null && i < 20; i++) { - Path path = JobHistory.getJobHistoryFile( - jobHistory.getCompletedJobHistoryLocation(), id, user); - if (path.getFileSystem(conf).exists(path)) { - name = path.toString(); - } - UtilsForTests.waitFor(1000); - } - assertNotNull("Job history file not created", name); - return name; - } - - private void cleanupLocalFiles(MiniMRCluster mr) - throws IOException { - Configuration conf = mr.createJobConf(); - JobTracker jt = mr.getJobTrackerRunner().getJobTracker(); - Path sysDir = new Path(jt.getSystemDir()); - FileSystem fs = sysDir.getFileSystem(conf); - fs.delete(sysDir, true); - Path jobHistoryDir = - mr.getJobTrackerRunner().getJobTracker().getJobHistory(). 
- getJobHistoryLocation(); - fs = jobHistoryDir.getFileSystem(conf); - fs.delete(jobHistoryDir, true); - } - - /** - * Checks if the history file has expected job status - * @param id job id - * @param conf job conf - */ - private static void validateJobHistoryJobStatus(JobHistory jobHistory, - JobID id, JobConf conf, String status) throws IOException { - - // Get the history file name - Path doneDir = jobHistory.getCompletedJobHistoryLocation(); - String logFileName = getDoneFile(jobHistory, conf, id, doneDir); - - // Framework history log file location - Path logFile = new Path(doneDir, logFileName); - FileSystem fileSys = logFile.getFileSystem(conf); - - // Check if the history file exists - assertTrue("History file does not exist", fileSys.exists(logFile)); - - // check history file permission - assertTrue("History file permissions does not match", - fileSys.getFileStatus(logFile).getPermission().equals( - new FsPermission(JobHistory.HISTORY_FILE_PERMISSION))); - - JobHistoryParser parser = new JobHistoryParser(fileSys, - logFile.toUri().getPath()); - JobHistoryParser.JobInfo jobInfo = parser.parse(); - - - assertTrue("Job Status read from job history file is not the expected" + - " status", status.equals(jobInfo.getJobStatus())); - } - - // run jobs that will be (1) succeeded (2) failed (3) killed - // and validate job status read from history file in each case - public void testJobHistoryJobStatus() throws IOException { - MiniMRCluster mr = null; - try { - mr = new MiniMRCluster(2, "file:///", 3); - - // run the TCs - JobConf conf = mr.createJobConf(); - - FileSystem fs = FileSystem.get(conf); - // clean up - fs.delete(new Path(TEST_ROOT_DIR + "/succeedfailkilljob"), true); - - Path inDir = new Path(TEST_ROOT_DIR + "/succeedfailkilljob/input"); - Path outDir = new Path(TEST_ROOT_DIR + "/succeedfailkilljob/output"); - - // Run a job that will be succeeded and validate its job status - // existing in history file - RunningJob job = UtilsForTests.runJobSucceed(conf, inDir, outDir); - - JobHistory jobHistory = - mr.getJobTrackerRunner().getJobTracker().getJobHistory(); - validateJobHistoryJobStatus(jobHistory, job.getID(), conf, - JobStatus.getJobRunState(JobStatus.SUCCEEDED)); - - // Run a job that will be failed and validate its job status - // existing in history file - job = UtilsForTests.runJobFail(conf, inDir, outDir); - validateJobHistoryJobStatus(jobHistory, job.getID(), conf, - JobStatus.getJobRunState(JobStatus.FAILED)); - - // Run a job that will be killed and validate its job status - // existing in history file - job = UtilsForTests.runJobKill(conf, inDir, outDir); - validateJobHistoryJobStatus(jobHistory, job.getID(), conf, - JobStatus.getJobRunState(JobStatus.KILLED)); - - } finally { - if (mr != null) { - cleanupLocalFiles(mr); - mr.shutdown(); - } - } - } - - public void testHistoryInitWithCorruptFiles() throws IOException { - MiniMRCluster mr = null; - try { - JobConf conf = new JobConf(); - Path historyDir = new Path(System.getProperty("test.build.data", "."), - "history"); - conf.set(JTConfig.JT_JOBHISTORY_LOCATION, - historyDir.toString()); - conf.setUser("user"); - - FileSystem localFs = FileSystem.getLocal(conf); - - //there may be some stale files, clean them - if (localFs.exists(historyDir)) { - boolean deleted = localFs.delete(historyDir, true); - LOG.info(historyDir + " deleted " + deleted); - } - - // Start the cluster, create a history file - mr = new MiniMRCluster(0, "file:///", 3, null, null, conf); - JobTracker jt = 
mr.getJobTrackerRunner().getJobTracker(); - JobHistory jh = jt.getJobHistory(); - final JobID jobId = JobID.forName("job_200809171136_0001"); - jh.setupEventWriter(jobId, conf); - Map jobACLs = - new HashMap(); - JobSubmittedEvent jse = - new JobSubmittedEvent(jobId, "job", "user", 12345, "path", jobACLs, - "default"); - jh.logEvent(jse, jobId); - jh.closeWriter(jobId); - - // Corrupt the history file. User RawLocalFileSystem so that we - // do keep the original CRC file intact. - String historyFileName = jobId.toString() + "_" + "user"; - Path historyFilePath = new Path (historyDir.toString(), historyFileName); - - RawLocalFileSystem fs = (RawLocalFileSystem) - FileSystem.getLocal(conf).getRaw(); - - FSDataOutputStream out = fs.create(historyFilePath, true); - byte[] corruptData = new byte[32]; - new Random().nextBytes(corruptData); - out.write (corruptData, 0, 32); - out.close(); - - // Stop and start the tracker. The tracker should come up nicely - mr.stopJobTracker(); - mr.startJobTracker(); - jt = mr.getJobTrackerRunner().getJobTracker(); - assertNotNull("JobTracker did not come up", jt ); - jh = jt.getJobHistory(); - assertNotNull("JobHistory did not get initialized correctly", jh); - - // Only the done folder should remain in the history directory - assertEquals("Files in logDir did not move to DONE folder", - 1, historyDir.getFileSystem(conf).listStatus(historyDir).length); - } finally { - if (mr != null) { - cleanupLocalFiles(mr); - mr.shutdown(); - } - } - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobHistoryParsing.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobHistoryParsing.java deleted file mode 100644 index 35abd48d124..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobHistoryParsing.java +++ /dev/null @@ -1,138 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
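The validation methods in the TestJobHistory file removed above all follow the same parse-and-inspect flow: locate the completed history file, run it through JobHistoryParser, and compare the resulting JobInfo against expectations. A condensed sketch of that flow, using only parser calls that appear in the deleted code (the wrapper class and method names of the sketch itself are illustrative):

// Condensed sketch of the parse-and-inspect flow used by the deleted
// TestJobHistory; the history file path is supplied by the caller.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;

public class HistorySanityCheck {
  public static void check(Configuration conf, Path historyFile) throws Exception {
    FileSystem fs = historyFile.getFileSystem(conf);
    JobHistoryParser parser =
        new JobHistoryParser(fs, historyFile.toUri().getPath());
    JobHistoryParser.JobInfo info = parser.parse();

    // Times must be present and ordered, as validateJobLevelKeyValuesFormat
    // asserted in the deleted test.
    if (info.getSubmitTime() <= 0
        || info.getSubmitTime() > info.getLaunchTime()
        || info.getLaunchTime() > info.getFinishTime()) {
      throw new IllegalStateException("history times out of order");
    }
    System.out.println(info.getJobStatus() + ", "
        + info.getAllTasks().size() + " tasks recorded");
  }
}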
- */ -package org.apache.hadoop.mapred; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import junit.framework.TestCase; - -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapreduce.Counters; -import org.apache.hadoop.mapreduce.JobACL; -import org.apache.hadoop.mapreduce.JobID; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.mapreduce.jobhistory.JobFinishedEvent; -import org.apache.hadoop.mapreduce.jobhistory.JobHistory; -import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; -import org.apache.hadoop.mapreduce.jobhistory.JobSubmittedEvent; -import org.apache.hadoop.mapreduce.jobhistory.TaskFinishedEvent; -import org.apache.hadoop.security.authorize.AccessControlList; - -/** - * Unit test to test if the JobHistory writer/parser is able to handle - * values with special characters - * This test also tests if the job history module is able to gracefully - * ignore events after the event writer is closed - * - */ -public class TestJobHistoryParsing extends TestCase { - - public void testHistoryParsing() throws IOException { - // open a test history file - Path historyDir = new Path(System.getProperty("test.build.data", "."), - "history"); - JobConf conf = new JobConf(); - conf.set("hadoop.job.history.location", historyDir.toString()); - FileSystem fs = FileSystem.getLocal(new JobConf()); - - // Some weird strings - String username = "user"; - String weirdJob = "Value has \n new line \n and " + - "dot followed by new line .\n in it +" + - "ends with escape\\"; - String weirdPath = "Value has characters: " + - "`1234567890-=qwertyuiop[]\\asdfghjkl;'zxcvbnm,./" + - "~!@#$%^&*()_+QWERTYUIOP{}|ASDFGHJKL:\"'ZXCVBNM<>?" + - "\t\b\n\f\"\n in it"; - - String weirdJobQueueName = "my\njob\nQueue\\"; - conf.setUser(username); - - MiniMRCluster mr = null; - mr = new MiniMRCluster(2, "file:///", 3, null, null, conf); - - JobTracker jt = mr.getJobTrackerRunner().getJobTracker(); - JobHistory jh = jt.getJobHistory(); - - jh.init(jt, conf, "localhost", 1234); - JobID jobId = JobID.forName("job_200809171136_0001"); - jh.setupEventWriter(jobId, conf); - Map jobACLs = - new HashMap(); - AccessControlList viewJobACL = - new AccessControlList("user1,user2 group1,group2"); - AccessControlList modifyJobACL = - new AccessControlList("user3,user4 group3, group4"); - jobACLs.put(JobACL.VIEW_JOB, viewJobACL); - jobACLs.put(JobACL.MODIFY_JOB, modifyJobACL); - JobSubmittedEvent jse = - new JobSubmittedEvent(jobId, weirdJob, username, 12345, weirdPath, - jobACLs, weirdJobQueueName); - jh.logEvent(jse, jobId); - - JobFinishedEvent jfe = - new JobFinishedEvent(jobId, 12346, 1, 1, 0, 0, new Counters(), - new Counters(), new Counters()); - jh.logEvent(jfe, jobId); - jh.closeWriter(jobId); - - // Try to write one more event now, should not fail - TaskID tid = TaskID.forName("task_200809171136_0001_m_000002"); - TaskFinishedEvent tfe = - new TaskFinishedEvent(tid, null, 0, TaskType.MAP, "", null); - boolean caughtException = false; - - try { - jh.logEvent(tfe, jobId); - } catch (Exception e) { - caughtException = true; - } - - assertFalse("Writing an event after closing event writer is not handled", - caughtException); - - String historyFileName = jobId.toString() + "_" + username; - Path historyFilePath = new Path (historyDir.toString(), - historyFileName); - - System.out.println("History File is " + historyFilePath.toString()); - - JobHistoryParser parser = - new JobHistoryParser(fs, historyFilePath); - - 
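The deleted TestJobHistoryParsing test above drives JobHistoryParser over a freshly written history file and then asserts on the parsed JobInfo fields. As a hedged, self-contained sketch of just that read path, the snippet below uses only the calls already visible in this hunk (the JobHistoryParser constructor taking a FileSystem and a Path, parse(), and the JobInfo getters); the history file location is an assumed placeholder, not a path taken from the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;

public class JobHistoryReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);

    // Assumed placeholder: a completed job history file on the local file system.
    Path historyFile = new Path("/tmp/history/job_200809171136_0001_user");

    // Parse the file once and read the same fields the test asserts on.
    JobHistoryParser parser = new JobHistoryParser(fs, historyFile);
    JobHistoryParser.JobInfo jobInfo = parser.parse();

    System.out.println("user   = " + jobInfo.getUsername());
    System.out.println("name   = " + jobInfo.getJobname());
    System.out.println("queue  = " + jobInfo.getJobQueueName());
    System.out.println("status = " + jobInfo.getJobStatus());
    System.out.println("acls   = " + jobInfo.getJobACLs());
  }
}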
JobHistoryParser.JobInfo jobInfo = parser.parse(); - - assertTrue (jobInfo.getUsername().equals(username)); - assertTrue(jobInfo.getJobname().equals(weirdJob)); - assertTrue(jobInfo.getJobQueueName().equals(weirdJobQueueName)); - assertTrue(jobInfo.getJobConfPath().equals(weirdPath)); - Map parsedACLs = jobInfo.getJobACLs(); - assertEquals(2, parsedACLs.size()); - assertTrue(parsedACLs.get(JobACL.VIEW_JOB).toString().equals( - viewJobACL.toString())); - assertTrue(parsedACLs.get(JobACL.MODIFY_JOB).toString().equals( - modifyJobACL.toString())); - - if (mr != null) { - mr.shutdown(); - } - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgress.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgress.java deleted file mode 100644 index 6b16518e42a..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgress.java +++ /dev/null @@ -1,369 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/** - * TestJobInProgress is a unit test to test consistency of JobInProgress class - * data structures under different conditions (speculation/locality) and at - * different stages (tasks are running/pending/killed) - */ - -package org.apache.hadoop.mapred; - -import java.io.IOException; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -import static org.junit.Assert.*; -import org.junit.Test; -import org.junit.BeforeClass; -import static org.mockito.Mockito.*; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobInProgress; -import org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobTracker; -import org.apache.hadoop.mapred.TaskStatus.Phase; -import org.apache.hadoop.mapred.UtilsForTests.FakeClock; -import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; -import org.apache.hadoop.mapreduce.JobCounter; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo; -import org.apache.hadoop.net.DNSToSwitchMapping; -import org.apache.hadoop.net.NetworkTopology; -import org.apache.hadoop.net.Node; -import org.apache.hadoop.net.NodeBase; -import org.apache.hadoop.net.StaticMapping; - -@SuppressWarnings("deprecation") -public class TestJobInProgress { - static final Log LOG = LogFactory.getLog(TestJobInProgress.class); - - static FakeJobTracker jobTracker; - - static String trackers[] = new String[] { - "tracker_tracker1.r1.com:1000", - "tracker_tracker2.r1.com:1000", - "tracker_tracker3.r2.com:1000", - "tracker_tracker4.r3.com:1000" - }; - - static String[] hosts = new String[] { - "tracker1.r1.com", - "tracker2.r1.com", - "tracker3.r2.com", - "tracker4.r3.com" - }; - - static String[] racks = new String[] { "/r1", "/r1", "/r2", "/r3" }; - - static int numUniqueHosts = hosts.length; - static int clusterSize = trackers.length; - - @BeforeClass - public static void setup() throws Exception { - JobConf conf = new JobConf(); - conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:0"); - conf.set(JTConfig.JT_HTTP_ADDRESS, "0.0.0.0:0"); - conf.setClass("topology.node.switch.mapping.impl", - StaticMapping.class, DNSToSwitchMapping.class); - jobTracker = new FakeJobTracker(conf, new FakeClock(), trackers); - // Set up the Topology Information - for (int i = 0; i < hosts.length; i++) { - StaticMapping.addNodeToRack(hosts[i], racks[i]); - } - for (String s: trackers) { - FakeObjectUtilities.establishFirstContact(jobTracker, s); - } - } - - static class MyFakeJobInProgress extends FakeJobInProgress { - - MyFakeJobInProgress(JobConf jc, JobTracker jt) throws IOException { - super(jc, jt); - } - - @Override - TaskSplitMetaInfo[] createSplits(org.apache.hadoop.mapreduce.JobID jobId) { - // Set all splits to reside on one host. This will ensure that - // one tracker gets data local, one gets rack local and two others - // get non-local maps - TaskSplitMetaInfo[] splits = new TaskSplitMetaInfo[numMapTasks]; - String[] splitHosts0 = new String[] { hosts[0] }; - for (int i = 0; i < numMapTasks; i++) { - splits[i] = new TaskSplitMetaInfo(splitHosts0, 0, 0); - } - return splits; - } - - private void makeRunning(TaskAttemptID taskId, TaskInProgress tip, - String taskTracker) { - TaskStatus status = TaskStatus.createTaskStatus(tip.isMapTask(), taskId, - 0.0f, 1, TaskStatus.State.RUNNING, "", "", taskTracker, - tip.isMapTask() ? 
Phase.MAP : Phase.REDUCE, new Counters()); - updateTaskStatus(tip, status); - } - - private TaskInProgress getTipForTaskID(TaskAttemptID tid, boolean isMap) { - TaskInProgress result = null; - TaskID id = tid.getTaskID(); - TaskInProgress[] arrayToLook = isMap ? maps : reduces; - - for (int i = 0; i < arrayToLook.length; i++) { - TaskInProgress tip = arrayToLook[i]; - if (tip.getTIPId() == id) { - result = tip; - break; - } - } - return result; - } - - /** - * Find a new Map or a reduce task and mark it as running on the specified - * tracker - */ - public TaskAttemptID findAndRunNewTask(boolean isMap, - String tt, String host, - int clusterSize, - int numUniqueHosts) - throws IOException { - TaskTrackerStatus tts = new TaskTrackerStatus(tt, host); - Task task = isMap ? - obtainNewMapTask(tts, clusterSize, numUniqueHosts) : - obtainNewReduceTask(tts, clusterSize, numUniqueHosts); - TaskAttemptID tid = task.getTaskID(); - makeRunning(task.getTaskID(), getTipForTaskID(tid, isMap), tt); - return tid; - } - } - - //@Test - public void testPendingMapTaskCount() throws Exception { - - int numMaps = 4; - int numReds = 4; - - JobConf conf = new JobConf(); - conf.setNumMapTasks(numMaps); - conf.setNumReduceTasks(numReds); - conf.setSpeculativeExecution(false); - conf.setBoolean( - JobContext.SETUP_CLEANUP_NEEDED, false); - MyFakeJobInProgress job1 = new MyFakeJobInProgress(conf, jobTracker); - job1.initTasks(); - - TaskAttemptID[] tid = new TaskAttemptID[numMaps]; - - for (int i = 0; i < numMaps; i++) { - tid[i] = job1.findAndRunNewTask(true, trackers[i], hosts[i], - clusterSize, numUniqueHosts); - } - - // Fail all maps - for (int i = 0; i < numMaps; i++) { - job1.failTask(tid[i]); - } - - MyFakeJobInProgress job2 = new MyFakeJobInProgress(conf, jobTracker); - job2.initTasks(); - - for (int i = 0; i < numMaps; i++) { - tid[i] = job2.findAndRunNewTask(true, trackers[i], hosts[i], - clusterSize, numUniqueHosts); - job2.finishTask(tid[i]); - } - - for (int i = 0; i < numReds/2; i++) { - tid[i] = job2.findAndRunNewTask(false, trackers[i], hosts[i], - clusterSize, numUniqueHosts); - } - - for (int i = 0; i < numReds/4; i++) { - job2.finishTask(tid[i]); - } - - for (int i = numReds/4; i < numReds/2; i++) { - job2.failTask(tid[i]); - } - - // Job1. All Maps have failed, no reduces have been scheduled - checkTaskCounts(job1, 0, numMaps, 0, numReds); - - // Job2. All Maps have completed. 
One reducer has completed, one has - // failed and two others have not been scheduled - checkTaskCounts(job2, 0, 0, 0, 3 * numReds / 4); - } - - /** - * Test if running tasks are correctly maintained for various types of jobs - */ - static void testRunningTaskCount(boolean speculation) throws Exception { - LOG.info("Testing running jobs with speculation : " + speculation); - - JobConf conf = new JobConf(); - conf.setNumMapTasks(2); - conf.setNumReduceTasks(2); - conf.setSpeculativeExecution(speculation); - MyFakeJobInProgress jip = new MyFakeJobInProgress(conf, jobTracker); - jip.initTasks(); - - TaskAttemptID[] tid = new TaskAttemptID[4]; - - for (int i = 0; i < 2; i++) { - tid[i] = jip.findAndRunNewTask(true, trackers[i], hosts[i], - clusterSize, numUniqueHosts); - } - - // check if the running structures are populated - Set uniqueTasks = new HashSet(); - for (Map.Entry> s : - jip.getRunningMapCache().entrySet()) { - uniqueTasks.addAll(s.getValue()); - } - - // add non local map tasks - uniqueTasks.addAll(jip.getNonLocalRunningMaps()); - - assertEquals("Running map count doesnt match for jobs with speculation " - + speculation, - jip.runningMaps(), uniqueTasks.size()); - - for (int i = 0; i < 2; i++ ) { - tid[i] = jip.findAndRunNewTask(false, trackers[i], hosts[i], - clusterSize, numUniqueHosts); - } - - assertEquals("Running reducer count doesnt match for" + - " jobs with speculation " - + speculation, - jip.runningReduces(), jip.getRunningReduces().size()); - - } - - //@Test - public void testRunningTaskCount() throws Exception { - // test with spec = false - testRunningTaskCount(false); - - // test with spec = true - testRunningTaskCount(true); - - } - - static void checkTaskCounts(JobInProgress jip, int runningMaps, - int pendingMaps, int runningReduces, int pendingReduces) { - Counters counter = jip.getJobCounters(); - long totalTaskCount = counter.getCounter(JobCounter.TOTAL_LAUNCHED_MAPS) - + counter.getCounter(JobCounter.TOTAL_LAUNCHED_REDUCES); - - LOG.info("totalTaskCount is " + totalTaskCount); - LOG.info(" Running Maps:" + jip.runningMaps() + - " Pending Maps:" + jip.pendingMaps() + - " Running Reds:" + jip.runningReduces() + - " Pending Reds:" + jip.pendingReduces()); - - assertEquals(jip.getNumTaskCompletionEvents(),totalTaskCount); - assertEquals(runningMaps, jip.runningMaps()); - assertEquals(pendingMaps, jip.pendingMaps()); - assertEquals(runningReduces, jip.runningReduces()); - assertEquals(pendingReduces, jip.pendingReduces()); - } - - //@Test - public void testJobSummary() throws Exception { - int numMaps = 2; - int numReds = 2; - JobConf conf = new JobConf(); - conf.setNumMapTasks(numMaps); - conf.setNumReduceTasks(numReds); - // Spying a fake is easier than mocking here - MyFakeJobInProgress jspy = spy(new MyFakeJobInProgress(conf, jobTracker)); - jspy.initTasks(); - TaskAttemptID tid; - - // Launch some map tasks - for (int i = 0; i < numMaps; i++) { - jspy.maps[i].setExecStartTime(i + 1); - tid = jspy.findAndRunNewTask(true, trackers[i], hosts[i], - clusterSize, numUniqueHosts); - jspy.finishTask(tid); - } - - // Launch some reduce tasks - for (int i = 0; i < numReds; i++) { - jspy.reduces[i].setExecStartTime(i + numMaps + 1); - tid = jspy.findAndRunNewTask(false, trackers[i], hosts[i], - clusterSize, numUniqueHosts); - jspy.finishTask(tid); - } - - // Should be invoked numMaps + numReds times by different TIP objects - verify(jspy, times(4)).setFirstTaskLaunchTime(any(TaskInProgress.class)); - - ClusterStatus cspy = spy(new ClusterStatus(4, 0, 0, 0, 0, 4, 4, - 
JobTrackerStatus.RUNNING, 0)); - - JobInProgress.JobSummary.logJobSummary(jspy, cspy); - - verify(jspy).getStatus(); - verify(jspy).getProfile(); - verify(jspy, atLeastOnce()).getJobCounters(); - verify(jspy, atLeastOnce()).getJobID(); - verify(jspy).getStartTime(); - verify(jspy).getFirstTaskLaunchTimes(); - verify(jspy).getFinishTime(); - verify(jspy).getTasks(TaskType.MAP); - verify(jspy).getTasks(TaskType.REDUCE); - verify(jspy).getNumSlotsPerMap(); - verify(jspy).getNumSlotsPerReduce(); - verify(cspy).getMaxMapTasks(); - verify(cspy).getMaxReduceTasks(); - - assertEquals("firstMapTaskLaunchTime", 1, - jspy.getFirstTaskLaunchTimes().get(TaskType.MAP).longValue()); - assertEquals("firstReduceTaskLaunchTime", 3, - jspy.getFirstTaskLaunchTimes().get(TaskType.REDUCE).longValue()); - } - - @Test - public void testLocality() throws Exception { - NetworkTopology nt = new NetworkTopology(); - - Node r1n1 = new NodeBase("/default/rack1/node1"); - nt.add(r1n1); - Node r1n2 = new NodeBase("/default/rack1/node2"); - nt.add(r1n2); - - Node r2n3 = new NodeBase("/default/rack2/node3"); - nt.add(r2n3); - - LOG.debug("r1n1 parent: " + r1n1.getParent() + "\n" + - "r1n2 parent: " + r1n2.getParent() + "\n" + - "r2n3 parent: " + r2n3.getParent()); - - // Same host - assertEquals(0, JobInProgress.getMatchingLevelForNodes(r1n1, r1n1, 3)); - // Same rack - assertEquals(1, JobInProgress.getMatchingLevelForNodes(r1n1, r1n2, 3)); - // Different rack - assertEquals(2, JobInProgress.getMatchingLevelForNodes(r1n1, r2n3, 3)); - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgressListener.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgressListener.java deleted file mode 100644 index 3c409cdbb07..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobInProgressListener.java +++ /dev/null @@ -1,464 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.mapred; - -import java.util.ArrayList; -import java.io.File; -import java.io.IOException; -import java.util.List; - -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapred.JobStatusChangeEvent.EventType; -import org.apache.hadoop.mapred.lib.IdentityMapper; -import org.apache.hadoop.mapred.lib.IdentityReducer; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.MapReduceTestUtil; -import org.apache.hadoop.mapreduce.TestNoJobSetupCleanup; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -import junit.extensions.TestSetup; -import junit.framework.Test; -import junit.framework.TestCase; -import junit.framework.TestSuite; - -/** - * Test whether the JobInProgressListeners are informed as expected. - */ -public class TestJobInProgressListener extends TestCase { - private static final Log LOG = - LogFactory.getLog(TestJobInProgressListener.class); - private static String TEST_ROOT_DIR = new File(System.getProperty( - "test.build.data", "/tmp")).toURI().toString().replace(' ', '+'); - private final Path testDir = - new Path(TEST_ROOT_DIR, "test-jip-listener-update"); - private static MiniMRCluster mr; - private static JobTracker jobtracker; - private static JobConf conf; - private static MyScheduler myScheduler; - - public static Test suite() { - TestSetup setup = - new TestSetup(new TestSuite(TestJobInProgressListener.class)) { - @Override - protected void setUp() throws Exception { - conf = new JobConf(); - conf.setClass(JTConfig.JT_TASK_SCHEDULER, MyScheduler.class, - TaskScheduler.class); - mr = new MiniMRCluster(1, "file:///", 1, null, null, conf); - jobtracker = mr.getJobTrackerRunner().getJobTracker(); - myScheduler = (MyScheduler)jobtracker.getScheduler(); - conf = mr.createJobConf(); - } - - @Override - protected void tearDown() throws Exception { - conf = null; - try { - mr.shutdown(); - } catch (Exception e) { - LOG.info("Error in shutting down the MR cluster", e); - } - jobtracker = null; - myScheduler.terminate(); - } - }; - return setup; - } - - /** - * This test case tests if external updates to JIP do not result into - * undesirable effects - * Test is as follows - * - submit 2 jobs of normal priority. job1 is a waiting job which waits and - * blocks the cluster - * - change one parameter of job2 such that the job bumps up in the queue - * - check if the queue looks ok - * - */ - public void testJobQueueChanges() throws IOException { - LOG.info("Testing job queue changes"); - - // stop the job initializer - myScheduler.stopInitializer(); - - JobQueueJobInProgressListener myListener = - new JobQueueJobInProgressListener(); - - // add the listener - jobtracker.addJobInProgressListener(myListener); - - Path inDir = new Path(testDir, "input"); - Path outputDir1 = new Path(testDir, "output1"); - Path outputDir2 = new Path(testDir, "output2"); - - RunningJob rJob1 = - UtilsForTests.runJob(conf, inDir, outputDir1, 1, 0); - LOG.info("Running job " + rJob1.getID().toString()); - - RunningJob rJob2 = - UtilsForTests.runJob(conf, inDir, outputDir2, 1, 0); - LOG.info("Running job " + rJob2.getID().toString()); - - // I. 
Check job-priority change - LOG.info("Testing job priority changes"); - - // bump up job2's priority - LOG.info("Increasing job2's priority to HIGH"); - rJob2.setJobPriority("HIGH"); - - // check if the queue is sane - assertTrue("Priority change garbles the queue", - myListener.getJobQueue().size() == 2); - - JobInProgress[] queue = - myListener.getJobQueue().toArray(new JobInProgress[0]); - - // check if the bump has happened - assertTrue("Priority change failed to bump up job2 in the queue", - queue[0].getJobID().equals(rJob2.getID())); - - assertTrue("Priority change failed to bump down job1 in the queue", - queue[1].getJobID().equals(rJob1.getID())); - - assertEquals("Priority change has garbled the queue", - 2, queue.length); - - // II. Check start-time change - LOG.info("Testing job start-time changes"); - - // reset the priority which will make the order as - // - job1 - // - job2 - // this will help in bumping job2 on start-time change - LOG.info("Increasing job2's priority to NORMAL"); - rJob2.setJobPriority("NORMAL"); - - // create the change event - JobInProgress jip2 = jobtracker.getJob(rJob2.getID()); - JobInProgress jip1 = jobtracker.getJob(rJob1.getID()); - - JobStatus prevStatus = (JobStatus)jip2.getStatus().clone(); - - // change job2's start-time and the status - jip2.startTime = jip1.startTime - 1; - jip2.status.setStartTime(jip2.startTime); - - - JobStatus newStatus = (JobStatus)jip2.getStatus().clone(); - - // inform the listener - LOG.info("Updating the listener about job2's start-time change"); - JobStatusChangeEvent event = - new JobStatusChangeEvent(jip2, EventType.START_TIME_CHANGED, - prevStatus, newStatus); - myListener.jobUpdated(event); - - // check if the queue is sane - assertTrue("Start time change garbles the queue", - myListener.getJobQueue().size() == 2); - - queue = myListener.getJobQueue().toArray(new JobInProgress[0]); - - // check if the bump has happened - assertTrue("Start time change failed to bump up job2 in the queue", - queue[0].getJobID().equals(rJob2.getID())); - - assertTrue("Start time change failed to bump down job1 in the queue", - queue[1].getJobID().equals(rJob1.getID())); - - assertEquals("Start time change has garbled the queue", - 2, queue.length); - } - - /** - * Check the queue status upon - * - failed job - * - killed job - * - successful job - */ - public void testJobCompletion() throws Exception { - MyListener mainListener = new MyListener(); - jobtracker.addJobInProgressListener(mainListener); - - // stop the job initializer - myScheduler.stopInitializer(); - - // check queued jobs - testQueuedJobKill(conf, mainListener); - - myScheduler.startInitializer(); - - // check the queue state for job states - testFailedJob(conf, mainListener); - - testKilledJob(conf, mainListener); - - testSuccessfulJob(conf, mainListener); - } - - // A listener that inits the tasks one at a time and also listens to the - // events - public static class MyListener extends JobInProgressListener { - private List wjobs = new ArrayList(); - private List rjobs = new ArrayList(); - // list of job added to the wait queue - private List wjobsAdded = new ArrayList(); - // list of job added to the running queue - private List rjobsAdded = new ArrayList(); - - public boolean contains (JobID id) { - return contains(id, true) || contains(id, false); - } - - public boolean contains (JobID id, boolean waiting) { - if (!wjobsAdded.contains(id)) { - throw new RuntimeException("Job " + id + " not seen in waiting queue"); - } - if (!waiting) { - if 
(!rjobsAdded.contains(id)) { - throw new RuntimeException("Job " + id + " not seen in run queue"); - } - } - List queue = waiting ? wjobs : rjobs; - for (JobInProgress job : queue) { - if (job.getJobID().equals(id)) { - return true; - } - } - return false; - } - - public void jobAdded(JobInProgress job) { - LOG.info("Job " + job.getJobID().toString() + " added"); - wjobs.add(job); - wjobsAdded.add(job.getJobID()); - } - - public void jobRemoved(JobInProgress job) { - LOG.info("Job " + job.getJobID().toString() + " removed"); - wjobs.remove(job); - rjobs.remove(job); - } - - public void jobUpdated(JobChangeEvent event) { - LOG.info("Job " + event.getJobInProgress().getJobID().toString() + " updated"); - // remove the job is the event is for a completed job - if (event instanceof JobStatusChangeEvent) { - JobStatusChangeEvent statusEvent = (JobStatusChangeEvent)event; - if (statusEvent.getEventType() == EventType.RUN_STATE_CHANGED) { - // check if the state changes from - // RUNNING->COMPLETE(SUCCESS/KILLED/FAILED) - JobInProgress jip = event.getJobInProgress(); - String jobId = jip.getJobID().toString(); - if (jip.isComplete()) { - LOG.info("Job " + jobId + " deleted from the running queue"); - if (statusEvent.getOldStatus().getRunState() == JobStatus.PREP) { - wjobs.remove(jip); - } else { - rjobs.remove(jip); - } - } else { - // PREP->RUNNING - LOG.info("Job " + jobId + " deleted from the waiting queue"); - wjobs.remove(jip); - rjobs.add(jip); - rjobsAdded.add(jip.getJobID()); - } - } - } - } - } - - private void testFailedJob(JobConf job, MyListener myListener) - throws IOException { - LOG.info("Testing job-fail"); - - Path inDir = new Path(TEST_ROOT_DIR + "/jiplistenerfailjob/input"); - Path outDir = new Path(TEST_ROOT_DIR + "/jiplistenerfailjob/output"); - - job.setNumMapTasks(1); - job.setNumReduceTasks(0); - job.setMaxMapAttempts(1); - - // submit a job that fails - RunningJob rJob = UtilsForTests.runJobFail(job, inDir, outDir); - JobID id = rJob.getID(); - - // check if the job failure was notified - assertFalse("Missing event notification on failing a running job", - myListener.contains(id)); - - // check if failed - assertEquals("Job failed!", JobStatus.FAILED, rJob.getJobState()); - } - - private void testKilledJob(JobConf job, MyListener myListener) - throws IOException { - LOG.info("Testing job-kill"); - - Path inDir = new Path(TEST_ROOT_DIR + "/jiplistenerkilljob/input"); - Path outDir = new Path(TEST_ROOT_DIR + "/jiplistenerkilljob/output"); - - job.setNumMapTasks(1); - job.setNumReduceTasks(0); - - // submit and kill the job - RunningJob rJob = UtilsForTests.runJobKill(job, inDir, outDir); - JobID id = rJob.getID(); - - // check if the job failure was notified - assertFalse("Missing event notification on killing a running job", - myListener.contains(id)); - - // check if killed - assertEquals("Job failed!", JobStatus.KILLED, rJob.getJobState()); - } - - private void testSuccessfulJob(JobConf job, MyListener myListener) - throws Exception { - LOG.info("Testing job-success"); - - Path inDir = new Path(TEST_ROOT_DIR + "/jiplistenerjob/input"); - Path outDir = new Path(TEST_ROOT_DIR + "/jiplistenerjob/output"); - - job.setNumMapTasks(1); - job.setNumReduceTasks(0); - - // submit the job - RunningJob rJob = UtilsForTests.runJobSucceed(job, inDir, outDir); - - // wait for the job to be successful - rJob.waitForCompletion(); - - // check if the job success was notified - assertFalse("Missing event notification for a successful job", - myListener.contains(rJob.getID())); - - // 
check if successful - assertEquals("Job failed!", JobStatus.SUCCEEDED, rJob.getJobState()); - - // test if 0-task jobs with setup-cleanup works fine - LOG.info("Testing job with no task job with setup and cleanup"); - - job.setNumMapTasks(0); - job.setNumReduceTasks(0); - - outDir = new Path(TEST_ROOT_DIR + "/jiplistenerjob/output-no-tasks"); - - // submit the job - rJob = UtilsForTests.runJobSucceed(job, inDir, outDir); - - // wait for the job to be successful - rJob.waitForCompletion(); - - // check if the job success was notified - assertFalse("Missing event notification for a successful job with no tasks", - myListener.contains(rJob.getID(), true)); - - // check if successful - assertEquals("Job failed!", JobStatus.SUCCEEDED, rJob.getJobState()); - - // test if jobs with no tasks (0 maps, 0 red) update the listener properly - LOG.info("Testing job with no-set-cleanup no task"); - - outDir = new Path(TEST_ROOT_DIR + "/jiplistenerjob/output-no-tasks-no-set"); - - Job j = MapReduceTestUtil.createJob(mr.createJobConf(), inDir, outDir, 0, 0); - j.setJobSetupCleanupNeeded(false); - j.setOutputFormatClass(TestNoJobSetupCleanup.MyOutputFormat.class); - j.submit(); - j.waitForCompletion(true); - - JobID id = JobID.downgrade(j.getJobID()); - - // check if the job is in the waiting queue - assertFalse("Missing event notification on no-set-cleanup no task job", - myListener.contains(id, true)); - - // check if the job is successful - assertEquals("Job status doesnt reflect success", - JobStatus.SUCCEEDED, rJob.getJobState()); - } - - /** - * This scheduler never schedules any task as it doesnt init any task. So all - * the jobs are queued forever. - */ - public static class MyScheduler extends JobQueueTaskScheduler { - - @Override - public synchronized void start() throws IOException { - super.start(); - } - - void stopInitializer() throws IOException { - // Remove the eager task initializer - taskTrackerManager.removeJobInProgressListener( - eagerTaskInitializationListener); - // terminate it - eagerTaskInitializationListener.terminate(); - } - - void startInitializer() throws IOException { - eagerTaskInitializationListener = - new EagerTaskInitializationListener(getConf()); - eagerTaskInitializationListener.setTaskTrackerManager(taskTrackerManager); - // start it - eagerTaskInitializationListener.start(); - // add the eager task initializer - taskTrackerManager.addJobInProgressListener( - eagerTaskInitializationListener); - } - } - - private void testQueuedJobKill(JobConf conf, MyListener myListener) - throws IOException { - LOG.info("Testing queued-job-kill"); - - Path inDir = new Path(TEST_ROOT_DIR + "/jiplistenerqueuedjob/input"); - Path outDir = new Path(TEST_ROOT_DIR + "/jiplistener1ueuedjob/output"); - - conf.setMapperClass(IdentityMapper.class); - conf.setReducerClass(IdentityReducer.class); - conf.setNumMapTasks(1); - conf.setNumReduceTasks(0); - RunningJob rJob = UtilsForTests.runJob(conf, inDir, outDir); - JobID id = rJob.getID(); - LOG.info("Job : " + id.toString() + " submitted"); - - // check if the job is in the waiting queue - assertTrue("Missing event notification on submiting a job", - myListener.contains(id, true)); - - // kill the job - LOG.info("Killing job : " + id.toString()); - rJob.killJob(); - - // check if the job is killed - assertEquals("Job status doesnt reflect the kill-job action", - JobStatus.KILLED, rJob.getJobState()); - - // check if the job is correctly moved - // from the waiting list - assertFalse("Missing event notification on killing a waiting job", - 
myListener.contains(id, true)); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobKillAndFail.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobKillAndFail.java deleted file mode 100644 index 410207ee15e..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobKillAndFail.java +++ /dev/null @@ -1,195 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.BufferedReader; -import java.io.FileInputStream; -import java.io.File; -import java.io.InputStreamReader; -import java.io.IOException; - -import junit.framework.TestCase; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.mapreduce.SleepJob; - -/** - * A JUnit test to test Kill Job & Fail Job functionality with local file - * system. - */ -public class TestJobKillAndFail extends TestCase { - - static final Log LOG = LogFactory.getLog(TestJobKillAndFail.class); - - private static String TEST_ROOT_DIR = new File(System.getProperty( - "test.build.data", "/tmp")).toURI().toString().replace(' ', '+'); - - /** - * TaskController instance that just sets a flag when a stack dump - * is performed in a child thread. - */ - static class MockStackDumpTaskController extends DefaultTaskController { - - static volatile int numStackDumps = 0; - - static final Log LOG = LogFactory.getLog(TestJobKillAndFail.class); - - public MockStackDumpTaskController() { - LOG.info("Instantiated MockStackDumpTC"); - } - - @Override - void dumpTaskStack(TaskControllerContext context) { - LOG.info("Got stack-dump request in TaskController"); - MockStackDumpTaskController.numStackDumps++; - super.dumpTaskStack(context); - } - - } - - /** If a task was killed, then dumpTaskStack() should have been - * called. Test whether or not the counter was incremented - * and succeed/fail based on this. 
*/ - private void checkForStackDump(boolean expectDump, int lastNumDumps) { - int curNumDumps = MockStackDumpTaskController.numStackDumps; - - LOG.info("curNumDumps=" + curNumDumps + "; lastNumDumps=" + lastNumDumps - + "; expect=" + expectDump); - - if (expectDump) { - assertTrue("No stack dump recorded!", lastNumDumps < curNumDumps); - } else { - assertTrue("Stack dump happened anyway!", lastNumDumps == curNumDumps); - } - } - - public void testJobFailAndKill() throws Exception { - MiniMRCluster mr = null; - try { - JobConf jtConf = new JobConf(); - jtConf.set("mapred.jobtracker.instrumentation", - JTInstrumentation.class.getName()); - jtConf.set("mapreduce.tasktracker.taskcontroller", - MockStackDumpTaskController.class.getName()); - mr = new MiniMRCluster(2, "file:///", 3, null, null, jtConf); - JTInstrumentation instr = (JTInstrumentation) - mr.getJobTrackerRunner().getJobTracker().getInstrumentation(); - - // run the TCs - JobConf conf = mr.createJobConf(); - conf.setInt(Job.COMPLETION_POLL_INTERVAL_KEY, 50); - - Path inDir = new Path(TEST_ROOT_DIR + "/failkilljob/input"); - Path outDir = new Path(TEST_ROOT_DIR + "/failkilljob/output"); - RunningJob runningJob = UtilsForTests.runJobFail(conf, inDir, outDir); - // Checking that the Job got failed - assertEquals(runningJob.getJobState(), JobStatus.FAILED); - assertTrue(instr.verifyJob()); - assertEquals(1, instr.failed); - instr.reset(); - - int prevNumDumps = MockStackDumpTaskController.numStackDumps; - runningJob = UtilsForTests.runJobKill(conf, inDir, outDir); - // Checking that the Job got killed - assertTrue(runningJob.isComplete()); - assertEquals(runningJob.getJobState(), JobStatus.KILLED); - assertTrue(instr.verifyJob()); - assertEquals(1, instr.killed); - // check that job kill does not put a stacktrace in task logs. 
- checkForStackDump(false, prevNumDumps); - - // Test that a task that times out does have a stack trace - conf = mr.createJobConf(); - conf.setInt(JobContext.TASK_TIMEOUT, 10000); - conf.setInt(Job.COMPLETION_POLL_INTERVAL_KEY, 50); - SleepJob sleepJob = new SleepJob(); - sleepJob.setConf(conf); - Job job = sleepJob.createJob(1, 0, 30000, 1,0, 0); - job.setMaxMapAttempts(1); - prevNumDumps = MockStackDumpTaskController.numStackDumps; - job.waitForCompletion(true); - checkForStackDump(true, prevNumDumps); - } finally { - if (mr != null) { - mr.shutdown(); - } - } - } - - static class JTInstrumentation extends JobTrackerInstrumentation { - volatile int failed; - volatile int killed; - volatile int addPrep; - volatile int decPrep; - volatile int addRunning; - volatile int decRunning; - - void reset() { - failed = 0; - killed = 0; - addPrep = 0; - decPrep = 0; - addRunning = 0; - decRunning = 0; - } - - boolean verifyJob() { - return addPrep==1 && decPrep==1 && addRunning==1 && decRunning==1; - } - - public JTInstrumentation(JobTracker jt, JobConf conf) { - super(jt, conf); - } - - public synchronized void addPrepJob(JobConf conf, JobID id) - { - addPrep++; - } - - public synchronized void decPrepJob(JobConf conf, JobID id) - { - decPrep++; - } - - public synchronized void addRunningJob(JobConf conf, JobID id) - { - addRunning++; - } - - public synchronized void decRunningJob(JobConf conf, JobID id) - { - decRunning++; - } - - public synchronized void failedJob(JobConf conf, JobID id) - { - failed++; - } - - public synchronized void killedJob(JobConf conf, JobID id) - { - killed++; - } - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueClient.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueClient.java deleted file mode 100644 index d25a165f435..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueClient.java +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import static org.apache.hadoop.mapred.QueueManagerTestUtils.QUEUES_CONFIG_FILE_PATH; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.createDocument; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.createSimpleDocumentWithAcls; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.miniMRCluster; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.deleteQueuesConfigFile; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.writeToFile; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; - -import java.io.StringWriter; -import java.util.ArrayList; -import java.util.List; - -import junit.framework.Assert; - -import org.apache.hadoop.mapreduce.QueueInfo; -import org.junit.After; -import org.junit.Test; -import org.w3c.dom.Document; - -public class TestJobQueueClient { - - @After - public void tearDown() throws Exception { - deleteQueuesConfigFile(); - } - - @Test - public void testQueueOrdering() throws Exception { - // create some sample queues in a hierarchy.. - JobQueueInfo[] roots = new JobQueueInfo[2]; - roots[0] = new JobQueueInfo("q1", "q1 scheduling info"); - roots[1] = new JobQueueInfo("q2", "q2 scheduling info"); - - List children = new ArrayList(); - children.add(new JobQueueInfo("q1:1", null)); - children.add(new JobQueueInfo("q1:2", null)); - roots[0].setChildren(children); - - // test dfs ordering - JobQueueClient client = new JobQueueClient(new JobConf()); - List allQueues = client.expandQueueList(roots); - assertEquals(4, allQueues.size()); - assertEquals("q1", allQueues.get(0).getQueueName()); - assertEquals("q1:1", allQueues.get(1).getQueueName()); - assertEquals("q1:2", allQueues.get(2).getQueueName()); - assertEquals("q2", allQueues.get(3).getQueueName()); - } - - @Test - public void testQueueInfoPrinting() throws Exception { - // create a test queue with children. - // create some sample queues in a hierarchy.. 
- JobQueueInfo root = new JobQueueInfo("q1", "q1 scheduling info"); - - List children = new ArrayList(); - children.add(new JobQueueInfo("q1:1", null)); - children.add(new JobQueueInfo("q1:2", null)); - root.setChildren(children); - - JobQueueClient client = new JobQueueClient(new JobConf()); - StringWriter writer = new StringWriter(); - client.printJobQueueInfo(root, writer); - - Assert.assertTrue(writer.toString().contains("Queue Name : q1")); - Assert.assertTrue(writer.toString().contains("Queue State : running")); - Assert.assertTrue(writer.toString().contains("Scheduling Info : q1 scheduling info")); - Assert.assertTrue(writer.toString().contains("Queue Name : q1:1")); - Assert.assertTrue(writer.toString().contains("Queue Name : q1:2")); - } - - @Test - public void testGetQueue() throws Exception { - - deleteQueuesConfigFile(); - Document doc = createDocument(); - createSimpleDocumentWithAcls(doc); - writeToFile(doc, QUEUES_CONFIG_FILE_PATH); - JobConf jobConf = new JobConf(); - String namenode = "file:///"; - miniMRCluster = new MiniMRCluster(0, namenode, 3, null, null, jobConf); - - JobClient jc = new JobClient(miniMRCluster.createJobConf()); - // test for existing queue - QueueInfo queueInfo = jc.getQueueInfo("q1"); - assertEquals("q1",queueInfo.getQueueName()); - // try getting a non-existing queue - queueInfo = jc.getQueueInfo("queue"); - assertNull(queueInfo); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueInformation.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueInformation.java deleted file mode 100644 index ea2980c4244..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueInformation.java +++ /dev/null @@ -1,144 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import java.io.IOException; -import java.util.Collection; -import java.util.List; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.mapreduce.QueueState; -import org.apache.hadoop.mapreduce.SleepJob; -import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; - -import junit.framework.TestCase; - -public class TestJobQueueInformation extends TestCase { - - private MiniMRCluster mrCluster; - private MiniDFSCluster dfsCluster; - private JobConf jc; - private static final String JOB_SCHEDULING_INFO = "TESTSCHEDULINGINFO"; - private static final Path TEST_DIR = - new Path(System.getProperty("test.build.data","/tmp"), - "job-queue-info-testing"); - private static final Path IN_DIR = new Path(TEST_DIR, "input"); - private static final Path SHARE_DIR = new Path(TEST_DIR, "share"); - private static final Path OUTPUT_DIR = new Path(TEST_DIR, "output"); - - static String getSignalFile() { - return (new Path(SHARE_DIR, "signal")).toString(); - } - - // configure a waiting job with 2 maps - private JobConf configureWaitingJob(JobConf conf) throws IOException { - - UtilsForTests.configureWaitingJobConf(conf, IN_DIR, OUTPUT_DIR, 2, 0, - "test-job-queue-info", getSignalFile(), getSignalFile()); - return conf; - } - - public static class TestTaskScheduler extends LimitTasksPerJobTaskScheduler { - - @Override - public synchronized List assignTasks(TaskTracker taskTracker) - throws IOException { - Collection jips = jobQueueJobInProgressListener - .getJobQueue(); - if (jips != null && !jips.isEmpty()) { - for (JobInProgress jip : jips) { - jip.setSchedulingInfo(JOB_SCHEDULING_INFO); - } - } - return super.assignTasks(taskTracker); - } - } - - @Override - protected void setUp() throws Exception { - super.setUp(); - final int taskTrackers = 4; - Configuration conf = new Configuration(); - dfsCluster = new MiniDFSCluster(conf, 4, true, null); - - jc = new JobConf(); - jc.setClass(JTConfig.JT_TASK_SCHEDULER, TestTaskScheduler.class, - TaskScheduler.class); - jc.setLong(JTConfig.JT_RUNNINGTASKS_PER_JOB, 10L); - mrCluster = new MiniMRCluster(0, 0, taskTrackers, dfsCluster - .getFileSystem().getUri().toString(), 1, null, null, null, jc); - } - - @Override - protected void tearDown() throws Exception { - super.tearDown(); - mrCluster.shutdown(); - dfsCluster.shutdown(); - } - - public void testJobQueues() throws Exception { - JobClient jc = new JobClient(mrCluster.createJobConf()); - String expectedQueueInfo = "Maximum Tasks Per Job :: 10"; - JobQueueInfo[] queueInfos = jc.getQueues(); - assertNotNull(queueInfos); - assertEquals(1, queueInfos.length); - assertEquals("default", queueInfos[0].getQueueName()); - assertEquals(QueueState.RUNNING.getStateName(), - queueInfos[0].getQueueState()); - JobConf conf = mrCluster.createJobConf(); - FileSystem fileSys = dfsCluster.getFileSystem(); - - // configure a waiting job - conf = configureWaitingJob(conf); - conf.setJobName("test-job-queue-info-test"); - - // clear the signal file if any - fileSys.delete(SHARE_DIR, true); - - RunningJob rJob = jc.submitJob(conf); - - while (rJob.getJobState() != JobStatus.RUNNING) { - UtilsForTests.waitFor(10); - } - - int numberOfJobs = 0; - - for 
(JobQueueInfo queueInfo : queueInfos) { - JobStatus[] jobStatusList = jc.getJobsFromQueue(queueInfo - .getQueueName()); - assertNotNull(queueInfo.getQueueName()); - assertNotNull(queueInfo.getSchedulingInfo()); - assertEquals(expectedQueueInfo, queueInfo.getSchedulingInfo()); - numberOfJobs += jobStatusList.length; - for (JobStatus status : jobStatusList) { - assertEquals(JOB_SCHEDULING_INFO, status.getSchedulingInfo()); - } - } - assertEquals(1, numberOfJobs); - - UtilsForTests.signalTasks(dfsCluster, fileSys, getSignalFile(), - getSignalFile(), 4); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueTaskScheduler.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueTaskScheduler.java deleted file mode 100644 index dd297b3f5c0..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueTaskScheduler.java +++ /dev/null @@ -1,343 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import junit.framework.TestCase; - -import org.apache.hadoop.io.BytesWritable; -import org.apache.hadoop.mapred.JobStatusChangeEvent.EventType; -import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; -import org.apache.hadoop.mapreduce.split.JobSplit; - -public class TestJobQueueTaskScheduler extends TestCase { - - private static int jobCounter; - private static int taskCounter; - - static void resetCounters() { - jobCounter = 0; - taskCounter = 0; - } - - static class FakeJobInProgress extends JobInProgress { - - private FakeTaskTrackerManager taskTrackerManager; - - public FakeJobInProgress(JobConf jobConf, - FakeTaskTrackerManager taskTrackerManager, JobTracker jt) - throws IOException { - super(new JobID("test", ++jobCounter), jobConf, jt); - this.taskTrackerManager = taskTrackerManager; - this.startTime = System.currentTimeMillis(); - this.status = new JobStatus(getJobID(), 0f, 0f, JobStatus.PREP, - jobConf.getUser(), - jobConf.getJobName(), "", ""); - this.status.setJobPriority(JobPriority.NORMAL); - this.status.setStartTime(startTime); - } - - @Override - public synchronized void initTasks() throws IOException { - // do nothing - } - - @Override - public Task obtainNewLocalMapTask(TaskTrackerStatus tts, int clusterSize, - int ignored) - throws IOException { - return obtainNewMapTask(tts, clusterSize, ignored); - } - - @Override - public Task 
obtainNewNonLocalMapTask(TaskTrackerStatus tts, int clusterSize, - int ignored) - throws IOException { - return obtainNewMapTask(tts, clusterSize, ignored); - } - - @Override - public Task obtainNewMapTask(final TaskTrackerStatus tts, int clusterSize, - int ignored) throws IOException { - TaskAttemptID attemptId = getTaskAttemptID(TaskType.MAP); - Task task = new MapTask("", attemptId, 0, new JobSplit.TaskSplitIndex(), 1) { - @Override - public String toString() { - return String.format("%s on %s", getTaskID(), tts.getTrackerName()); - } - }; - taskTrackerManager.update(tts.getTrackerName(), task); - runningMapTasks++; - return task; - } - - @Override - public Task obtainNewReduceTask(final TaskTrackerStatus tts, - int clusterSize, int ignored) throws IOException { - TaskAttemptID attemptId = getTaskAttemptID(TaskType.REDUCE); - Task task = new ReduceTask("", attemptId, 0, 10, 1) { - @Override - public String toString() { - return String.format("%s on %s", getTaskID(), tts.getTrackerName()); - } - }; - taskTrackerManager.update(tts.getTrackerName(), task); - runningReduceTasks++; - return task; - } - - private TaskAttemptID getTaskAttemptID(TaskType type) { - JobID jobId = getJobID(); - return new TaskAttemptID(jobId.getJtIdentifier(), - jobId.getId(), type, ++taskCounter, 0); - } - } - - static class FakeTaskTrackerManager implements TaskTrackerManager { - - int maps = 0; - int reduces = 0; - int maxMapTasksPerTracker = 2; - int maxReduceTasksPerTracker = 2; - List listeners = - new ArrayList(); - QueueManager queueManager; - - private Map trackers = - new HashMap(); - - public FakeTaskTrackerManager() { - JobConf conf = new JobConf(); - queueManager = new QueueManager(conf); - - TaskTracker tt1 = new TaskTracker("tt1"); - tt1.setStatus(new TaskTrackerStatus("tt1", "tt1.host", 1, - new ArrayList(), 0, - maxMapTasksPerTracker, - maxReduceTasksPerTracker)); - trackers.put("tt1", tt1); - - TaskTracker tt2 = new TaskTracker("tt2"); - tt2.setStatus(new TaskTrackerStatus("tt2", "tt2.host", 2, - new ArrayList(), 0, - maxMapTasksPerTracker, - maxReduceTasksPerTracker)); - trackers.put("tt2", tt2); - } - - @Override - public ClusterStatus getClusterStatus() { - int numTrackers = trackers.size(); - return new ClusterStatus(numTrackers, 0, - 10 * 60 * 1000, - maps, reduces, - numTrackers * maxMapTasksPerTracker, - numTrackers * maxReduceTasksPerTracker, - JobTrackerStatus.RUNNING); - } - - @Override - public int getNumberOfUniqueHosts() { - return 0; - } - - @Override - public Collection taskTrackers() { - List statuses = new ArrayList(); - for (TaskTracker tt : trackers.values()) { - statuses.add(tt.getStatus()); - } - return statuses; - } - - - @Override - public void addJobInProgressListener(JobInProgressListener listener) { - listeners.add(listener); - } - - @Override - public void removeJobInProgressListener(JobInProgressListener listener) { - listeners.remove(listener); - } - - @Override - public QueueManager getQueueManager() { - return queueManager; - } - - @Override - public int getNextHeartbeatInterval() { - return JTConfig.JT_HEARTBEAT_INTERVAL_MIN_DEFAULT; - } - - @Override - public void killJob(JobID jobid) { - return; - } - - @Override - public JobInProgress getJob(JobID jobid) { - return null; - } - - @Override - public boolean killTask(TaskAttemptID attemptId, boolean shouldFail) { - return true; - } - - public void initJob(JobInProgress job) { - // do nothing - } - - public void failJob(JobInProgress job) { - // do nothing - } - - // Test methods - - public void 
submitJob(JobInProgress job) throws IOException { - for (JobInProgressListener listener : listeners) { - listener.jobAdded(job); - } - } - - public TaskTracker getTaskTracker(String trackerID) { - return trackers.get(trackerID); - } - - public void update(String taskTrackerName, final Task t) { - if (t.isMapTask()) { - maps++; - } else { - reduces++; - } - TaskStatus status = new TaskStatus() { - @Override - public boolean getIsMap() { - return t.isMapTask(); - } - - @Override - public void addFetchFailedMap(TaskAttemptID mapTaskId) { - - } - }; - status.setRunState(TaskStatus.State.RUNNING); - trackers.get(taskTrackerName).getStatus().getTaskReports().add(status); - } - - } - - protected JobConf jobConf; - protected TaskScheduler scheduler; - private FakeTaskTrackerManager taskTrackerManager; - - @Override - protected void setUp() throws Exception { - resetCounters(); - jobConf = new JobConf(); - jobConf.setNumMapTasks(10); - jobConf.setNumReduceTasks(10); - taskTrackerManager = new FakeTaskTrackerManager(); - scheduler = createTaskScheduler(); - scheduler.setConf(jobConf); - scheduler.setTaskTrackerManager(taskTrackerManager); - scheduler.start(); - } - - @Override - protected void tearDown() throws Exception { - if (scheduler != null) { - scheduler.terminate(); - } - } - - protected TaskScheduler createTaskScheduler() { - return new JobQueueTaskScheduler(); - } - - static void submitJobs(FakeTaskTrackerManager taskTrackerManager, JobConf jobConf, - int numJobs, int state) - throws IOException { - for (int i = 0; i < numJobs; i++) { - JobInProgress job = new FakeJobInProgress(jobConf, taskTrackerManager, - UtilsForTests.getJobTracker()); - job.getStatus().setRunState(state); - taskTrackerManager.submitJob(job); - } - } - - public void testTaskNotAssignedWhenNoJobsArePresent() throws IOException { - assertEquals(0, scheduler.assignTasks(tracker(taskTrackerManager, "tt1")).size()); - } - - public void testNonRunningJobsAreIgnored() throws IOException { - submitJobs(taskTrackerManager, jobConf, 1, JobStatus.PREP); - submitJobs(taskTrackerManager, jobConf, 1, JobStatus.SUCCEEDED); - submitJobs(taskTrackerManager, jobConf, 1, JobStatus.FAILED); - submitJobs(taskTrackerManager, jobConf, 1, JobStatus.KILLED); - assertEquals(0, scheduler.assignTasks(tracker(taskTrackerManager, "tt1")).size()); - } - - public void testDefaultTaskAssignment() throws IOException { - submitJobs(taskTrackerManager, jobConf, 2, JobStatus.RUNNING); - // All slots are filled with job 1 - checkAssignment(scheduler, tracker(taskTrackerManager, "tt1"), - new String[] {"attempt_test_0001_m_000001_0 on tt1", - "attempt_test_0001_m_000002_0 on tt1", - "attempt_test_0001_r_000003_0 on tt1"}); - checkAssignment(scheduler, tracker(taskTrackerManager, "tt1"), - new String[] {"attempt_test_0001_r_000004_0 on tt1"}); - checkAssignment(scheduler, tracker(taskTrackerManager, "tt1"), new String[] {}); - checkAssignment(scheduler, tracker(taskTrackerManager, "tt2"), - new String[] {"attempt_test_0001_m_000005_0 on tt2", - "attempt_test_0001_m_000006_0 on tt2", - "attempt_test_0001_r_000007_0 on tt2"}); - checkAssignment(scheduler, tracker(taskTrackerManager, "tt2"), - new String[] {"attempt_test_0001_r_000008_0 on tt2"}); - checkAssignment(scheduler, tracker(taskTrackerManager, "tt2"), new String[] {}); - checkAssignment(scheduler, tracker(taskTrackerManager, "tt1"), new String[] {}); - checkAssignment(scheduler, tracker(taskTrackerManager, "tt2"), new String[] {}); - } - - static TaskTracker tracker(FakeTaskTrackerManager 
taskTrackerManager, - String taskTrackerName) { - return taskTrackerManager.getTaskTracker(taskTrackerName); - } - - static void checkAssignment(TaskScheduler scheduler, TaskTracker taskTracker, - String[] expectedTaskStrings) throws IOException { - List tasks = scheduler.assignTasks(taskTracker); - assertNotNull(tasks); - assertEquals(expectedTaskStrings.length, tasks.size()); - for (int i=0; i < expectedTaskStrings.length; ++i) { - assertEquals(expectedTaskStrings[i], tasks.get(i).toString()); - } - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobRetire.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobRetire.java deleted file mode 100644 index ac68a1577b0..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobRetire.java +++ /dev/null @@ -1,414 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.File; -import java.io.IOException; -import java.net.HttpURLConnection; -import java.net.URL; - -import junit.framework.TestCase; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapred.MiniMRCluster.TaskTrackerRunner; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.SleepJob; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.mapreduce.TaskID; -import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; -import org.apache.hadoop.mapreduce.split.JobSplit; - -/** - * Test if the job retire works fine. 
- */ -public class TestJobRetire extends TestCase { - static final Log LOG = LogFactory.getLog(TestJobRetire.class); - static final Path testDir = - new Path(System.getProperty("test.build.data","/tmp"), - "job-expiry-testing"); - - private MiniMRCluster startCluster(JobConf conf, int numTrackers) - throws IOException { - conf.setBoolean(JTConfig.JT_RETIREJOBS, true); - conf.setLong(JTConfig.JT_RETIREJOB_CACHE_SIZE, 1); - return new MiniMRCluster(0, 0, numTrackers, "file:///", 1, null, null, null, - conf, 0); - } - - public void testJobRetire() throws Exception { - MiniMRCluster mr = null; - try { - JobConf conf = new JobConf(); - mr = startCluster(conf, 1); - - JobConf jobConf = mr.createJobConf(); - JobTracker jobtracker = mr.getJobTrackerRunner().getJobTracker(); - - Path inDir = new Path(testDir, "input1"); - Path outDir = new Path(testDir, "output1"); - - JobID id1 = validateJobRetire(jobConf, inDir, outDir, jobtracker); - - outDir = new Path(testDir, "output2"); - JobID id2 = validateJobRetire(jobConf, inDir, outDir, jobtracker); - - assertNull("Job not removed from cache", jobtracker.getJobStatus(id1)); - - assertEquals("Total job in cache not correct", - 1, jobtracker.getAllJobs().length); - } finally { - if (mr != null) { mr.shutdown();} - FileUtil.fullyDelete(new File(testDir.toString())); - } - } - - private JobID validateJobRetire(JobConf jobConf, Path inDir, Path outDir, - JobTracker jobtracker) throws IOException { - - RunningJob rj = UtilsForTests.runJob(jobConf, inDir, outDir, 0, 0); - rj.waitForCompletion(); - assertTrue(rj.isSuccessful()); - JobID id = rj.getID(); - - //wait for job to get retired - waitTillRetire(id, jobtracker); - - assertTrue("History url not set", rj.getHistoryUrl() != null && - rj.getHistoryUrl().length() > 0); - assertNotNull("Job is not in cache", jobtracker.getJobStatus(id)); - - // get the job conf filename - String name = jobtracker.getLocalJobFilePath(id); - File file = new File(name); - - assertFalse("JobConf file not deleted", file.exists()); - - // test redirections - final String JOBDETAILS = "jobdetails"; - final String JOBCONF = "jobconf"; - final String JOBTASKS = "jobtasks"; - final String TASKSTATS = "taskstats"; - final String TASKDETAILS = "taskdetails"; - - // test redirections of job related pages - String jobUrlStr = rj.getTrackingURL(); - URL jobUrl = new URL(jobUrlStr); - URL jobConfUrl = new URL(jobUrlStr.replace(JOBDETAILS, JOBCONF)); - URL jobTasksUrl = new URL(jobUrlStr.replace(JOBDETAILS, JOBTASKS) - + "&type=map&pagenum=1"); - verifyRedirection(jobConfUrl); - verifyRedirection(jobTasksUrl); - verifyRedirection(jobUrl); - - // test redirections of task and task attempt pages - String jobTrackerUrlStr = - jobUrlStr.substring(0, jobUrlStr.indexOf(JOBDETAILS)); - Path logFile = new Path(jobtracker.getJobHistory().getHistoryFilePath(id)); - JobHistoryParser.JobInfo jobInfo = - JSPUtil.getJobInfo(logFile, logFile.getFileSystem(jobConf), jobtracker); - for (TaskID tid : jobInfo.getAllTasks().keySet()) { - URL taskDetailsUrl = new URL(jobTrackerUrlStr + TASKDETAILS + - ".jsp?tipid=" + tid); - // test redirections of all tasks - verifyRedirection(taskDetailsUrl); - } - for (JobHistoryParser.TaskInfo task : jobInfo.getAllTasks().values()) { - for(org.apache.hadoop.mapreduce.TaskAttemptID attemptid : - task.getAllTaskAttempts().keySet()) { - URL taskstats = new URL(jobTrackerUrlStr + TASKSTATS + - ".jsp?attemptid=" + attemptid); - // test redirections of all task attempts - verifyRedirection(taskstats); - } - } - return id; - } - - 
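A minimal sketch of the retirement-cache behaviour the assertions above rely on: JT_RETIREJOB_CACHE_SIZE is set to 1, so retiring a second job evicts the first job's status while the most recent one stays visible. The LinkedHashMap-based cache below only illustrates that eviction policy; it is not the JobTracker's actual data structure, and the job ids in the usage comment are hypothetical.

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative bounded cache: eldest entry is dropped once the configured
// capacity is exceeded, mirroring what the test asserts for retired jobs.
class RetiredJobCache<K, V> extends LinkedHashMap<K, V> {
  private final int capacity;

  RetiredJobCache(int capacity) {
    super(16, 0.75f, true);   // access-order, so the eldest entry is the least recently used
    this.capacity = capacity;
  }

  @Override
  protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
    return size() > capacity; // evict once we exceed the configured cache size
  }
}

// Usage (hypothetical ids):
//   RetiredJobCache<String, String> cache = new RetiredJobCache<String, String>(1);
//   cache.put("job_1", "SUCCEEDED");
//   cache.put("job_2", "SUCCEEDED");  // evicts job_1, matching the assertNull above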
private void verifyRedirection(URL url) throws IOException { - LOG.info("Verifying redirection of " + url); - HttpURLConnection conn = (HttpURLConnection) url.openConnection(); - conn.setInstanceFollowRedirects(false); - conn.connect(); - assertEquals(HttpURLConnection.HTTP_MOVED_TEMP, conn.getResponseCode()); - conn.disconnect(); - URL redirectedUrl = new URL(conn.getHeaderField("Location")); - conn = (HttpURLConnection) redirectedUrl.openConnection(); - conn.connect(); - assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); - conn.disconnect(); - } - - // wait till the job retires - private void waitTillRetire(JobID id, JobTracker jobtracker) { - JobInProgress job = jobtracker.getJob(id); - //wait for job to get retired - for (int i = 0; i < 10 && job != null; i++) { - UtilsForTests.waitFor(1000); - job = jobtracker.getJob(id); - } - assertNull("Job did not retire", job); - } - - /** - * Custom TaskTracker which waits forever after a successful contact to - * the JobTracker. - */ - class WaitingTaskTracker extends TaskTracker { - - private volatile boolean alive = true; - - WaitingTaskTracker(JobConf conf) throws IOException, InterruptedException { - super(conf); - } - - @Override - HeartbeatResponse transmitHeartBeat(long now) throws IOException { - HeartbeatResponse response = super.transmitHeartBeat(now); - LOG.info("WaitingTaskTracker waiting"); - // wait forever - while (alive) { - UtilsForTests.waitFor(1000); - } - throw new IOException ("WaitingTaskTracker shutdown. Bailing out"); - } - - @Override - public synchronized void shutdown() throws IOException { - alive = false; - super.shutdown(); - } - } - - /** - * Test job retire with tasks that report their *first* status only after the - * job retires. - * Steps : - * - Start a mini-mr cluster with 1 task-tracker having only map slots. - * Note that this task-tracker will take care of setup/cleanup and map - * tasks. - * - Submit a job with 1 map task and 1 reduce task - * - Wait for the job to finish the map task - * - Start a 2nd tracker that waits for a long time after contacting the JT. - * - Wait for the 2nd tracker to get stuck - * - Kill the job - * - Wait for the job to retire - * - Check if the tip mappings are cleaned up. 
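The bounded wait loops in these tests (waitTillRetire above, the map-completion and tracker-join waits in the next test) all share one shape: poll a condition, sleep a fixed interval, give up after a deadline. A self-contained sketch of that idiom follows; it assumes Java 8's BooleanSupplier, and the helper name, timeout, and interval values are illustrative rather than anything defined in Hadoop.

import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

// Illustrative helper: poll a condition until it holds or a deadline passes,
// the same pattern as the UtilsForTests.waitFor(1000) loops in these tests.
final class WaitUtil {
  private WaitUtil() {}

  static boolean pollUntil(BooleanSupplier condition, long timeoutMs, long intervalMs)
      throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (condition.getAsBoolean()) {
        return true;                    // condition met before the deadline
      }
      TimeUnit.MILLISECONDS.sleep(intervalMs);
    }
    return condition.getAsBoolean();    // one final check at the deadline
  }
}

// Example usage (hypothetical): fail the test if the job never retires.
//   assertTrue("Job did not retire",
//       WaitUtil.pollUntil(() -> jobtracker.getJob(id) == null, 10000, 1000));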
- */ - public void testJobRetireWithUnreportedTasks() throws Exception { - MiniMRCluster mr = null; - try { - JobConf conf = new JobConf(); - conf.setInt(TTConfig.TT_MAP_SLOTS, 1); - conf.setInt(TTConfig.TT_REDUCE_SLOTS, 0); - mr = startCluster(conf, 1); - JobTracker jobtracker = mr.getJobTrackerRunner().getJobTracker(); - - // submit a job - Path inDir = new Path(testDir, "in-1"); - Path outDir = new Path(testDir, "out-1"); - JobConf jConf = mr.createJobConf(); - FileInputFormat.setInputPaths(jConf, new Path[] {inDir}); - FileOutputFormat.setOutputPath(jConf, outDir); - SleepJob sleepJob = new SleepJob(); - sleepJob.setConf(jConf); - Job job = sleepJob.createJob(1, 1, 0, 1, 0, 1); - - job.submit(); - JobID id = JobID.downgrade(job.getStatus().getJobID()); - JobInProgress jip = jobtracker.getJob(id); - - // wait 100 secs for the map to complete - for (int i = 0; i < 100 && (jip.finishedMaps() < 1); i++) { - UtilsForTests.waitFor(1000); - } - assertEquals(jip.finishedMaps(), 1); - - // start a tracker that will wait - LOG.info("Adding a waiting tracker"); - TaskTrackerRunner testTrackerRunner = - mr.new TaskTrackerRunner(1, 1, null, mr.createJobConf()) { - @Override - TaskTracker createTaskTracker(JobConf conf) - throws IOException, InterruptedException { - return new WaitingTaskTracker(conf); - } - }; - mr.addTaskTracker(testTrackerRunner); - LOG.info("Waiting tracker added"); - - WaitingTaskTracker testTT = - (WaitingTaskTracker)testTrackerRunner.getTaskTracker(); - - // wait 100 secs for the newly started task-tracker to join - for (int i = 0; i < 1000 && (jobtracker.taskTrackers().size() < 2); i++) { - UtilsForTests.waitFor(100); - } - assertEquals(jobtracker.taskTrackers().size(), 2); - LOG.info("Cluster is now ready"); - - // stop the test-tt as its no longer required - mr.stopTaskTracker(mr.getTaskTrackerID(testTT.getName())); - - // check if a reduce task got scheduled or not - assertEquals("Waiting tracker joined but no reduce task got scheduled", - 1, jip.runningReduces()); - - // kill the job - job.killJob(); - - // check if the reduce task attempt status is missing - TaskInProgress tip = jip.getTasks(TaskType.REDUCE)[0]; - assertNull(tip.getTaskStatus(tip.getAllTaskAttemptIDs()[0])); - - // wait for the job to retire - waitTillRetire(id, jobtracker); - - // check the taskidToTIPMap - for (TaskAttemptID tid : jobtracker.taskidToTIPMap.keySet()) { - LOG.info("TaskidToTIP mapping left over : " + tid); - } - assertEquals("'taskid' to TIP mapping still exists", - 0, jobtracker.taskidToTIPMap.size()); - } finally { - if (mr != null) { mr.shutdown(); } - FileUtil.fullyDelete(new File(testDir.toString())); - } - } - - /** - * (Mock)Test JobTracker.removeJobTasks() which is called only when the job - * retires. 
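testJobRetireWithUnreportedTasks above injects its WaitingTaskTracker by overriding a single factory method of TaskTrackerRunner. The sketch below shows that injection pattern in isolation; every class name in it is a hypothetical stand-in, not a Hadoop type.

// Generic illustration of the injection pattern: the test subclasses a runner
// and overrides its factory method so a stubbed collaborator is created.
class Worker {
  void heartbeat() { /* the real worker would contact a coordinator here */ }
}

class Runner {
  protected Worker createWorker() {     // factory method: the test seam
    return new Worker();
  }
  void start() {
    createWorker().heartbeat();
  }
}

class BlockingWorker extends Worker {
  @Override
  void heartbeat() {
    // A stub would park here (e.g. sleep in a loop) so the worker looks alive
    // but idle, which is what WaitingTaskTracker.transmitHeartBeat() does.
  }
}

// In the test:
//   Runner runner = new Runner() {
//     @Override protected Worker createWorker() { return new BlockingWorker(); }
//   };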
- */ - public void testJobRemoval() throws Exception { - MiniMRCluster mr = null; - try { - JobConf conf = new JobConf(); - mr = startCluster(conf, 0); - JobTracker jobtracker = mr.getJobTrackerRunner().getJobTracker(); - - // test map task removal - testRemoveJobTasks(jobtracker, conf, TaskType.MAP); - // test reduce task removal - testRemoveJobTasks(jobtracker, conf, TaskType.REDUCE); - // test job setup removal - testRemoveJobTasks(jobtracker, conf, TaskType.JOB_SETUP); - // test job cleanup removal - testRemoveJobTasks(jobtracker, conf, TaskType.JOB_CLEANUP); - } finally { - if (mr != null) { mr.shutdown();} - // cleanup - FileUtil.fullyDelete(new File(testDir.toString())); - } - } - - // create a new job and add it to the jobtracker - private JobInProgress createAndAddJob(JobTracker jobtracker, JobConf conf) { - // submit a job in a fake manner - // get the new job-id - JobID id = - new JobID(jobtracker.getTrackerIdentifier(), jobtracker.jobs.size() + 1); - // create a JobInProgress for this fake job - JobInProgress jip = new JobInProgress(id, conf, jobtracker); - - // insert this fake completed job in the jobtracker - jobtracker.jobs.put(id, jip); - - return jip; - } - - // create a new TaskInProgress and make it running by adding it to jobtracker - private TaskInProgress createAndAddTIP(JobTracker jobtracker, - JobInProgress jip, TaskType type) { - JobConf conf = jip.getJobConf(); - JobID id = jip.getJobID(); - // now create a fake tip for this fake job - TaskInProgress tip = null; - if (type == TaskType.MAP) { - tip = new TaskInProgress(id, "dummy", JobSplit.EMPTY_TASK_SPLIT, - jobtracker, conf, jip, 0, 1); - jip.maps = new TaskInProgress[] {tip}; - } else if (type == TaskType.REDUCE) { - tip = new TaskInProgress(id, "dummy", jip.desiredMaps(), 0, - jobtracker, conf, jip, 1); - jip.reduces = new TaskInProgress[] {tip}; - } else if (type == TaskType.JOB_SETUP) { - tip = - new TaskInProgress(id, "dummy", JobSplit.EMPTY_TASK_SPLIT, - jobtracker, conf, jip, 0, 1); - jip.setup = new TaskInProgress[] {tip}; - } else if (type == TaskType.JOB_CLEANUP) { - tip = - new TaskInProgress(id, "dummy", JobSplit.EMPTY_TASK_SPLIT, - jobtracker, conf, jip, 0, 1); - jip.cleanup = new TaskInProgress[] {tip}; - } - return tip; - } - - // create a new Task for the given tip and make it running - private TaskAttemptID createAndAddAttempt(TaskInProgress tip, int attemptId) { - // create a fake attempt for this fake task - TaskAttemptID taskid = new TaskAttemptID(tip.getTIPId(), attemptId); - - // insert this fake task into the jobtracker by making it running - tip.addRunningTask(taskid, "test-tt"); - - return taskid; - } - - // Mock a job run such that the jobtracker is in a state similar to that - // resulting from an actual job run. - // Steps : - // - generate a new job-id - // - create and add a JobInProgress object using the fake job-id - // - create and add a fake tip of the passed type 't' under the fake job - // Note that t can be a MAP or a REDUCE or a JOB_SETUP or a JOB_CLEANUP. 
- // - create and add a fake attempt under the fake tip - // - remove the job from the jobtracker - // - check if the fake attempt is removed from the jobtracker - private void testRemoveJobTasks(JobTracker jobtracker, JobConf conf, - TaskType type) { - // create and submit a job - JobInProgress jip = createAndAddJob(jobtracker, conf); - // create and add a tip - TaskInProgress tip = createAndAddTIP(jobtracker, jip, type); - // create and add an attempt - TaskAttemptID taskid = createAndAddAttempt(tip, 0); - - // this fake attempt should not have any status - assertNull(tip.getTaskStatus(taskid)); - - // remove the job tasks for this fake job from the jobtracker - jobtracker.removeJobTasks(jip); - - // check the taskidToTIPMap - for (TaskAttemptID tid : jobtracker.taskidToTIPMap.keySet()) { - LOG.info("TaskidToTIP : " + tid); - } - - // check if the fake attempt is removed from the jobtracker - assertEquals("'taskid' to TIP mapping still exists", - 0, jobtracker.taskidToTIPMap.size()); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobStatusPersistency.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobStatusPersistency.java deleted file mode 100644 index 9a51ae91cb4..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobStatusPersistency.java +++ /dev/null @@ -1,185 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import java.io.File; -import java.io.OutputStream; -import java.io.OutputStreamWriter; -import java.io.Writer; -import java.io.IOException; -import java.util.Properties; - -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; - -public class TestJobStatusPersistency extends ClusterMapReduceTestCase { - static final Path TEST_DIR = - new Path(System.getProperty("test.build.data","/tmp"), - "job-status-persistence"); - - @Override - protected void setUp() throws Exception { - // Don't start anything by default - } - - private JobID runJob() throws Exception { - OutputStream os = getFileSystem().create(new Path(getInputDir(), "text.txt")); - Writer wr = new OutputStreamWriter(os); - wr.write("hello1\n"); - wr.write("hello2\n"); - wr.write("hello3\n"); - wr.write("hello4\n"); - wr.close(); - - JobConf conf = createJobConf(); - conf.setJobName("mr"); - - conf.setInputFormat(TextInputFormat.class); - - conf.setMapOutputKeyClass(LongWritable.class); - conf.setMapOutputValueClass(Text.class); - - conf.setOutputFormat(TextOutputFormat.class); - conf.setOutputKeyClass(LongWritable.class); - conf.setOutputValueClass(Text.class); - - conf.setMapperClass(org.apache.hadoop.mapred.lib.IdentityMapper.class); - conf.setReducerClass(org.apache.hadoop.mapred.lib.IdentityReducer.class); - - FileInputFormat.setInputPaths(conf, getInputDir()); - - FileOutputFormat.setOutputPath(conf, getOutputDir()); - - return JobClient.runJob(conf).getID(); - } - - public void testNonPersistency() throws Exception { - startCluster(true, null); - JobID jobId = runJob(); - JobClient jc = new JobClient(createJobConf()); - RunningJob rj = jc.getJob(jobId); - assertNotNull(rj); - stopCluster(); - startCluster(false, null); - jc = new JobClient(createJobConf()); - rj = jc.getJob(jobId); - assertNull(rj); - } - - public void testPersistency() throws Exception { - Properties config = new Properties(); - config.setProperty(JTConfig.JT_PERSIST_JOBSTATUS, "true"); - config.setProperty(JTConfig.JT_PERSIST_JOBSTATUS_HOURS, "1"); - startCluster(true, config); - JobID jobId = runJob(); - JobClient jc = new JobClient(createJobConf()); - RunningJob rj0 = jc.getJob(jobId); - assertNotNull(rj0); - boolean sucessfull0 = rj0.isSuccessful(); - String jobName0 = rj0.getJobName(); - Counters counters0 = rj0.getCounters(); - TaskCompletionEvent[] events0 = rj0.getTaskCompletionEvents(0); - - stopCluster(); - startCluster(false, config); - - jc = new JobClient(createJobConf()); - RunningJob rj1 = jc.getJob(jobId); - assertNotNull(rj1); - assertEquals(sucessfull0, rj1.isSuccessful()); - assertEquals(jobName0, rj0.getJobName()); - assertEquals(counters0.size(), rj1.getCounters().size()); - - TaskCompletionEvent[] events1 = rj1.getTaskCompletionEvents(0); - assertEquals(events0.length, events1.length); - for (int i = 0; i < events0.length; i++) { - assertEquals(events0[i].getTaskAttemptId(), events1[i].getTaskAttemptId()); - assertEquals(events0[i].getTaskStatus(), events1[i].getTaskStatus()); - } - } - - /** - * Test if the completed job status is persisted to localfs. 
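testPersistency() above follows a capture / restart / compare structure: record the observable job fields, restart the cluster with persistence enabled, then compare field by field. A stripped-down sketch of that structure follows; JobSnapshot and its fields are invented for illustration and carry only the kinds of values the test compares.

// Illustrative value object for the restart round-trip: capture before the
// restart, capture again after, and assert the two snapshots agree.
final class JobSnapshot {
  final boolean successful;
  final String name;
  final int counterCount;

  JobSnapshot(boolean successful, String name, int counterCount) {
    this.successful = successful;
    this.name = name;
    this.counterCount = counterCount;
  }

  void assertMatches(JobSnapshot other) {
    if (successful != other.successful
        || !name.equals(other.name)
        || counterCount != other.counterCount) {
      throw new AssertionError("job status changed across restart");
    }
  }
}

// Shape of the test:
//   JobSnapshot before = new JobSnapshot(rj0.isSuccessful(), rj0.getJobName(), counters0.size());
//   // stop and restart the cluster with persistence enabled
//   JobSnapshot after  = new JobSnapshot(rj1.isSuccessful(), rj1.getJobName(), rj1.getCounters().size());
//   before.assertMatches(after);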
- */ - public void testLocalPersistency() throws Exception { - FileSystem fs = FileSystem.getLocal(new JobConf()); - - fs.delete(TEST_DIR, true); - - Properties config = new Properties(); - config.setProperty(JTConfig.JT_PERSIST_JOBSTATUS, "true"); - config.setProperty(JTConfig.JT_PERSIST_JOBSTATUS_HOURS, "1"); - config.setProperty(JTConfig.JT_PERSIST_JOBSTATUS_DIR, - fs.makeQualified(TEST_DIR).toString()); - startCluster(true, config); - JobID jobId = runJob(); - JobClient jc = new JobClient(createJobConf()); - RunningJob rj = jc.getJob(jobId); - assertNotNull(rj); - - // check if the local fs has the data - Path jobInfo = new Path(TEST_DIR, rj.getID() + ".info"); - assertTrue("Missing job info from the local fs", fs.exists(jobInfo)); - fs.delete(TEST_DIR, true); - } - - /** - * Verify that completed-job store is inactive if the jobinfo path is not - * writable. - * - * @throws Exception - */ - public void testJobStoreDisablingWithInvalidPath() throws Exception { - MiniMRCluster mr = null; - Path parent = new Path(TEST_DIR, "parent"); - try { - FileSystem fs = FileSystem.getLocal(new JobConf()); - - if (fs.exists(TEST_DIR) && !fs.delete(TEST_DIR, true)) { - fail("Cannot delete TEST_DIR!"); - } - - if (fs.mkdirs(new Path(TEST_DIR, parent))) { - if (!(new File(parent.toUri().getPath()).setWritable(false, false))) { - fail("Cannot chmod parent!"); - } - } else { - fail("Cannot create parent dir!"); - } - JobConf config = new JobConf(); - config.set(JTConfig.JT_PERSIST_JOBSTATUS, "true"); - config.set(JTConfig.JT_PERSIST_JOBSTATUS_HOURS, "1"); - config.set(JTConfig.JT_PERSIST_JOBSTATUS_DIR, new Path(parent, - "child").toUri().getPath()); - boolean started = true; - JobConf conf = MiniMRCluster.configureJobConf(config, "file:///", 0, 0, null); - try { - JobTracker jt = JobTracker.startTracker(conf); - } catch (IOException ex) { - started = false; - } - assertFalse(started); - } finally { - new File(parent.toUri().getPath()).setWritable(true, false); - } - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerInstrumentation.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerInstrumentation.java deleted file mode 100644 index 0262060ea2b..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerInstrumentation.java +++ /dev/null @@ -1,394 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -import junit.extensions.TestSetup; -import junit.framework.Test; -import junit.framework.TestCase; -import junit.framework.TestSuite; - -import org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobTrackerMetricsInst; -import org.apache.hadoop.mapred.TestTaskTrackerBlacklisting.FakeJobTracker; -import org.apache.hadoop.mapred.UtilsForTests.FakeClock; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; - -@SuppressWarnings("deprecation") -public class TestJobTrackerInstrumentation extends TestCase { - - static String trackers[] = new String[] { - "tracker_tracker1:1000", - "tracker_tracker2:1000", - "tracker_tracker3:1000" }; - - static String hosts[] = new String[] { "tracker1", "tracker2", "tracker3" }; - // heartbeat responseId. increment this after sending a heartbeat - private static short responseId = 1; - - private static FakeJobTracker jobTracker; - private static FakeJobInProgress fakeJob; - - private static int mapSlotsPerTracker = 4; - private static int reduceSlotsPerTracker = 2; - - private static int numMapSlotsToReserve = 2; - private static int numReduceSlotsToReserve = 2; - - private static FakeJobTrackerMetricsInst mi; - - - - public static Test suite() { - TestSetup setup = - new TestSetup(new TestSuite(TestJobTrackerInstrumentation.class)) { - protected void setUp() throws Exception { - JobConf conf = new JobConf(); - conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:0"); - conf.set(JTConfig.JT_HTTP_ADDRESS, "0.0.0.0:0"); - conf.setInt(JTConfig.JT_MAX_TRACKER_BLACKLISTS, 1); - conf.setClass(JTConfig.JT_TASK_SCHEDULER, - FakeTaskScheduler.class, TaskScheduler.class); - - conf.set(JTConfig.JT_INSTRUMENTATION, - FakeJobTrackerMetricsInst.class.getName()); - jobTracker = new FakeJobTracker(conf, new FakeClock(), trackers); - mi = (FakeJobTrackerMetricsInst) jobTracker.getInstrumentation(); - for (String tracker : trackers) { - FakeObjectUtilities.establishFirstContact(jobTracker, tracker); - } - - } - protected void tearDown() throws Exception { - } - }; - return setup; - } - - private TaskTrackerStatus getTTStatus(String trackerName, - List taskStatuses) { - return new TaskTrackerStatus(trackerName, - JobInProgress.convertTrackerNameToHostName(trackerName), 0, - taskStatuses, 0, mapSlotsPerTracker, reduceSlotsPerTracker); - } - - public void testMetrics() throws Exception { - TaskAttemptID[] taskAttemptID = new TaskAttemptID[3]; - - // create TaskTrackerStatus and send heartbeats - TaskTrackerStatus[] status = new TaskTrackerStatus[trackers.length]; - status[0] = getTTStatus(trackers[0], new ArrayList()); - status[1] = getTTStatus(trackers[1], new ArrayList()); - status[2] = getTTStatus(trackers[2], new ArrayList()); - for (int i = 0; i< trackers.length; i++) { - FakeObjectUtilities.sendHeartBeat(jobTracker, status[i], false, - false, trackers[i], responseId); - } - responseId++; - - assertEquals("Mismatch in number of trackers", - trackers.length, mi.numTrackers); - - int numMaps = 2; - int numReds = 1; - JobConf conf = new JobConf(); - conf.setSpeculativeExecution(false); - conf.setNumMapTasks(numMaps); - conf.setNumReduceTasks(numReds); - conf.setMaxTaskFailuresPerTracker(1); - conf.setBoolean(JobContext.SETUP_CLEANUP_NEEDED, false); - - FakeJobInProgress job = new 
FakeJobInProgress(conf, jobTracker); - assertTrue(mi.numJobsPreparing == 1); - - job.setClusterSize(trackers.length); - job.initTasks(); - jobTracker.addJob(job.getJobID(), job); - - taskAttemptID[0] = job.findMapTask(trackers[0]); - taskAttemptID[1] = job.findMapTask(trackers[1]); - taskAttemptID[2] = job.findReduceTask(trackers[2]); - - job.finishTask(taskAttemptID[0]); - job.finishTask(taskAttemptID[1]); - job.finishTask(taskAttemptID[2]); - jobTracker.finalizeJob(job); - - assertTrue("Mismatch in map tasks launched", - mi.numMapTasksLaunched == numMaps); - assertTrue("Mismatch in map tasks completed", - mi.numMapTasksCompleted == numMaps); - assertTrue("Mismatch in map tasks failed", - mi.numMapTasksFailed == 0); - - assertTrue("Mismatch in reduce tasks launched", - mi.numReduceTasksLaunched == numReds); - assertTrue("Mismatch in reduce tasks completed", - mi.numReduceTasksCompleted == numReds); - assertTrue("Mismatch in reduce tasks failed", - mi.numReduceTasksFailed == 0); - - assertTrue("Mismatch in num Jobs submitted", - mi.numJobsSubmitted == 1); - - assertTrue("Mismatch in num map slots", - mi.numMapSlots == (mapSlotsPerTracker * trackers.length)); - assertTrue("Mismatch in num reduce slots", - mi.numReduceSlots == (reduceSlotsPerTracker * trackers.length)); - - assertTrue("No heartbeats were recorded, but at least one was sent.", - mi.numHeartbeats > 0); - } - - public void testBlackListing() throws IOException { - int numMaps, numReds; - JobConf conf = new JobConf(); - conf.setSpeculativeExecution(false); - conf.setMaxTaskFailuresPerTracker(1); - conf.setBoolean(JobContext.SETUP_CLEANUP_NEEDED, false); - TaskAttemptID[] taskAttemptID = new TaskAttemptID[3]; - - numMaps = 1; - numReds = 1; - conf.setNumMapTasks(numMaps); - conf.setNumReduceTasks(numReds); - conf.setBoolean(JobContext.SETUP_CLEANUP_NEEDED, false); - - FakeJobInProgress job1 = new FakeJobInProgress(conf, jobTracker); - job1.setClusterSize(trackers.length); - job1.initTasks(); - jobTracker.addJob(job1.getJobID(), job1); - taskAttemptID[0] = job1.findMapTask(trackers[0]); - job1.failTask(taskAttemptID[0]); - taskAttemptID[1] = job1.findMapTask(trackers[1]); - job1.finishTask(taskAttemptID[1]); - taskAttemptID[2] = job1.findReduceTask(trackers[0]); - job1.failTask(taskAttemptID[2]); - taskAttemptID[2] = job1.findReduceTask(trackers[2]); - job1.finishTask(taskAttemptID[2]); - jobTracker.finalizeJob(job1); - - assertEquals("Mismatch in number of failed map tasks", - 1, mi.numMapTasksFailed); - assertEquals("Mismatch in number of failed reduce tasks", - 1, mi.numReduceTasksFailed); - - assertEquals("Mismatch in number of blacklisted trackers", - 1, mi.numTrackersBlackListed); - - assertEquals("Mismatch in blacklisted map slots", - mi.numBlackListedMapSlots, - (mapSlotsPerTracker * mi.numTrackersBlackListed)); - - assertEquals("Mismatch in blacklisted reduce slots", - mi.numBlackListedReduceSlots, - (reduceSlotsPerTracker * mi.numTrackersBlackListed)); - } - - public void testOccupiedSlotCounts() throws Exception { - - TaskTrackerStatus[] status = new TaskTrackerStatus[trackers.length]; - - List list = new ArrayList(); - - // create a map task status, which uses 2 slots. 
- int mapSlotsPerTask = 2; - TaskStatus ts = TaskStatus.createTaskStatus(true, - new TaskAttemptID("jt", 1, TaskType.MAP, 0, 0), 0.0f, mapSlotsPerTask, - TaskStatus.State.RUNNING, "", "", trackers[0], - TaskStatus.Phase.MAP, null); - list.add(ts); - int mapSlotsPerTask1 = 1; - ts = TaskStatus.createTaskStatus(true, - new TaskAttemptID("jt", 1, TaskType.MAP, 0, 0), 0.0f, mapSlotsPerTask1, - TaskStatus.State.RUNNING, "", "", trackers[0], - TaskStatus.Phase.MAP, null); - list.add(ts); - - // create a reduce task status, which uses 3 slot. - int reduceSlotsPerTask = 3; - ts = TaskStatus.createTaskStatus(false, - new TaskAttemptID("jt", 1, TaskType.REDUCE, 0, 0), 0.0f, - reduceSlotsPerTask, - TaskStatus.State.RUNNING, "", "", trackers[0], - TaskStatus.Phase.REDUCE, null); - list.add(ts); - int reduceSlotsPerTask1 = 1; - ts = TaskStatus.createTaskStatus(false, - new TaskAttemptID("jt", 1, TaskType.REDUCE, 0, 0), 0.0f, - reduceSlotsPerTask1, - TaskStatus.State.RUNNING, "", "", trackers[0], - TaskStatus.Phase.REDUCE, null); - list.add(ts); - - // create TaskTrackerStatus and send heartbeats - status = new TaskTrackerStatus[trackers.length]; - status[0] = getTTStatus(trackers[0], list); - status[1] = getTTStatus(trackers[1], new ArrayList()); - status[2] = getTTStatus(trackers[2], new ArrayList()); - for (int i = 0; i< trackers.length; i++) { - FakeObjectUtilities.sendHeartBeat(jobTracker, status[i], false, - false, trackers[i], responseId); - } - responseId++; - - assertEquals("Mismatch in map slots occupied", - mapSlotsPerTask+mapSlotsPerTask1, mi.numOccupiedMapSlots); - assertEquals("Mismatch in reduce slots occupied", - reduceSlotsPerTask+reduceSlotsPerTask1, mi.numOccupiedReduceSlots); - assertEquals("Mismatch in num running maps", - 2, mi.numRunningMaps); - assertEquals("Mismatch in num running reduces", - 2, mi.numRunningReduces); - - //now send heartbeat with no running tasks - status = new TaskTrackerStatus[1]; - status[0] = getTTStatus(trackers[0], new ArrayList()); - FakeObjectUtilities.sendHeartBeat(jobTracker, status[0], false, - false, trackers[0], responseId); - - assertEquals("Mismatch in map slots occupied", - 0, mi.numOccupiedMapSlots); - assertEquals("Mismatch in reduce slots occupied", - 0, mi.numOccupiedReduceSlots); - assertEquals("Mismatch in num running maps", - 0, mi.numRunningMaps); - assertEquals("Mismatch in num running reduces", - 0, mi.numRunningReduces); - } - - public void testReservedSlots() throws IOException { - JobConf conf = new JobConf(); - conf.setNumMapTasks(1); - conf.setNumReduceTasks(1); - conf.setSpeculativeExecution(false); - - //Set task tracker objects for reservation. 
- TaskTracker tt2 = jobTracker.getTaskTracker(trackers[1]); - TaskTrackerStatus status2 = new TaskTrackerStatus( - trackers[1],JobInProgress.convertTrackerNameToHostName( - trackers[1]),0,new ArrayList(), 0, 2, 2); - tt2.setStatus(status2); - - fakeJob = new FakeJobInProgress(conf, jobTracker); - fakeJob.setClusterSize(3); - fakeJob.initTasks(); - - FakeObjectUtilities.sendHeartBeat(jobTracker, status2, false, - true, trackers[1], responseId); - responseId++; - - assertEquals("Mismtach in reserved map slots", - numMapSlotsToReserve, mi.numReservedMapSlots); - assertEquals("Mismtach in reserved red slots", - numReduceSlotsToReserve, mi.numReservedReduceSlots); - } - - public void testDecomissionedTrackers() throws IOException { - // create TaskTrackerStatus and send heartbeats - TaskTrackerStatus[] status = new TaskTrackerStatus[trackers.length]; - status[0] = getTTStatus(trackers[0], new ArrayList()); - status[1] = getTTStatus(trackers[1], new ArrayList()); - status[2] = getTTStatus(trackers[2], new ArrayList()); - for (int i = 0; i< trackers.length; i++) { - FakeObjectUtilities.sendHeartBeat(jobTracker, status[i], false, - false, trackers[i], responseId); - } - - assertEquals("Mismatch in number of trackers", - trackers.length, mi.numTrackers); - Set dHosts = new HashSet(); - dHosts.add(hosts[1]); - assertEquals("Mismatch in number of decommissioned trackers", - 0, mi.numTrackersDecommissioned); - jobTracker.decommissionNodes(dHosts); - assertEquals("Mismatch in number of decommissioned trackers", - 1, mi.numTrackersDecommissioned); - assertEquals("Mismatch in number of trackers", - trackers.length - 1, mi.numTrackers); - } - - public void testKillTasks() throws IOException { - int numMaps, numReds; - JobConf conf = new JobConf(); - conf.setSpeculativeExecution(false); - conf.setMaxTaskFailuresPerTracker(1); - conf.setBoolean(JobContext.SETUP_CLEANUP_NEEDED, false); - TaskAttemptID[] taskAttemptID = new TaskAttemptID[2]; - - numMaps = 1; - numReds = 1; - conf.setNumMapTasks(numMaps); - conf.setNumReduceTasks(numReds); - conf.setBoolean(JobContext.SETUP_CLEANUP_NEEDED, false); - - assertEquals("Mismatch in number of killed map tasks", - 0, mi.numMapTasksKilled); - assertEquals("Mismatch in number of killed reduce tasks", - 0, mi.numReduceTasksKilled); - - FakeJobInProgress job1 = new FakeJobInProgress(conf, jobTracker); - job1.setClusterSize(trackers.length); - job1.initTasks(); - jobTracker.addJob(job1.getJobID(), job1); - taskAttemptID[0] = job1.findMapTask(trackers[0]); - job1.killTask(taskAttemptID[0]); - taskAttemptID[1] = job1.findReduceTask(trackers[0]); - job1.killTask(taskAttemptID[1]); - jobTracker.finalizeJob(job1); - - assertEquals("Mismatch in number of killed map tasks", - 1, mi.numMapTasksKilled); - assertEquals("Mismatch in number of killed reduce tasks", - 1, mi.numReduceTasksKilled); - } - - static class FakeTaskScheduler extends JobQueueTaskScheduler { - public FakeTaskScheduler() { - super(); - } - public List assignTasks(TaskTracker tt) { - tt.reserveSlots(TaskType.MAP, fakeJob, numMapSlotsToReserve); - tt.reserveSlots(TaskType.REDUCE, fakeJob, numReduceSlotsToReserve); - return new ArrayList(); - } - } - - static class FakeJobInProgress extends - org.apache.hadoop.mapred.TestTaskTrackerBlacklisting.FakeJobInProgress { - - FakeJobInProgress(JobConf jobConf, JobTracker tracker) throws IOException { - super(jobConf, tracker); - } - - @Override - public synchronized void initTasks() throws IOException { - super.initTasks(); - 
jobtracker.getInstrumentation().addWaitingMaps(getJobID(), - numMapTasks); - jobtracker.getInstrumentation().addWaitingReduces(getJobID(), - numReduceTasks); - } - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerStart.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerStart.java deleted file mode 100644 index 8f78074b504..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerStart.java +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import org.apache.hadoop.mapred.UtilsForTests.FakeClock; - -import junit.framework.TestCase; - -/** - * Test {@link JobTracker} w.r.t config parameters. - */ -public class TestJobTrackerStart extends TestCase { - - public void testJobTrackerStartConfig() throws Exception { - JobConf conf = new JobConf(); - conf = MiniMRCluster.configureJobConf(conf, "file:///", 0, 0, null); - - // test JobTracker's default clock - Clock c = JobTracker.getClock(); - assertNotNull(c); - assertEquals(c, JobTracker.DEFAULT_CLOCK); - - // test with default values - JobTracker jt = JobTracker.startTracker(conf); - c = JobTracker.getClock(); - // test clock - assertNotNull(c); - assertEquals(c, JobTracker.DEFAULT_CLOCK); - // test identifier - assertEquals(12, jt.getTrackerIdentifier().length()); // correct upto mins - jt.stopTracker(); - - // test with special clock - FakeClock myClock = new FakeClock(); - jt = JobTracker.startTracker(conf, myClock); - c = JobTracker.getClock(); - assertNotNull(c); - assertEquals(c, myClock); - jt.stopTracker(); - - // test with special identifier - String identifier = "test-identifier"; - jt = JobTracker.startTracker(conf, JobTracker.DEFAULT_CLOCK, identifier); - assertEquals(identifier, jt.getTrackerIdentifier()); - jt.stopTracker(); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerXmlJsp.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerXmlJsp.java deleted file mode 100644 index e1074c03376..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobTrackerXmlJsp.java +++ /dev/null @@ -1,55 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.IOException; -import javax.xml.parsers.DocumentBuilder; -import javax.xml.parsers.DocumentBuilderFactory; -import javax.xml.parsers.ParserConfigurationException; -import org.w3c.dom.Document; -import org.w3c.dom.NodeList; -import org.xml.sax.SAXException; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -public class TestJobTrackerXmlJsp extends ClusterMapReduceTestCase { - - private static final Log LOG = LogFactory.getLog(TestJobTrackerXmlJsp.class); - - /** - * Read the jobtracker.jspx status page and validate that the XML is well formed. - */ - public void testXmlWellFormed() throws IOException, ParserConfigurationException, SAXException { - MiniMRCluster cluster = getMRCluster(); - int infoPort = cluster.getJobTrackerRunner().getJobTrackerInfoPort(); - - String xmlJspUrl = "http://localhost:" + infoPort + "/jobtracker.jspx"; - LOG.info("Retrieving XML from URL: " + xmlJspUrl); - - DocumentBuilder parser = DocumentBuilderFactory.newInstance().newDocumentBuilder(); - Document doc = parser.parse(xmlJspUrl); - - // If we get here, then the document was successfully parsed by SAX and is well-formed. - LOG.info("Document received and parsed."); - - // Make sure it has a element as top-level. - NodeList clusterNodes = doc.getElementsByTagName("cluster"); - assertEquals("There should be exactly 1 element", 1, clusterNodes.getLength()); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJvmManager.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJvmManager.java deleted file mode 100644 index 29faa5dc0e9..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJvmManager.java +++ /dev/null @@ -1,327 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import java.io.BufferedReader; -import java.io.File; -import java.io.FileOutputStream; -import java.io.FileReader; -import java.io.IOException; -import java.util.HashMap; -import java.util.Vector; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.Executors; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.mapred.JvmManager.JvmManagerForType; -import org.apache.hadoop.mapred.JvmManager.JvmManagerForType.JvmRunner; -import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; -import org.junit.After; -import static org.junit.Assert.*; -import org.junit.Before; -import org.junit.Test; - -public class TestJvmManager { - static final Log LOG = LogFactory.getLog(TestJvmManager.class); - - private static File TEST_DIR = new File(System.getProperty("test.build.data", - "/tmp"), TestJvmManager.class.getSimpleName()); - private static int MAP_SLOTS = 10; - private static int REDUCE_SLOTS = 10; - private TaskTracker tt; - private JvmManager jvmManager; - private JobConf ttConf; - - @Before - public void setUp() { - TEST_DIR.mkdirs(); - } - - @After - public void tearDown() { - FileUtil.fullyDelete(TEST_DIR); - } - - public TestJvmManager() throws Exception { - tt = new TaskTracker(); - ttConf = new JobConf(); - ttConf.setLong(TTConfig.TT_SLEEP_TIME_BEFORE_SIG_KILL, 2000); - tt.setConf(ttConf); - tt.setMaxMapSlots(MAP_SLOTS); - tt.setMaxReduceSlots(REDUCE_SLOTS); - tt.setTaskController(new DefaultTaskController()); - jvmManager = new JvmManager(tt); - tt.setJvmManagerInstance(jvmManager); - } - - // write a shell script to execute the command. - private File writeScript(String fileName, String cmd, File pidFile) throws IOException { - File script = new File(TEST_DIR, fileName); - FileOutputStream out = new FileOutputStream(script); - // write pid into a file - out.write(("echo $$ >" + pidFile.toString() + ";").getBytes()); - // ignore SIGTERM - out.write(("trap '' 15\n").getBytes()); - // write the actual command it self. - out.write(cmd.getBytes()); - out.close(); - script.setExecutable(true); - return script; - } - - /** - * Tests the jvm kill from JvmRunner and JvmManager simultaneously. - * - * Starts a process, which sleeps for 60 seconds, in a thread. - * Calls JvmRunner.kill() in a thread. - * Also calls JvmManager.taskKilled(). - * Makes sure that the jvm is killed and JvmManager could launch another task - * properly. 
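testJvmKill() below drives a kill from two threads at once (JvmRunner.kill() on one thread, TaskRunner.kill() on another) and expects the JVM to die exactly once. The sketch below illustrates the usual way to make such a kill idempotent under concurrency; it is a generic example, not the JvmManager implementation.

import java.util.concurrent.atomic.AtomicBoolean;

// Illustrative kill that is safe to invoke from multiple threads: only the
// first caller performs the real work, every later call is a no-op.
class KillOnce {
  private final AtomicBoolean killed = new AtomicBoolean(false);

  void kill() {
    // compareAndSet guarantees exactly one caller wins the race.
    if (killed.compareAndSet(false, true)) {
      destroyProcess();
    }
  }

  boolean isKilled() {
    return killed.get();
  }

  private void destroyProcess() {
    // placeholder for the real signal/cleanup logic
  }
}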
- * @throws Exception - */ - @Test - public void testJvmKill() throws Exception { - JvmManagerForType mapJvmManager = jvmManager - .getJvmManagerForType(TaskType.MAP); - // launch a jvm - JobConf taskConf = new JobConf(ttConf); - TaskAttemptID attemptID = new TaskAttemptID("test", 0, TaskType.MAP, 0, 0); - MapTask task = new MapTask(null, attemptID, 0, null, 1); - task.setConf(taskConf); - TaskInProgress tip = tt.new TaskInProgress(task, taskConf); - File pidFile = new File(TEST_DIR, "pid"); - final TaskRunner taskRunner = new MapTaskRunner(tip, tt, taskConf); - // launch a jvm which sleeps for 60 seconds - final Vector vargs = new Vector(2); - vargs.add(writeScript("SLEEP", "sleep 60\n", pidFile).getAbsolutePath()); - final File workDir = new File(TEST_DIR, "work"); - workDir.mkdir(); - final File stdout = new File(TEST_DIR, "stdout"); - final File stderr = new File(TEST_DIR, "stderr"); - - // launch the process and wait in a thread, till it finishes - Thread launcher = new Thread() { - public void run() { - try { - taskRunner.launchJvmAndWait(null, vargs, stdout, stderr, 100, - workDir, null); - } catch (InterruptedException e) { - e.printStackTrace(); - return; - } - } - }; - launcher.start(); - // wait till the jvm is launched - // this loop waits for at most 1 second - for (int i = 0; i < 10; i++) { - if (pidFile.exists()) { - break; - } - UtilsForTests.waitFor(100); - } - // assert that the process is launched - assertTrue("pidFile is not present", pidFile.exists()); - - // imitate Child code. - // set pid in jvmManager - BufferedReader in = new BufferedReader(new FileReader(pidFile)); - String pid = in.readLine(); - in.close(); - JVMId jvmid = mapJvmManager.runningTaskToJvm.get(taskRunner); - jvmManager.setPidToJvm(jvmid, pid); - - // kill JvmRunner - final JvmRunner jvmRunner = mapJvmManager.jvmIdToRunner.get(jvmid); - Thread killer = new Thread() { - public void run() { - jvmRunner.kill(); - } - }; - killer.start(); - - //wait for a while so that killer thread is started. - Thread.sleep(100); - - // kill the jvm externally - taskRunner.kill(); - - assertTrue(jvmRunner.killed); - - // launch another jvm and see it finishes properly - attemptID = new TaskAttemptID("test", 0, TaskType.MAP, 0, 1); - task = new MapTask(null, attemptID, 0, null, 1); - task.setConf(taskConf); - tip = tt.new TaskInProgress(task, taskConf); - TaskRunner taskRunner2 = new MapTaskRunner(tip, tt, taskConf); - // build dummy vargs to call ls - Vector vargs2 = new Vector(1); - vargs2.add(writeScript("LS", "ls", pidFile).getAbsolutePath()); - File workDir2 = new File(TEST_DIR, "work2"); - workDir.mkdir(); - File stdout2 = new File(TEST_DIR, "stdout2"); - File stderr2 = new File(TEST_DIR, "stderr2"); - taskRunner2.launchJvmAndWait(null, vargs2, stdout2, stderr2, 100, workDir2, - null); - // join all the threads - killer.join(); - jvmRunner.join(); - launcher.join(); - } - - - /** - * Create a bunch of tasks and use a special hash map to detect - * racy access to the various internal data structures of JvmManager. - * (Regression test for MAPREDUCE-2224) - */ - @Test - public void testForRaces() throws Exception { - JvmManagerForType mapJvmManager = jvmManager - .getJvmManagerForType(TaskType.MAP); - - // Sub out the HashMaps for maps that will detect racy access. - mapJvmManager.jvmToRunningTask = new RaceHashMap(); - mapJvmManager.runningTaskToJvm = new RaceHashMap(); - mapJvmManager.jvmIdToRunner = new RaceHashMap(); - - // Launch a bunch of JVMs, but only allow MAP_SLOTS to run at once. 
- final ExecutorService exec = Executors.newFixedThreadPool(MAP_SLOTS); - final AtomicReference failed = - new AtomicReference(); - - for (int i = 0; i < MAP_SLOTS*5; i++) { - JobConf taskConf = new JobConf(ttConf); - TaskAttemptID attemptID = new TaskAttemptID("test", 0, TaskType.MAP, i, 0); - Task task = new MapTask(null, attemptID, i, null, 1); - task.setConf(taskConf); - TaskInProgress tip = tt.new TaskInProgress(task, taskConf); - File pidFile = new File(TEST_DIR, "pid_" + i); - final TaskRunner taskRunner = new MapTaskRunner(tip, tt, taskConf); - // launch a jvm which sleeps for 60 seconds - final Vector vargs = new Vector(2); - vargs.add(writeScript("script_" + i, "echo hi\n", pidFile).getAbsolutePath()); - final File workDir = new File(TEST_DIR, "work_" + i); - workDir.mkdir(); - final File stdout = new File(TEST_DIR, "stdout_" + i); - final File stderr = new File(TEST_DIR, "stderr_" + i); - - // launch the process and wait in a thread, till it finishes - Runnable launcher = new Runnable() { - public void run() { - try { - taskRunner.launchJvmAndWait(null, vargs, stdout, stderr, 100, - workDir, null); - } catch (Throwable t) { - failed.compareAndSet(null, t); - exec.shutdownNow(); - return; - } - } - }; - exec.submit(launcher); - } - - exec.shutdown(); - exec.awaitTermination(3, TimeUnit.MINUTES); - if (failed.get() != null) { - throw new RuntimeException(failed.get()); - } - } - - /** - * HashMap which detects racy usage by sleeping during operations - * and checking that no other threads access the map while asleep. - */ - static class RaceHashMap extends HashMap { - Object syncData = new Object(); - RuntimeException userStack = null; - boolean raced = false; - - private void checkInUse() { - synchronized (syncData) { - RuntimeException thisStack = new RuntimeException(Thread.currentThread().toString()); - - if (userStack != null && raced == false) { - RuntimeException other = userStack; - raced = true; - LOG.fatal("Race between two threads."); - LOG.fatal("First", thisStack); - LOG.fatal("Second", other); - throw new RuntimeException("Raced"); - } else { - userStack = thisStack; - } - } - } - - private void sleepABit() { - try { - Thread.sleep(60); - } catch (InterruptedException ie) { - Thread.currentThread().interrupt(); - } - } - - private void done() { - synchronized (syncData) { - userStack = null; - } - } - - @Override - public V get(Object key) { - checkInUse(); - try { - sleepABit(); - return super.get(key); - } finally { - done(); - } - } - - @Override - public boolean containsKey(Object key) { - checkInUse(); - try { - sleepABit(); - return super.containsKey(key); - } finally { - done(); - } - } - - @Override - public V put(K key, V val) { - checkInUse(); - try { - sleepABit(); - return super.put(key, val); - } finally { - done(); - } - } - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJvmReuse.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJvmReuse.java deleted file mode 100644 index 8e279df11da..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJvmReuse.java +++ /dev/null @@ -1,172 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.DataOutputStream; -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.IntWritable; -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapreduce.MapReduceTestUtil; -import org.apache.hadoop.mapreduce.TaskType; -import static org.junit.Assert.*; -import org.junit.Test; - -public class TestJvmReuse { - private static Path rootDir = new Path(System.getProperty("test.build.data", - "/tmp"), TestJvmReuse.class.getName()); - private int numMappers = 5; - private static int taskWithCleanup = 2; // third task - - /** - * A mapper class in which all attempts log taskid. Zeroth attempt of task - * with id=taskWithCleanup, fails with System.exit to force a cleanup attempt - * for the task in a new jvm. - */ - public static class MapperClass extends MapReduceBase implements - Mapper { - String taskid; - static int instances = 0; - Reporter reporter = null; - - public void configure(JobConf job) { - taskid = job.get("mapred.task.id"); - } - - public void map(LongWritable key, Text value, - OutputCollector output, Reporter reporter) - throws IOException { - System.err.println(taskid); - this.reporter = reporter; - - if (TaskAttemptID.forName(taskid).getTaskID().getId() == taskWithCleanup) { - if (taskid.endsWith("_0")) { - System.exit(-1); - } - } - } - - public void close() throws IOException { - reporter.incrCounter("jvm", "use", ++instances); - } - } - - public RunningJob launchJob(JobConf conf, Path inDir, Path outDir) - throws IOException { - // set up the input file system and write input text. - FileSystem inFs = inDir.getFileSystem(conf); - FileSystem outFs = outDir.getFileSystem(conf); - outFs.delete(outDir, true); - if (!inFs.mkdirs(inDir)) { - throw new IOException("Mkdirs failed to create " + inDir.toString()); - } - for (int i = 0; i < numMappers; i++) { - // write input into input file - DataOutputStream file = inFs.create(new Path(inDir, "part-" + i)); - file.writeBytes("input"); - file.close(); - } - - // configure the mapred Job - conf.setMapperClass(MapperClass.class); - conf.setNumReduceTasks(0); - FileInputFormat.setInputPaths(conf, inDir); - FileOutputFormat.setOutputPath(conf, outDir); - // enable jvm reuse - conf.setNumTasksToExecutePerJvm(-1); - // return the RunningJob handle. 
- return new JobClient(conf).submitJob(conf); - } - - private void validateAttempt(TaskInProgress tip, TaskAttemptID attemptId, - TaskStatus ts, boolean isCleanup) throws IOException { - assertEquals(isCleanup, tip.isCleanupAttempt(attemptId)); - // validate tasklogs for task attempt - String log = MapReduceTestUtil.readTaskLog(TaskLog.LogName.STDERR, - attemptId, false); - assertTrue(log.equals(attemptId.toString())); - assertTrue(ts != null); - if (!isCleanup) { - assertEquals(TaskStatus.State.SUCCEEDED, ts.getRunState()); - } else { - assertEquals(TaskStatus.State.FAILED, ts.getRunState()); - // validate tasklogs for cleanup attempt - log = MapReduceTestUtil.readTaskLog(TaskLog.LogName.STDERR, attemptId, - true); - assertTrue(log.equals(TestTaskFail.cleanupLog)); - } - } - - // validates logs of all attempts of the job. - private void validateJob(RunningJob job, MiniMRCluster mr) throws IOException { - assertEquals(JobStatus.SUCCEEDED, job.getJobState()); - long uses = job.getCounters().findCounter("jvm", "use").getValue(); - assertTrue("maps = " + numMappers + ", jvms = " + uses, numMappers < uses); - - JobID jobId = job.getID(); - - for (int i = 0; i < numMappers; i++) { - TaskAttemptID attemptId = new TaskAttemptID(new TaskID(jobId, - TaskType.MAP, i), 0); - TaskInProgress tip = mr.getJobTrackerRunner().getJobTracker().getTip( - attemptId.getTaskID()); - TaskStatus ts = mr.getJobTrackerRunner().getJobTracker().getTaskStatus( - attemptId); - validateAttempt(tip, attemptId, ts, i == taskWithCleanup); - if (i == taskWithCleanup) { - // validate second attempt of the task - attemptId = new TaskAttemptID(new TaskID(jobId, TaskType.MAP, i), 1); - ts = mr.getJobTrackerRunner().getJobTracker().getTaskStatus(attemptId); - validateAttempt(tip, attemptId, ts, false); - } - } - } - - /** - * Runs job with jvm reuse and verifies that the logs for all attempts can be - * read properly. - * - * @throws IOException - */ - @Test - public void testTaskLogs() throws IOException { - MiniMRCluster mr = null; - try { - Configuration conf = new Configuration(); - final int taskTrackers = 1; // taskTrackers should be 1 to test jvm reuse. - conf.setInt("mapred.tasktracker.map.tasks.maximum", 1); - mr = new MiniMRCluster(taskTrackers, "file:///", 1); - - final Path inDir = new Path(rootDir, "input"); - final Path outDir = new Path(rootDir, "output"); - JobConf jobConf = mr.createJobConf(); - jobConf.setOutputCommitter(TestTaskFail.CommitterWithLogs.class); - RunningJob rJob = launchJob(jobConf, inDir, outDir); - rJob.waitForCompletion(); - validateJob(rJob, mr); - } finally { - if (mr != null) { - mr.shutdown(); - } - } - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestKillCompletedJob.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestKillCompletedJob.java deleted file mode 100644 index 8ce2283e7cd..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestKillCompletedJob.java +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.IOException; - -import junit.framework.TestCase; - -import org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobTracker; -import org.apache.hadoop.mapred.TestRackAwareTaskPlacement.MyFakeJobInProgress; -import org.apache.hadoop.mapred.UtilsForTests.FakeClock; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; - -/** - * A JUnit test to test that killing completed jobs does not move them - * to the failed sate - See JIRA HADOOP-2132 - */ -public class TestKillCompletedJob extends TestCase { - - MyFakeJobInProgress job; - static FakeJobTracker jobTracker; - - static FakeClock clock; - - static String trackers[] = new String[] {"tracker_tracker1:1000"}; - - @Override - protected void setUp() throws Exception { - JobConf conf = new JobConf(); - conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:0"); - conf.set(JTConfig.JT_HTTP_ADDRESS, "0.0.0.0:0"); - conf.setLong(JTConfig.JT_TRACKER_EXPIRY_INTERVAL, 1000); - jobTracker = new FakeJobTracker(conf, (clock = new FakeClock()), trackers); - } - - - @SuppressWarnings("deprecation") - public void testKillCompletedJob() throws IOException, InterruptedException { - job = new MyFakeJobInProgress(new JobConf(), jobTracker); - jobTracker.addJob(job.getJobID(), (JobInProgress)job); - job.status.setRunState(JobStatus.SUCCEEDED); - - jobTracker.killJob(job.getJobID()); - - assertTrue("Run state changed when killing completed job" , - job.status.getRunState() == JobStatus.SUCCEEDED); - - } - -} - diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcesses.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcesses.java deleted file mode 100644 index 00cf61cb54b..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestKillSubProcesses.java +++ /dev/null @@ -1,542 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.mapred; - -import java.io.DataOutputStream; -import java.io.File; -import java.io.IOException; -import java.util.Random; -import java.util.Iterator; -import java.util.StringTokenizer; - -import junit.framework.TestCase; - -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.io.WritableComparable; -import org.apache.hadoop.mapreduce.util.TestProcfsBasedProcessTree; - -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.ProcessTree; -import org.apache.hadoop.util.Shell; -import org.apache.hadoop.util.Shell.ExitCodeException; -import org.apache.hadoop.util.Shell.ShellCommandExecutor; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -/** - * A JUnit test to test Kill Job that has tasks with children and checks if the - * children(subprocesses of java task) are also killed when a task is killed. - */ -public class TestKillSubProcesses extends TestCase { - - private static volatile Log LOG = LogFactory - .getLog(TestKillSubProcesses.class); - - private static String BASE_TEST_ROOT_DIR = new File(System.getProperty( - "test.build.data", "/tmp")).getAbsolutePath(); - private static String TEST_ROOT_DIR = BASE_TEST_ROOT_DIR + Path.SEPARATOR - + "killSubProcesses"; - - private static Path scriptDir = new Path(TEST_ROOT_DIR, "script"); - private static String scriptDirName = scriptDir.toUri().getPath(); - private static Path signalFile = new Path(TEST_ROOT_DIR - + "/script/signalFile"); - - private static JobClient jobClient = null; - - static MiniMRCluster mr = null; - - private static String pid = null; - - // number of levels in the subtree of subprocesses of map task - private static int numLevelsOfSubProcesses = 4; - - /** - * Runs a job, kills the job and verifies if the map task and its - * subprocesses are also killed properly or not. - */ - private static void runKillingJobAndValidate(JobTracker jt, JobConf conf) throws IOException { - - conf.setJobName("testkilljobsubprocesses"); - conf.setMapperClass(KillingMapperWithChildren.class); - - RunningJob job = runJobAndSetProcessHandle(jt, conf); - - // kill the job now - job.killJob(); - - while (job.cleanupProgress() == 0.0f) { - try { - Thread.sleep(100); - } catch (InterruptedException ie) { - LOG.warn("sleep is interrupted:" + ie); - break; - } - } - - validateKillingSubprocesses(job, conf); - // Checking the Job status - assertEquals(job.getJobState(), JobStatus.KILLED); - } - - /** - * Runs a job that will fail and verifies if the subprocesses of failed map - * task are killed properly or not. - */ - private static void runFailingJobAndValidate(JobTracker jt, JobConf conf) throws IOException { - - conf.setJobName("testfailjobsubprocesses"); - conf.setMapperClass(FailingMapperWithChildren.class); - - // We don't want to run the failing map task 4 times. So we run it once and - // check if all the subprocesses are killed properly. - conf.setMaxMapAttempts(1); - - RunningJob job = runJobAndSetProcessHandle(jt, conf); - signalTask(signalFile.toString(), conf); - validateKillingSubprocesses(job, conf); - // Checking the Job status - assertEquals(job.getJobState(), JobStatus.FAILED); - } - - /** - * Runs a job that will succeed and verifies if the subprocesses of succeeded - * map task are killed properly or not. 
- */ - private static void runSuccessfulJobAndValidate(JobTracker jt, JobConf conf) - throws IOException { - - conf.setJobName("testsucceedjobsubprocesses"); - conf.setMapperClass(MapperWithChildren.class); - - RunningJob job = runJobAndSetProcessHandle(jt, conf); - signalTask(signalFile.toString(), conf); - validateKillingSubprocesses(job, conf); - // Checking the Job status - assertEquals(job.getJobState(), JobStatus.SUCCEEDED); - } - - /** - * Runs the given job and saves the pid of map task. - * Also checks if the subprocesses of map task are alive. - */ - private static RunningJob runJobAndSetProcessHandle(JobTracker jt, JobConf conf) - throws IOException { - RunningJob job = runJob(conf); - while (job.getJobState() != JobStatus.RUNNING) { - try { - Thread.sleep(100); - } catch (InterruptedException e) { - break; - } - } - - pid = null; - jobClient = new JobClient(conf); - - // get the taskAttemptID of the map task and use it to get the pid - // of map task - TaskReport[] mapReports = jobClient.getMapTaskReports(job.getID()); - - JobInProgress jip = jt.getJob(job.getID()); - for(TaskReport tr : mapReports) { - TaskInProgress tip = jip.getTaskInProgress(tr.getTaskID()); - - // for this tip, get active tasks of all attempts - while(tip.getActiveTasks().size() == 0) { - //wait till the activeTasks Tree is built - try { - Thread.sleep(500); - } catch (InterruptedException ie) { - LOG.warn("sleep is interrupted:" + ie); - break; - } - } - - for (Iterator it = - tip.getActiveTasks().keySet().iterator(); it.hasNext();) { - TaskAttemptID id = it.next(); - LOG.info("taskAttemptID of map task is " + id); - - while(pid == null) { - pid = mr.getTaskTrackerRunner(0).getTaskTracker().getPid(id); - if (pid == null) { - try { - Thread.sleep(500); - } catch(InterruptedException e) {} - } - } - LOG.info("pid of map task is " + pid); - //Checking if the map task is alive - assertTrue("Map is no more alive", isAlive(pid)); - LOG.info("The map task is alive before Job completion, as expected."); - } - } - - // Checking if the descendant processes of map task are alive - if(ProcessTree.isSetsidAvailable) { - String childPid = TestProcfsBasedProcessTree.getPidFromPidFile( - scriptDirName + "/childPidFile" + 0); - while(childPid == null) { - LOG.warn(scriptDirName + "/childPidFile" + 0 + " is null; Sleeping..."); - try { - Thread.sleep(500); - } catch (InterruptedException ie) { - LOG.warn("sleep is interrupted:" + ie); - break; - } - childPid = TestProcfsBasedProcessTree.getPidFromPidFile( - scriptDirName + "/childPidFile" + 0); - } - - // As childPidFile0(leaf process in the subtree of processes with - // map task as root) is created, all other child pid files should - // have been created already(See the script for details). - // Now check if the descendants of map task are alive. - for(int i=0; i <= numLevelsOfSubProcesses; i++) { - childPid = TestProcfsBasedProcessTree.getPidFromPidFile( - scriptDirName + "/childPidFile" + i); - LOG.info("pid of the descendant process at level " + i + - "in the subtree of processes(with the map task as the root)" + - " is " + childPid); - assertTrue("Unexpected: The subprocess at level " + i + - " in the subtree is not alive before Job completion", - isAlive(childPid)); - } - } - return job; - } - - /** - * Verifies if the subprocesses of the map task are killed properly. 
- */ - private static void validateKillingSubprocesses(RunningJob job, JobConf conf) - throws IOException { - // wait till the the job finishes - while (!job.isComplete()) { - try { - Thread.sleep(500); - } catch (InterruptedException e) { - break; - } - } - - // Checking if the map task got killed or not - assertTrue(!ProcessTree.isAlive(pid)); - LOG.info("The map task is not alive after Job is completed, as expected."); - - // Checking if the descendant processes of map task are killed properly - if(ProcessTree.isSetsidAvailable) { - for(int i=0; i <= numLevelsOfSubProcesses; i++) { - String childPid = TestProcfsBasedProcessTree.getPidFromPidFile( - scriptDirName + "/childPidFile" + i); - LOG.info("pid of the descendant process at level " + i + - "in the subtree of processes(with the map task as the root)" + - " is " + childPid); - assertTrue("Unexpected: The subprocess at level " + i + - " in the subtree is alive after Job completion", - !isAlive(childPid)); - } - } - FileSystem fs = FileSystem.getLocal(mr.createJobConf()); - if(fs.exists(scriptDir)) { - fs.delete(scriptDir, true); - } - } - - private static RunningJob runJob(JobConf conf) throws IOException { - - final Path inDir; - final Path outDir; - FileSystem fs = FileSystem.getLocal(conf); - FileSystem tempFs = FileSystem.get(conf); - //Check if test is run with hdfs or local file system. - //if local filesystem then prepend TEST_ROOT_DIR, otherwise - //killjob folder would be created in workspace root. - if (!tempFs.getUri().toASCIIString().equals( - fs.getUri().toASCIIString())) { - inDir = new Path("killjob/input"); - outDir = new Path("killjob/output"); - } else { - inDir = new Path(TEST_ROOT_DIR, "input"); - outDir = new Path(TEST_ROOT_DIR, "output"); - } - - - if(fs.exists(scriptDir)) { - fs.delete(scriptDir, true); - } - - conf.setNumMapTasks(1); - conf.setNumReduceTasks(0); - - conf.set(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, - conf.get(JobConf.MAPRED_MAP_TASK_JAVA_OPTS, - conf.get(JobConf.MAPRED_TASK_JAVA_OPTS)) + - " -Dtest.build.data=" + BASE_TEST_ROOT_DIR); - conf.set(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, - conf.get(JobConf.MAPRED_REDUCE_TASK_JAVA_OPTS, - conf.get(JobConf.MAPRED_TASK_JAVA_OPTS)) + - " -Dtest.build.data=" + BASE_TEST_ROOT_DIR); - - return UtilsForTests.runJob(conf, inDir, outDir); - } - - public void testJobKillFailAndSucceed() throws IOException { - if (Shell.WINDOWS) { - System.out.println( - "setsid doesn't work on WINDOWS as expected. Not testing"); - return; - } - - JobConf conf=null; - try { - mr = new MiniMRCluster(1, "file:///", 1); - - // run the TCs - conf = mr.createJobConf(); - JobTracker jt = mr.getJobTrackerRunner().getJobTracker(); - runTests(conf, jt); - } finally { - if (mr != null) { - mr.shutdown(); - } - } - } - - void runTests(JobConf conf, JobTracker jt) throws IOException { - FileSystem fs = FileSystem.getLocal(mr.createJobConf()); - Path rootDir = new Path(TEST_ROOT_DIR); - if(!fs.exists(rootDir)) { - fs.mkdirs(rootDir); - } - fs.setPermission(rootDir, - new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)); - runKillingJobAndValidate(jt, conf); - runFailingJobAndValidate(jt, conf); - runSuccessfulJobAndValidate(jt, conf); - } - - /** - * Creates signal file - */ - private static void signalTask(String signalFile, JobConf conf) { - try { - FileSystem fs = FileSystem.getLocal(conf); - fs.createNewFile(new Path(signalFile)); - } catch(IOException e) { - LOG.warn("Unable to create signal file. 
" + e); - } - } - - /** - * Runs a recursive shell script to create a chain of subprocesses - */ - private static void runChildren(JobConf conf) throws IOException { - if (ProcessTree.isSetsidAvailable) { - FileSystem fs = FileSystem.getLocal(conf); - - if (fs.exists(scriptDir)) { - fs.delete(scriptDir, true); - } - - // Create the directory and set open permissions so that the TT can - // access. - fs.mkdirs(scriptDir); - fs.setPermission(scriptDir, new FsPermission(FsAction.ALL, FsAction.ALL, - FsAction.ALL)); - - // create shell script - Random rm = new Random(); - Path scriptPath = new Path(scriptDirName, "_shellScript_" + rm.nextInt() - + ".sh"); - String shellScript = scriptPath.toString(); - - // Construct the script. Set umask to 0000 so that TT can access all the - // files. - String script = - "umask 000\n" + - "echo $$ > " + scriptDirName + "/childPidFile" + "$1\n" + - "echo hello\n" + - "trap 'echo got SIGTERM' 15 \n" + - "if [ $1 != 0 ]\nthen\n" + - " sh " + shellScript + " $(($1-1))\n" + - "else\n" + - " while true\n do\n" + - " sleep 2\n" + - " done\n" + - "fi"; - DataOutputStream file = fs.create(scriptPath); - file.writeBytes(script); - file.close(); - - // Set executable permissions on the script. - new File(scriptPath.toUri().getPath()).setExecutable(true); - - LOG.info("Calling script from map task : " + shellScript); - Runtime.getRuntime() - .exec(shellScript + " " + numLevelsOfSubProcesses); - - String childPid = TestProcfsBasedProcessTree.getPidFromPidFile(scriptDirName - + "/childPidFile" + 0); - while (childPid == null) { - LOG.warn(scriptDirName + "/childPidFile" + 0 + " is null; Sleeping..."); - try { - Thread.sleep(500); - } catch (InterruptedException ie) { - LOG.warn("sleep is interrupted:" + ie); - break; - } - childPid = TestProcfsBasedProcessTree.getPidFromPidFile(scriptDirName - + "/childPidFile" + 0); - } - } - } - - /** - * Mapper that starts children - */ - static class MapperWithChildren extends MapReduceBase implements - Mapper { - FileSystem fs = null; - public void configure(JobConf conf) { - try { - fs = FileSystem.getLocal(conf); - runChildren(conf); - } catch (Exception e) { - LOG.warn("Exception in configure: " + - StringUtils.stringifyException(e)); - } - } - - // Mapper waits for the signal(signal is the existence of a file) - public void map(WritableComparable key, Writable value, - OutputCollector out, Reporter reporter) - throws IOException { - while (!fs.exists(signalFile)) {// wait for signal file creation - try { - reporter.progress(); - synchronized (this) { - this.wait(1000); - } - } catch (InterruptedException ie) { - System.out.println("Interrupted while the map was waiting for " - + " the signal."); - break; - } - } - } - } - - /** - * Mapper that waits till it gets killed. - */ - static class KillingMapperWithChildren extends MapperWithChildren { - public void configure(JobConf conf) { - super.configure(conf); - } - - public void map(WritableComparable key, Writable value, - OutputCollector out, Reporter reporter) - throws IOException { - - try { - while(true) {//just wait till kill happens - Thread.sleep(1000); - } - } catch (InterruptedException e) { - LOG.warn("Exception in KillMapperWithChild.map:" + e); - } - } - } - - /** - * Mapper that fails when receives a signal. Signal is existence of a file. 
- */ - static class FailingMapperWithChildren extends MapperWithChildren { - public void configure(JobConf conf) { - super.configure(conf); - } - - public void map(WritableComparable key, Writable value, - OutputCollector out, Reporter reporter) - throws IOException { - while (!fs.exists(signalFile)) {// wait for signal file creation - try { - reporter.progress(); - synchronized (this) { - this.wait(1000); - } - } catch (InterruptedException ie) { - System.out.println("Interrupted while the map was waiting for " - + " the signal."); - break; - } - } - throw new RuntimeException("failing map"); - } - } - - /** - * Check for presence of the process with the pid passed is alive or not - * currently. - * - * @param pid pid of the process - * @return if a process is alive or not. - */ - private static boolean isAlive(String pid) throws IOException { - String commandString ="ps -o pid,command -e"; - String args[] = new String[] {"bash", "-c" , commandString}; - ShellCommandExecutor shExec = new ShellCommandExecutor(args); - try { - shExec.execute(); - }catch(ExitCodeException e) { - return false; - } catch (IOException e) { - LOG.warn("IOExecption thrown while checking if process is alive" + - StringUtils.stringifyException(e)); - throw e; - } - - String output = shExec.getOutput(); - - //Parse the command output and check for pid, ignore the commands - //which has ps or grep in it. - StringTokenizer strTok = new StringTokenizer(output, "\n"); - boolean found = false; - while(strTok.hasMoreTokens()) { - StringTokenizer pidToken = new StringTokenizer(strTok.nextToken(), - " "); - String pidStr = pidToken.nextToken(); - String commandStr = pidToken.nextToken(); - if(pid.equals(pidStr) && !(commandStr.contains("ps") - || commandStr.contains("grep"))) { - found = true; - break; - } - } - return found; - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestLimitTasksPerJobTaskScheduler.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestLimitTasksPerJobTaskScheduler.java deleted file mode 100644 index 2d2e45d3377..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestLimitTasksPerJobTaskScheduler.java +++ /dev/null @@ -1,125 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import java.io.IOException; - -import junit.framework.TestCase; - -import org.apache.hadoop.mapred.TestJobQueueTaskScheduler.FakeTaskTrackerManager; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; - -public class TestLimitTasksPerJobTaskScheduler extends TestCase { - protected JobConf jobConf; - protected TaskScheduler scheduler; - private FakeTaskTrackerManager taskTrackerManager; - - @Override - protected void setUp() throws Exception { - TestJobQueueTaskScheduler.resetCounters(); - jobConf = new JobConf(); - jobConf.setNumMapTasks(10); - jobConf.setNumReduceTasks(10); - taskTrackerManager = new FakeTaskTrackerManager(); - scheduler = createTaskScheduler(); - scheduler.setConf(jobConf); - scheduler.setTaskTrackerManager(taskTrackerManager); - scheduler.start(); - } - - @Override - protected void tearDown() throws Exception { - if (scheduler != null) { - scheduler.terminate(); - } - } - - protected TaskScheduler createTaskScheduler() { - return new LimitTasksPerJobTaskScheduler(); - } - - public void testMaxRunningTasksPerJob() throws IOException { - jobConf.setLong(JTConfig.JT_RUNNINGTASKS_PER_JOB, - 4L); - scheduler.setConf(jobConf); - TestJobQueueTaskScheduler.submitJobs(taskTrackerManager, jobConf, - 2, JobStatus.RUNNING); - - // First 4 slots are filled with job 1, second 4 with job 2 - TestJobQueueTaskScheduler.checkAssignment( - scheduler, TestJobQueueTaskScheduler.tracker(taskTrackerManager, "tt1"), - new String[] {"attempt_test_0001_m_000001_0 on tt1"}); - TestJobQueueTaskScheduler.checkAssignment( - scheduler, TestJobQueueTaskScheduler.tracker(taskTrackerManager, "tt1"), - new String[] {"attempt_test_0001_m_000002_0 on tt1"}); - TestJobQueueTaskScheduler.checkAssignment( - scheduler, TestJobQueueTaskScheduler.tracker(taskTrackerManager, "tt1"), - new String[] {"attempt_test_0001_r_000003_0 on tt1"}); - TestJobQueueTaskScheduler.checkAssignment( - scheduler, TestJobQueueTaskScheduler.tracker(taskTrackerManager, "tt1"), - new String[] {"attempt_test_0001_r_000004_0 on tt1"}); - TestJobQueueTaskScheduler.checkAssignment( - scheduler, TestJobQueueTaskScheduler.tracker(taskTrackerManager, "tt2"), - new String[] {"attempt_test_0002_m_000005_0 on tt2"}); - TestJobQueueTaskScheduler.checkAssignment( - scheduler, TestJobQueueTaskScheduler.tracker(taskTrackerManager, "tt2"), - new String[] {"attempt_test_0002_m_000006_0 on tt2"}); - TestJobQueueTaskScheduler.checkAssignment( - scheduler, TestJobQueueTaskScheduler.tracker(taskTrackerManager, "tt2"), - new String[] {"attempt_test_0002_r_000007_0 on tt2"}); - TestJobQueueTaskScheduler.checkAssignment( - scheduler, TestJobQueueTaskScheduler.tracker(taskTrackerManager, "tt2"), - new String[] {"attempt_test_0002_r_000008_0 on tt2"}); - } - - public void testMaxRunningTasksPerJobWithInterleavedTrackers() - throws IOException { - jobConf.setLong(JTConfig.JT_RUNNINGTASKS_PER_JOB, - 4L); - scheduler.setConf(jobConf); - TestJobQueueTaskScheduler.submitJobs(taskTrackerManager, jobConf, 2, JobStatus.RUNNING); - - // First 4 slots are filled with job 1, second 4 with job 2 - // even when tracker requests are interleaved - TestJobQueueTaskScheduler.checkAssignment( - scheduler, TestJobQueueTaskScheduler.tracker(taskTrackerManager, "tt1"), - new String[] {"attempt_test_0001_m_000001_0 on tt1"}); - TestJobQueueTaskScheduler.checkAssignment( - scheduler, TestJobQueueTaskScheduler.tracker(taskTrackerManager, "tt1"), - new String[] {"attempt_test_0001_m_000002_0 on tt1"}); - 
TestJobQueueTaskScheduler.checkAssignment( - scheduler, TestJobQueueTaskScheduler.tracker(taskTrackerManager, "tt2"), - new String[] {"attempt_test_0001_m_000003_0 on tt2"}); - TestJobQueueTaskScheduler.checkAssignment( - scheduler, TestJobQueueTaskScheduler.tracker(taskTrackerManager, "tt1"), - new String[] {"attempt_test_0001_r_000004_0 on tt1"}); - TestJobQueueTaskScheduler.checkAssignment( - scheduler, TestJobQueueTaskScheduler.tracker(taskTrackerManager, "tt2"), - new String[] {"attempt_test_0002_m_000005_0 on tt2"}); - TestJobQueueTaskScheduler.checkAssignment( - scheduler, TestJobQueueTaskScheduler.tracker(taskTrackerManager, "tt1"), - new String[] {"attempt_test_0002_r_000006_0 on tt1"}); - TestJobQueueTaskScheduler.checkAssignment( - scheduler, TestJobQueueTaskScheduler.tracker(taskTrackerManager, "tt2"), - new String[] {"attempt_test_0002_r_000007_0 on tt2"}); - TestJobQueueTaskScheduler.checkAssignment( - scheduler, TestJobQueueTaskScheduler.tracker(taskTrackerManager, "tt2"), - new String[] {"attempt_test_0002_r_000008_0 on tt2"}); - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestLostTracker.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestLostTracker.java deleted file mode 100644 index e6f1bb9f5bd..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestLostTracker.java +++ /dev/null @@ -1,231 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.IOException; - -import junit.framework.TestCase; - -import org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobInProgress; -import org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobTracker; -import org.apache.hadoop.mapred.UtilsForTests.FakeClock; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; - -/** - * A test to verify JobTracker's resilience to lost task trackers. 
- * - */ -@SuppressWarnings("deprecation") -public class TestLostTracker extends TestCase { - - FakeJobInProgress job; - static FakeJobTracker jobTracker; - - static FakeClock clock; - - static String trackers[] = new String[] {"tracker_tracker1:1000", - "tracker_tracker2:1000"}; - - @Override - protected void setUp() throws Exception { - JobConf conf = new JobConf(); - conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:0"); - conf.set(JTConfig.JT_HTTP_ADDRESS, "0.0.0.0:0"); - conf.setLong(JTConfig.JT_TRACKER_EXPIRY_INTERVAL, 1000); - conf.set(JTConfig.JT_MAX_TRACKER_BLACKLISTS, "1"); - jobTracker = new FakeJobTracker(conf, (clock = new FakeClock()), trackers); - jobTracker.startExpireTrackersThread(); - } - - @Override - protected void tearDown() throws Exception { - jobTracker.stopExpireTrackersThread(); - } - - public void testLostTracker() throws IOException { - // Tracker 0 contacts JT - FakeObjectUtilities.establishFirstContact(jobTracker, trackers[0]); - - TaskAttemptID[] tid = new TaskAttemptID[2]; - JobConf conf = new JobConf(); - conf.setNumMapTasks(1); - conf.setNumReduceTasks(1); - FakeJobInProgress job = new FakeJobInProgress(conf, jobTracker); - job.initTasks(); - - // Tracker 0 gets the map task - tid[0] = job.findMapTask(trackers[0]); - - job.finishTask(tid[0]); - - // Advance clock. Tracker 0 would have got lost - clock.advance(8 * 1000); - - jobTracker.checkExpiredTrackers(); - - // Tracker 1 establishes contact with JT - FakeObjectUtilities.establishFirstContact(jobTracker, trackers[1]); - - // Tracker1 should get assigned the lost map task - tid[1] = job.findMapTask(trackers[1]); - - assertNotNull("Map Task from Lost Tracker did not get reassigned", tid[1]); - - assertEquals("Task ID of reassigned map task does not match", - tid[0].getTaskID().toString(), tid[1].getTaskID().toString()); - - job.finishTask(tid[1]); - - } - - /** - * Test whether the tracker gets blacklisted after its lost. 
- */ - public void testLostTrackerBeforeBlacklisting() throws Exception { - FakeObjectUtilities.establishFirstContact(jobTracker, trackers[0]); - TaskAttemptID[] tid = new TaskAttemptID[3]; - JobConf conf = new JobConf(); - conf.setNumMapTasks(1); - conf.setNumReduceTasks(1); - conf.set(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, "1"); - conf.set(MRJobConfig.SETUP_CLEANUP_NEEDED, "false"); - FakeJobInProgress job = new FakeJobInProgress(conf, jobTracker); - job.initTasks(); - job.setClusterSize(4); - - // Tracker 0 gets the map task - tid[0] = job.findMapTask(trackers[0]); - - job.finishTask(tid[0]); - - // validate the total tracker count - assertEquals("Active tracker count mismatch", - 1, jobTracker.getClusterStatus(false).getTaskTrackers()); - - // lose the tracker - clock.advance(1100); - jobTracker.checkExpiredTrackers(); - assertFalse("Tracker 0 not lost", - jobTracker.getClusterStatus(false).getActiveTrackerNames() - .contains(trackers[0])); - - // validate the total tracker count - assertEquals("Active tracker count mismatch", - 0, jobTracker.getClusterStatus(false).getTaskTrackers()); - - // Tracker 1 establishes contact with JT - FakeObjectUtilities.establishFirstContact(jobTracker, trackers[1]); - - // Tracker1 should get assigned the lost map task - tid[1] = job.findMapTask(trackers[1]); - - assertNotNull("Map Task from Lost Tracker did not get reassigned", tid[1]); - - assertEquals("Task ID of reassigned map task does not match", - tid[0].getTaskID().toString(), tid[1].getTaskID().toString()); - - // finish the map task - job.finishTask(tid[1]); - - // finish the reduce task - tid[2] = job.findReduceTask(trackers[1]); - job.finishTask(tid[2]); - - // check if job is successful - assertEquals("Job not successful", - JobStatus.SUCCEEDED, job.getStatus().getRunState()); - - // check if the tracker is lost - // validate the total tracker count - assertEquals("Active tracker count mismatch", - 1, jobTracker.getClusterStatus(false).getTaskTrackers()); - // validate blacklisted count .. since we lost one blacklisted tracker - assertEquals("Blacklisted tracker count mismatch", - 0, jobTracker.getClusterStatus(false).getBlacklistedTrackers()); - } - - /** - * Test whether the tracker gets lost after its blacklisted. 
- */ - public void testLostTrackerAfterBlacklisting() throws Exception { - FakeObjectUtilities.establishFirstContact(jobTracker, trackers[0]); - clock.advance(600); - TaskAttemptID[] tid = new TaskAttemptID[2]; - JobConf conf = new JobConf(); - conf.setNumMapTasks(1); - conf.setNumReduceTasks(0); - conf.set(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, "1"); - conf.set(MRJobConfig.SETUP_CLEANUP_NEEDED, "false"); - FakeJobInProgress job = new FakeJobInProgress(conf, jobTracker); - job.initTasks(); - job.setClusterSize(4); - - // check if the tracker count is correct - assertEquals("Active tracker count mismatch", - 1, jobTracker.taskTrackers().size()); - - // Tracker 0 gets the map task - tid[0] = job.findMapTask(trackers[0]); - // Fail the task - job.failTask(tid[0]); - - // Tracker 1 establishes contact with JT - FakeObjectUtilities.establishFirstContact(jobTracker, trackers[1]); - // check if the tracker count is correct - assertEquals("Active tracker count mismatch", - 2, jobTracker.taskTrackers().size()); - - // Tracker 1 gets the map task - tid[1] = job.findMapTask(trackers[1]); - // Finish the task and also the job - job.finishTask(tid[1]); - - // check if job is successful - assertEquals("Job not successful", - JobStatus.SUCCEEDED, job.getStatus().getRunState()); - - // check if the trackers 1 got blacklisted - assertTrue("Tracker 0 not blacklisted", - jobTracker.getBlacklistedTrackers()[0].getTaskTrackerName() - .equals(trackers[0])); - // check if the tracker count is correct - assertEquals("Active tracker count mismatch", - 2, jobTracker.taskTrackers().size()); - // validate blacklisted count - assertEquals("Blacklisted tracker count mismatch", - 1, jobTracker.getClusterStatus(false).getBlacklistedTrackers()); - - // Advance clock. Tracker 0 should be lost - clock.advance(500); - jobTracker.checkExpiredTrackers(); - - // check if the task tracker is lost - assertFalse("Tracker 0 not lost", - jobTracker.getClusterStatus(false).getActiveTrackerNames() - .contains(trackers[0])); - - // check if the lost tracker has removed from the jobtracker - assertEquals("Active tracker count mismatch", - 1, jobTracker.taskTrackers().size()); - // validate blacklisted count - assertEquals("Blacklisted tracker count mismatch", - 0, jobTracker.getClusterStatus(false).getBlacklistedTrackers()); - - } -} \ No newline at end of file diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMRServerPorts.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMRServerPorts.java deleted file mode 100644 index 2fab1460eac..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMRServerPorts.java +++ /dev/null @@ -1,210 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import java.io.IOException; - -import javax.security.auth.login.LoginException; - -import junit.framework.TestCase; -import org.apache.hadoop.hdfs.TestHDFSServerPorts; -import org.apache.hadoop.hdfs.server.datanode.DataNode; -import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; - -/** - * This test checks correctness of port usage by mapreduce components: - * JobTracker, and TaskTracker. - * - * The correct behavior is:
- * - when a specific port is provided the server must either start on that port - * or fail by throwing {@link java.net.BindException}.
- * - if the port = 0 (ephemeral) then the server should choose - * a free port and start on it. - */ -public class TestMRServerPorts extends TestCase { - static final String THIS_HOST = TestHDFSServerPorts.getFullHostName() + ":0"; - - TestHDFSServerPorts hdfs = new TestHDFSServerPorts(); - - // Runs the JT in a separate thread - private static class JTRunner extends Thread { - JobTracker jt; - void setJobTracker(JobTracker jt) { - this.jt = jt; - } - - public void run() { - if (jt != null) { - try { - jt.offerService(); - } catch (Exception ioe) {} - } - } - } - /** - * Check whether the JobTracker can be started. - */ - private JobTracker startJobTracker(JobConf conf, JTRunner runner) - throws IOException, LoginException { - conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:0"); - conf.set(JTConfig.JT_HTTP_ADDRESS, "0.0.0.0:0"); - JobTracker jt = null; - try { - jt = JobTracker.startTracker(conf); - runner.setJobTracker(jt); - runner.start(); - conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:" + jt.getTrackerPort()); - conf.set(JTConfig.JT_HTTP_ADDRESS, - "0.0.0.0:" + jt.getInfoPort()); - } catch(InterruptedException e) { - throw new IOException(e.getLocalizedMessage()); - } - return jt; - } - - private void setDataNodePorts(Configuration conf) { - conf.set("dfs.datanode.address", THIS_HOST); - conf.set("dfs.datanode.http.address", THIS_HOST); - conf.set("dfs.datanode.ipc.address", THIS_HOST); - } - - /** - * Check whether the JobTracker can be started. - */ - private boolean canStartJobTracker(JobConf conf) - throws IOException, InterruptedException, LoginException { - JobTracker jt = null; - try { - jt = JobTracker.startTracker(conf); - } catch(IOException e) { - if (e instanceof java.net.BindException) - return false; - throw e; - } - jt.fs.close(); - jt.stopTracker(); - return true; - } - - /** - * Check whether the TaskTracker can be started. - */ - private boolean canStartTaskTracker(JobConf conf) - throws IOException, InterruptedException { - TaskTracker tt = null; - try { - tt = new TaskTracker(conf); - } catch(IOException e) { - if (e instanceof java.net.BindException) - return false; - throw e; - } - tt.shutdown(); - return true; - } - - /** - * Verify JobTracker port usage. - */ - public void testJobTrackerPorts() throws Exception { - NameNode nn = null; - DataNode dn = null; - try { - nn = hdfs.startNameNode(); - setDataNodePorts(hdfs.getConfig()); - dn = hdfs.startDataNode(1, hdfs.getConfig()); - - // start job tracker on the same port as name-node - JobConf conf2 = new JobConf(hdfs.getConfig()); - conf2.set(JTConfig.JT_IPC_ADDRESS, - FileSystem.getDefaultUri(hdfs.getConfig()).toString()); - conf2.set(JTConfig.JT_HTTP_ADDRESS, THIS_HOST); - boolean started = canStartJobTracker(conf2); - assertFalse(started); // should fail - - // bind http server to the same port as name-node - conf2.set(JTConfig.JT_IPC_ADDRESS, THIS_HOST); - conf2.set(JTConfig.JT_HTTP_ADDRESS, - hdfs.getConfig().get("dfs.http.address")); - started = canStartJobTracker(conf2); - assertFalse(started); // should fail again - - // both ports are different from the name-node ones - conf2.set(JTConfig.JT_IPC_ADDRESS, THIS_HOST); - conf2.set(JTConfig.JT_HTTP_ADDRESS, THIS_HOST); - started = canStartJobTracker(conf2); - assertTrue(started); // should start now - - } finally { - hdfs.stopDataNode(dn); - hdfs.stopNameNode(nn); - } - } - - /** - * Verify JobTracker port usage. 
- */ - public void testTaskTrackerPorts() throws Exception { - NameNode nn = null; - DataNode dn = null; - JobTracker jt = null; - JTRunner runner = null; - try { - nn = hdfs.startNameNode(); - setDataNodePorts(hdfs.getConfig()); - dn = hdfs.startDataNode(2, hdfs.getConfig()); - - JobConf conf2 = new JobConf(hdfs.getConfig()); - runner = new JTRunner(); - jt = startJobTracker(conf2, runner); - - // start job tracker on the same port as name-node - conf2.set(TTConfig.TT_REPORT_ADDRESS, - FileSystem.getDefaultUri(hdfs.getConfig()).toString()); - conf2.set(TTConfig.TT_HTTP_ADDRESS, THIS_HOST); - boolean started = canStartTaskTracker(conf2); - assertFalse(started); // should fail - - // bind http server to the same port as name-node - conf2.set(TTConfig.TT_REPORT_ADDRESS, THIS_HOST); - conf2.set(TTConfig.TT_HTTP_ADDRESS, - hdfs.getConfig().get("dfs.http.address")); - started = canStartTaskTracker(conf2); - assertFalse(started); // should fail again - - // both ports are different from the name-node ones - conf2.set(TTConfig.TT_REPORT_ADDRESS, THIS_HOST); - conf2.set(TTConfig.TT_HTTP_ADDRESS, THIS_HOST); - started = canStartTaskTracker(conf2); - assertTrue(started); // should start now - } finally { - if (jt != null) { - jt.fs.close(); - jt.stopTracker(); - runner.interrupt(); - runner.join(); - } - hdfs.stopDataNode(dn); - hdfs.stopNameNode(nn); - } - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMapredHeartbeat.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMapredHeartbeat.java deleted file mode 100644 index 1b2d8f846cf..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMapredHeartbeat.java +++ /dev/null @@ -1,111 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import java.io.IOException; - -import junit.framework.TestCase; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.examples.RandomWriter; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; -import org.apache.hadoop.hdfs.MiniDFSCluster; - -public class TestMapredHeartbeat extends TestCase { - public void testJobDirCleanup() throws IOException { - MiniMRCluster mr = null; - try { - // test the default heartbeat interval - int taskTrackers = 2; - JobConf conf = new JobConf(); - mr = new MiniMRCluster(taskTrackers, "file:///", 3, - null, null, conf); - JobClient jc = new JobClient(mr.createJobConf()); - while(jc.getClusterStatus().getTaskTrackers() != taskTrackers) { - UtilsForTests.waitFor(100); - } - assertEquals(JTConfig.JT_HEARTBEAT_INTERVAL_MIN_DEFAULT, - mr.getJobTrackerRunner().getJobTracker().getNextHeartbeatInterval()); - mr.shutdown(); - - // test configured heartbeat interval - taskTrackers = 5; - conf.setInt(JTConfig.JT_HEARTBEATS_IN_SECOND, 1); - mr = new MiniMRCluster(taskTrackers, "file:///", 3, - null, null, conf); - jc = new JobClient(mr.createJobConf()); - while(jc.getClusterStatus().getTaskTrackers() != taskTrackers) { - UtilsForTests.waitFor(100); - } - assertEquals(taskTrackers * 1000, - mr.getJobTrackerRunner().getJobTracker().getNextHeartbeatInterval()); - mr.shutdown(); - - // test configured heartbeat interval is capped with min value - taskTrackers = 5; - conf.setInt(JTConfig.JT_HEARTBEATS_IN_SECOND, 100); - mr = new MiniMRCluster(taskTrackers, "file:///", 3, - null, null, conf); - jc = new JobClient(mr.createJobConf()); - while(jc.getClusterStatus().getTaskTrackers() != taskTrackers) { - UtilsForTests.waitFor(100); - } - assertEquals(JTConfig.JT_HEARTBEAT_INTERVAL_MIN_DEFAULT, - mr.getJobTrackerRunner().getJobTracker().getNextHeartbeatInterval()); - } finally { - if (mr != null) { mr.shutdown(); } - } - } - - public void testOutOfBandHeartbeats() throws Exception { - MiniDFSCluster dfs = null; - MiniMRCluster mr = null; - try { - Configuration conf = new Configuration(); - dfs = new MiniDFSCluster(conf, 4, true, null); - - int taskTrackers = 1; - JobConf jobConf = new JobConf(); - jobConf.setFloat(JTConfig.JT_HEARTBEATS_SCALING_FACTOR, 30.0f); - jobConf.setBoolean(TTConfig.TT_OUTOFBAND_HEARBEAT, true); - mr = new MiniMRCluster(taskTrackers, - dfs.getFileSystem().getUri().toString(), 3, - null, null, jobConf); - long start = System.currentTimeMillis(); - TestMiniMRDFSSort.runRandomWriter(mr.createJobConf(), new Path("rw")); - long end = System.currentTimeMillis(); - - final int expectedRuntimeSecs = 120; - final int runTimeSecs = (int)((end-start) / 1000); - System.err.println("Runtime is " + runTimeSecs); - assertEquals("Actual runtime " + runTimeSecs + "s not less than expected " + - "runtime of " + expectedRuntimeSecs + "s!", - true, (runTimeSecs <= 120)); - } finally { - if (mr != null) { mr.shutdown(); } - if (dfs != null) { dfs.shutdown(); } - } - } - -} - - diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMapredSystemDir.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMapredSystemDir.java deleted file mode 100644 index e7b2a73be01..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMapredSystemDir.java +++ 
/dev/null @@ -1,114 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.IOException; -import java.security.PrivilegedExceptionAction; - -import junit.framework.TestCase; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.security.*; - -/** - * Test if JobTracker is resilient to garbage in {@link JTConfig#JT_SYSTEM_DIR} - */ -public class TestMapredSystemDir extends TestCase { - private static final Log LOG = LogFactory.getLog(TestMapredSystemDir.class); - - // dfs ugi - private static final UserGroupInformation DFS_UGI = - TestMiniMRWithDFSWithDistinctUsers.createUGI("dfs", true); - // mapred ugi - private static final UserGroupInformation MR_UGI = - TestMiniMRWithDFSWithDistinctUsers.createUGI("mr", false); - private static final FsPermission SYSTEM_DIR_PARENT_PERMISSION = - FsPermission.createImmutable((short) 0755); // rwxr-xr-x - private static final FsPermission SYSTEM_DIR_PERMISSION = - FsPermission.createImmutable((short) 0700); // rwx------ - - public void testGarbledMapredSystemDir() throws Exception { - Configuration conf = new Configuration(); - final MiniDFSCluster dfs = new MiniDFSCluster(conf, 1, true, null); - MiniMRCluster mr = null; - try { - // start dfs - conf.set("dfs.permissions.supergroup", "supergroup"); - FileSystem fs = DFS_UGI.doAs(new PrivilegedExceptionAction() { - public FileSystem run() throws IOException { - return dfs.getFileSystem(); - } - }); - - - // create Configs.SYSTEM_DIR's parent with restrictive permissions. - // So long as the JT has access to the system dir itself it should - // be able to start. 
- Path mapredSysDir = new Path(conf.get(JTConfig.JT_SYSTEM_DIR)); - Path parentDir = mapredSysDir.getParent(); - fs.mkdirs(parentDir); - fs.setPermission(parentDir, - new FsPermission(SYSTEM_DIR_PARENT_PERMISSION)); - fs.mkdirs(mapredSysDir); - fs.setPermission(mapredSysDir, new FsPermission(SYSTEM_DIR_PERMISSION)); - fs.setOwner(mapredSysDir, "mr", "mrgroup"); - - // start mr (i.e jobtracker) - Configuration mrConf = new Configuration(conf); - mr = new MiniMRCluster(0, 0, 0, dfs.getFileSystem().getUri().toString(), - 1, null, null, MR_UGI, new JobConf(mrConf)); - JobTracker jobtracker = mr.getJobTrackerRunner().getJobTracker(); - - // add garbage to Configs.SYSTEM_DIR - Path garbage = new Path(jobtracker.getSystemDir(), "garbage"); - fs.mkdirs(garbage); - fs.setPermission(garbage, new FsPermission(SYSTEM_DIR_PERMISSION)); - fs.setOwner(garbage, "test", "test-group"); - - // stop the jobtracker - mr.stopJobTracker(); - mr.getJobTrackerConf().setBoolean(JTConfig.JT_RESTART_ENABLED, - false); - // start jobtracker but dont wait for it to be up - mr.startJobTracker(false); - - // check 5 times .. each time wait for 2 secs to check if the jobtracker - // has crashed or not. - for (int i = 0; i < 5; ++i) { - LOG.info("Check #" + i); - if (!mr.getJobTrackerRunner().isActive()) { - return; - } - UtilsForTests.waitFor(2000); - } - - assertFalse("JobTracker did not bail out (waited for 10 secs)", - mr.getJobTrackerRunner().isActive()); - } finally { - if (dfs != null) { dfs.shutdown(); } - if (mr != null) { mr.shutdown();} - } - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRDFSSort.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRDFSSort.java deleted file mode 100644 index 1b860437880..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRDFSSort.java +++ /dev/null @@ -1,237 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.mapred; - -import java.io.DataOutputStream; -import java.io.IOException; - -import junit.extensions.TestSetup; -import junit.framework.Test; -import junit.framework.TestCase; -import junit.framework.TestSuite; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.io.BytesWritable; -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapred.lib.IdentityMapper; -import org.apache.hadoop.mapred.lib.IdentityReducer; -import org.apache.hadoop.mapred.lib.NullOutputFormat; -import org.apache.hadoop.mapreduce.FileSystemCounter; -import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.examples.RandomWriter; -import org.apache.hadoop.examples.Sort; - -/** - * A JUnit test to test the Map-Reduce framework's sort - * with a Mini Map-Reduce Cluster with a Mini HDFS Clusters. - */ -public class TestMiniMRDFSSort extends TestCase { - // Input/Output paths for sort - private static final Path SORT_INPUT_PATH = new Path("/sort/input"); - private static final Path SORT_OUTPUT_PATH = new Path("/sort/output"); - - // Knobs to control randomwriter; and hence sort - private static final int NUM_HADOOP_SLAVES = 3; - // make it big enough to cause a spill in the map - private static final int RW_BYTES_PER_MAP = 3 * 1024 * 1024; - private static final int RW_MAPS_PER_HOST = 2; - - private static MiniMRCluster mrCluster = null; - private static MiniDFSCluster dfsCluster = null; - private static FileSystem dfs = null; - public static Test suite() { - TestSetup setup = new TestSetup(new TestSuite(TestMiniMRDFSSort.class)) { - protected void setUp() throws Exception { - Configuration conf = new Configuration(); - dfsCluster = new MiniDFSCluster(conf, NUM_HADOOP_SLAVES, true, null); - dfs = dfsCluster.getFileSystem(); - mrCluster = new MiniMRCluster(NUM_HADOOP_SLAVES, - dfs.getUri().toString(), 1); - } - protected void tearDown() throws Exception { - if (dfsCluster != null) { dfsCluster.shutdown(); } - if (mrCluster != null) { mrCluster.shutdown(); } - } - }; - return setup; - } - - public static void runRandomWriter(JobConf job, Path sortInput) - throws Exception { - // Scale down the default settings for RandomWriter for the test-case - // Generates NUM_HADOOP_SLAVES * RW_MAPS_PER_HOST * RW_BYTES_PER_MAP - job.setInt(RandomWriter.BYTES_PER_MAP, RW_BYTES_PER_MAP); - job.setInt(RandomWriter.MAPS_PER_HOST, RW_MAPS_PER_HOST); - String[] rwArgs = {sortInput.toString()}; - - // Run RandomWriter - assertEquals(ToolRunner.run(job, new RandomWriter(), rwArgs), 0); - } - - private static void runSort(JobConf job, Path sortInput, Path sortOutput) - throws Exception { - - job.setInt(JobContext.JVM_NUMTASKS_TORUN, -1); - job.setInt(JobContext.IO_SORT_MB, 1); - job.setNumMapTasks(12); - - // Setup command-line arguments to 'sort' - String[] sortArgs = {sortInput.toString(), sortOutput.toString()}; - - // Run Sort - Sort sort = new Sort(); - assertEquals(ToolRunner.run(job, sort, sortArgs), 0); - org.apache.hadoop.mapreduce.Counters counters = sort.getResult().getCounters(); - long mapInput = counters.findCounter(FileInputFormatCounter.BYTES_READ) - .getValue(); - long hdfsRead = counters.findCounter("hdfs", FileSystemCounter.BYTES_READ) - .getValue(); - // the hdfs read should be between 100% and 110% of the map input bytes - 
assertTrue("map input = " + mapInput + ", hdfs read = " + hdfsRead, - (hdfsRead < (mapInput * 1.1)) && - (hdfsRead >= mapInput)); - } - - private static void runSortValidator(JobConf job, - Path sortInput, Path sortOutput) - throws Exception { - String[] svArgs = {"-sortInput", sortInput.toString(), - "-sortOutput", sortOutput.toString()}; - - // Run Sort-Validator - assertEquals(ToolRunner.run(job, new SortValidator(), svArgs), 0); - } - - private static class ReuseDetector extends MapReduceBase - implements Mapper { - static int instances = 0; - Reporter reporter = null; - - @Override - public void map(BytesWritable key, BytesWritable value, - OutputCollector output, - Reporter reporter) throws IOException { - this.reporter = reporter; - } - - public void close() throws IOException { - reporter.incrCounter("jvm", "use", ++instances); - } - } - - private static void runJvmReuseTest(JobConf job, - boolean reuse) throws IOException { - // setup a map-only job that reads the input and only sets the counters - // based on how many times the jvm was reused. - job.setInt(JobContext.JVM_NUMTASKS_TORUN, reuse ? -1 : 1); - FileInputFormat.setInputPaths(job, SORT_INPUT_PATH); - job.setInputFormat(SequenceFileInputFormat.class); - job.setOutputFormat(NullOutputFormat.class); - job.setMapperClass(ReuseDetector.class); - job.setOutputKeyClass(Text.class); - job.setOutputValueClass(Text.class); - job.setNumMapTasks(24); - job.setNumReduceTasks(0); - RunningJob result = JobClient.runJob(job); - long uses = result.getCounters().findCounter("jvm", "use").getValue(); - int maps = job.getNumMapTasks(); - if (reuse) { - assertTrue("maps = " + maps + ", uses = " + uses, maps < uses); - } else { - assertEquals("uses should be number of maps", job.getNumMapTasks(), uses); - } - } - - public void testMapReduceSort() throws Exception { - // Run randomwriter to generate input for 'sort' - runRandomWriter(mrCluster.createJobConf(), SORT_INPUT_PATH); - - // Run sort - runSort(mrCluster.createJobConf(), SORT_INPUT_PATH, SORT_OUTPUT_PATH); - - // Run sort-validator to check if sort worked correctly - runSortValidator(mrCluster.createJobConf(), SORT_INPUT_PATH, - SORT_OUTPUT_PATH); - } - - public void testJvmReuse() throws Exception { - runJvmReuseTest(mrCluster.createJobConf(), true); - } - - public void testNoJvmReuse() throws Exception { - runJvmReuseTest(mrCluster.createJobConf(), false); - } - - private static class BadPartitioner - implements Partitioner { - boolean low; - public void configure(JobConf conf) { - low = conf.getBoolean("test.testmapred.badpartition", true); - } - public int getPartition(LongWritable k, Text v, int numPartitions) { - return low ? 
-1 : numPartitions; - } - } - - public void testPartitioner() throws Exception { - JobConf conf = mrCluster.createJobConf(); - conf.setPartitionerClass(BadPartitioner.class); - conf.setNumReduceTasks(3); - FileSystem fs = FileSystem.get(conf); - Path testdir = - new Path("blah").makeQualified(fs.getUri(), fs.getWorkingDirectory()); - Path inFile = new Path(testdir, "blah"); - DataOutputStream f = fs.create(inFile); - f.writeBytes("blah blah blah\n"); - f.close(); - FileInputFormat.setInputPaths(conf, inFile); - FileOutputFormat.setOutputPath(conf, new Path(testdir, "out")); - conf.setMapperClass(IdentityMapper.class); - conf.setReducerClass(IdentityReducer.class); - conf.setOutputKeyClass(LongWritable.class); - conf.setOutputValueClass(Text.class); - conf.setMaxMapAttempts(1); - - // partition too low - conf.setBoolean("test.testmapred.badpartition", true); - boolean pass = true; - try { - JobClient.runJob(conf); - } catch (IOException e) { - pass = false; - } - assertFalse("should fail for partition < 0", pass); - - // partition too high - conf.setBoolean("test.testmapred.badpartition", false); - pass = true; - try { - JobClient.runJob(conf); - } catch (IOException e) { - pass = false; - } - assertFalse("should fail for partition >= numPartitions", pass); - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRLocalFS.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRLocalFS.java deleted file mode 100644 index 2788a373402..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRLocalFS.java +++ /dev/null @@ -1,348 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.mapred; - -import java.io.DataInput; -import java.io.DataOutput; -import java.io.DataOutputStream; -import java.io.File; -import java.io.IOException; -import java.util.Iterator; - -import junit.framework.TestCase; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.examples.SecondarySort; -import org.apache.hadoop.examples.WordCount; -import org.apache.hadoop.examples.SecondarySort.FirstGroupingComparator; -import org.apache.hadoop.examples.SecondarySort.FirstPartitioner; -import org.apache.hadoop.examples.SecondarySort.IntPair; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.IntWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.io.WritableComparable; -import org.apache.hadoop.io.WritableUtils; -import org.apache.hadoop.mapred.MRCaching.TestResult; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.MapReduceTestUtil; -import org.apache.hadoop.mapreduce.TaskCounter; -import org.apache.hadoop.mapreduce.TestMapReduceLocal; -import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; -import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; -import org.apache.hadoop.util.Progressable; - -/** - * A JUnit test to test min map-reduce cluster with local file system. - */ -public class TestMiniMRLocalFS extends TestCase { - private static String TEST_ROOT_DIR = - new File(System.getProperty("test.build.data","/tmp")) - .toURI().toString().replace(' ', '+'); - - public void testWithLocal() - throws IOException, InterruptedException, ClassNotFoundException { - MiniMRCluster mr = null; - try { - mr = new MiniMRCluster(2, "file:///", 3); - // make cleanup inline sothat validation of existence of these directories - // can be done - mr.setInlineCleanupThreads(); - - TestMiniMRWithDFS.runPI(mr, mr.createJobConf()); - - // run the wordcount example with caching - JobConf job = mr.createJobConf(); - TestResult ret = MRCaching.launchMRCache(TEST_ROOT_DIR + "/wc/input", - TEST_ROOT_DIR + "/wc/output", - TEST_ROOT_DIR + "/cachedir", - job, - "The quick brown fox\n" - + "has many silly\n" - + "red fox sox\n"); - // assert the number of lines read during caching - assertTrue("Failed test archives not matching", ret.isOutputOk); - // test the task report fetchers - JobClient client = new JobClient(job); - JobID jobid = ret.job.getID(); - TaskReport[] reports; - reports = client.getSetupTaskReports(jobid); - assertEquals("number of setups", 2, reports.length); - reports = client.getMapTaskReports(jobid); - assertEquals("number of maps", 1, reports.length); - reports = client.getReduceTaskReports(jobid); - assertEquals("number of reduces", 1, reports.length); - reports = client.getCleanupTaskReports(jobid); - assertEquals("number of cleanups", 2, reports.length); - Counters counters = ret.job.getCounters(); - assertEquals("number of map inputs", 3, - counters.getCounter(TaskCounter.MAP_INPUT_RECORDS)); - assertEquals("number of reduce outputs", 9, - counters.getCounter(TaskCounter.REDUCE_OUTPUT_RECORDS)); - runCustomFormats(mr); - runSecondarySort(mr.createJobConf()); - } finally { - if (mr != null) { mr.shutdown(); } - } - } - - private void runCustomFormats(MiniMRCluster mr) throws IOException { - JobConf job = mr.createJobConf(); - FileSystem fileSys = FileSystem.get(job); - Path testDir = new Path(TEST_ROOT_DIR + "/test_mini_mr_local"); - Path outDir = new Path(testDir, "out"); - 
System.out.println("testDir= " + testDir); - fileSys.delete(testDir, true); - - job.setInputFormat(MyInputFormat.class); - job.setOutputFormat(MyOutputFormat.class); - job.setOutputKeyClass(Text.class); - job.setOutputValueClass(IntWritable.class); - - job.setMapperClass(MyMapper.class); - job.setReducerClass(MyReducer.class); - job.setNumMapTasks(100); - job.setNumReduceTasks(1); - // explicitly do not use "normal" job.setOutputPath to make sure - // that it is not hardcoded anywhere in the framework. - job.set("non.std.out", outDir.toString()); - try { - JobClient.runJob(job); - String result = - MapReduceTestUtil.readOutput(outDir, job); - assertEquals("output", ("aunt annie\t1\n" + - "bumble boat\t4\n" + - "crocodile pants\t0\n" + - "duck-dog\t5\n"+ - "eggs\t2\n" + - "finagle the agent\t3\n"), result); - } finally { - fileSys.delete(testDir, true); - } - - } - - private static class MyInputFormat - implements InputFormat { - - static final String[] data = new String[]{ - "crocodile pants", - "aunt annie", - "eggs", - "finagle the agent", - "bumble boat", - "duck-dog", - }; - - private static class MySplit implements InputSplit { - int first; - int length; - - public MySplit() { } - - public MySplit(int first, int length) { - this.first = first; - this.length = length; - } - - public String[] getLocations() { - return new String[0]; - } - - public long getLength() { - return length; - } - - public void write(DataOutput out) throws IOException { - WritableUtils.writeVInt(out, first); - WritableUtils.writeVInt(out, length); - } - - public void readFields(DataInput in) throws IOException { - first = WritableUtils.readVInt(in); - length = WritableUtils.readVInt(in); - } - } - - static class MyRecordReader implements RecordReader { - int index; - int past; - int length; - - MyRecordReader(int index, int length) { - this.index = index; - this.past = index + length; - this.length = length; - } - - public boolean next(IntWritable key, Text value) throws IOException { - if (index < past) { - key.set(index); - value.set(data[index]); - index += 1; - return true; - } - return false; - } - - public IntWritable createKey() { - return new IntWritable(); - } - - public Text createValue() { - return new Text(); - } - - public long getPos() throws IOException { - return index; - } - - public void close() throws IOException {} - - public float getProgress() throws IOException { - return 1.0f - (past-index)/length; - } - } - - public InputSplit[] getSplits(JobConf job, - int numSplits) throws IOException { - return new MySplit[]{new MySplit(0, 1), new MySplit(1, 3), - new MySplit(4, 2)}; - } - - public RecordReader getRecordReader(InputSplit split, - JobConf job, - Reporter reporter) - throws IOException { - MySplit sp = (MySplit) split; - return new MyRecordReader(sp.first, sp.length); - } - - } - - static class MyMapper extends MapReduceBase - implements Mapper { - - public void map(WritableComparable key, Writable value, - OutputCollector out, - Reporter reporter) throws IOException { - System.out.println("map: " + key + ", " + value); - out.collect((WritableComparable) value, key); - InputSplit split = reporter.getInputSplit(); - if (split.getClass() != MyInputFormat.MySplit.class) { - throw new IOException("Got wrong split in MyMapper! 
" + - split.getClass().getName()); - } - } - } - - static class MyReducer extends MapReduceBase - implements Reducer { - public void reduce(WritableComparable key, Iterator values, - OutputCollector output, - Reporter reporter) throws IOException { - try { - InputSplit split = reporter.getInputSplit(); - throw new IOException("Got an input split of " + split); - } catch (UnsupportedOperationException e) { - // expected result - } - while (values.hasNext()) { - Writable value = values.next(); - System.out.println("reduce: " + key + ", " + value); - output.collect(key, value); - } - } - } - - static class MyOutputFormat implements OutputFormat { - static class MyRecordWriter implements RecordWriter { - private DataOutputStream out; - - public MyRecordWriter(Path outputFile, JobConf job) throws IOException { - out = outputFile.getFileSystem(job).create(outputFile); - } - - public void write(Object key, Object value) throws IOException { - out.writeBytes(key.toString() + "\t" + value.toString() + "\n"); - } - - public void close(Reporter reporter) throws IOException { - out.close(); - } - } - - public RecordWriter getRecordWriter(FileSystem ignored, JobConf job, - String name, - Progressable progress - ) throws IOException { - return new MyRecordWriter(new Path(job.get("non.std.out")), job); - } - - public void checkOutputSpecs(FileSystem ignored, - JobConf job) throws IOException { - } - } - - private void runSecondarySort(Configuration conf) throws IOException, - InterruptedException, - ClassNotFoundException { - FileSystem localFs = FileSystem.getLocal(conf); - localFs.delete(new Path(TEST_ROOT_DIR + "/in"), true); - localFs.delete(new Path(TEST_ROOT_DIR + "/out"), true); - TestMapReduceLocal.writeFile - ("in/part1", "-1 -4\n-3 23\n5 10\n-1 -2\n-1 300\n-1 10\n4 1\n" + - "4 2\n4 10\n4 -1\n4 -10\n10 20\n10 30\n10 25\n"); - Job job = Job.getInstance(conf, "word count"); - job.setJarByClass(WordCount.class); - job.setNumReduceTasks(2); - job.setMapperClass(SecondarySort.MapClass.class); - job.setReducerClass(SecondarySort.Reduce.class); - // group and partition by the first int in the pair - job.setPartitionerClass(FirstPartitioner.class); - job.setGroupingComparatorClass(FirstGroupingComparator.class); - - // the map output is IntPair, IntWritable - job.setMapOutputKeyClass(IntPair.class); - job.setMapOutputValueClass(IntWritable.class); - - // the reduce output is Text, IntWritable - job.setOutputKeyClass(Text.class); - job.setOutputValueClass(IntWritable.class); - - FileInputFormat.addInputPath(job, new Path(TEST_ROOT_DIR + "/in")); - FileOutputFormat.setOutputPath(job, new Path(TEST_ROOT_DIR + "/out")); - assertTrue(job.waitForCompletion(true)); - String out = TestMapReduceLocal.readFile("out/part-r-00000"); - assertEquals("------------------------------------------------\n" + - "4\t-10\n4\t-1\n4\t1\n4\t2\n4\t10\n" + - "------------------------------------------------\n" + - "10\t20\n10\t25\n10\t30\n", out); - out = TestMapReduceLocal.readFile("out/part-r-00001"); - assertEquals("------------------------------------------------\n" + - "-3\t23\n" + - "------------------------------------------------\n" + - "-1\t-4\n-1\t-2\n-1\t10\n-1\t300\n" + - "------------------------------------------------\n" + - "5\t10\n", out); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFS.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFS.java deleted file mode 100644 index 2e16f844bc8..00000000000 --- 
a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMiniMRWithDFS.java +++ /dev/null @@ -1,341 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.BufferedReader; -import java.io.DataOutputStream; -import java.io.File; -import java.io.IOException; -import java.io.InputStreamReader; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import junit.framework.TestCase; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.IntWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapreduce.FileSystemCounter; -import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.mapreduce.MapReduceTestUtil; -import org.apache.hadoop.mapreduce.TaskCounter; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.StringUtils; - -/** - * A JUnit test to test Mini Map-Reduce Cluster with Mini-DFS. 
- */ -public class TestMiniMRWithDFS extends TestCase { - private static final Log LOG = - LogFactory.getLog(TestMiniMRWithDFS.class.getName()); - - static final int NUM_MAPS = 10; - static final int NUM_SAMPLES = 100000; - - public static class TestResult { - public String output; - public RunningJob job; - TestResult(RunningJob job, String output) { - this.job = job; - this.output = output; - } - } - public static TestResult launchWordCount(JobConf conf, - Path inDir, - Path outDir, - String input, - int numMaps, - int numReduces) throws IOException { - FileSystem inFs = inDir.getFileSystem(conf); - FileSystem outFs = outDir.getFileSystem(conf); - outFs.delete(outDir, true); - if (!inFs.mkdirs(inDir)) { - throw new IOException("Mkdirs failed to create " + inDir.toString()); - } - { - DataOutputStream file = inFs.create(new Path(inDir, "part-0")); - file.writeBytes(input); - file.close(); - } - conf.setJobName("wordcount"); - conf.setInputFormat(TextInputFormat.class); - - // the keys are words (strings) - conf.setOutputKeyClass(Text.class); - // the values are counts (ints) - conf.setOutputValueClass(IntWritable.class); - - conf.setMapperClass(WordCount.MapClass.class); - conf.setCombinerClass(WordCount.Reduce.class); - conf.setReducerClass(WordCount.Reduce.class); - FileInputFormat.setInputPaths(conf, inDir); - FileOutputFormat.setOutputPath(conf, outDir); - conf.setNumMapTasks(numMaps); - conf.setNumReduceTasks(numReduces); - RunningJob job = JobClient.runJob(conf); - return new TestResult(job, MapReduceTestUtil.readOutput(outDir, conf)); - } - - /** - * Make sure that there are exactly the directories that we expect to find. - * - *
- *
- * - * For e.g., if we want to check the existence of *only* the directories for - * user1's tasks job1-attempt1, job1-attempt2, job2-attempt1, we pass user1 as - * user, {job1, job1, job2, job3} as jobIds and {attempt1, attempt2, attempt1, - * attempt3} as taskDirs. - * - * @param mr the map-reduce cluster - * @param user the name of the job-owner - * @param jobIds the list of jobs - * @param taskDirs the task ids that should be present - */ - static void checkTaskDirectories(MiniMRCluster mr, String user, - String[] jobIds, String[] taskDirs) { - - mr.waitUntilIdle(); - int trackers = mr.getNumTaskTrackers(); - - List observedJobDirs = new ArrayList(); - List observedFilesInsideJobDir = new ArrayList(); - - for (int i = 0; i < trackers; ++i) { - - // Verify that mapred-local-dir and it's direct contents are valid - File localDir = new File(mr.getTaskTrackerLocalDir(i)); - assertTrue("Local dir " + localDir + " does not exist.", localDir - .isDirectory()); - LOG.info("Verifying contents of " + MRConfig.LOCAL_DIR + " " - + localDir.getAbsolutePath()); - - // Verify contents(user-dir) of tracker-sub-dir - File trackerSubDir = new File(localDir, TaskTracker.SUBDIR); - if (trackerSubDir.isDirectory()) { - - // Verify contents of user-dir and populate the job-dirs/attempt-dirs - // lists - File userDir = new File(trackerSubDir, user); - if (userDir.isDirectory()) { - LOG.info("Verifying contents of user-dir " - + userDir.getAbsolutePath()); - verifyContents(new String[] { TaskTracker.JOBCACHE, - TaskTracker.DISTCACHEDIR }, userDir.list()); - - File jobCacheDir = - new File(localDir, TaskTracker.getJobCacheSubdir(user)); - String[] jobDirs = jobCacheDir.list(); - observedJobDirs.addAll(Arrays.asList(jobDirs)); - - for (String jobDir : jobDirs) { - String[] attemptDirs = new File(jobCacheDir, jobDir).list(); - observedFilesInsideJobDir.addAll(Arrays.asList(attemptDirs)); - } - } - } - } - - // Now verify that only expected job-dirs and attempt-dirs are present. - LOG.info("Verifying the list of job directories"); - verifyContents(jobIds, observedJobDirs.toArray(new String[observedJobDirs - .size()])); - LOG.info("Verifying the list of task directories"); - // All taskDirs should be present in the observed list. Other files like - // job.xml etc may be present too, we are not checking them here. - for (int j = 0; j < taskDirs.length; j++) { - assertTrue( - "Expected task-directory " + taskDirs[j] + " is not present!", - observedFilesInsideJobDir.contains(taskDirs[j])); - } - } - - /** - * Check the list of expectedFiles against the list of observedFiles and make - * sure they both are the same. Duplicates can be present in either of the - * lists and all duplicate entries are treated as a single entity. 
- * - * @param expectedFiles - * @param observedFiles - */ - private static void verifyContents(String[] expectedFiles, - String[] observedFiles) { - boolean[] foundExpectedFiles = new boolean[expectedFiles.length]; - boolean[] validObservedFiles = new boolean[observedFiles.length]; - for (int j = 0; j < observedFiles.length; ++j) { - for (int k = 0; k < expectedFiles.length; ++k) { - if (expectedFiles[k].equals(observedFiles[j])) { - foundExpectedFiles[k] = true; - validObservedFiles[j] = true; - } - } - } - for (int j = 0; j < foundExpectedFiles.length; j++) { - assertTrue("Expected file " + expectedFiles[j] + " not found", - foundExpectedFiles[j]); - } - for (int j = 0; j < validObservedFiles.length; j++) { - assertTrue("Unexpected file " + observedFiles[j] + " found", - validObservedFiles[j]); - } - } - - public static void runPI(MiniMRCluster mr, JobConf jobconf) - throws IOException, InterruptedException, ClassNotFoundException { - LOG.info("runPI"); - double estimate = org.apache.hadoop.examples.QuasiMonteCarlo.estimatePi( - NUM_MAPS, NUM_SAMPLES, jobconf).doubleValue(); - double error = Math.abs(Math.PI - estimate); - assertTrue("Error in PI estimation "+error+" exceeds 0.01", (error < 0.01)); - String userName = UserGroupInformation.getLoginUser().getUserName(); - checkTaskDirectories(mr, userName, new String[] {}, new String[] {}); - } - - public static void runWordCount(MiniMRCluster mr, JobConf jobConf) - throws IOException { - LOG.info("runWordCount"); - // Run a word count example - // Keeping tasks that match this pattern - String pattern = - TaskAttemptID.getTaskAttemptIDsPattern(null, null, TaskType.MAP, 1, null); - jobConf.setKeepTaskFilesPattern(pattern); - TestResult result; - final Path inDir = new Path("./wc/input"); - final Path outDir = new Path("./wc/output"); - String input = "The quick brown fox\nhas many silly\nred fox sox\n"; - result = launchWordCount(jobConf, inDir, outDir, input, 3, 1); - assertEquals("The\t1\nbrown\t1\nfox\t2\nhas\t1\nmany\t1\n" + - "quick\t1\nred\t1\nsilly\t1\nsox\t1\n", result.output); - JobID jobid = result.job.getID(); - TaskAttemptID taskid = new TaskAttemptID( - new TaskID(jobid, TaskType.MAP, 1),0); - String userName = UserGroupInformation.getLoginUser().getUserName(); - - checkTaskDirectories(mr, userName, new String[] { jobid.toString() }, - new String[] { taskid.toString() }); - // test with maps=0 - jobConf = mr.createJobConf(); - input = "owen is oom"; - result = launchWordCount(jobConf, inDir, outDir, input, 0, 1); - assertEquals("is\t1\noom\t1\nowen\t1\n", result.output); - Counters counters = result.job.getCounters(); - long hdfsRead = counters.findCounter("HDFS", - FileSystemCounter.BYTES_READ).getValue(); - long hdfsWrite = counters.findCounter("HDFS", - FileSystemCounter.BYTES_WRITTEN).getValue(); - long rawSplitBytesRead = - counters.findCounter(TaskCounter.SPLIT_RAW_BYTES).getCounter(); - assertEquals(result.output.length(), hdfsWrite); - // add the correction factor of 234 as the input split is also streamed - assertEquals(input.length() + rawSplitBytesRead, hdfsRead); - - // Run a job with input and output going to localfs even though the - // default fs is hdfs. 
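(Aside, before the localfs run that the comment above introduces: the counter assertions earlier in runWordCount() use the old org.apache.hadoop.mapred.Counters API. A minimal sketch of that lookup pattern follows; it is illustrative only, and the helper names CounterPeek and printIoCounters are made up.)

```java
import java.io.IOException;

import org.apache.hadoop.mapred.Counters;
import org.apache.hadoop.mapred.RunningJob;
import org.apache.hadoop.mapreduce.FileSystemCounter;
import org.apache.hadoop.mapreduce.TaskCounter;

// Reads the same counters the deleted runWordCount() asserts on.
class CounterPeek {
  static void printIoCounters(RunningJob job) throws IOException {
    Counters counters = job.getCounters();
    long hdfsRead = counters.findCounter("HDFS",
        FileSystemCounter.BYTES_READ).getValue();
    long hdfsWritten = counters.findCounter("HDFS",
        FileSystemCounter.BYTES_WRITTEN).getValue();
    long splitRawBytes = counters.findCounter(
        TaskCounter.SPLIT_RAW_BYTES).getCounter();
    // As asserted above: HDFS bytes read covers the job input plus the
    // serialized input split, i.e. input.length() + splitRawBytes.
    System.out.println("HDFS read=" + hdfsRead + ", written=" + hdfsWritten
        + ", split bytes=" + splitRawBytes);
  }
}
```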
- { - FileSystem localfs = FileSystem.getLocal(jobConf); - String TEST_ROOT_DIR = - new File(System.getProperty("test.build.data","/tmp")) - .toString().replace(' ', '+'); - Path localIn = localfs.makeQualified - (new Path(TEST_ROOT_DIR + "/local/in")); - Path localOut = localfs.makeQualified - (new Path(TEST_ROOT_DIR + "/local/out")); - result = launchWordCount(jobConf, localIn, localOut, - "all your base belong to us", 1, 1); - assertEquals("all\t1\nbase\t1\nbelong\t1\nto\t1\nus\t1\nyour\t1\n", - result.output); - assertTrue("outputs on localfs", localfs.exists(localOut)); - - } - } - - public void testWithDFS() - throws IOException, InterruptedException, ClassNotFoundException { - MiniDFSCluster dfs = null; - MiniMRCluster mr = null; - FileSystem fileSys = null; - try { - final int taskTrackers = 4; - - Configuration conf = new Configuration(); - dfs = new MiniDFSCluster(conf, 4, true, null); - fileSys = dfs.getFileSystem(); - mr = new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1); - // make cleanup inline sothat validation of existence of these directories - // can be done - mr.setInlineCleanupThreads(); - - runPI(mr, mr.createJobConf()); - runWordCount(mr, mr.createJobConf()); - } finally { - if (dfs != null) { dfs.shutdown(); } - if (mr != null) { mr.shutdown(); - } - } - } - - public void testWithDFSWithDefaultPort() throws IOException { - MiniDFSCluster dfs = null; - MiniMRCluster mr = null; - FileSystem fileSys = null; - try { - final int taskTrackers = 4; - - Configuration conf = new Configuration(); - // start a dfs with the default port number - dfs = new MiniDFSCluster( - NameNode.DEFAULT_PORT, conf, 4, true, true, null, null); - fileSys = dfs.getFileSystem(); - mr = new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1); - - JobConf jobConf = mr.createJobConf(); - TestResult result; - final Path inDir = new Path("./wc/input"); - final Path outDir = new Path("hdfs://" + - dfs.getNameNode().getNameNodeAddress().getHostName() + - ":" + NameNode.DEFAULT_PORT +"/./wc/output"); - String input = "The quick brown fox\nhas many silly\nred fox sox\n"; - result = launchWordCount(jobConf, inDir, outDir, input, 3, 1); - assertEquals("The\t1\nbrown\t1\nfox\t2\nhas\t1\nmany\t1\n" + - "quick\t1\nred\t1\nsilly\t1\nsox\t1\n", result.output); - final Path outDir2 = new Path("hdfs:/test/wc/output2"); - jobConf.set("fs.default.name", "hdfs://localhost:" + NameNode.DEFAULT_PORT); - result = launchWordCount(jobConf, inDir, outDir2, input, 3, 1); - assertEquals("The\t1\nbrown\t1\nfox\t2\nhas\t1\nmany\t1\n" + - "quick\t1\nred\t1\nsilly\t1\nsox\t1\n", result.output); - } catch (java.net.BindException be) { - LOG.info("Skip the test this time because can not start namenode on port " - + NameNode.DEFAULT_PORT, be); - } finally { - if (dfs != null) { dfs.shutdown(); } - if (mr != null) { mr.shutdown(); - } - } - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestNodeHealthService.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestNodeHealthService.java deleted file mode 100644 index 30449094ea7..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestNodeHealthService.java +++ /dev/null @@ -1,160 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.PrintWriter; -import java.util.TimerTask; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapred.TaskTrackerStatus.TaskTrackerHealthStatus; - -import junit.framework.TestCase; - -public class TestNodeHealthService extends TestCase { - - private static volatile Log LOG = LogFactory - .getLog(TestNodeHealthService.class); - - private static final String nodeHealthConfigPath = System.getProperty( - "test.build.extraconf", "build/test/extraconf"); - - final static File nodeHealthConfigFile = new File(nodeHealthConfigPath, - "mapred-site.xml"); - - private String testRootDir = new File(System.getProperty("test.build.data", - "/tmp")).getAbsolutePath(); - - private File nodeHealthscriptFile = new File(testRootDir, "failingscript.sh"); - - @Override - protected void tearDown() throws Exception { - if (nodeHealthConfigFile.exists()) { - nodeHealthConfigFile.delete(); - } - if (nodeHealthscriptFile.exists()) { - nodeHealthscriptFile.delete(); - } - super.tearDown(); - } - - private Configuration getConfForNodeHealthScript() { - Configuration conf = new Configuration(); - conf.set(NodeHealthCheckerService.HEALTH_CHECK_SCRIPT_PROPERTY, - nodeHealthscriptFile.getAbsolutePath()); - conf.setLong(NodeHealthCheckerService.HEALTH_CHECK_INTERVAL_PROPERTY, 500); - conf.setLong( - NodeHealthCheckerService.HEALTH_CHECK_FAILURE_INTERVAL_PROPERTY, 1000); - return conf; - } - - private void writeNodeHealthScriptFile(String scriptStr, boolean setExecutable) - throws IOException { - PrintWriter pw = new PrintWriter(new FileOutputStream(nodeHealthscriptFile)); - pw.println(scriptStr); - pw.flush(); - pw.close(); - nodeHealthscriptFile.setExecutable(setExecutable); - } - - public void testNodeHealthScriptShouldRun() throws IOException { - // Node health script should not start if there is no property called - // node health script path. - assertFalse("Health checker should not have started", - NodeHealthCheckerService.shouldRun(new Configuration())); - Configuration conf = getConfForNodeHealthScript(); - // Node health script should not start if the node health script does not - // exists - assertFalse("Node health script should start", NodeHealthCheckerService - .shouldRun(conf)); - // Create script path. - conf.writeXml(new FileOutputStream(nodeHealthConfigFile)); - writeNodeHealthScriptFile("", false); - // Node health script should not start if the node health script is not - // executable. 
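(Aside, before the executability assertion that follows: the removed test drives NodeHealthCheckerService entirely through its configuration keys, as in getConfForNodeHealthScript() above. The sketch below mirrors that wiring; it is illustrative only, the class and method names are made up, and placing it in the org.apache.hadoop.mapred package simply mirrors the original test, since the configuration constants may not be visible from other packages.)

```java
package org.apache.hadoop.mapred;

import org.apache.hadoop.conf.Configuration;

// Builds the same health-check configuration the deleted test uses and asks
// whether the checker would run for the given script path.
class HealthCheckConfigSketch {
  static boolean wouldRun(String scriptPath) {
    Configuration conf = new Configuration();
    conf.set(NodeHealthCheckerService.HEALTH_CHECK_SCRIPT_PROPERTY, scriptPath);
    conf.setLong(NodeHealthCheckerService.HEALTH_CHECK_INTERVAL_PROPERTY, 500);
    conf.setLong(
        NodeHealthCheckerService.HEALTH_CHECK_FAILURE_INTERVAL_PROPERTY, 1000);
    // Returns false unless the script path is set, the file exists, and it is
    // executable; these are exactly the cases the surrounding assertions cover.
    return NodeHealthCheckerService.shouldRun(conf);
  }
}
```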
- assertFalse("Node health script should start", NodeHealthCheckerService - .shouldRun(conf)); - writeNodeHealthScriptFile("", true); - assertTrue("Node health script should start", NodeHealthCheckerService - .shouldRun(conf)); - } - - public void testNodeHealthScript() throws Exception { - TaskTrackerHealthStatus healthStatus = new TaskTrackerHealthStatus(); - String errorScript = "echo ERROR\n echo \"Tracker not healthy\""; - String normalScript = "echo \"I am all fine\""; - String timeOutScript = "sleep 4\n echo\"I am fine\""; - Configuration conf = getConfForNodeHealthScript(); - conf.writeXml(new FileOutputStream(nodeHealthConfigFile)); - - NodeHealthCheckerService nodeHealthChecker = new NodeHealthCheckerService( - conf); - TimerTask timer = nodeHealthChecker.getTimer(); - writeNodeHealthScriptFile(normalScript, true); - timer.run(); - - nodeHealthChecker.setHealthStatus(healthStatus); - LOG.info("Checking initial healthy condition"); - // Check proper report conditions. - assertTrue("Node health status reported unhealthy", healthStatus - .isNodeHealthy()); - assertTrue("Node health status reported unhealthy", healthStatus - .getHealthReport().isEmpty()); - - // write out error file. - // Healthy to unhealthy transition - writeNodeHealthScriptFile(errorScript, true); - // Run timer - timer.run(); - // update health status - nodeHealthChecker.setHealthStatus(healthStatus); - LOG.info("Checking Healthy--->Unhealthy"); - assertFalse("Node health status reported healthy", healthStatus - .isNodeHealthy()); - assertFalse("Node health status reported healthy", healthStatus - .getHealthReport().isEmpty()); - - // Check unhealthy to healthy transitions. - writeNodeHealthScriptFile(normalScript, true); - timer.run(); - nodeHealthChecker.setHealthStatus(healthStatus); - LOG.info("Checking UnHealthy--->healthy"); - // Check proper report conditions. - assertTrue("Node health status reported unhealthy", healthStatus - .isNodeHealthy()); - assertTrue("Node health status reported unhealthy", healthStatus - .getHealthReport().isEmpty()); - - // Healthy to timeout transition. - writeNodeHealthScriptFile(timeOutScript, true); - timer.run(); - nodeHealthChecker.setHealthStatus(healthStatus); - LOG.info("Checking Healthy--->timeout"); - assertFalse("Node health status reported healthy even after timeout", - healthStatus.isNodeHealthy()); - assertEquals("Node time out message not propogated", healthStatus - .getHealthReport(), - NodeHealthCheckerService.NODE_HEALTH_SCRIPT_TIMED_OUT_MSG); - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestNodeRefresh.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestNodeRefresh.java deleted file mode 100644 index d0e51dec025..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestNodeRefresh.java +++ /dev/null @@ -1,490 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.BufferedWriter; -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.OutputStreamWriter; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -import junit.framework.TestCase; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.DFSTestUtil; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.io.WritableComparable; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.mapred.lib.IdentityReducer; -import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.Shell.ShellCommandExecutor; - -/** - * Test node decommissioning and recommissioning via refresh. Also check if the - * nodes are decommissioned upon refresh. - */ -public class TestNodeRefresh extends TestCase { - private String namenode = null; - private MiniDFSCluster dfs = null; - private MiniMRCluster mr = null; - private JobTracker jt = null; - private String[] hosts = null; - private String[] trackerHosts = null; - private UserGroupInformation owner, user1, user2, user3, user4, user5; - private static final Log LOG = - LogFactory.getLog(TestNodeRefresh.class); - - private String getHostname(int i) { - return "host" + i + ".com"; - } - - private void startCluster(int numHosts, int numTrackerPerHost, - int numExcluded, UserGroupInformation clusterUgi, - Configuration conf) - throws IOException { - try { - // create fake mapping for the groups - owner = UserGroupInformation.getLoginUser(); - user1= UserGroupInformation.createUserForTesting("user1", - new String[] {"user1"}); - user2= UserGroupInformation.createUserForTesting("user2", - new String[] {"user2"}); - user3= UserGroupInformation.createUserForTesting("user3", - new String[] {"abc"}); - user4= UserGroupInformation.createUserForTesting("user4", - new String[] {"supergroup"}); - user5= UserGroupInformation.createUserForTesting("user5", - new String[] {"user5"}); - conf.setBoolean("dfs.replication.considerLoad", false); - - // prepare hosts info - hosts = new String[numHosts]; - for (int i = 1; i <= numHosts; ++i) { - hosts[i - 1] = getHostname(i); - } - - // start dfs - dfs = new MiniDFSCluster(conf, 1, true, null, hosts); - dfs.waitActive(); - dfs.startDataNodes(conf, numHosts, true, null, null, hosts, null); - dfs.waitActive(); - FileSystem.mkdirs(dfs.getFileSystem(), new Path("/"), - new FsPermission((short) 0777)); - - namenode = (dfs.getFileSystem()).getUri().getHost() + ":" + - (dfs.getFileSystem()).getUri().getPort(); - - // create tracker hosts - trackerHosts = new 
String[numHosts * numTrackerPerHost]; - for (int i = 1; i <= (numHosts * numTrackerPerHost); ++i) { - trackerHosts[i - 1] = getHostname(i); - } - - // start mini mr - JobConf jtConf = new JobConf(conf); - mr = new MiniMRCluster(0, 0, numHosts * numTrackerPerHost, namenode, 1, - null, trackerHosts, clusterUgi, jtConf, - numExcluded * numTrackerPerHost); - - jt = mr.getJobTrackerRunner().getJobTracker(); - - // check if trackers from all the desired hosts have connected - Set hostsSeen = new HashSet(); - for (TaskTrackerStatus status : jt.taskTrackers()) { - hostsSeen.add(status.getHost()); - } - assertEquals("Not all hosts are up", numHosts - numExcluded, - hostsSeen.size()); - } catch (IOException ioe) { - stopCluster(); - } - } - - private void stopCluster() { - hosts = null; - trackerHosts = null; - if (dfs != null) { - dfs.shutdown(); - dfs = null; - namenode = null; - } - if (mr != null) { - mr.shutdown(); - mr = null; - jt = null; - } - } - - private AdminOperationsProtocol getClient(Configuration conf, - UserGroupInformation ugi) - throws IOException { - return (AdminOperationsProtocol)RPC.getProxy(AdminOperationsProtocol.class, - AdminOperationsProtocol.versionID, JobTracker.getAddress(conf), ugi, - conf, NetUtils.getSocketFactory(conf, AdminOperationsProtocol.class)); - } - - /** - * Check default value of HOSTS_EXCLUDE. Also check if only - * owner is allowed to this command. - */ - public void testMRRefreshDefault() throws IOException { - // start a cluster with 2 hosts and no exclude-hosts file - Configuration conf = new Configuration(); - conf.set(JTConfig.JT_HOSTS_EXCLUDE_FILENAME, ""); - startCluster(2, 1, 0, UserGroupInformation.getLoginUser(),conf); - - conf = mr.createJobConf(new JobConf(conf)); - - // refresh with wrong user - AdminOperationsProtocol client = getClient(conf, user1); - boolean success = false; - try { - // Also try tool runner - client.refreshNodes(); - success = true; - } catch (IOException ioe) {} - assertFalse("Invalid user performed privileged refresh operation", success); - - // refresh with correct user - success = false; - client = getClient(conf, owner); - try { - client.refreshNodes(); - success = true; - } catch (IOException ioe){} - assertTrue("Privileged user denied permission for refresh operation", - success); - - // refresh with invalid user - success = false; - client = getClient(conf, user4); - try { - client.refreshNodes(); - success = true; - } catch (IOException ioe){} - assertFalse("Invalid user performed privileged refresh operation", - success); - - // check the cluster status and tracker size - assertEquals("Trackers are lost upon refresh with empty hosts.exclude", - 2, jt.getClusterStatus(false).getTaskTrackers()); - assertEquals("Excluded node count is incorrect", - 0, jt.getClusterStatus(false).getNumExcludedNodes()); - - // check if the host is disallowed - Set hosts = new HashSet(); - for (TaskTrackerStatus status : jt.taskTrackers()) { - hosts.add(status.getHost()); - } - assertEquals("Host is excluded upon refresh with empty hosts.exclude", - 2, hosts.size()); - - stopCluster(); - } - - /** - * Check refresh with a specific user/group set in the conf - */ - public void testMRSuperUsers() throws IOException { - // start a cluster with 1 host and specified cluster administrators - Configuration conf = new Configuration(); - // set the supergroup - conf.set(MRConfig.MR_SUPERGROUP, "supergroup"); - // set the admin acl - conf.set(MRConfig.MR_ADMINS, "user5 abc"); - startCluster(2, 1, 0, 
UserGroupInformation.createRemoteUser("user1"), conf); - - conf = mr.createJobConf(new JobConf(conf)); - - // refresh with wrong user - AdminOperationsProtocol client = getClient(conf, user2); - boolean success = false; - try { - // Also try tool runner - client.refreshNodes(); - success = true; - } catch (IOException ioe) {} - assertFalse("Invalid user performed privileged refresh operation", success); - - // refresh with correct user - success = false; - client = getClient(conf, user1); - try { - client.refreshNodes(); - success = true; - } catch (IOException ioe){} - assertTrue("Privileged user denied permission for refresh operation", - success); - - // refresh with admin group - success = false; - client = getClient(conf, user3); - try { - client.refreshNodes(); - success = true; - } catch (IOException ioe){} - assertTrue("Admin group member denied permission for refresh operation", - success); - - // refresh with admin user - success = false; - client = getClient(conf, user5); - try { - client.refreshNodes(); - success = true; - } catch (IOException ioe){} - assertTrue("Admin user denied permission for refresh operation", - success); - - // refresh with deprecated super group member - success = false; - client = getClient(conf, user4); - try { - client.refreshNodes(); - success = true; - } catch (IOException ioe){} - assertTrue("Deprecated Super group member denied permission for refresh" + - " operation", success); - - stopCluster(); - } - - /** - * Check node refresh for decommissioning. Check if an allowed host is - * disallowed upon refresh. Also check if only owner/cluster administrator is - * allowed to fire this command. - */ - public void testMRRefreshDecommissioning() throws IOException { - // start a cluster with 2 hosts and empty exclude-hosts file - Configuration conf = new Configuration(); - File file = new File("hosts.exclude"); - file.delete(); - startCluster(2, 1, 0, UserGroupInformation.getLoginUser(), conf); - String hostToDecommission = getHostname(1); - conf = mr.createJobConf(new JobConf(conf)); - - // change the exclude-hosts file to include one host - FileOutputStream out = new FileOutputStream(file); - LOG.info("Writing excluded nodes to log file " + file.toString()); - BufferedWriter writer = null; - try { - writer = new BufferedWriter(new OutputStreamWriter(out)); - writer.write( hostToDecommission + "\n"); // decommission first host - } finally { - if (writer != null) { - writer.close(); - } - out.close(); - } - file.deleteOnExit(); - - AdminOperationsProtocol client = getClient(conf, owner); - try { - client.refreshNodes(); - } catch (IOException ioe){} - - // check the cluster status and tracker size - assertEquals("Tracker is not lost upon host decommissioning", - 1, jt.getClusterStatus(false).getTaskTrackers()); - assertEquals("Excluded node count is incorrect", - 1, jt.getClusterStatus(false).getNumExcludedNodes()); - - // check if the host is disallowed - for (TaskTrackerStatus status : jt.taskTrackers()) { - assertFalse("Tracker from decommissioned host still exist", - status.getHost().equals(hostToDecommission)); - } - - stopCluster(); - } - - /** - * Check node refresh for recommissioning. Check if an disallowed host is - * allowed upon refresh. 
- */ - public void testMRRefreshRecommissioning() throws IOException { - String hostToInclude = getHostname(1); - - // start a cluster with 2 hosts and exclude-hosts file having one hostname - Configuration conf = new Configuration(); - - // create a exclude-hosts file to include one host - File file = new File("hosts.exclude"); - file.delete(); - FileOutputStream out = new FileOutputStream(file); - LOG.info("Writing excluded nodes to log file " + file.toString()); - BufferedWriter writer = null; - try { - writer = new BufferedWriter(new OutputStreamWriter(out)); - writer.write(hostToInclude + "\n"); // exclude first host - } finally { - if (writer != null) { - writer.close(); - } - out.close(); - } - - startCluster(2, 1, 1, UserGroupInformation.getLoginUser(), conf); - - file.delete(); - - // change the exclude-hosts file to include no hosts - // note that this will also test hosts file with no content - out = new FileOutputStream(file); - LOG.info("Clearing hosts.exclude file " + file.toString()); - writer = null; - try { - writer = new BufferedWriter(new OutputStreamWriter(out)); - writer.write("\n"); - } finally { - if (writer != null) { - writer.close(); - } - out.close(); - } - file.deleteOnExit(); - - conf = mr.createJobConf(new JobConf(conf)); - - AdminOperationsProtocol client = getClient(conf, owner); - try { - client.refreshNodes(); - } catch (IOException ioe){} - - // start a tracker - mr.startTaskTracker(hostToInclude, null, 2, 1); - - // wait for the tracker to join the jt - while (jt.taskTrackers().size() < 2) { - UtilsForTests.waitFor(100); - } - - assertEquals("Excluded node count is incorrect", - 0, jt.getClusterStatus(false).getNumExcludedNodes()); - - // check if the host is disallowed - boolean seen = false; - for (TaskTrackerStatus status : jt.taskTrackers()) { - if(status.getHost().equals(hostToInclude)) { - seen = true; - break; - } - } - assertTrue("Tracker from excluded host doesnt exist", seen); - - stopCluster(); - } - - // Mapper that fails once for the first time - static class FailOnceMapper extends MapReduceBase implements - Mapper { - - private boolean shouldFail = false; - public void map(WritableComparable key, Writable value, - OutputCollector out, Reporter reporter) - throws IOException { - - if (shouldFail) { - throw new RuntimeException("failing map"); - } - } - - @Override - public void configure(JobConf conf) { - TaskAttemptID id = TaskAttemptID.forName(conf.get("mapred.task.id")); - shouldFail = id.getId() == 0 && id.getTaskID().getId() == 0; - } - } - - /** - * Check refreshNodes for decommissioning blacklisted nodes. 
- */ - public void testBlacklistedNodeDecommissioning() throws Exception { - LOG.info("Testing blacklisted node decommissioning"); - - Configuration conf = new Configuration(); - conf.set(JTConfig.JT_MAX_TRACKER_BLACKLISTS, "1"); - - startCluster(2, 1, 0, UserGroupInformation.getLoginUser(), conf); - - assertEquals("Trackers not up", 2, - mr.getJobTrackerRunner().getJobTracker().getActiveTrackers().length); - // validate the total tracker count - assertEquals("Active tracker count mismatch", - 2, jt.getClusterStatus(false).getTaskTrackers()); - // validate blacklisted count - assertEquals("Blacklisted tracker count mismatch", - 0, jt.getClusterStatus(false).getBlacklistedTrackers()); - - // run a failing job to blacklist the tracker - JobConf jConf = mr.createJobConf(); - jConf.set(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, "1"); - jConf.setJobName("test-job-fail-once"); - jConf.setMapperClass(FailOnceMapper.class); - jConf.setReducerClass(IdentityReducer.class); - jConf.setNumMapTasks(1); - jConf.setNumReduceTasks(0); - - RunningJob job = - UtilsForTests.runJob(jConf, new Path("in"), new Path("out")); - job.waitForCompletion(); - - // check if the tracker is lost - // validate the total tracker count - assertEquals("Active tracker count mismatch", - 1, jt.getClusterStatus(false).getTaskTrackers()); - // validate blacklisted count - assertEquals("Blacklisted tracker count mismatch", - 1, jt.getClusterStatus(false).getBlacklistedTrackers()); - - // find the tracker to decommission - String hostToDecommission = - JobInProgress.convertTrackerNameToHostName( - jt.getBlacklistedTrackers()[0].getTaskTrackerName()); - LOG.info("Decommissioning host " + hostToDecommission); - - Set decom = new HashSet(1); - decom.add(hostToDecommission); - jt.decommissionNodes(decom); - - // check the cluster status and tracker size - assertEquals("Tracker is not lost upon host decommissioning", - 1, jt.getClusterStatus(false).getTaskTrackers()); - assertEquals("Blacklisted tracker count incorrect in cluster status after " - + "decommissioning", - 0, jt.getClusterStatus(false).getBlacklistedTrackers()); - assertEquals("Tracker is not lost upon host decommissioning", - 1, jt.taskTrackers().size()); - - stopCluster(); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestParallelInitialization.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestParallelInitialization.java deleted file mode 100644 index 4e812ad1f55..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestParallelInitialization.java +++ /dev/null @@ -1,253 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import junit.framework.TestCase; - -import org.apache.hadoop.io.IntWritable; -import org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobHistory; -import org.apache.hadoop.mapred.JobInProgress.KillInterruptedException; -import org.apache.hadoop.mapred.JobStatusChangeEvent.EventType; -import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; - -public class TestParallelInitialization extends TestCase { - - private static int jobCounter; - private static final int NUM_JOBS = 3; - IntWritable numJobsCompleted = new IntWritable(); - - static void resetCounters() { - jobCounter = 0; - } - - class FakeJobInProgress extends JobInProgress { - - public FakeJobInProgress(JobConf jobConf, - FakeTaskTrackerManager taskTrackerManager, - JobTracker jt) throws IOException { - super(new JobID("test", ++jobCounter), jobConf, jt); - this.startTime = System.currentTimeMillis(); - this.status = new JobStatus(getJobID(), 0f, 0f, JobStatus.PREP, - jobConf.getUser(), - jobConf.getJobName(), "", ""); - this.status.setJobPriority(JobPriority.NORMAL); - this.status.setStartTime(startTime); - this.jobHistory = new FakeJobHistory(); - } - - @Override - public synchronized void initTasks() throws IOException { - try { - int jobNumber = this.getJobID().getId(); - synchronized (numJobsCompleted) { - while (numJobsCompleted.get() != (NUM_JOBS - jobNumber)) { - numJobsCompleted.wait(); - } - numJobsCompleted.set(numJobsCompleted.get() + 1); - numJobsCompleted.notifyAll(); - LOG.info("JobNumber " + jobNumber + " succeeded"); - } - } catch (InterruptedException ie) {}; - this.status.setRunState(JobStatus.SUCCEEDED); - } - - @Override - synchronized void fail() { - this.status.setRunState(JobStatus.FAILED); - } - } - - static class FakeTaskTrackerManager implements TaskTrackerManager { - - int maps = 0; - int reduces = 0; - int maxMapTasksPerTracker = 2; - int maxReduceTasksPerTracker = 2; - List listeners = - new ArrayList(); - QueueManager queueManager; - - private Map trackers = - new HashMap(); - - public FakeTaskTrackerManager() { - JobConf conf = new JobConf(); - queueManager = new QueueManager(conf); - trackers.put("tt1", new TaskTrackerStatus("tt1", "tt1.host", 1, - new ArrayList(), 0, - maxMapTasksPerTracker, maxReduceTasksPerTracker)); - } - - public ClusterStatus getClusterStatus() { - int numTrackers = trackers.size(); - return new ClusterStatus(numTrackers, 0, - 10 * 60 * 1000, - maps, reduces, - numTrackers * maxMapTasksPerTracker, - numTrackers * maxReduceTasksPerTracker, - JobTrackerStatus.RUNNING); - } - - public int getNumberOfUniqueHosts() { - return 0; - } - - public Collection taskTrackers() { - return trackers.values(); - } - - public void addJobInProgressListener(JobInProgressListener listener) { - listeners.add(listener); - } - - public void removeJobInProgressListener(JobInProgressListener listener) { - listeners.remove(listener); - } - - - public QueueManager getQueueManager() { - return queueManager; - } - - public int getNextHeartbeatInterval() { - return JTConfig.JT_HEARTBEAT_INTERVAL_MIN_DEFAULT; - } - - public void killJob(JobID jobid) { - return; - } - - public JobInProgress getJob(JobID jobid) { - return null; - } - - public boolean killTask(TaskAttemptID attemptId, boolean shouldFail) { - return true; - } - - public void 
initJob(JobInProgress job) { - try { - JobStatus prevStatus = (JobStatus)job.getStatus().clone(); - job.initTasks(); - if (job.isJobEmpty()) { - completeEmptyJob(job); - } else if (!job.isSetupCleanupRequired()) { - job.completeSetup(); - } - JobStatus newStatus = (JobStatus)job.getStatus().clone(); - if (prevStatus.getRunState() != newStatus.getRunState()) { - JobStatusChangeEvent event = - new JobStatusChangeEvent(job, EventType.RUN_STATE_CHANGED, prevStatus, - newStatus); - for (JobInProgressListener listener : listeners) { - listener.jobUpdated(event); - } - } - } catch (KillInterruptedException kie) { - killJob(job.getJobID()); - } catch (IOException ioe) { - failJob(job); - } - } - - private synchronized void completeEmptyJob(JobInProgress job) { - job.completeEmptyJob(); - } - - public synchronized void failJob(JobInProgress job) { - JobStatus prevStatus = (JobStatus)job.getStatus().clone(); - job.fail(); - JobStatus newStatus = (JobStatus)job.getStatus().clone(); - if (prevStatus.getRunState() != newStatus.getRunState()) { - JobStatusChangeEvent event = - new JobStatusChangeEvent(job, EventType.RUN_STATE_CHANGED, prevStatus, - newStatus); - for (JobInProgressListener listener : listeners) { - listener.jobUpdated(event); - } - } - } - - // Test methods - - public void submitJob(JobInProgress job) throws IOException { - for (JobInProgressListener listener : listeners) { - listener.jobAdded(job); - } - } - } - - protected JobConf jobConf; - protected TaskScheduler scheduler; - private FakeTaskTrackerManager taskTrackerManager; - - @Override - protected void setUp() throws Exception { - resetCounters(); - jobConf = new JobConf(); - taskTrackerManager = new FakeTaskTrackerManager(); - scheduler = createTaskScheduler(); - scheduler.setConf(jobConf); - scheduler.setTaskTrackerManager(taskTrackerManager); - scheduler.start(); - } - - @Override - protected void tearDown() throws Exception { - if (scheduler != null) { - scheduler.terminate(); - } - } - - protected TaskScheduler createTaskScheduler() { - return new JobQueueTaskScheduler(); - } - - public void testParallelInitJobs() throws IOException { - FakeJobInProgress[] jobs = new FakeJobInProgress[NUM_JOBS]; - - // Submit NUM_JOBS jobs in order. The init code will ensure - // that the jobs get inited in descending order of Job ids - // i.e. highest job id first and the smallest last. - // If we were not doing parallel init, the first submitted job - // will be inited first and that will hang - - for (int i = 0; i < NUM_JOBS; i++) { - jobs[i] = new FakeJobInProgress(jobConf, taskTrackerManager, - UtilsForTests.getJobTracker()); - jobs[i].getStatus().setRunState(JobStatus.PREP); - taskTrackerManager.submitJob(jobs[i]); - } - - try { - Thread.sleep(1000); - } catch (InterruptedException ie) {} - - for (int i = 0; i < NUM_JOBS; i++) { - assertTrue(jobs[i].getStatus().getRunState() == JobStatus.SUCCEEDED); - } - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestQueueAclsForCurrentUser.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestQueueAclsForCurrentUser.java deleted file mode 100644 index 6518c9327b0..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestQueueAclsForCurrentUser.java +++ /dev/null @@ -1,172 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.IOException; -import junit.framework.TestCase; - -import org.apache.hadoop.mapreduce.MRConfig; - -import static org.apache.hadoop.mapred.QueueManagerTestUtils.*; -import org.apache.hadoop.security.UserGroupInformation; - -/** - * Unit test class to test queue acls - * - */ -public class TestQueueAclsForCurrentUser extends TestCase { - - private QueueManager queueManager; - private JobConf conf = null; - UserGroupInformation currentUGI = null; - String submitAcl = QueueACL.SUBMIT_JOB.getAclName(); - String adminAcl = QueueACL.ADMINISTER_JOBS.getAclName(); - - @Override - protected void tearDown() { - deleteQueuesConfigFile(); - } - - // No access for queues for the user currentUGI - private void setupConfForNoAccess() throws Exception { - currentUGI = UserGroupInformation.getLoginUser(); - String userName = currentUGI.getUserName(); - - String[] queueNames = {"qu1", "qu2"}; - // Only user u1 has access for queue qu1 - // Only group g2 has acls for the queue qu2 - createQueuesConfigFile( - queueNames, new String[]{"u1", " g2"}, new String[]{"u1", " g2"}); - - conf = new JobConf(); - conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true); - - queueManager = new QueueManager(conf); - } - - /** - * sets up configuration for acls test. - * @return - */ - private void setupConf(boolean aclSwitch) throws Exception{ - currentUGI = UserGroupInformation.getLoginUser(); - String userName = currentUGI.getUserName(); - StringBuilder groupNames = new StringBuilder(""); - String[] ugiGroupNames = currentUGI.getGroupNames(); - int max = ugiGroupNames.length-1; - for(int j=0;j< ugiGroupNames.length;j++) { - groupNames.append(ugiGroupNames[j]); - if(j - * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.*; -import static org.apache.hadoop.mapred.QueueConfigurationParser.*; -import static org.apache.hadoop.mapred.QueueManager.toFullPropertyName; -import static org.junit.Assert.*; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.mapreduce.QueueState; -import org.apache.hadoop.security.UserGroupInformation; -import org.codehaus.jackson.map.ObjectMapper; -import org.junit.After; -import org.junit.Test; -import org.w3c.dom.Document; -import org.w3c.dom.Element; -import java.io.StringWriter; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Properties; -import java.util.Set; - - -public class TestQueueManager { - - private static final Log LOG = LogFactory.getLog( - TestQueueManager.class); - - @After - public void tearDown() throws Exception { - deleteQueuesConfigFile(); - } - - // create UGI with the given user name and the fixed group name "myGroup" - private UserGroupInformation createUGI(String userName) { - return UserGroupInformation.createUserForTesting( - userName, new String[]{"myGroup"}); - } - - @Test - public void testDefault() throws Exception { - deleteQueuesConfigFile(); - QueueManager qm = new QueueManager(); - Queue root = qm.getRoot(); - assertEquals(root.getChildren().size(), 1); - assertEquals(root.getChildren().iterator().next().getName(), "default"); - assertNull(root.getChildren().iterator().next().getChildren()); - } - - @Test - public void testXMLParsing() throws Exception { - deleteQueuesConfigFile(); - Document doc = createDocument(); - createSimpleDocument(doc); - writeToFile(doc, QUEUES_CONFIG_FILE_PATH); - QueueManager qm = new QueueManager(QUEUES_CONFIG_FILE_PATH, true); - Set rootQueues = qm.getRoot().getChildren(); - List names = new ArrayList(); - for (Queue q : rootQueues) { - names.add(q.getName()); - } - - //Size of root. 
- assertEquals(rootQueues.size(), 2); - - //check root level queues - assertTrue(names.contains("q1")); - assertTrue(names.contains("p1")); - - - //check for leaf names - Set leafNames = qm.getLeafQueueNames(); - Queue p = qm.getQueue("p1"); - Set children = p.getChildren(); - assertTrue(children.size() == 2); - - //check leaf level queues - assertTrue( - leafNames.contains( - "p1" + NAME_SEPARATOR + "p11")); - assertTrue( - leafNames.contains( - "p1" + NAME_SEPARATOR + "p12")); - - - Queue q = qm.getQueue( - "p1" + NAME_SEPARATOR + "p12"); - - assertTrue( - q.getAcls().get( - toFullPropertyName( - q.getName(), ACL_SUBMIT_JOB_TAG)).isUserAllowed( - createUGI("u1"))); - - assertTrue( - q.getAcls().get( - toFullPropertyName( - q.getName(), - ACL_ADMINISTER_JOB_TAG)) - .isUserAllowed(createUGI("u2"))); - assertTrue(q.getState().equals(QueueState.STOPPED)); - } - - @Test - public void testhasAccess() throws Exception { - deleteQueuesConfigFile(); - Document doc = createDocument(); - createSimpleDocumentWithAcls(doc); - writeToFile(doc, QUEUES_CONFIG_FILE_PATH); - QueueManager qm = new QueueManager(QUEUES_CONFIG_FILE_PATH, true); - - UserGroupInformation ugi; - // test for acls access when acls are set with * - ugi = createUGI("u1"); - assertTrue(qm.hasAccess("p1" + NAME_SEPARATOR + "p12", - QueueACL.SUBMIT_JOB, ugi)); - ugi = createUGI("u2"); - assertTrue(qm.hasAccess("p1" + NAME_SEPARATOR + "p12", - QueueACL.ADMINISTER_JOBS, ugi)); - - // test for acls access when acls are not set with * - ugi = createUGI("u1"); - assertTrue(qm.hasAccess("p1" + NAME_SEPARATOR + "p11", - QueueACL.SUBMIT_JOB, ugi)); - ugi = createUGI("u2"); - assertTrue(qm.hasAccess("p1" + NAME_SEPARATOR + "p11", - QueueACL.ADMINISTER_JOBS, ugi)); - - // Test for acls access when acls are not specified but acls are enabled. - // By default, the queue acls for any queue are empty. 
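// A condensed sketch (not part of the original tests) of how the hasAccess check
// exercised above is typically consulted before a submission is attempted. It assumes
// the org.apache.hadoop.mapred package, the MRConfig and UserGroupInformation imports
// already in this file, plus java.io.IOException; the queue name is illustrative only.
static boolean callerMaySubmit(String leafQueue) throws IOException {
  JobConf aclConf = new JobConf();
  aclConf.setBoolean(MRConfig.MR_ACLS_ENABLED, true);
  QueueManager manager = new QueueManager(aclConf);
  UserGroupInformation caller = UserGroupInformation.getLoginUser();
  return manager.hasAccess(leafQueue, QueueACL.SUBMIT_JOB, caller);
}
// Example call: callerMaySubmit("p1" + NAME_SEPARATOR + "p11")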
- ugi = createUGI("u1"); - assertFalse(qm.hasAccess("p1" + NAME_SEPARATOR + "p13", - QueueACL.SUBMIT_JOB, ugi)); - ugi = createUGI("u2"); - assertFalse(qm.hasAccess("p1" + NAME_SEPARATOR + "p13", - QueueACL.ADMINISTER_JOBS, ugi)); - - assertTrue(qm.isRunning("p1" + NAME_SEPARATOR + "p13")); - } - - @Test - public void testQueueView() throws Exception { - deleteQueuesConfigFile(); - Document doc = createDocument(); - createSimpleDocument(doc); - writeToFile(doc, QUEUES_CONFIG_FILE_PATH); - QueueManager qm = new QueueManager(QUEUES_CONFIG_FILE_PATH, true); - - for (Queue queue : qm.getRoot().getChildren()) { - checkHierarchy(queue, qm); - } - } - - private void checkHierarchy(Queue queue, QueueManager queueManager) { - JobQueueInfo jobQueueInfo = queueManager.getJobQueueInfo(queue.getName()); - assertEquals(queue.getName(),jobQueueInfo.getQueueName()); - assertEquals(queue.getState(),jobQueueInfo.getState()); - if (queue.getChildren() !=null && queue.getChildren().size() > 0) { - for (Queue childQueue : queue.getChildren()) { - checkHierarchy(childQueue, queueManager); - } - } - } - - @Test - public void testhasAccessForParent() throws Exception { - deleteQueuesConfigFile(); - Document doc = createDocument(); - createSimpleDocument(doc); - writeToFile(doc, QUEUES_CONFIG_FILE_PATH); - QueueManager qm = new QueueManager(QUEUES_CONFIG_FILE_PATH, true); - - UserGroupInformation ugi = createUGI("u1"); - assertFalse(qm.hasAccess("p1", QueueACL.SUBMIT_JOB, ugi)); - } - - @Test - public void testValidation() throws Exception { - deleteQueuesConfigFile(); - Document doc = createDocument(); - Element queues = createQueuesNode(doc); - Element q1 = createQueue(doc, "q1"); - - q1.appendChild(createAcls(doc, "acl-submit-job", "u1")); - q1.appendChild(createAcls(doc, "acl-administer-jobs", "u2")); - q1.appendChild(createQueue(doc, "p15")); - q1.appendChild(createQueue(doc, "p16")); - - queues.appendChild(q1); - writeToFile(doc, QUEUES_CONFIG_FILE_PATH); - try { - new QueueManager(QUEUES_CONFIG_FILE_PATH, false); - fail("Should throw an exception as configuration is wrong "); - } catch (RuntimeException re) { - LOG.info(re.getMessage()); - } - } - - @Test - public void testInvalidName() throws Exception { - deleteQueuesConfigFile(); - Document doc = createDocument(); - Element queues = createQueuesNode(doc); - Element q1 = createQueue(doc, ""); - queues.appendChild(q1); - writeToFile(doc, QUEUES_CONFIG_FILE_PATH); - try { - new QueueManager(QUEUES_CONFIG_FILE_PATH, false); - fail("Should throw an exception as configuration is wrong "); - } catch (Exception re) { - re.printStackTrace(); - LOG.info(re.getMessage()); - } - deleteQueuesConfigFile(); - doc = createDocument(); - queues = createQueuesNode(doc); - q1 = doc.createElement("queue"); - queues.appendChild(q1); - writeToFile(doc, QUEUES_CONFIG_FILE_PATH); - try { - new QueueManager(QUEUES_CONFIG_FILE_PATH, true); - fail("Should throw an exception as configuration is wrong "); - } catch (RuntimeException re) { - re.printStackTrace(); - LOG.info(re.getMessage()); - } - } - - @Test - public void testMissingConfigFile() throws Exception { - deleteQueuesConfigFile(); // deletes file - - try { - new QueueManager(QUEUES_CONFIG_FILE_PATH, true); - fail("Should throw an exception for missing file when " + - "explicitly passed."); - } catch (RuntimeException re) { - } - - // If we just want to pick up the queues from the class loader - // it should fall through to the default. 
The class loader is set to - // load CONFIG for the "mapred-queues.xml" resource, but it's missing - // so should fall through to mapred-queues-default.xml - QueueManager qm = new QueueManager(); - List rootQueues = - qm.getRoot().getJobQueueInfo().getChildren(); - assertEquals(1, rootQueues.size()); - assertEquals("default", rootQueues.get(0).getQueueName()); - } - - @Test - public void testEmptyProperties() throws Exception { - deleteQueuesConfigFile(); - Document doc = createDocument(); - Element queues = createQueuesNode(doc); - Element q1 = createQueue(doc, "q1"); - Element p = createProperties(doc, null); - q1.appendChild(p); - queues.appendChild(q1); - } - - @Test - public void testEmptyFile() throws Exception { - deleteQueuesConfigFile(); - Document doc = createDocument(); - writeToFile(doc, QUEUES_CONFIG_FILE_PATH); - try { - new QueueManager(QUEUES_CONFIG_FILE_PATH, true); - fail("Should throw an exception as configuration is wrong "); - } catch (Exception re) { - re.printStackTrace(); - LOG.info(re.getMessage()); - } - } - - @Test - public void testJobQueueInfoGeneration() throws Exception { - deleteQueuesConfigFile(); - Document doc = createDocument(); - createSimpleDocument(doc); - writeToFile(doc, QUEUES_CONFIG_FILE_PATH); - QueueManager qm = new QueueManager(QUEUES_CONFIG_FILE_PATH, true); - - List rootQueues = - qm.getRoot().getJobQueueInfo().getChildren(); - assertEquals(rootQueues.size(), 2); - List names = new ArrayList(); - for (JobQueueInfo q : rootQueues) { - names.add(q.getQueueName()); - if (q.getQueueName().equals("q1")) { - Properties p = q.getProperties(); - assertEquals(p.getProperty("capacity"), "10"); - assertEquals(p.getProperty("maxCapacity"), "35"); - - assertTrue(q.getChildren().isEmpty()); - } else if (q.getQueueName().equals("p1")) { - List children = q.getChildren(); - assertEquals(children.size(), 2); - for (JobQueueInfo child : children) { - if (child.getQueueName().equals( - "p1" + NAME_SEPARATOR + "p12")) { - assertEquals( - child.getQueueState(), QueueState.STOPPED.getStateName()); - } else if (child.getQueueName().equals( - "p1" + NAME_SEPARATOR + "p11")) { - assertEquals( - child.getQueueState(), QueueState.RUNNING.getStateName()); - } else { - fail("Only 2 children"); - } - } - } else { - fail("Only 2 queues with q1 and p1 "); - } - } - } - - /** - * Test the refresh of queues. - * - * @throws Exception - */ - @Test - public void testRefresh() throws Exception { - deleteQueuesConfigFile(); - Document doc = createDocument(); - createSimpleDocument(doc); - writeToFile(doc, QUEUES_CONFIG_FILE_PATH); - QueueManager qm = new QueueManager(QUEUES_CONFIG_FILE_PATH, true); - Queue beforeRefreshRoot = qm.getRoot(); - //remove the file and create new one. 
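// For reference, the in-place refresh that testRefresh exercises further down reduces
// to three steps: re-parse the queue configuration file, confirm the hierarchy shape is
// unchanged, and swap the re-parsed root-level queues into the existing QueueManager.
// A condensed sketch, assuming this test's package and the QueueManagerTestUtils
// constants:
static void reloadQueueProperties(QueueManager qm) throws Exception {
  QueueConfigurationParser parser =
      new QueueConfigurationParser(QUEUES_CONFIG_FILE_PATH, true);
  if (!qm.getRoot().isHierarchySameAs(parser.getRoot())) {
    throw new IllegalStateException(
        "Queue hierarchy changed; only properties and ACLs can be refreshed");
  }
  Set<Queue> children = parser.getRoot().getChildren();
  qm.setQueues(children.toArray(new Queue[children.size()]));
}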
- Set rootQueues = beforeRefreshRoot.getChildren(); - for (Queue qs : rootQueues) { - if (qs.getName().equals("q1")) { - - assertEquals(qs.getProperties().getProperty("capacity"), "10"); - assertEquals(qs.getProperties().getProperty("maxCapacity"), "35"); - - } else if (qs.getName().equals("p1")) { - - Set children = qs.getChildren(); - for (Queue child : children) { - if (child.getName().equals( - "p1" + NAME_SEPARATOR + "p12")) { - assertTrue( - child.getAcls().get( - toFullPropertyName( - child.getName(), ACL_SUBMIT_JOB_TAG)) - .isUserAllowed(createUGI("u1"))); - - assertTrue( - child.getAcls().get( - toFullPropertyName( - child.getName(), - ACL_ADMINISTER_JOB_TAG)) - .isUserAllowed(createUGI("u2"))); - assertTrue(child.getState().equals(QueueState.STOPPED)); - } else { - assertTrue(child.getState().equals(QueueState.RUNNING)); - } - } - } - } - deleteQueuesConfigFile(); - doc = createDocument(); - refreshSimpleDocument(doc); - writeToFile(doc, QUEUES_CONFIG_FILE_PATH); - QueueConfigurationParser cp = new QueueConfigurationParser(QUEUES_CONFIG_FILE_PATH, true); - qm.getRoot().isHierarchySameAs(cp.getRoot()); - qm.setQueues( - cp.getRoot().getChildren().toArray( - new Queue[cp.getRoot().getChildren().size()])); - Queue afterRefreshRoot = qm.getRoot(); - //remove the file and create new one. - rootQueues = afterRefreshRoot.getChildren(); - for (Queue qs : rootQueues) { - if (qs.getName().equals("q1")) { - - assertEquals(qs.getProperties().getProperty("capacity"), "70"); - assertEquals(qs.getProperties().getProperty("maxCapacity"), "35"); - - } else if (qs.getName().equals("p1")) { - - Set children = qs.getChildren(); - for (Queue child : children) { - if (child.getName().equals( - "p1" + NAME_SEPARATOR + "p12")) { - assertTrue( - child.getAcls().get( - toFullPropertyName( - child.getName(), - ACL_SUBMIT_JOB_TAG)) - .isUserAllowed(createUGI("u3"))); - - assertTrue( - child.getAcls().get( - toFullPropertyName( - child.getName(), - ACL_ADMINISTER_JOB_TAG)) - .isUserAllowed(createUGI("u4"))); - assertTrue(child.getState().equals(QueueState.RUNNING)); - } else { - assertTrue(child.getState().equals(QueueState.STOPPED)); - } - } - } - } - } - - @Test - public void testRefreshWithInvalidFile() throws Exception { - deleteQueuesConfigFile(); - Document doc = createDocument(); - createSimpleDocument(doc); - writeToFile(doc, QUEUES_CONFIG_FILE_PATH); - QueueManager qm = new QueueManager(QUEUES_CONFIG_FILE_PATH, false); - - deleteQueuesConfigFile(); - doc = createDocument(); - Element queues = createQueuesNode(doc); - Element q1 = createQueue(doc, ""); - queues.appendChild(q1); - writeToFile(doc, QUEUES_CONFIG_FILE_PATH); - try { - QueueConfigurationParser cp = new QueueConfigurationParser(QUEUES_CONFIG_FILE_PATH, false); - - fail("Should throw an exception as configuration is wrong "); - } catch (Throwable re) { - re.printStackTrace(); - LOG.info(re.getMessage()); - } - } - - /** - * Class to store the array of queues retrieved by parsing the string - * that is dumped in Json format - */ - static class JsonQueueTree { - boolean acls_enabled; - - JsonQueue[] queues; - - public JsonQueue[] getQueues() { - return queues; - } - - public void setQueues(JsonQueue[] queues) { - this.queues = queues; - } - - public boolean isAcls_enabled() { - return acls_enabled; - } - - public void setAcls_enabled(boolean aclsEnabled) { - acls_enabled = aclsEnabled; - } - } - - /** - * Class to store the contents of each queue that is dumped in JSON format. 
- */ - static class JsonQueue { - String name; - String state; - String acl_submit_job; - String acl_administer_jobs; - JsonProperty[] properties; - JsonQueue[] children; - public String getName() { - return name; - } - public String getState() { - return state; - } - public JsonProperty[] getProperties() { - return properties; - } - public JsonQueue[] getChildren() { - return children; - } - public void setName(String name) { - this.name = name; - } - public void setState(String state) { - this.state = state; - } - public void setProperties(JsonProperty[] properties) { - this.properties = properties; - } - public void setChildren(JsonQueue[] children) { - this.children = children; - } - public String getAcl_submit_job() { - return acl_submit_job; - } - public void setAcl_submit_job(String aclSubmitJob) { - acl_submit_job = aclSubmitJob; - } - public String getAcl_administer_jobs() { - return acl_administer_jobs; - } - public void setAcl_administer_jobs(String aclAdministerJobs) { - acl_administer_jobs = aclAdministerJobs; - } - } - - /** - * Class to store the contents of attribute "properties" in Json dump - */ - static class JsonProperty { - String key; - String value; - public String getKey() { - return key; - } - public void setKey(String key) { - this.key = key; - } - public String getValue() { - return value; - } - public void setValue(String value) { - this.value = value; - } - } - - /** - * checks the format of the dump in JSON format when - * QueueManager.dumpConfiguration(Writer) is called. - * @throws Exception - */ - @Test - public void testDumpConfiguration() throws Exception { - deleteQueuesConfigFile(); - Document doc = createDocument(); - createSimpleDocument(doc); - writeToFile(doc, QUEUES_CONFIG_FILE_PATH); - - StringWriter out = new StringWriter(); - Configuration conf = new Configuration(false); - conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true); - QueueManager.dumpConfiguration(out, QUEUES_CONFIG_FILE_PATH, conf); - - ObjectMapper mapper = new ObjectMapper(); - // parse the Json dump - JsonQueueTree queueTree = - mapper.readValue(out.toString(), JsonQueueTree.class); - - // check for the number of top-level queues - assertEquals(2, queueTree.getQueues().length); - - HashMap topQueues = new HashMap(); - for (JsonQueue topQueue : queueTree.getQueues()) { - topQueues.put(topQueue.getName(), topQueue); - } - - // check for consistency in number of children - assertEquals(2, topQueues.get("p1").getChildren().length); - - HashMap childQueues = new HashMap(); - for (JsonQueue child : topQueues.get("p1").getChildren()) { - childQueues.put(child.getName(), child); - } - - // check for consistency in state - assertEquals("stopped", childQueues.get("p1:p12").getState()); - - // check for consistency in properties - HashMap q1_properties = - new HashMap(); - for (JsonProperty prop : topQueues.get("q1").getProperties()) { - q1_properties.put(prop.getKey(), prop); - } - assertEquals("10", q1_properties.get("capacity").getValue()); - assertEquals("35", q1_properties.get("maxCapacity").getValue()); - - // check for acls - assertEquals("u1 ", childQueues.get("p1:p12").getAcl_submit_job()); - assertEquals("u2 ", childQueues.get("p1:p12").getAcl_administer_jobs()); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerRefresh.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerRefresh.java deleted file mode 100644 index b46c75839cd..00000000000 --- 
a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerRefresh.java +++ /dev/null @@ -1,341 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Properties; - -import static junit.framework.Assert.assertTrue; -import static junit.framework.Assert.assertFalse; -import static junit.framework.Assert.assertEquals; -import static junit.framework.Assert.assertNotNull; -import static junit.framework.Assert.fail; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.mapreduce.QueueState; -import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.*; - -import org.junit.After; -import org.junit.Test; - -/** - * Test the refresh feature of QueueManager. - */ -public class TestQueueManagerRefresh { - - private static final Log LOG = - LogFactory.getLog(TestQueueManagerRefresh.class); - - /** - * Remove the configuration file after the test's done. - */ - @After - public void tearDown() { - deleteQueuesConfigFile(); - } - - /** - * @return a simple hierarchy of JobQueueInfos - */ - static JobQueueInfo[] getSimpleQueueHierarchy() { - int numQs = 3; - JobQueueInfo[] queues = new JobQueueInfo[numQs]; - queues[0] = - newJobQueueInfo(new ArrayList(), null, "q1", - QueueState.UNDEFINED, null); - queues[1] = - newJobQueueInfo(new ArrayList(), null, "q1:q2", - QueueState.RUNNING, null); - queues[2] = - newJobQueueInfo(new ArrayList(), null, "q1:q3", - QueueState.RUNNING, null); - queues[0].addChild(queues[1]); - queues[0].addChild(queues[2]); - return queues; - } - - /** - * Test to verify that the refresh of queue properties fails if a new queue is - * added. - * - * @throws Exception - */ - @Test - public void testRefreshWithAddedQueues() - throws Exception { - - JobQueueInfo[] queues = getSimpleQueueHierarchy(); - - // write the configuration file - writeQueueConfigurationFile( - QUEUES_CONFIG_FILE_PATH, new JobQueueInfo[] { queues[0] }); - - QueueManager qManager = new QueueManager(); - - JobQueueInfo newQueue = - newJobQueueInfo(new ArrayList(), null, "q4", - QueueState.UNDEFINED, null); - queues[0].addChild(newQueue); - - // Rewrite the configuration file - writeQueueConfigurationFile( - QUEUES_CONFIG_FILE_PATH, new JobQueueInfo[] { queues[0] }); - - testRefreshFailureWithChangeOfHierarchy(qManager); - - } - - /** - * Test to verify that the refresh of queue properties fails if queues are - * removed. 
- * - * @throws Exception - */ - @Test - public void testRefreshWithRemovedQueues() - throws Exception { - - JobQueueInfo[] queues = getSimpleQueueHierarchy(); - - // write the configuration file - writeQueueConfigurationFile( - QUEUES_CONFIG_FILE_PATH, new JobQueueInfo[] { queues[0] }); - - QueueManager qManager = new QueueManager(); - - // Remove queue[2] - JobQueueInfo q2 = queues[2]; - queues[0].removeChild(q2); - - // Rewrite the configuration file - writeQueueConfigurationFile( - QUEUES_CONFIG_FILE_PATH, new JobQueueInfo[] { queues[0] }); - - testRefreshFailureWithChangeOfHierarchy(qManager); - } - - /** - * @param originalQManager - * @throws Exception - */ - private void testRefreshFailureWithChangeOfHierarchy( - QueueManager originalQManager) - throws Exception { - - // Make sure that isHierarchySame returns false. - QueueManager modifiedQueueManager = new QueueManager(); - assertFalse("Hierarchy changed after refresh!", - originalQManager.getRoot().isHierarchySameAs( - modifiedQueueManager.getRoot())); - - // Refresh the QueueManager and make sure it fails. - try { - originalQManager.refreshQueues(null, null); - fail("Queue-refresh should have failed!"); - } catch (Exception e) { - // Refresh failed as expected. Check the error message. - assertTrue( - "Exception message should point to a change in queue hierarchy!", - e.getMessage().contains( - QueueManager.MSG_REFRESH_FAILURE_WITH_CHANGE_OF_HIERARCHY)); - } - - // Make sure that the old configuration is retained. - List rootQueues = - originalQManager.getRoot().getJobQueueInfo().getChildren(); - assertTrue(rootQueues.size() == 1); - } - - /** - * Test to verify that the refresh of queue properties fails if scheduler - * fails to reload itself. - * - * @throws Exception - */ - // @Test - public void testRefreshWithSchedulerFailure() - throws Exception { - JobQueueInfo[] queues = getSimpleQueueHierarchy(); - - // write the configuration file - writeQueueConfigurationFile( - QUEUES_CONFIG_FILE_PATH, new JobQueueInfo[] { queues[0] }); - - QueueManager qManager = new QueueManager(); - - // No change in configuration. Just Refresh the QueueManager and make sure - // it fails. - try { - qManager.refreshQueues(null, - new MyTaskScheduler().new MyFailingQueueRefresher()); - fail("Queue-refresh should have failed!"); - } catch (Exception e) { - // Refresh failed as expected. Check the error message. - assertTrue( - "Exception message should point to a refresh-failure in scheduler!", - e.getMessage().contains( - QueueManager.MSG_REFRESH_FAILURE_WITH_SCHEDULER_FAILURE)); - } - } - - /** - * Test to verify that the refresh of scheduler properties passes smoothly. - * - * @throws Exception - */ - @Test - public void testRefreshOfSchedulerProperties() - throws Exception { - JobQueueInfo[] queues = getSimpleQueueHierarchy(); - - // Set some scheduler properties - for (JobQueueInfo jqi : queues) { - Properties props = new Properties(); - props.setProperty("testing.property", "testing.value." - + jqi.getQueueName()); - jqi.setProperties(props); - } - - // write the configuration file - writeQueueConfigurationFile( - QUEUES_CONFIG_FILE_PATH, new JobQueueInfo[] { queues[0] }); - - QueueManager qManager = new QueueManager(); - - MyTaskScheduler myScheduler = new MyTaskScheduler(); - - qManager.refreshQueues(null, myScheduler.new MyQueueRefresher()); - - // Verify that the scheduler props are set correctly by scheduler-refresh. 
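// The MyQueueRefresher used just above (and defined further down in MyTaskScheduler) is
// the hand-off point between QueueManager.refreshQueues() and a scheduler: the refresher
// receives the newly parsed root queues and propagates their properties. A minimal
// standalone sketch of such a hook; it would have to live in the org.apache.hadoop.mapred
// package, like this test, because QueueRefresher and refreshQueues are package-scoped:
class LoggingQueueRefresher extends QueueRefresher {
  private static final Log LOG = LogFactory.getLog(LoggingQueueRefresher.class);

  @Override
  void refreshQueues(List<JobQueueInfo> newRootQueues) throws Throwable {
    for (JobQueueInfo root : newRootQueues) {
      logQueue(root, "");
    }
  }

  private void logQueue(JobQueueInfo queue, String indent) {
    // Record the refreshed scheduler properties of every queue in the tree.
    LOG.info(indent + queue.getQueueName() + " -> " + queue.getProperties());
    for (JobQueueInfo child : queue.getChildren()) {
      logQueue(child, indent + "  ");
    }
  }
}
// Hypothetical wiring: qManager.refreshQueues(null, new LoggingQueueRefresher());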
- Map schedProps = myScheduler.getSchedulerProperties(); - for (JobQueueInfo jqi : queues) { - String expectedVal = "testing.value." + jqi.getQueueName(); - Properties qProperties = schedProps.get(jqi.getQueueName()); - assertNotNull("Properties should not be null for the SchedulerQueue " - + jqi.getQueueName(), qProperties); - String observedVal = qProperties.getProperty("testing.property"); - assertEquals("Properties for the SchedulerQueue " + jqi.getQueueName() - + " are not reloaded properly!", expectedVal, observedVal); - } - } - - /** - * Test to verify that the scheduling information per queue in the - * {@link QueueManager} is retained across queue-refresh. - * - * @throws Exception - */ - @Test - public void testSchedulingInfoAfterRefresh() - throws Exception { - - JobQueueInfo[] queues = getSimpleQueueHierarchy(); - - // write the configuration file - writeQueueConfigurationFile( - QUEUES_CONFIG_FILE_PATH, new JobQueueInfo[] { queues[0] }); - - QueueManager qManager = new QueueManager(); - - // Set some scheduling information for the queues in the QueueManager. - for (String qName : qManager.getLeafQueueNames()) { - qManager.setSchedulerInfo(qName, new String( - "scheduling-information-for-queue-" + qName)); - } - - qManager.refreshQueues(null, null); - - // Verify that the scheduling information is retained across refresh. - for (String qName : qManager.getLeafQueueNames()) { - assertEquals("scheduling-information-for-queue-" + qName, - qManager.getSchedulerInfo(qName)); - } - } - - static class MyTaskScheduler extends TaskScheduler { - - Map schedulerPropsMap = - new HashMap(); - - Map getSchedulerProperties() { - return schedulerPropsMap; - } - - class MyQueueRefresher extends QueueRefresher { - - private void updateSchedulerProps(JobQueueInfo jqi) { - LOG.info("Updating properties for SchedulerQueue " - + jqi.getQueueName()); - LOG.info("Putting " + jqi.getProperties() + " in " - + jqi.getQueueName()); - schedulerPropsMap.put(jqi.getQueueName(), jqi.getProperties()); - for (JobQueueInfo child : jqi.getChildren()) { - updateSchedulerProps(child); - } - } - - @Override - void refreshQueues(List newRootQueues) { - LOG.info("Refreshing scheduler's properties"); - for (JobQueueInfo jqi : newRootQueues) { - updateSchedulerProps(jqi); - } - } - } - - class MyFailingQueueRefresher extends QueueRefresher { - @Override - void refreshQueues(List newRootQueues) - throws Throwable { - throw new IOException("Scheduler cannot refresh the queues!"); - } - } - - @Override - public List assignTasks(TaskTracker taskTracker) { - return null; - } - - @Override - public Collection getJobs(String queueName) { - return null; - } - } - - static JobQueueInfo newJobQueueInfo(List children, - Properties props, String queueName, QueueState state, - String schedulingInfo) { - JobQueueInfo jqi = new JobQueueInfo(); - jqi.setChildren(children); - if (props != null) { - jqi.setProperties(props); - } - jqi.setQueueName(queueName); - jqi.setQueueState(state.getStateName()); - jqi.setSchedulingInfo(schedulingInfo); - return jqi; - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithDeprecatedConf.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithDeprecatedConf.java deleted file mode 100644 index 4e60a040a90..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithDeprecatedConf.java +++ /dev/null @@ -1,156 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) 
under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.PrintWriter; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Properties; -import java.util.Set; -import java.util.TreeSet; - -import javax.security.auth.login.LoginException; - -import junit.framework.TestCase; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.mapreduce.QueueState; -import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.mapreduce.SleepJob; -import org.apache.hadoop.security.UserGroupInformation; -import static org.apache.hadoop.mapred.DeprecatedQueueConfigurationParser.*; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.*; -import static org.apache.hadoop.mapred.QueueManager.toFullPropertyName; - -public class TestQueueManagerWithDeprecatedConf extends TestCase { - - static final Log LOG = LogFactory.getLog( - TestQueueManagerWithDeprecatedConf.class); - - String submitAcl = QueueACL.SUBMIT_JOB.getAclName(); - String adminAcl = QueueACL.ADMINISTER_JOBS.getAclName(); - - - public void testMultipleQueues() { - JobConf conf = new JobConf(); - conf.set(DeprecatedQueueConfigurationParser.MAPRED_QUEUE_NAMES_KEY, - "q1,q2,Q3"); - QueueManager qMgr = new QueueManager(conf); - Set expQueues = new TreeSet(); - expQueues.add("q1"); - expQueues.add("q2"); - expQueues.add("Q3"); - verifyQueues(expQueues, qMgr.getLeafQueueNames()); - } - - public void testSchedulerInfo() { - JobConf conf = new JobConf(); - conf.set(DeprecatedQueueConfigurationParser.MAPRED_QUEUE_NAMES_KEY, - "qq1,qq2"); - QueueManager qMgr = new QueueManager(conf); - qMgr.setSchedulerInfo("qq1", "queueInfoForqq1"); - qMgr.setSchedulerInfo("qq2", "queueInfoForqq2"); - assertEquals(qMgr.getSchedulerInfo("qq2"), "queueInfoForqq2"); - assertEquals(qMgr.getSchedulerInfo("qq1"), "queueInfoForqq1"); - } - - - public void testQueueManagerWithDeprecatedConf() throws IOException { - String queueConfigPath = - System.getProperty("test.build.extraconf", "build/test/extraconf"); - - File hadoopConfigFile = new File(queueConfigPath, "mapred-site.xml"); - try { - // queue properties with which the cluster is started. - Properties hadoopConfProps = new Properties(); - hadoopConfProps.put(DeprecatedQueueConfigurationParser. 
- MAPRED_QUEUE_NAMES_KEY, "default,q1,q2"); - hadoopConfProps.put(MRConfig.MR_ACLS_ENABLED, "true"); - - UserGroupInformation ugi = - UserGroupInformation.createRemoteUser("unknownUser"); - hadoopConfProps.put(toFullPropertyName( - "default", submitAcl), ugi.getUserName()); - hadoopConfProps.put(toFullPropertyName( - "q1", submitAcl), "u1"); - hadoopConfProps.put(toFullPropertyName( - "q2", submitAcl), "*"); - hadoopConfProps.put(toFullPropertyName( - "default", adminAcl), ugi.getUserName()); - hadoopConfProps.put(toFullPropertyName( - "q1", adminAcl), "u2"); - hadoopConfProps.put(toFullPropertyName( - "q2", adminAcl), "*"); - - UtilsForTests.setUpConfigFile(hadoopConfProps, hadoopConfigFile); - - Configuration conf = new JobConf(); - conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true); - QueueManager queueManager = new QueueManager(conf); - //Testing job submission access to queues. - assertTrue("User Job Submission failed.", - queueManager.hasAccess("default", QueueACL. - SUBMIT_JOB, ugi)); - assertFalse("User Job Submission failed.", - queueManager.hasAccess("q1", QueueACL. - SUBMIT_JOB, ugi)); - assertTrue("User Job Submission failed.", - queueManager.hasAccess("q2", QueueACL. - SUBMIT_JOB, ugi)); - //Testing the administer-jobs acls - assertTrue("User Job Submission failed.", - queueManager.hasAccess("default", - QueueACL.ADMINISTER_JOBS, ugi)); - assertFalse("User Job Submission failed.", - queueManager.hasAccess("q1", QueueACL. - ADMINISTER_JOBS, ugi)); - assertTrue("User Job Submission failed.", - queueManager.hasAccess("q2", QueueACL. - ADMINISTER_JOBS, ugi)); - - } finally { - //Cleanup the configuration files in all cases - if(hadoopConfigFile.exists()) { - hadoopConfigFile.delete(); - } - } - } - - private void verifyQueues(Set expectedQueues, - Set actualQueues) { - assertEquals(expectedQueues.size(), actualQueues.size()); - for (String queue : expectedQueues) { - assertTrue(actualQueues.contains(queue)); - } - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithJobTracker.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithJobTracker.java deleted file mode 100644 index 2bf615069f2..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestQueueManagerWithJobTracker.java +++ /dev/null @@ -1,414 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import static org.apache.hadoop.mapred.QueueConfigurationParser.NAME_SEPARATOR; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.QUEUES_CONFIG_FILE_PATH; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.deleteQueuesConfigFile; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.createAcls; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.createDocument; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.createProperties; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.createQueue; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.createQueuesNode; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.createSimpleDocumentWithAcls; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.createState; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.miniMRCluster; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.submitSleepJob; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.writeToFile; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.IOException; -import java.security.PrivilegedExceptionAction; -import java.util.Properties; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapred.tools.MRAdmin; -import org.apache.hadoop.mapreduce.Cluster; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.JobID; -import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.apache.hadoop.mapreduce.QueueState; -import org.apache.hadoop.mapreduce.JobStatus.State; -import org.apache.hadoop.security.UserGroupInformation; -import org.junit.AfterClass; -import org.junit.Test; -import org.w3c.dom.Document; -import org.w3c.dom.Element; - -public class TestQueueManagerWithJobTracker { - - private static Configuration conf; - - @AfterClass - public static void tearDown() throws Exception { - deleteQueuesConfigFile(); - } - - String adminUser = "adminUser"; - String adminGroup = "adminGroup"; - String deprecatedSuperGroup = "superGroup"; - - private void startCluster(boolean aclsEnabled) - throws Exception { - - deleteQueuesConfigFile(); - Document doc = createDocument(); - createSimpleDocumentWithAcls(doc); - writeToFile(doc, QUEUES_CONFIG_FILE_PATH); - conf = new Configuration(); - conf.set(MRJobConfig.SETUP_CLEANUP_NEEDED, "false"); - conf.setBoolean(MRConfig.MR_ACLS_ENABLED, aclsEnabled); - conf.set(MRConfig.MR_SUPERGROUP, deprecatedSuperGroup); - conf.set(MRConfig.MR_ADMINS, adminUser + " " + adminGroup); - - JobConf jobConf = new JobConf(conf); - String namenode = "file:///"; - miniMRCluster = new MiniMRCluster(0, namenode, 3, null, null, jobConf); - - } - - /** - * Test to check that jobs cannot be submitted to a queue in STOPPED state - * @throws Exception - */ - @Test(expected = IOException.class) - public void testSubmitJobForStoppedQueue() throws Exception { - startCluster(true); - - submitSleepJob(10, 10, 100, 100, false, null, - "p1" + NAME_SEPARATOR + "p14", conf); - 
fail("queue p1:p14 is in stopped state and should not accept jobs"); - } - - /** - * Test to check that jobs cannot be submitted to a container queue - * @throws Exception - */ - @Test(expected = IOException.class) - public void testSubmitJobForContainerQueue() throws Exception { - startCluster(true); - - submitSleepJob(10, 10, 100, 100, false, null, "p1", conf); - fail("queue p1 is a container queue and cannot have jobs"); - } - - /** - * Tests the submission of job with specified acls - * @throws Exception - */ - @Test - public void testAclsForSubmitJob() throws Exception { - startCluster(true); - - Job job; - try { - // submit job to queue p1:p13 with unspecified acls - job = submitSleepJob(0, 0, 0, 0, true, "u1,g1", "p1" + NAME_SEPARATOR - + "p13", conf); - fail("user u1 cannot submit jobs to queue p1:p13"); - } catch (Exception e) { - } - - // check access to admins - job = submitSleepJob(0, 0, 0, 0, true, adminUser+ ",g1", - "p1" + NAME_SEPARATOR + "p13", conf); - assertTrue("Admin user cannot submit jobs to queue p1:p13", - job.isSuccessful()); - job = submitSleepJob(0, 0, 0, 0, true, "u1,"+ adminGroup, - "p1" + NAME_SEPARATOR + "p13", conf); - assertTrue("Admin group member cannot submit jobs to queue p1:p13", - job.isSuccessful()); - job = submitSleepJob(0, 0, 0, 0, true, "u1,"+ deprecatedSuperGroup, - "p1" + NAME_SEPARATOR + "p13", conf); - assertTrue("Deprecated super group member cannot submit jobs to queue" + - " p1:p13", job.isSuccessful()); - - // check for access to submit the job - try { - job = submitSleepJob(0, 0, 0, 0, false, "u2,g1", "p1" + NAME_SEPARATOR - + "p11", conf); - fail("user u2 cannot submit jobs to queue p1:p11"); - } catch (Exception e) { - } - // submit job to queue p1:p11 with acl-submit-job as u1 - job = submitSleepJob(0, 0, 0, 0, true, "u1,g1", "p1" - + NAME_SEPARATOR + "p11", conf); - assertTrue("Job submission for u1 failed in queue : p1:p11.", - job.isSuccessful()); - - // submit job to queue p1:p12 with acl-submit-job as * - job = submitSleepJob(0, 0, 0, 0, true, "u2,g1", "p1" - + NAME_SEPARATOR + "p12", conf); - assertTrue("Job submission for u2 failed in queue : p1:p12.", - job.isSuccessful()); - } - - /** - * Tests the accessibility to kill a job - * @throws Exception - */ - @Test - public void testAccessToKillJob() throws Exception { - startCluster(true); - - Job job = submitSleepJob(1, 1, 100, 100, false, "u1,g1", "p1" - + NAME_SEPARATOR + "p11", conf); - final JobConf jobConf = miniMRCluster.createJobConf(); - Cluster cluster = null; - JobID jobID = job.getStatus().getJobID(); - //Ensure that the jobinprogress is initied before we issue a kill - //signal to the job. 
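// The kill-access checks that follow all use the same pattern: obtain a Cluster handle
// while running as a specific test user via doAs, then call Job.killJob() through that
// handle so the queue ACL check is applied to that user. A condensed sketch, assuming
// the surrounding test's miniMRCluster and a JobID local named jobID:
UserGroupInformation killer =
    UserGroupInformation.createUserForTesting("u1", new String[] { "g1" });
final Configuration clientConf = new Configuration(miniMRCluster.createJobConf());
Cluster asKiller = killer.doAs(new PrivilegedExceptionAction<Cluster>() {
  public Cluster run() throws IOException {
    return new Cluster(clientConf);   // RPCs from this handle run as "u1"
  }
});
asKiller.getJob(jobID).killJob();     // succeeds only if "u1" passes the ACL check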
- JobTracker tracker = miniMRCluster.getJobTrackerRunner().getJobTracker(); - JobInProgress jip = tracker.getJob(org.apache.hadoop.mapred.JobID - .downgrade(jobID)); - tracker.initJob(jip); - try { - final Configuration userConf = - new Configuration(miniMRCluster.createJobConf()); - UserGroupInformation ugi = - UserGroupInformation.createUserForTesting("someRandomUser", - new String[] { "someRandomGroup" }); - cluster = ugi.doAs(new PrivilegedExceptionAction() { - public Cluster run() throws IOException { - return new Cluster(userConf); - } - }); - cluster.getJob(jobID).killJob(); - fail("user 'someRandomeUser' is neither u1 nor in the administer group list"); - } catch (Exception e) { - final Configuration userConf = new Configuration(miniMRCluster.createJobConf()); - UserGroupInformation ugi = - UserGroupInformation.createUserForTesting("u1",new String[]{"g1"}); - cluster = ugi.doAs(new PrivilegedExceptionAction() { - public Cluster run() throws IOException { - return new Cluster(userConf); - } - }); - cluster.getJob(jobID).killJob(); - // kill the running job - assertEquals("job submitted for u1 and queue p1:p11 is not killed.", - cluster.getJob(jobID).getStatus().getState(), (State.KILLED)); - } - - job = submitSleepJob(1, 1, 100, 100, false, "u1,g1", "p1" + NAME_SEPARATOR - + "p12", conf); - jobID = job.getStatus().getJobID(); - //Ensure that the jobinprogress is initied before we issue a kill - //signal to the job. - jip = tracker.getJob(org.apache.hadoop.mapred.JobID.downgrade(jobID)); - tracker.initJob(jip); - tracker.killJob(job.getJobID()); - // kill the job by the user who submitted the job - assertEquals("job submitted for u1 and queue p1:p11 is not killed.", - cluster.getJob(jobID).getStatus().getState(), (State.KILLED)); - - final Configuration userConf = new Configuration(miniMRCluster.createJobConf()); - UserGroupInformation ugi = - UserGroupInformation.createUserForTesting("u1",new String[]{"g1"}); - cluster = ugi.doAs(new PrivilegedExceptionAction() { - public Cluster run() throws IOException { - return new Cluster(userConf); - } - }); - job = submitSleepJob(1, 1, 10, 10, false, "u1,g1", "p1" + NAME_SEPARATOR - + "p11", conf); - jobID = job.getStatus().getJobID(); - //Ensure that the jobinprogress is initied before we issue a kill - //signal to the job. 
- jip = tracker.getJob(org.apache.hadoop.mapred.JobID.downgrade(jobID)); - tracker.initJob(jip); - ugi = - UserGroupInformation.createUserForTesting("u3",new String[]{"g3"}); - cluster = ugi.doAs(new PrivilegedExceptionAction() { - public Cluster run() throws IOException { - return new Cluster(jobConf); - } - }); - // try killing job with user not in administer list - try { - cluster.getJob(jobID).killJob(); - fail("u3 not in administer list"); - } catch (Exception e) { - ugi = - UserGroupInformation.createUserForTesting("u1",new String[]{"g1"}); - cluster = ugi.doAs(new PrivilegedExceptionAction() { - public Cluster run() throws IOException { - return new Cluster(jobConf); - } - }); - assertFalse(cluster.getJob(jobID).isComplete()); - cluster.getJob(jobID).killJob(); - // kill the running job - assertEquals("job submitted for u1 and queue p1:p11 is not killed.", - cluster.getJob(jobID).getStatus().getState(), (State.KILLED)); - } - // check kill access to admins - ugi = - UserGroupInformation.createUserForTesting("adminUser", new String[]{"g3"}); - checkAccessToKill(tracker, jobConf, ugi); - - ugi = - UserGroupInformation.createUserForTesting("u3", new String[]{adminGroup}); - checkAccessToKill(tracker, jobConf, ugi); - - ugi = - UserGroupInformation.createUserForTesting("u3", - new String[]{deprecatedSuperGroup}); - checkAccessToKill(tracker, jobConf, ugi); - - } - - private void checkAccessToKill(JobTracker tracker, final JobConf mrConf, - UserGroupInformation killer) throws IOException, InterruptedException, - ClassNotFoundException { - Job job = submitSleepJob(1, 1, 100, 100, false, "u1,g1", - "p1" + NAME_SEPARATOR + "p11", conf); - JobID jobID = job.getStatus().getJobID(); - //Ensure that the jobinprogress is initied before we issue a kill - //signal to the job. - JobInProgress jip = tracker.getJob( - org.apache.hadoop.mapred.JobID.downgrade(jobID)); - tracker.initJob(jip); - Cluster cluster = killer.doAs(new PrivilegedExceptionAction() { - public Cluster run() throws IOException { - return new Cluster(mrConf); - } - }); - cluster.getJob(jobID).killJob(); - assertEquals("job not killed by " + killer, - cluster.getJob(jobID).getStatus().getState(), (State.KILLED)); - } - - /** - * Tests job submission after refresh - * @throws Exception - */ - @Test - public void testSubmitJobsAfterRefresh() throws Exception { - startCluster(true); - - // test for refresh - deleteQueuesConfigFile(); - Document doc = createDocument(); - refreshDocument(doc); - writeToFile(doc, QUEUES_CONFIG_FILE_PATH); - MRAdmin admin = new MRAdmin(miniMRCluster.createJobConf()); - admin.run(new String[] { "-refreshQueues" }); - try { - submitSleepJob(10, 10, 100, 100, false, "u1,g1", "p1" - + NAME_SEPARATOR + "p11", conf); - fail("user u1 is not in the submit jobs' list"); - } catch (Exception e) { - } - deleteQueuesConfigFile(); - doc = createDocument(); - createSimpleDocumentWithAcls(doc); - writeToFile(doc, QUEUES_CONFIG_FILE_PATH); - admin.run(new String[] { "-refreshQueues" }); - } - - private void refreshDocument(Document doc) { - Element queues = createQueuesNode(doc); - - // Create parent level queue q1. 
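// Rewriting the queues file only takes effect once the JobTracker re-reads it, which
// testSubmitJobsAfterRefresh above triggers through MRAdmin. A sketch of that same step
// with the exit status checked (the test ignores it), assuming the running miniMRCluster
// from QueueManagerTestUtils and the usual Tool convention of a non-zero status on
// failure:
MRAdmin admin = new MRAdmin(miniMRCluster.createJobConf());
int exitCode = admin.run(new String[] { "-refreshQueues" });
if (exitCode != 0) {
  fail("-refreshQueues exited with status " + exitCode);
}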
- Element q1 = createQueue(doc, "q1"); - Properties props = new Properties(); - props.setProperty("capacity", "10"); - props.setProperty("maxCapacity", "35"); - q1.appendChild(createProperties(doc, props)); - queues.appendChild(q1); - - // Create another parent level p1 - Element p1 = createQueue(doc, "p1"); - - // append child p11 to p1 - Element p11 = createQueue(doc, "p11"); - p11.appendChild(createAcls(doc, - QueueConfigurationParser.ACL_SUBMIT_JOB_TAG, " ")); - p11.appendChild(createAcls(doc, - QueueConfigurationParser.ACL_ADMINISTER_JOB_TAG, "u2")); - p1.appendChild(p11); - - Element p12 = createQueue(doc, "p12"); - - p12.appendChild(createState(doc, QueueState.RUNNING.getStateName())); - p12.appendChild(createAcls(doc, - QueueConfigurationParser.ACL_SUBMIT_JOB_TAG, "*")); - p12.appendChild(createAcls(doc, - QueueConfigurationParser.ACL_ADMINISTER_JOB_TAG, "*")); - - // append p12 to p1. - p1.appendChild(p12); - // append child p13 to p1 - Element p13 = createQueue(doc, "p13"); - p13.appendChild(createState(doc, QueueState.RUNNING.getStateName())); - p1.appendChild(p13); - // append child p14 to p1 - Element p14 = createQueue(doc, "p14"); - p14.appendChild(createState(doc, QueueState.STOPPED.getStateName())); - p1.appendChild(p14); - queues.appendChild(p1); - } - - /** - * Tests job submission when acls are disabled - * @throws Exception - */ - @Test - public void testAclsDisabled() throws Exception { - startCluster(false); - - // submit job to queue p1:p11 by any user not in acls-submit-job - Job job = submitSleepJob(0, 0, 0, 0, true, "u2,g1", "p1" + NAME_SEPARATOR - + "p11", conf); - assertTrue("Job submitted for u2 in queue p1:p11 is not successful.", - job.isSuccessful()); - - // submit job to queue p1:p11 by user in acls-submit-job - job = submitSleepJob(0, 0, 0, 0, true, "u1,g1", "p1" + NAME_SEPARATOR - + "p11", conf); - assertTrue("Job submitted for u2 in queue p1:p11 is not successful.", - job.isSuccessful()); - - job = submitSleepJob(1, 1, 0, 0, false, "u1,g1", "p1" + NAME_SEPARATOR - + "p11", conf); - // kill the job by any user - final JobConf jobConf = miniMRCluster.createJobConf(); - UserGroupInformation ugi = - UserGroupInformation.createUserForTesting("u3",new String[]{"g3"}); - Cluster cluster = ugi.doAs(new PrivilegedExceptionAction() { - public Cluster run() throws IOException { - return new Cluster(jobConf); - } - }); - JobID jobID = job.getStatus().getJobID(); - //Ensure that the jobinprogress is initied before we issue a kill - //signal to the job. - JobInProgress jip = miniMRCluster.getJobTrackerRunner().getJobTracker() - .getJob(org.apache.hadoop.mapred.JobID.downgrade(jobID)); - miniMRCluster.getJobTrackerRunner().getJobTracker().initJob(jip); - cluster.getJob(jobID).killJob(); - assertEquals("job submitted for u1 and queue p1:p11 is not killed.", - cluster.getJob(jobID).getStatus().getState(), (State.KILLED)); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestRackAwareTaskPlacement.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestRackAwareTaskPlacement.java deleted file mode 100644 index 82e5b56f97e..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestRackAwareTaskPlacement.java +++ /dev/null @@ -1,183 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.IOException; - -import junit.extensions.TestSetup; -import junit.framework.Test; -import junit.framework.TestCase; -import junit.framework.TestSuite; - -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobHistory; -import org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobTracker; -import org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobTrackerMetricsInst; -import org.apache.hadoop.mapred.UtilsForTests.FakeClock; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.JobCounter; -import org.apache.hadoop.mapreduce.JobSubmissionFiles; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.mapreduce.split.JobSplit; -import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo; -import org.apache.hadoop.net.DNSToSwitchMapping; -import org.apache.hadoop.net.StaticMapping; -import org.mortbay.log.Log; - -/** - * A JUnit test to test configured task limits. - */ -public class TestRackAwareTaskPlacement extends TestCase { - - static String trackers[] = new String[] {"tracker_tracker1.r1.com:1000", - "tracker_tracker2.r1.com:1000", "tracker_tracker3.r2.com:1000", - "tracker_tracker4.r3.com:1000"}; - - static String[] allHosts = - new String[] {"tracker1.r1.com", "tracker2.r1.com", "tracker3.r2.com", - "tracker4.r3.com"}; - - static String[] allRacks = - new String[] { "/r1", "/r1", "/r2", "/r3"}; - - static FakeJobTracker jobTracker; - static String jtIdentifier = "test"; - private static int jobCounter; - private static FakeJobTrackerMetricsInst fakeInst; - - public static Test suite() { - TestSetup setup = - new TestSetup(new TestSuite(TestRackAwareTaskPlacement.class)) { - protected void setUp() throws Exception { - JobConf conf = new JobConf(); - conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:0"); - conf.set(JTConfig.JT_HTTP_ADDRESS, "0.0.0.0:0"); - conf.setClass("topology.node.switch.mapping.impl", - StaticMapping.class, DNSToSwitchMapping.class); - conf.set(JTConfig.JT_INSTRUMENTATION, - FakeJobTrackerMetricsInst.class.getName()); - jobTracker = new FakeJobTracker(conf, new FakeClock(), trackers); - fakeInst = (FakeJobTrackerMetricsInst) jobTracker.getInstrumentation(); - // Set up the Topology Information - for (int i = 0; i < allHosts.length; i++) { - StaticMapping.addNodeToRack(allHosts[i], allRacks[i]); - } - for (String tracker : trackers) { - FakeObjectUtilities.establishFirstContact(jobTracker, tracker); - } - } - }; - return setup; - } - - static class MyFakeJobInProgress extends JobInProgress { - static JobID jobid; - int numMaps; - - MyFakeJobInProgress(JobConf jc, JobTracker jt) throws IOException { - super((jobid = new JobID(jtIdentifier, jobCounter ++)), jc, jt); - Path jobFile = new Path("Dummy"); - this.profile = new JobProfile(jc.getUser(), jobid, - jobFile.toString(), null, jc.getJobName(), - jc.getQueueName()); - this.jobHistory = 
new FakeJobHistory(); - } - - @Override - public void initTasks() throws IOException { - TaskSplitMetaInfo[] taskSplitMetaInfo = createSplits(jobId); - numMapTasks = taskSplitMetaInfo.length; - createMapTasks(null, taskSplitMetaInfo); - nonRunningMapCache = createCache(taskSplitMetaInfo, maxLevel); - tasksInited.set(true); - this.status.setRunState(JobStatus.RUNNING); - - } - - @Override - protected TaskSplitMetaInfo [] createSplits( - org.apache.hadoop.mapreduce.JobID jobId) throws IOException { - TaskSplitMetaInfo[] splits = new TaskSplitMetaInfo[numMaps]; - // Hand code for now. - // M0,2,3 reside in Host1 - // M1 resides in Host3 - // M4 resides in Host4 - String[] splitHosts0 = new String[] { allHosts[0] }; - - String[] splitHosts1 = new String[] { allHosts[2] }; - String[] splitHosts2 = new String[] { allHosts[3] }; - for (int i = 0; i < numMaps; i++) { - if (i == 0 || i == 2 || i == 3) { - splits[i] = new TaskSplitMetaInfo(splitHosts0, 0, 0); - } else if (i == 1) { - splits[i] = new TaskSplitMetaInfo(splitHosts1, 0, 0); - } else if (i == 4) { - splits[i] = new TaskSplitMetaInfo(splitHosts2, 0, 0); - } - } - - return splits; - } - } - @SuppressWarnings("deprecation") - public void testTaskPlacement() throws IOException { - JobConf conf = new JobConf(); - conf.setNumReduceTasks(0); - conf.setJobName("TestTaskPlacement"); - - MyFakeJobInProgress jip = new MyFakeJobInProgress(conf, jobTracker); - jip.numMaps = 5; - jip.initTasks(); - - // Tracker1 should get a rack local - TaskTrackerStatus tts = new TaskTrackerStatus(trackers[1], allHosts[1]); - jip.obtainNewMapTask(tts, 4, 4); - - // Tracker0 should get a data local - tts = new TaskTrackerStatus(trackers[0], allHosts[0]); - jip.obtainNewMapTask(tts, 4, 4); - - // Tracker2 should get a data local - tts = new TaskTrackerStatus(trackers[2], allHosts[2]); - jip.obtainNewMapTask(tts, 4, 4); - - // Tracker0 should get a data local - tts = new TaskTrackerStatus(trackers[0], allHosts[0]); - jip.obtainNewMapTask(tts, 4, 4); - - // Tracker1 should not get any locality at all - tts = new TaskTrackerStatus(trackers[1], allHosts[1]); - jip.obtainNewMapTask(tts, 4, 4); - - - Counters counters = jip.getCounters(); - assertEquals("Number of data local maps", 3, - counters.getCounter(JobCounter.DATA_LOCAL_MAPS)); - - assertEquals("Number of Rack-local maps", 1 , - counters.getCounter(JobCounter.RACK_LOCAL_MAPS)); - - assertEquals("Number of Other-local maps", 0, - counters.getCounter(JobCounter.OTHER_LOCAL_MAPS)); - // Also verify jobtracker instrumentation - assertEquals("Number of data local maps", 3, fakeInst.numDataLocalMaps); - assertEquals("Number of rack local maps", 1, fakeInst.numRackLocalMaps); - - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestRecoveryManager.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestRecoveryManager.java deleted file mode 100644 index c446af4ee89..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestRecoveryManager.java +++ /dev/null @@ -1,330 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.File; -import java.io.IOException; -import java.security.PrivilegedExceptionAction; - -import junit.framework.TestCase; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.mapred.JobTracker.RecoveryManager; -import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; -import org.apache.hadoop.mapreduce.MRConfig; - -import static org.apache.hadoop.mapred.QueueManagerTestUtils.createQueuesConfigFile; -import static org.apache.hadoop.mapred.QueueManager.toFullPropertyName; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.security.UserGroupInformation; - -/** - * Test whether the {@link RecoveryManager} is able to tolerate job-recovery - * failures and the jobtracker is able to tolerate {@link RecoveryManager} - * failure. - */ -public class TestRecoveryManager extends TestCase { - private static final Log LOG = - LogFactory.getLog(TestRecoveryManager.class); - private static final Path TEST_DIR = - new Path(System.getProperty("test.build.data", "/tmp"), - "test-recovery-manager"); - - /** - * Tests the {@link JobTracker} against the exceptions thrown in - * {@link JobTracker.RecoveryManager}. It does the following : - * - submits 3 jobs - * - kills the jobtracker - * - Garble job.xml for one job causing it to fail in constructor - * and job.split for another causing it to fail in init. 
- * - delete the job temp/submit dir - * - restarts the jobtracker - * - checks if the jobtraker starts normally - */ - public void testJobTracker() throws Exception { - LOG.info("Testing jobtracker restart with faulty job"); - String signalFile = new Path(TEST_DIR, "signal").toString(); - JobConf conf = new JobConf(); - - FileSystem fs = FileSystem.get(new Configuration()); - fs.delete(TEST_DIR, true); // cleanup - - conf.set(JTConfig.JT_JOBHISTORY_BLOCK_SIZE, "1024"); - - MiniMRCluster mr = new MiniMRCluster(1, "file:///", 1, null, null, conf); - - JobConf job1 = mr.createJobConf(); - - UtilsForTests.configureWaitingJobConf(job1, - new Path(TEST_DIR, "input"), new Path(TEST_DIR, "output1"), 2, 0, - "test-recovery-manager", signalFile, signalFile); - - // submit the faulty job - RunningJob rJob1 = (new JobClient(job1)).submitJob(job1); - LOG.info("Submitted job " + rJob1.getID()); - - while (rJob1.mapProgress() < 0.5f) { - LOG.info("Waiting for job " + rJob1.getID() + " to be 50% done"); - UtilsForTests.waitFor(100); - } - - JobConf job2 = mr.createJobConf(); - - UtilsForTests.configureWaitingJobConf(job2, - new Path(TEST_DIR, "input"), new Path(TEST_DIR, "output2"), 30, 0, - "test-recovery-manager", signalFile, signalFile); - - // submit the faulty job - RunningJob rJob2 = (new JobClient(job2)).submitJob(job2); - LOG.info("Submitted job " + rJob2.getID()); - - while (rJob2.mapProgress() < 0.5f) { - LOG.info("Waiting for job " + rJob2.getID() + " to be 50% done"); - UtilsForTests.waitFor(100); - } - - // kill the jobtracker - LOG.info("Stopping jobtracker"); - String sysDir = mr.getJobTrackerRunner().getJobTracker().getSystemDir(); - mr.stopJobTracker(); - - // delete the job.xml of job #1 causing the job to fail in submit Job - //while recovery itself - Path jobFile = - new Path(sysDir, rJob1.getID().toString() + "/" + JobTracker.JOB_INFO_FILE); - LOG.info("Deleting job token file : " + jobFile.toString()); - fs.delete(jobFile, false); // delete the job.xml file - - // create the job token file with 1 byte - FSDataOutputStream out = fs.create(jobFile); - out.write(1); - out.close(); - - // make sure that the jobtracker is in recovery mode - mr.getJobTrackerConf().setBoolean(JTConfig.JT_RESTART_ENABLED, true); - // start the jobtracker - LOG.info("Starting jobtracker"); - mr.startJobTracker(); - ClusterStatus status = - mr.getJobTrackerRunner().getJobTracker().getClusterStatus(false); - - // check if the jobtracker came up or not - assertEquals("JobTracker crashed!", - JobTrackerStatus.RUNNING, status.getJobTrackerStatus()); - - // assert the no of recovered jobs - assertEquals("No of recovered jobs not correct", - 1, mr.getJobTrackerRunner().getJobTracker(). - recoveryManager.getRecovered()); - - mr.shutdown(); - } - - /** - * Tests the {@link JobTracker.RecoveryManager} against the exceptions thrown - * during recovery. It does the following : - * - submits a job with HIGH priority and x tasks - * - allows it to complete 50% - * - submits another job with normal priority and y tasks - * - kills the jobtracker - * - restarts the jobtracker with max-tasks-per-job such that - * y < max-tasks-per-job < x - * - checks if the jobtraker starts normally and job#2 is recovered while - * job#1 is failed. 
- */ - public void testRecoveryManager() throws Exception { - LOG.info("Testing recovery-manager"); - String signalFile = new Path(TEST_DIR, "signal").toString(); - - // clean up - FileSystem fs = FileSystem.get(new Configuration()); - fs.delete(TEST_DIR, true); - - JobConf conf = new JobConf(); - conf.set(JTConfig.JT_JOBHISTORY_BLOCK_SIZE, "1024"); - - MiniMRCluster mr = new MiniMRCluster(1, "file:///", 1, null, null, conf); - JobTracker jobtracker = mr.getJobTrackerRunner().getJobTracker(); - - JobConf job1 = mr.createJobConf(); - // set the high priority - job1.setJobPriority(JobPriority.HIGH); - - UtilsForTests.configureWaitingJobConf(job1, - new Path(TEST_DIR, "input"), new Path(TEST_DIR, "output3"), 30, 0, - "test-recovery-manager", signalFile, signalFile); - - // submit the faulty job - JobClient jc = new JobClient(job1); - RunningJob rJob1 = jc.submitJob(job1); - LOG.info("Submitted first job " + rJob1.getID()); - - while (rJob1.mapProgress() < 0.5f) { - LOG.info("Waiting for job " + rJob1.getID() + " to be 50% done"); - UtilsForTests.waitFor(100); - } - - // now submit job2 - JobConf job2 = mr.createJobConf(); - - String signalFile1 = new Path(TEST_DIR, "signal1").toString(); - UtilsForTests.configureWaitingJobConf(job2, - new Path(TEST_DIR, "input"), new Path(TEST_DIR, "output4"), 20, 0, - "test-recovery-manager", signalFile1, signalFile1); - - // submit the job - RunningJob rJob2 = (new JobClient(job2)).submitJob(job2); - LOG.info("Submitted job " + rJob2.getID()); - - // wait for it to init - JobInProgress jip = jobtracker.getJob(rJob2.getID()); - - while (!jip.inited()) { - LOG.info("Waiting for job " + jip.getJobID() + " to be inited"); - UtilsForTests.waitFor(100); - } - - // now submit job3 with inappropriate acls - final JobConf job3 = mr.createJobConf(); - UserGroupInformation ugi3 = - UserGroupInformation.createUserForTesting("abc", new String[]{"users"}); - - UtilsForTests.configureWaitingJobConf(job3, - new Path(TEST_DIR, "input"), new Path(TEST_DIR, "output5"), 1, 0, - "test-recovery-manager", signalFile, signalFile); - - // submit the job - RunningJob rJob3 = ugi3.doAs(new PrivilegedExceptionAction() { - public RunningJob run() throws IOException { - return (new JobClient(job3)).submitJob(job3); - } - }); - - LOG.info("Submitted job " + rJob3.getID() + " with different user"); - - jip = jobtracker.getJob(rJob3.getID()); - assertEquals("Restart count is not correct", - 0, jip.getNumRestarts()); - - while (!jip.inited()) { - LOG.info("Waiting for job " + jip.getJobID() + " to be inited"); - UtilsForTests.waitFor(100); - } - - // kill the jobtracker - LOG.info("Stopping jobtracker"); - mr.stopJobTracker(); - - // make sure that the jobtracker is in recovery mode - mr.getJobTrackerConf().setBoolean(JTConfig.JT_RESTART_ENABLED, - true); - mr.getJobTrackerConf().setInt(JTConfig.JT_TASKS_PER_JOB, 25); - - mr.getJobTrackerConf().setBoolean(MRConfig.MR_ACLS_ENABLED, true); - - UserGroupInformation ugi = UserGroupInformation.getLoginUser(); - mr.getJobTrackerConf().set(toFullPropertyName( - "default", QueueACL.SUBMIT_JOB.getAclName()), ugi.getUserName()); - - // start the jobtracker - LOG.info("Starting jobtracker"); - mr.startJobTracker(); - UtilsForTests.waitForJobTracker(jc); - - jobtracker = mr.getJobTrackerRunner().getJobTracker(); - - // assert that job2 is recovered by the jobtracker as job1 would fail - assertEquals("Recovery manager failed to tolerate job failures", - 2, jobtracker.getAllJobs().length); - - // assert the no of recovered jobs - assertEquals("No of 
recovered jobs not correct", - 2, jobtracker.recoveryManager.getRecovered()); - assertEquals("Restart count is not correct", - 1, jobtracker.getJob(rJob2.getID()).getNumRestarts()); - // check if the job#1 has failed - JobStatus status = jobtracker.getJobStatus(rJob1.getID()); - assertEquals("Faulty job not failed", - JobStatus.FAILED, status.getRunState()); - - jip = jobtracker.getJob(rJob2.getID()); - assertFalse("Job should be running", jip.isComplete()); - - status = jobtracker.getJobStatus(rJob3.getID()); - assertNull("Job should be missing", status); - - mr.shutdown(); - } - - /** - * Test if the jobtracker waits for the info file to be created before - * starting. - */ - public void testJobTrackerInfoCreation() throws Exception { - LOG.info("Testing jobtracker.info file"); - MiniDFSCluster dfs = new MiniDFSCluster(new Configuration(), 1, true, null); - String namenode = (dfs.getFileSystem()).getUri().getHost() + ":" - + (dfs.getFileSystem()).getUri().getPort(); - // shut down the data nodes - dfs.shutdownDataNodes(); - - // start the jobtracker - JobConf conf = new JobConf(); - FileSystem.setDefaultUri(conf, namenode); - conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:0"); - conf.set(JTConfig.JT_HTTP_ADDRESS, "127.0.0.1:0"); - - JobTracker jobtracker = new JobTracker(conf); - - // now check if the update restart count works fine or not - boolean failed = false; - try { - jobtracker.recoveryManager.updateRestartCount(); - } catch (IOException ioe) { - failed = true; - } - assertTrue("JobTracker created info files without datanodes!!!", failed); - - Path restartFile = jobtracker.recoveryManager.getRestartCountFile(); - Path tmpRestartFile = jobtracker.recoveryManager.getTempRestartCountFile(); - FileSystem fs = dfs.getFileSystem(); - assertFalse("Info file exists after update failure", - fs.exists(restartFile)); - assertFalse("Temporary restart-file exists after update failure", - fs.exists(restartFile)); - - // start 1 data node - dfs.startDataNodes(conf, 1, true, null, null, null, null); - dfs.waitActive(); - - failed = false; - try { - jobtracker.recoveryManager.updateRestartCount(); - } catch (IOException ioe) { - failed = true; - } - assertFalse("JobTracker failed to create info files with datanodes!!!", failed); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestResourceEstimation.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestResourceEstimation.java deleted file mode 100644 index 2b47a38e577..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestResourceEstimation.java +++ /dev/null @@ -1,108 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import org.apache.hadoop.mapreduce.split.JobSplit; - -import junit.framework.TestCase; - -public class TestResourceEstimation extends TestCase { - - - public void testResourceEstimator() throws Exception { - final int maps = 100; - final int reduces = 2; - final int singleMapOutputSize = 1000; - JobConf jc = new JobConf(); - JobID jid = new JobID("testJT", 0); - jc.setNumMapTasks(maps); - jc.setNumReduceTasks(reduces); - - JobInProgress jip = new JobInProgress(jid, jc, - UtilsForTests.getJobTracker()); - //unfortunately, we can't set job input size from here. - ResourceEstimator re = new ResourceEstimator(jip); - - for(int i = 0; i < maps / 10 ; ++i) { - - long estOutSize = re.getEstimatedMapOutputSize(); - System.out.println(estOutSize); - assertEquals(0, estOutSize); - - TaskStatus ts = new MapTaskStatus(); - ts.setOutputSize(singleMapOutputSize); - JobSplit.TaskSplitMetaInfo split = - new JobSplit.TaskSplitMetaInfo(new String[0], 0, 0); - TaskInProgress tip = new TaskInProgress(jid, "", split, null, jc, jip, 0, 1); - re.updateWithCompletedTask(ts, tip); - } - assertEquals(2* singleMapOutputSize, re.getEstimatedMapOutputSize()); - assertEquals(2* singleMapOutputSize * maps / reduces, re.getEstimatedReduceInputSize()); - - } - - public void testWithNonZeroInput() throws Exception { - final int maps = 100; - final int reduces = 2; - final int singleMapOutputSize = 1000; - final int singleMapInputSize = 500; - JobConf jc = new JobConf(); - JobID jid = new JobID("testJT", 0); - jc.setNumMapTasks(maps); - jc.setNumReduceTasks(reduces); - - JobInProgress jip = new JobInProgress(jid, jc, - UtilsForTests.getJobTracker()) { - long getInputLength() { - return singleMapInputSize*desiredMaps(); - } - }; - ResourceEstimator re = new ResourceEstimator(jip); - - for(int i = 0; i < maps / 10 ; ++i) { - - long estOutSize = re.getEstimatedMapOutputSize(); - System.out.println(estOutSize); - assertEquals(0, estOutSize); - - TaskStatus ts = new MapTaskStatus(); - ts.setOutputSize(singleMapOutputSize); - JobSplit.TaskSplitMetaInfo split = - new JobSplit.TaskSplitMetaInfo(new String[0], 0, - singleMapInputSize); - TaskInProgress tip = new TaskInProgress(jid, "", split, null, jc, jip, 0, 1); - re.updateWithCompletedTask(ts, tip); - } - - assertEquals(2* singleMapOutputSize, re.getEstimatedMapOutputSize()); - assertEquals(2* singleMapOutputSize * maps / reduces, re.getEstimatedReduceInputSize()); - - //add one more map task with input size as 0 - TaskStatus ts = new MapTaskStatus(); - ts.setOutputSize(singleMapOutputSize); - JobSplit.TaskSplitMetaInfo split = - new JobSplit.TaskSplitMetaInfo(new String[0], 0, 0); - TaskInProgress tip = new TaskInProgress(jid, "", split, null, jc, jip, 0, 1); - re.updateWithCompletedTask(ts, tip); - - long expectedTotalMapOutSize = (singleMapOutputSize*11) * - ((maps*singleMapInputSize)+maps)/((singleMapInputSize+1)*10+1); - assertEquals(2* expectedTotalMapOutSize/maps, re.getEstimatedMapOutputSize()); - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSetupAndCleanupFailure.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSetupAndCleanupFailure.java deleted file mode 100644 index 2c4d9998c46..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSetupAndCleanupFailure.java +++ /dev/null @@ -1,263 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.IOException; - -import junit.framework.TestCase; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.mapred.lib.IdentityMapper; -import org.apache.hadoop.mapred.lib.IdentityReducer; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; - -/** - * Tests various failures in setup/cleanup of job, like - * throwing exception, command line kill and lost tracker - */ -public class TestSetupAndCleanupFailure extends TestCase { - - final Path inDir = new Path("./input"); - final Path outDir = new Path("./output"); - static Path setupSignalFile = new Path("/setup-signal"); - static Path cleanupSignalFile = new Path("/cleanup-signal"); - - // Commiter with setupJob throwing exception - static class CommitterWithFailSetup extends FileOutputCommitter { - @Override - public void setupJob(JobContext context) throws IOException { - throw new IOException(); - } - } - - // Commiter with commitJob throwing exception - static class CommitterWithFailCommit extends FileOutputCommitter { - @Override - public void commitJob(JobContext context) throws IOException { - throw new IOException(); - } - } - - // Committer waits for a file to be created on dfs. - static class CommitterWithLongSetupAndCommit extends FileOutputCommitter { - - private void waitForSignalFile(FileSystem fs, Path signalFile) - throws IOException { - while (!fs.exists(signalFile)) { - try { - Thread.sleep(100); - } catch (InterruptedException ie) { - break; - } - } - } - - @Override - public void setupJob(JobContext context) throws IOException { - waitForSignalFile(FileSystem.get(context.getJobConf()), setupSignalFile); - super.setupJob(context); - } - - @Override - public void commitJob(JobContext context) throws IOException { - waitForSignalFile(FileSystem.get(context.getJobConf()), cleanupSignalFile); - super.commitJob(context); - } - } - - // Among these tips only one of the tasks will be running, - // get the taskid for that task - private TaskAttemptID getRunningTaskID(TaskInProgress[] tips) { - TaskAttemptID taskid = null; - while (taskid == null) { - for (TaskInProgress tip :tips) { - TaskStatus[] statuses = tip.getTaskStatuses(); - for (TaskStatus status : statuses) { - if (status.getRunState() == TaskStatus.State.RUNNING) { - taskid = status.getTaskID(); - break; - } - } - if (taskid != null) break; - } - try { - Thread.sleep(10); - } catch (InterruptedException ie) {} - } - return taskid; - } - - // Tests the failures in setup/cleanup job. Job should cleanly fail. 
- private void testFailCommitter(Class theClass, - JobConf jobConf) - throws IOException { - jobConf.setOutputCommitter(theClass); - RunningJob job = UtilsForTests.runJob(jobConf, inDir, outDir); - // wait for the job to finish. - job.waitForCompletion(); - assertEquals(JobStatus.FAILED, job.getJobState()); - } - - // launch job with CommitterWithLongSetupAndCleanup as committer - // and wait till the job is inited. - private RunningJob launchJobWithWaitingSetupAndCleanup(MiniMRCluster mr) - throws IOException { - // launch job with waiting setup/cleanup - JobConf jobConf = mr.createJobConf(); - jobConf.setOutputCommitter(CommitterWithLongSetupAndCommit.class); - RunningJob job = UtilsForTests.runJob(jobConf, inDir, outDir); - JobTracker jt = mr.getJobTrackerRunner().getJobTracker(); - JobInProgress jip = jt.getJob(job.getID()); - while (!jip.inited()) { - try { - Thread.sleep(10); - } catch (InterruptedException ie) {} - } - return job; - } - - /** - * Tests setup and cleanup attempts getting killed from command-line - * and lost tracker - * - * @param mr - * @param dfs - * @param commandLineKill if true, test with command-line kill - * else, test with lost tracker - * @throws IOException - */ - private void testSetupAndCleanupKill(MiniMRCluster mr, - MiniDFSCluster dfs, - boolean commandLineKill) - throws Exception { - // launch job with waiting setup/cleanup - RunningJob job = launchJobWithWaitingSetupAndCleanup(mr); - - JobTracker jt = mr.getJobTrackerRunner().getJobTracker(); - JobInProgress jip = jt.getJob(job.getID()); - // get the running setup task id - TaskAttemptID setupID = getRunningTaskID(jip.getTasks(TaskType.JOB_SETUP)); - if (commandLineKill) { - killTaskFromCommandLine(job, setupID, jt); - } else { - killTaskWithLostTracker(mr, setupID); - } - // signal the setup to complete - UtilsForTests.writeFile(dfs.getNameNode(), - dfs.getFileSystem().getConf(), - setupSignalFile, (short)3); - // wait for maps and reduces to complete - while (job.reduceProgress() != 1.0f) { - try { - Thread.sleep(100); - } catch (InterruptedException ie) {} - } - // get the running cleanup task id - TaskAttemptID cleanupID = - getRunningTaskID(jip.getTasks(TaskType.JOB_CLEANUP)); - if (commandLineKill) { - killTaskFromCommandLine(job, cleanupID, jt); - } else { - killTaskWithLostTracker(mr, cleanupID); - } - // signal the cleanup to complete - UtilsForTests.writeFile(dfs.getNameNode(), - dfs.getFileSystem().getConf(), - cleanupSignalFile, (short)3); - // wait for the job to finish. 
- job.waitForCompletion(); - assertEquals(JobStatus.SUCCEEDED, job.getJobState()); - assertEquals(TaskStatus.State.KILLED, - jt.getTaskStatus(setupID).getRunState()); - assertEquals(TaskStatus.State.KILLED, - jt.getTaskStatus(cleanupID).getRunState()); - } - - // kill the task from command-line - // wait till it kill is reported back - private void killTaskFromCommandLine(RunningJob job, - TaskAttemptID taskid, - JobTracker jt) - throws IOException { - job.killTask(taskid, false); - // wait till the kill happens - while (jt.getTaskStatus(taskid).getRunState() != - TaskStatus.State.KILLED) { - try { - Thread.sleep(10); - } catch (InterruptedException ie) {} - } - - } - // kill the task by losing the tracker - private void killTaskWithLostTracker(MiniMRCluster mr, - TaskAttemptID taskid) { - JobTracker jt = mr.getJobTrackerRunner().getJobTracker(); - String trackerName = jt.getTaskStatus(taskid).getTaskTracker(); - int trackerID = mr.getTaskTrackerID(trackerName); - assertTrue(trackerID != -1); - mr.stopTaskTracker(trackerID); - } - - // Tests the failures in setup/cleanup job. Job should cleanly fail. - // Also Tests the command-line kill for setup/cleanup attempts. - // tests the setup/cleanup attempts getting killed if - // they were running on a lost tracker - public void testWithDFS() throws Exception { - MiniDFSCluster dfs = null; - MiniMRCluster mr = null; - FileSystem fileSys = null; - try { - final int taskTrackers = 4; - Configuration conf = new Configuration(); - dfs = new MiniDFSCluster(conf, 4, true, null); - fileSys = dfs.getFileSystem(); - JobConf jtConf = new JobConf(); - jtConf.setInt(TTConfig.TT_MAP_SLOTS, 1); - jtConf.setInt(TTConfig.TT_REDUCE_SLOTS, 1); - jtConf.setLong(JTConfig.JT_TRACKER_EXPIRY_INTERVAL, 10 * 1000); - mr = new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1, - null, null, jtConf); - // test setup/cleanup throwing exceptions - testFailCommitter(CommitterWithFailSetup.class, mr.createJobConf()); - testFailCommitter(CommitterWithFailCommit.class, mr.createJobConf()); - // test the command-line kill for setup/cleanup attempts. - testSetupAndCleanupKill(mr, dfs, true); - // remove setup/cleanup signal files. - fileSys.delete(setupSignalFile , true); - fileSys.delete(cleanupSignalFile , true); - // test the setup/cleanup attempts getting killed if - // they were running on a lost tracker - testSetupAndCleanupKill(mr, dfs, false); - } finally { - if (dfs != null) { dfs.shutdown(); } - if (mr != null) { mr.shutdown(); - } - } - } - - public static void main(String[] argv) throws Exception { - TestSetupAndCleanupFailure td = new TestSetupAndCleanupFailure(); - td.testWithDFS(); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSetupTaskScheduling.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSetupTaskScheduling.java deleted file mode 100644 index 059d692eb82..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSetupTaskScheduling.java +++ /dev/null @@ -1,334 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobInProgress; -import org.apache.hadoop.mapred.FakeObjectUtilities.FakeTaskInProgress; -import org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobTracker; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.mapreduce.split.JobSplit; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.TaskType; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -import junit.framework.TestCase; - -public class TestSetupTaskScheduling extends TestCase { - - public static final Log LOG = - LogFactory.getLog(TestSetupTaskScheduling.class); - - static String[] trackers = new String[] { "tracker_tracker1:1000", - "tracker_tracker2:1000", "tracker_tracker3:1000" }; - private static FakeJobTracker jobTracker; - - /** - * Fake JobInProgress that can return a hardcoded setup or - * cleanup task depending on the slot type passed in. - */ - static class FakeJobWithSetupTask - extends FakeObjectUtilities.FakeJobInProgress { - - FakeJobWithSetupTask(JobConf jobConf, - JobTracker tracker) throws IOException { - super(jobConf, tracker); - } - - /** - * Initialize tasks, including setup. - */ - @Override - public synchronized void initTasks() throws IOException { - super.initTasks(); - JobSplit.TaskSplitMetaInfo emptySplit = new JobSplit.TaskSplitMetaInfo(); - setup = new TaskInProgress[2]; - setup[0] = new TaskInProgress(getJobID(), "test", emptySplit, - jobtracker, getJobConf(), this, numMapTasks + 1, 1); - setup[1] = new TaskInProgress(getJobID(), "test", numMapTasks, - numReduceTasks + 1, jobtracker, getJobConf(), this, 1); - } - - /** - * Obtain a setup task on a map slot or reduce slot - * depending on what is free. - * - * Every call to this will return either a map or reduce - * setup task. No check is done to see if the task is already - * returned - */ - @Override - public Task obtainJobSetupTask(TaskTrackerStatus tts, - int clusterSize, - int numUniqueHosts, - boolean isMapSlot) - throws IOException{ - TaskInProgress tip = null; - if (isMapSlot) { - tip = setup[0]; - } else { - tip = setup[1]; - } - Task t = tip.getTaskToRun(tts.getHost()); - t.setJobSetupTask(); - return t; - } - } - - static class FakeJobWithTaskCleanupTask - extends FakeObjectUtilities.FakeJobInProgress { - - FakeJobWithTaskCleanupTask(JobConf jobConf, - JobTracker tracker) throws IOException { - super(jobConf, tracker); - } - - /** - * Initialize tasks(1 map and 1 reduce task each needs 2 slots, similar to - * tasks of a high RAM job). 
- */ - @Override - public synchronized void initTasks() throws IOException { - super.initTasks(); - - final int numSlotsPerTask = 2; - maps = new TaskInProgress[1]; - reduces = new TaskInProgress[1]; - - maps[0] = new FakeTaskInProgress(getJobID(), "test", - JobSplit.EMPTY_TASK_SPLIT, - jobtracker, getJobConf(), this, 0, numSlotsPerTask); - TaskAttemptID attemptId = new TaskAttemptID(maps[0].getTIPId(), 0); - - // make this task a taskCleanup task of a map task - mapCleanupTasks.add(attemptId); - TaskStatus stat = new MapTaskStatus(attemptId, 0.01f, 2, - TaskStatus.State.FAILED_UNCLEAN, "", "", trackers[0], - TaskStatus.Phase.MAP, new Counters()); - maps[0].updateStatus(stat); - - //similarly for reduce task's taskCleanup task - reduces[0] = new FakeTaskInProgress(getJobID(), "test", 1, - 0, jobtracker, getJobConf(), this, numSlotsPerTask); - attemptId = new TaskAttemptID(reduces[0].getTIPId(), 0); - - // make this task a taskCleanup task of a reduce task - reduceCleanupTasks.add(attemptId); - stat = new ReduceTaskStatus(attemptId, 0.01f, 2, - TaskStatus.State.FAILED_UNCLEAN, "", "", trackers[0], - TaskStatus.Phase.REDUCE, new Counters()); - reduces[0].updateStatus(stat); - } - } - - public void setUp() throws Exception { - JobConf conf = new JobConf(); - conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:0"); - conf.set(JTConfig.JT_HTTP_ADDRESS, "0.0.0.0:0"); - jobTracker = new FakeJobTracker(conf, new Clock(), trackers); - for (String tracker : trackers) { - FakeObjectUtilities.establishFirstContact(jobTracker, tracker); - } - } - - // create a job for testing setup tasks and reservations - FakeJobInProgress createJob(TaskType taskType) throws IOException { - JobConf conf = new JobConf(); - conf.setSpeculativeExecution(false); - conf.setNumMapTasks(2); - conf.setNumReduceTasks(2); - conf.set(JobContext.REDUCE_FAILURES_MAXPERCENT, ".70"); - conf.set(JobContext.MAP_FAILURES_MAX_PERCENT, ".70"); - FakeJobInProgress job = null; - if (taskType == null) { - conf.setBoolean(JobContext.SETUP_CLEANUP_NEEDED, false); - job = new FakeJobInProgress(conf, jobTracker); - } else if (taskType == TaskType.JOB_SETUP) { - job = new FakeJobWithSetupTask(conf, jobTracker); - } else if (taskType == TaskType.TASK_CLEANUP) { - job = new FakeJobWithTaskCleanupTask(conf, jobTracker); - } - job.setClusterSize(trackers.length); - job.initTasks(); - return job; - } - - // create a new TaskStatus and add to a list of status objects. - // useMapSlot param is needed only when taskType is TASK_CLEANUP. 
- void addNewTaskStatus(FakeJobInProgress job, TaskType taskType, - boolean useMapSlot, String tracker, List reports) - throws IOException { - TaskAttemptID task = null; - TaskStatus status = null; - if (taskType == TaskType.MAP) { - task = job.findMapTask(tracker); - status = new MapTaskStatus(task, 0.01f, 2, - TaskStatus.State.RUNNING, "", "", tracker, - TaskStatus.Phase.MAP, new Counters()); - } else if (taskType == TaskType.TASK_CLEANUP) { - if (useMapSlot) { - status = job.maps[0].taskStatuses.get( - new TaskAttemptID(job.maps[0].getTIPId(), 0)); - } else { - status = job.reduces[0].taskStatuses.get( - new TaskAttemptID(job.reduces[0].getTIPId(), 0)); - } - } else { - task = job.findReduceTask(tracker); - status = new ReduceTaskStatus(task, 0.01f, 2, - TaskStatus.State.RUNNING, "", "", tracker, - TaskStatus.Phase.REDUCE, new Counters()); - } - reports.add(status); - } - - // create a TaskTrackerStatus - TaskTrackerStatus createTaskTrackerStatus(String tracker, - List reports) { - TaskTrackerStatus ttStatus = - new TaskTrackerStatus(tracker, - JobInProgress.convertTrackerNameToHostName(tracker), - 0, reports, 0, 2, 2); - return ttStatus; - } - - /** - * Test that a setup task can be run against a map slot - * if it is free. - * @throws IOException - */ - public void testSetupTaskReturnedForFreeMapSlots() throws IOException { - // create a job with a setup task. - FakeJobInProgress job = createJob(TaskType.JOB_SETUP); - jobTracker.jobs.put(job.getJobID(), job); - - // create a status simulating a free tasktracker - List reports = new ArrayList(); - TaskTrackerStatus ttStatus - = createTaskTrackerStatus(trackers[2], reports); - - // verify that a setup task can be assigned to a map slot. - List tasks = jobTracker.getSetupAndCleanupTasks(ttStatus); - assertEquals(1, tasks.size()); - assertTrue(tasks.get(0).isJobSetupTask()); - assertTrue(tasks.get(0).isMapTask()); - jobTracker.jobs.clear(); - } - - /** - * Test to check that map slots are counted when returning - * a setup task. - * @throws IOException - */ - public void testMapSlotsCountedForSetup() throws IOException { - // create a job with a setup task. - FakeJobInProgress job = createJob(TaskType.JOB_SETUP); - jobTracker.jobs.put(job.getJobID(), job); - - // create another job for reservation - FakeJobInProgress job1 = createJob(null); - jobTracker.jobs.put(job1.getJobID(), job1); - - // create TT status for testing getSetupAndCleanupTasks - List taskStatuses = new ArrayList(); - addNewTaskStatus(job, TaskType.MAP, true, trackers[0], taskStatuses); - TaskTrackerStatus ttStatus - = createTaskTrackerStatus(trackers[0], taskStatuses); - - // test that there should be no map setup task returned. - List tasks = jobTracker.getSetupAndCleanupTasks(ttStatus); - assertEquals(1, tasks.size()); - assertTrue(tasks.get(0).isJobSetupTask()); - assertFalse(tasks.get(0).isMapTask()); - jobTracker.jobs.clear(); - } - - /** - * Test to check that reduce slots are also counted when returning - * a setup task. - * @throws IOException - */ - public void testReduceSlotsCountedForSetup() throws IOException { - // create a job with a setup task. - FakeJobInProgress job = createJob(TaskType.JOB_SETUP); - jobTracker.jobs.put(job.getJobID(), job); - - // create another job for reservation - FakeJobInProgress job1 = createJob(null); - jobTracker.jobs.put(job1.getJobID(), job1); - - // create TT status for testing getSetupAndCleanupTasks - List reports = new ArrayList(); - // because free map slots are checked first in code, - // we fill up map slots also. 
- addNewTaskStatus(job1, TaskType.MAP, true, trackers[1], reports); - addNewTaskStatus(job1, TaskType.REDUCE, false,trackers[1], reports); - TaskTrackerStatus ttStatus - = createTaskTrackerStatus(trackers[1], reports); - - // test that there should be no setup task returned, - // as both map and reduce slots are occupied. - List tasks = jobTracker.getSetupAndCleanupTasks(ttStatus); - assertNull(tasks); - jobTracker.jobs.clear(); - } - - void validateNumSlotsUsedForTaskCleanup(TaskTrackerStatus ttStatus) - throws IOException { - List tasks = jobTracker.getSetupAndCleanupTasks(ttStatus); - - assertEquals("Actual number of taskCleanup tasks is not same as expected", 1, tasks.size()); - LOG.info("taskCleanup task is " + tasks.get(0)); - assertTrue(tasks.get(0).isTaskCleanupTask()); - - // slots needed for taskCleanup task should be 1(even for high RAM jobs) - assertEquals("TaskCleanup task should not need more than 1 slot.", - 1, tasks.get(0).getNumSlotsRequired()); - } - - /** - * Test to check that map slots are counted when returning - * a taskCleanup task. - * @throws IOException - */ - public void testNumSlotsUsedForTaskCleanup() throws IOException { - // Create a high RAM job with a map task's cleanup task and a reduce task's - // cleanup task. Make this Fake job a high RAM job by setting the slots - // required for map/reduce task to 2. - FakeJobInProgress job = createJob(TaskType.TASK_CLEANUP); - jobTracker.jobs.put(job.getJobID(), job); - - // create TT status for testing getSetupAndCleanupTasks - List taskStatuses = new ArrayList(); - TaskTrackerStatus ttStatus = - createTaskTrackerStatus(trackers[0], taskStatuses);//create dummy status - - // validate mapTaskCleanup task - validateNumSlotsUsedForTaskCleanup(ttStatus); - - // validate reduceTaskCleanup task - validateNumSlotsUsedForTaskCleanup(ttStatus); - - jobTracker.jobs.clear(); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSetupWorkDir.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSetupWorkDir.java deleted file mode 100644 index c861d4d27e4..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSetupWorkDir.java +++ /dev/null @@ -1,235 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import java.io.DataOutputStream; -import java.io.File; -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; - -import junit.framework.TestCase; - -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.apache.hadoop.mapreduce.filecache.DistributedCache; -import org.apache.hadoop.mapreduce.filecache.TrackerDistributedCacheManager; - -/** - * Verifies if TaskRunner.SetupWorkDir() is cleaning up files/dirs pointed - * to by symlinks under work dir. - */ -public class TestSetupWorkDir extends TestCase { - - /** - * Creates 1 subdirectory and 1 file under dir2. Creates 1 subdir, 1 file, - * 1 symlink to a dir and a symlink to a file under dir1. - * Creates dir1/subDir, dir1/file, dir2/subDir, dir2/file, - * dir1/symlinkSubDir->dir2/subDir, dir1/symlinkFile->dir2/file. - */ - static void createSubDirsAndSymLinks(JobConf jobConf, Path dir1, Path dir2) - throws IOException { - FileSystem fs = FileSystem.getLocal(jobConf); - createSubDirAndFile(fs, dir1); - createSubDirAndFile(fs, dir2); - // now create symlinks under dir1 that point to file/dir under dir2 - FileUtil.symLink(dir2+"/subDir", dir1+"/symlinkSubDir"); - FileUtil.symLink(dir2+"/file", dir1+"/symlinkFile"); - } - - static void createSubDirAndFile(FileSystem fs, Path dir) throws IOException { - Path subDir = new Path(dir, "subDir"); - fs.mkdirs(subDir); - createFile(fs, dir, "file"); - } - - /** - * Create a file - * - * @param fs filesystem - * @param dir directory location of the file - * @param fileName filename - * @throws IOException - */ - static void createFile(FileSystem fs, Path dir, String fileName) - throws IOException { - Path p = new Path(dir, fileName); - DataOutputStream out = fs.create(p); - out.writeBytes("dummy input"); - out.close(); - } - - void createEmptyDir(FileSystem fs, Path dir) throws IOException { - if (fs.exists(dir)) { - fs.delete(dir, true); - } - if (!fs.mkdirs(dir)) { - throw new IOException("Unable to create directory " + dir); - } - } - - /** - * Validates if TaskRunner.setupWorkDir() is properly cleaning up the - * contents of workDir and creating tmp dir under it (even though workDir - * contains symlinks to files/directories). - */ - public void testSetupWorkDir() throws IOException { - Path rootDir = new Path(System.getProperty("test.build.data", "/tmp"), - "testSetupWorkDir"); - Path myWorkDir = new Path(rootDir, "./work"); - Path myTargetDir = new Path(rootDir, "./tmp"); - JobConf jConf = new JobConf(); - FileSystem fs = FileSystem.getLocal(jConf); - createEmptyDir(fs, myWorkDir); - createEmptyDir(fs, myTargetDir); - - // create subDirs and symlinks under work dir - createSubDirsAndSymLinks(jConf, myWorkDir, myTargetDir); - - assertTrue("Did not create symlinks/files/dirs properly. Check " - + myWorkDir + " and " + myTargetDir, - (fs.listStatus(myWorkDir).length == 4) && - (fs.listStatus(myTargetDir).length == 2)); - - // let us disable creation of symlinks in setupWorkDir() - jConf.set(MRJobConfig.CACHE_SYMLINK, "no"); - - // Deletion of myWorkDir should not affect contents of myTargetDir. 
- // myTargetDir is like $user/jobcache/distcache - TaskRunner.setupWorkDir(jConf, new File(myWorkDir.toUri().getPath())); - - // Contents of myWorkDir should be cleaned up and a tmp dir should be - // created under myWorkDir - assertTrue(myWorkDir + " is not cleaned up properly.", - fs.exists(myWorkDir) && (fs.listStatus(myWorkDir).length == 1)); - - // Make sure that the dir under myWorkDir is tmp - assertTrue(fs.listStatus(myWorkDir)[0].getPath().toUri().getPath() - .toString().equals(myWorkDir.toString() + "/tmp")); - - // Make sure that myTargetDir is not changed/deleted - assertTrue("Dir " + myTargetDir + " seem to be modified.", - fs.exists(myTargetDir) && (fs.listStatus(myTargetDir).length == 2)); - - // cleanup - fs.delete(rootDir, true); - } - - /** - * Validates distributed cache symlink getting created fine - * - * @throws IOException, URISyntaxException - */ - public void testSetupWorkDirDistCacheSymlinkValid() - throws IOException, URISyntaxException { - JobConf jConf = new JobConf(); - FileSystem fs = FileSystem.getLocal(jConf); - - Path rootDir = new Path(System.getProperty("test.build.data", "/tmp"), - "testSetupWorkDirSymlinkFailure"); - - // create file for DistributedCache and set it - Path myTargetDir = new Path(rootDir, "./tmp"); - createEmptyDir(fs, myTargetDir); - createFile(fs, myTargetDir, "cacheFile.txt"); - TrackerDistributedCacheManager.setLocalFiles(jConf, - (myTargetDir.toString()+Path.SEPARATOR+"cacheFile.txt")); - assertTrue("Did not create cache file in " + myTargetDir, - (fs.listStatus(myTargetDir).length == 1)); - - // let us enable creation of symlinks in setupWorkDir() - jConf.set(MRJobConfig.CACHE_SYMLINK, "yes"); - - // add a valid symlink - Path myWorkDir = new Path(rootDir, "./work"); - createEmptyDir(fs, myWorkDir); - DistributedCache.addCacheFile(new URI(myWorkDir.toString() + - Path.SEPARATOR + "file.txt#valid"), jConf); - - // setupWorkDir should create symlinks - TaskRunner.setupWorkDir(jConf, new File(myWorkDir.toUri().getPath())); - - // myWorkDir should have 2 entries, a tmp dir and the symlink valid - assertTrue(myWorkDir + " does not have cache symlink.", - fs.exists(myWorkDir) && (fs.listStatus(myWorkDir).length == 2)); - - // make sure work dir has symlink valid - boolean foundValid = false; - for (FileStatus fstat : fs.listStatus(myWorkDir)) { - if (fstat.getPath().toUri() != null && - fstat.getPath().toUri().getPath().toString() - .equals(myWorkDir.toString() + Path.SEPARATOR+ "valid")) { - foundValid = true; - } - } - - assertTrue("Valid symlink not created", foundValid); - - // cleanup - fs.delete(rootDir, true); - } - - /** - * Invalid distributed cache files errors out with IOException - * - * @throws IOException, URISyntaxException - */ - public void testSetupWorkDirDistCacheSymlinkInvalid() - throws IOException, URISyntaxException { - JobConf jConf = new JobConf(); - FileSystem fs = FileSystem.getLocal(jConf); - - Path rootDir = new Path(System.getProperty("test.build.data", "/tmp"), - "testSetupWorkDirSymlinkFailure"); - - // create file for DistributedCache and set it - Path myTargetDir = new Path(rootDir, "./tmp"); - createEmptyDir(fs, myTargetDir); - createFile(fs, myTargetDir, "cacheFile.txt"); - TrackerDistributedCacheManager.setLocalFiles(jConf, (myTargetDir.toString() + - Path.SEPARATOR+"cacheFile.txt")); - assertTrue("Did not create cache file in " + myTargetDir, - (fs.listStatus(myTargetDir).length == 1)); - - // let us enable creation of symlinks in setupWorkDir() - jConf.set(MRJobConfig.CACHE_SYMLINK, "yes"); - - // 
add an invalid symlink - Path myWorkDir = new Path(rootDir, "./work"); - createEmptyDir(fs, myWorkDir); - DistributedCache.addCacheFile(new URI(myWorkDir.toString() + - Path.SEPARATOR+"file.txt#invalid/abc"), jConf); - - // setupWorkDir should throw exception - try { - TaskRunner.setupWorkDir(jConf, new File(myWorkDir.toUri().getPath())); - assertFalse("TaskRunner.setupWorkDir() did not throw exception when" + - " given invalid cache file", true); - } catch(IOException e) { - // this is correct behavior - assertTrue(myWorkDir + " does not have cache symlink.", - fs.exists(myWorkDir) && (fs.listStatus(myWorkDir).length == 0)); - } - - // cleanup - fs.delete(rootDir, true); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSeveral.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSeveral.java deleted file mode 100644 index fbb2867c63e..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSeveral.java +++ /dev/null @@ -1,443 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import static org.junit.Assert.*; - -import java.io.BufferedReader; -import java.io.ByteArrayOutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.security.PrivilegedExceptionAction; -import java.util.Iterator; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.io.WritableComparable; -import org.apache.hadoop.mapred.TestJobInProgressListener.MyListener; -import org.apache.hadoop.mapred.UtilsForTests.FailMapper; -import org.apache.hadoop.mapred.UtilsForTests.KillMapper; -import org.apache.hadoop.mapred.lib.NullOutputFormat; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.security.UserGroupInformation; -import org.junit.After; -import org.junit.Before; -import org.junit.Ignore; -import org.junit.Test; - -/** - * This is a test case that tests several miscellaneous functionality. 
- * This is intended for a fast test and encompasses the following: - * TestJobName - * TestJobClient - * TestJobDirCleanup - * TestJobKillAndFail - * TestUserDefinedCounters - * TestJobInProgressListener - * TestJobHistory - * TestMiniMRClassPath - * TestMiniMRWithDFSWithDistinctUsers - */ - -@SuppressWarnings("deprecation") -public class TestSeveral { - - static final UserGroupInformation DFS_UGI = - TestMiniMRWithDFSWithDistinctUsers.createUGI("dfs", true); - static final UserGroupInformation TEST1_UGI = - TestMiniMRWithDFSWithDistinctUsers.createUGI("pi", false); - static final UserGroupInformation TEST2_UGI = - TestMiniMRWithDFSWithDistinctUsers.createUGI("wc", false); - - private static MiniMRCluster mrCluster = null; - private static MiniDFSCluster dfs = null; - private static FileSystem fs = null; - private static MyListener myListener = null; - - private int numReduces = 5; - private static final int numTT = 5; - - @Before - public void setUp() throws Exception { - - Configuration conf = new Configuration(); - conf.setInt("dfs.replication", 1); - dfs = new MiniDFSCluster(conf, numTT, true, null); - fs = DFS_UGI.doAs(new PrivilegedExceptionAction() { - public FileSystem run() throws IOException { - return dfs.getFileSystem(); - } - }); - - TestMiniMRWithDFSWithDistinctUsers.mkdir(fs, "/user", "mapred", - "mapred", (short)01777); - TestMiniMRWithDFSWithDistinctUsers.mkdir(fs, "/mapred", "mapred", - "mapred", (short)01777); - TestMiniMRWithDFSWithDistinctUsers.mkdir(fs, - conf.get(JTConfig.JT_STAGING_AREA_ROOT), - "mapred", "mapred", (short)01777); - - UserGroupInformation MR_UGI = UserGroupInformation.getLoginUser(); - - // Create a TestJobInProgressListener.MyListener and associate - // it with the MiniMRCluster - - myListener = new MyListener(); - conf.set(JTConfig.JT_IPC_HANDLER_COUNT, "1"); - mrCluster = new MiniMRCluster(0, 0, - numTT, fs.getUri().toString(), - 1, null, null, MR_UGI, new JobConf()); - // make cleanup inline sothat validation of existence of these directories - // can be done - mrCluster.setInlineCleanupThreads(); - - mrCluster.getJobTrackerRunner().getJobTracker() - .addJobInProgressListener(myListener); - } - - @After - public void tearDown() throws Exception { - if (fs != null) { fs.close(); } - if (dfs != null) { dfs.shutdown(); } - if (mrCluster != null) { mrCluster.shutdown(); } - } - - /** - * Utility class to create input for the jobs - * @param inDir - * @param conf - * @throws IOException - */ - private void makeInput(Path inDir, JobConf conf) throws IOException { - FileSystem inFs = inDir.getFileSystem(conf); - - if (inFs.exists(inDir)) { - inFs.delete(inDir, true); - } - inFs.mkdirs(inDir); - Path inFile = new Path(inDir, "part-0"); - DataOutputStream file = inFs.create(inFile); - for (int i = 0; i < numReduces; i++) { - file.writeBytes("b a\n"); - } - file.close(); - } - - /** - * Clean the Output directories before running a Job - * @param fs - * @param outDir - */ - private void clean(FileSystem fs, Path outDir) { - try { - fs.delete(outDir, true); - } catch (Exception e) {} - } - - private void verifyOutput(FileSystem fs, Path outDir) throws IOException { - Path[] outputFiles = FileUtil.stat2Paths( - fs.listStatus(outDir, new Utils.OutputFileUtils.OutputFilesFilter())); - assertEquals(numReduces, outputFiles.length); - InputStream is = fs.open(outputFiles[0]); - BufferedReader reader = new BufferedReader(new InputStreamReader(is)); - String s = reader.readLine().split("\t")[1]; - assertEquals("b a",s); - assertNull(reader.readLine()); - 
reader.close(); - } - - - @SuppressWarnings("unchecked") - static class DoNothingReducer extends MapReduceBase implements - Reducer { - public void reduce(WritableComparable key, Iterator val, - OutputCollector output, - Reporter reporter) - throws IOException { // Do nothing - } - } - - /** - * Submit a job with a complex name (TestJobName.testComplexName) - * Check the status of the job as successful (TestJobKillAndFail) - * Check that the task tracker directory is cleaned up (TestJobDirCleanup) - * Create some user defined counters and check them (TestUserDefinedCounters) - * Job uses a reducer from an External Jar (TestMiniMRClassPath) - * Check task directories (TestMiniMRWithDFS) - * Check if the listener notifications are received(TestJobInProgressListener) - * Verify if priority changes to the job are reflected (TestJobClient) - * Validate JobHistory file format, content, userlog location (TestJobHistory) - * - * @throws Exception - * - * TODO fix testcase - */ - @Test - @Ignore - public void testSuccessfulJob() throws Exception { - final JobConf conf = mrCluster.createJobConf(); - - // Set a complex Job name (TestJobName) - conf.setJobName("[name][some other value that gets" + - " truncated internally that this test attempts to aggravate]"); - conf.setInputFormat(TextInputFormat.class); - conf.setOutputFormat(TextOutputFormat.class); - - conf.setMapOutputKeyClass(LongWritable.class); - conf.setMapOutputValueClass(Text.class); - - conf.setOutputKeyClass(LongWritable.class); - conf.setOutputValueClass(Text.class); - - conf.setCompressMapOutput(true); - - // Set the Mapper class to a Counting Mapper that defines user - // defined counters - conf.setMapperClass(TestUserDefinedCounters.CountingMapper.class); - - conf.set("mapred.reducer.class", "testjar.ExternalIdentityReducer"); - - conf.setLong(org.apache.hadoop.mapreduce.lib.input. 
- FileInputFormat.SPLIT_MINSIZE, 1024*1024); - - conf.setNumReduceTasks(numReduces); - conf.setJobPriority(JobPriority.HIGH); - conf.setJar("build/test/mapred/testjar/testjob.jar"); - - String pattern = - TaskAttemptID.getTaskAttemptIDsPattern(null, null, TaskType.MAP, 1, null); - conf.setKeepTaskFilesPattern(pattern); - - final Path inDir = new Path("./test/input"); - final Path outDir = new Path("./test/output"); - - TEST1_UGI.doAs(new PrivilegedExceptionAction() { - public Void run() { - FileInputFormat.setInputPaths(conf, inDir); - FileOutputFormat.setOutputPath(conf, outDir); - return null; - } - }); - - clean(fs, outDir); - final RunningJob job = TEST1_UGI.doAs(new PrivilegedExceptionAction() { - public RunningJob run() throws IOException { - makeInput(inDir, conf); - JobClient jobClient = new JobClient(conf); - return jobClient.submitJob(conf); - } - }); - - final JobID jobId = job.getID(); - - while (job.getJobState() != JobStatus.RUNNING) { - try { - Thread.sleep(100); - } catch (InterruptedException e) { - break; - } - } - - // Check for JobInProgress Listener notification - assertFalse("Missing event notification for a running job", - myListener.contains(jobId, true)); - - job.waitForCompletion(); - - assertTrue(job.isComplete()); - assertEquals(JobStatus.SUCCEEDED,job.getJobState()); - - // check if the job success was notified - assertFalse("Missing event notification for a successful job", - myListener.contains(jobId, false)); - - // Check Task directories - TaskAttemptID taskid = new TaskAttemptID( - new TaskID(jobId, TaskType.MAP, 1),0); - TestMiniMRWithDFS.checkTaskDirectories(mrCluster, TEST1_UGI.getUserName(), - new String[] { jobId.toString() }, new String[] { taskid.toString() }); - - ByteArrayOutputStream out = new ByteArrayOutputStream(); - int exitCode = TestJobClient.runTool(conf, new JobClient(), - new String[] { "-counter", jobId.toString(), - "org.apache.hadoop.mapreduce.TaskCounter", "MAP_INPUT_RECORDS" }, - out); - assertEquals(0, exitCode); - assertEquals(numReduces, Integer.parseInt(out.toString().trim())); - - // Verify if user defined counters have been updated properly - TestUserDefinedCounters.verifyCounters(job, numTT); - - // Verify job priority change (TestJobClient) - TestJobClient.verifyJobPriority(jobId.toString(), "HIGH", conf); - - // Basic check if the job did run fine - TEST1_UGI.doAs(new PrivilegedExceptionAction() { - public Void run() throws IOException { - verifyOutput(outDir.getFileSystem(conf), outDir); - - - //TestJobHistory - TestJobHistory.validateJobHistoryFileFormat( - mrCluster.getJobTrackerRunner().getJobTracker().getJobHistory(), - jobId, conf, "SUCCEEDED", false); - - TestJobHistory.validateJobHistoryFileContent(mrCluster, job, conf); - - // Since we keep setKeepTaskFilesPattern, these files should still be - // present and will not be cleaned up. 
- for(int i=0; i < numTT; ++i) { - Path jobDirPath = - new Path(mrCluster.getTaskTrackerLocalDir(i), TaskTracker - .getJobCacheSubdir(TEST1_UGI.getUserName())); - boolean b = FileSystem.getLocal(conf).delete(jobDirPath, true); - assertTrue(b); - } - return null; - } - }); - - } - - /** - * Submit a job with BackSlashed name (TestJobName) that will fail - * Test JobHistory User Location to none (TetsJobHistory) - * Verify directory up for the Failed Job (TestJobDirCleanup) - * Verify Event is generated for the failed job (TestJobInProgressListener) - * - * @throws Exception - * - * TODO fix testcase - */ - @Test - @Ignore - public void testFailedJob() throws Exception { - JobConf conf = mrCluster.createJobConf(); - - // Name with regex - conf.setJobName("name \\Evalue]"); - - conf.setInputFormat(TextInputFormat.class); - - conf.setOutputKeyClass(LongWritable.class); - conf.setOutputValueClass(Text.class); - conf.setMapperClass(FailMapper.class); - conf.setOutputFormat(NullOutputFormat.class); - conf.setJobPriority(JobPriority.HIGH); - - conf.setLong(JobContext.MAP_MAX_ATTEMPTS, 1); - - conf.setNumReduceTasks(0); - - final Path inDir = new Path("./wc/input"); - final Path outDir = new Path("./wc/output"); - - FileInputFormat.setInputPaths(conf, inDir); - FileOutputFormat.setOutputPath(conf, outDir); - - clean(fs, outDir); - makeInput(inDir, conf); - - JobClient jobClient = new JobClient(conf); - RunningJob job = jobClient.submitJob(conf); - JobID jobId = job.getID(); - job.waitForCompletion(); - - assertTrue(job.isComplete()); - assertEquals(JobStatus.FAILED, job.getJobState()); - - // check if the job failure was notified - assertFalse("Missing event notification on failing a running job", - myListener.contains(jobId)); - - TestJobDirCleanup.verifyJobDirCleanup(mrCluster, numTT, job.getID()); - } - - /** - * Submit a job that will get Killed with a Regex Name (TestJobName) - * Verify Job Directory Cleanup (TestJobDirCleanup) - * Verify Even is generated for Killed Job (TestJobInProgressListener) - * - * @throws Exception - * - * TODO fix testcase - */ - @Test - @Ignore - public void testKilledJob() throws Exception { - JobConf conf = mrCluster.createJobConf(); - - // Name with regex - conf.setJobName("name * abc + Evalue]"); - - conf.setInputFormat(TextInputFormat.class); - - conf.setOutputKeyClass(LongWritable.class); - conf.setOutputValueClass(Text.class); - conf.setMapperClass(KillMapper.class); - conf.setOutputFormat(NullOutputFormat.class); - conf.setNumReduceTasks(0); - - conf.setLong(JobContext.MAP_MAX_ATTEMPTS, 2); - - final Path inDir = new Path("./wc/input"); - final Path outDir = new Path("./wc/output"); - final Path histDir = new Path("./wc/history"); - - FileInputFormat.setInputPaths(conf, inDir); - FileOutputFormat.setOutputPath(conf, outDir); - - clean(fs, outDir); - makeInput(inDir, conf); - - JobClient jobClient = new JobClient(conf); - RunningJob job = jobClient.submitJob(conf); - - while (job.getJobState() != JobStatus.RUNNING) { - try { - Thread.sleep(100); - } catch (InterruptedException e) { - break; - } - } - job.killJob(); - - job.waitForCompletion(); - - assertTrue(job.isComplete()); - assertEquals(JobStatus.KILLED, job.getJobState()); - - // check if the job failure was notified - assertFalse("Missing event notification on killing a running job", - myListener.contains(job.getID())); - - TestJobDirCleanup.verifyJobDirCleanup(mrCluster, numTT, job.getID()); - } - -} - - diff --git 
a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestShuffleExceptionCount.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestShuffleExceptionCount.java deleted file mode 100644 index 9de261b3136..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestShuffleExceptionCount.java +++ /dev/null @@ -1,245 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import static org.junit.Assert.assertEquals; - -import java.io.IOException; -import java.util.Collection; -import java.util.Map; - -import org.apache.hadoop.mapred.TaskTracker.ShuffleServerMetrics; -import org.apache.hadoop.metrics.ContextFactory; -import org.apache.hadoop.metrics.MetricsContext; -import org.apache.hadoop.metrics.spi.OutputRecord; -import org.junit.Test; - -public class TestShuffleExceptionCount { - - public static class TestMapOutputServlet extends TaskTracker.MapOutputServlet { - - public void checkException(IOException ie, String exceptionMsgRegex, - String exceptionStackRegex, ShuffleServerMetrics shuffleMetrics) { - super.checkException(ie, exceptionMsgRegex, exceptionStackRegex, - shuffleMetrics); - } - - } - - @Test - public void testCheckException() throws IOException, InterruptedException, - ClassNotFoundException, InstantiationException, IllegalAccessException { - TestMapOutputServlet testServlet = new TestMapOutputServlet(); - JobConf conf = new JobConf(); - conf.setUser("testuser"); - conf.setJobName("testJob"); - conf.setSessionId("testSession"); - - // setup metrics context factory - ContextFactory factory = ContextFactory.getFactory(); - factory.setAttribute("mapred.class", - "org.apache.hadoop.metrics.spi.NoEmitMetricsContext"); - - TaskTracker tt = new TaskTracker(); - tt.setConf(conf); - ShuffleServerMetrics shuffleMetrics = tt.new ShuffleServerMetrics(conf); - - // first test with only MsgRegex set but doesn't match - String exceptionMsgRegex = "Broken pipe"; - String exceptionStackRegex = null; - IOException ie = new IOException("EOFException"); - testServlet.checkException(ie, exceptionMsgRegex, exceptionStackRegex, - shuffleMetrics); - - MetricsContext context = factory.getContext("mapred"); - shuffleMetrics.doUpdates(context); - Map> records = context.getAllRecords(); - Collection col = records.get("shuffleOutput"); - OutputRecord outputRecord = col.iterator().next(); - assertEquals(0, outputRecord.getMetric("shuffle_exceptions_caught") - .intValue()); - - // test with only MsgRegex set that does match - ie = new IOException("Broken pipe"); - testServlet.checkException(ie, exceptionMsgRegex, exceptionStackRegex, - shuffleMetrics); - - shuffleMetrics.doUpdates(context); - assertEquals(1, 
outputRecord.getMetric("shuffle_exceptions_caught") - .intValue()); - - // test with neither set, make sure incremented - exceptionStackRegex = null; - exceptionMsgRegex = null; - testServlet.checkException(ie, exceptionMsgRegex, exceptionStackRegex, - shuffleMetrics); - shuffleMetrics.doUpdates(context); - assertEquals(2, outputRecord.getMetric("shuffle_exceptions_caught") - .intValue()); - - // test with only StackRegex set doesn't match - exceptionStackRegex = ".*\\.doesnt\\$SelectSet\\.wakeup.*"; - exceptionMsgRegex = null; - ie.setStackTrace(constructStackTrace()); - testServlet.checkException(ie, exceptionMsgRegex, exceptionStackRegex, - shuffleMetrics); - shuffleMetrics.doUpdates(context); - assertEquals(2, outputRecord.getMetric("shuffle_exceptions_caught") - .intValue()); - - // test with only StackRegex set does match - exceptionStackRegex = ".*\\.SelectorManager\\$SelectSet\\.wakeup.*"; - testServlet.checkException(ie, exceptionMsgRegex, exceptionStackRegex, - shuffleMetrics); - shuffleMetrics.doUpdates(context); - assertEquals(3, outputRecord.getMetric("shuffle_exceptions_caught") - .intValue()); - - // test with both regex set and matches - exceptionMsgRegex = "Broken pipe"; - ie.setStackTrace(constructStackTraceTwo()); - testServlet.checkException(ie, exceptionMsgRegex, exceptionStackRegex, - shuffleMetrics); - shuffleMetrics.doUpdates(context); - assertEquals(4, outputRecord.getMetric("shuffle_exceptions_caught") - .intValue()); - - // test with both regex set and only msg matches - exceptionStackRegex = ".*[1-9]+BOGUSREGEX"; - testServlet.checkException(ie, exceptionMsgRegex, exceptionStackRegex, - shuffleMetrics); - shuffleMetrics.doUpdates(context); - assertEquals(4, outputRecord.getMetric("shuffle_exceptions_caught") - .intValue()); - - // test with both regex set and only stack matches - exceptionStackRegex = ".*\\.SelectorManager\\$SelectSet\\.wakeup.*"; - exceptionMsgRegex = "EOFException"; - testServlet.checkException(ie, exceptionMsgRegex, exceptionStackRegex, - shuffleMetrics); - shuffleMetrics.doUpdates(context); - assertEquals(4, outputRecord.getMetric("shuffle_exceptions_caught") - .intValue()); - } - - /* - * Construction exception like: java.io.IOException: Broken pipe at - * sun.nio.ch.EPollArrayWrapper.interrupt(Native Method) at - * sun.nio.ch.EPollArrayWrapper.interrupt(EPollArrayWrapper.java:256) at - * sun.nio.ch.EPollSelectorImpl.wakeup(EPollSelectorImpl.java:175) at - * org.mortbay - * .io.nio.SelectorManager$SelectSet.wakeup(SelectorManager.java:831) at - * org.mortbay - * .io.nio.SelectorManager$SelectSet.doSelect(SelectorManager.java:709) at - * org.mortbay.io.nio.SelectorManager.doSelect(SelectorManager.java:192) at - * org - * .mortbay.jetty.nio.SelectChannelConnector.accept(SelectChannelConnector.java - * :124) at - * org.mortbay.jetty.AbstractConnector$Acceptor.run(AbstractConnector. 
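The assertion sequence above pins down the counting rule the deleted servlet helper follows: a null regex means "not configured", and the shuffle_exceptions_caught metric is bumped only when every configured regex (exception message and/or stack trace) matches. A minimal standalone sketch of that rule, with a hypothetical class and method name rather than the actual TaskTracker.MapOutputServlet internals:

import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.regex.Pattern;

public class ShuffleExceptionMatcherSketch {
  /**
   * True when the exception should be counted: every configured regex must
   * match, and a null regex is treated as always matching.
   */
  static boolean countAsShuffleException(IOException ie,
                                         String msgRegex,
                                         String stackRegex) {
    if (msgRegex != null) {
      String msg = ie.getMessage();
      if (msg == null || !Pattern.matches(msgRegex, msg)) {
        return false;
      }
    }
    if (stackRegex != null) {
      StringWriter sw = new StringWriter();
      ie.printStackTrace(new PrintWriter(sw));
      // DOTALL lets ".*" patterns span the multi-line stack trace.
      if (!Pattern.compile(stackRegex, Pattern.DOTALL)
                  .matcher(sw.toString()).matches()) {
        return false;
      }
    }
    return true;
  }
}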
- * java:708) at - * org.mortbay.thread.QueuedThreadPool$PoolThread.run(QueuedThreadPool - * .java:582) - */ - private StackTraceElement[] constructStackTrace() { - StackTraceElement[] stack = new StackTraceElement[9]; - stack[0] = new StackTraceElement("sun.nio.ch.EPollArrayWrapper", - "interrupt", "", -2); - stack[1] = new StackTraceElement("sun.nio.ch.EPollArrayWrapper", - "interrupt", "EPollArrayWrapper.java", 256); - stack[2] = new StackTraceElement("sun.nio.ch.EPollSelectorImpl", "wakeup", - "EPollSelectorImpl.java", 175); - stack[3] = new StackTraceElement( - "org.mortbay.io.nio.SelectorManager$SelectSet", "wakeup", - "SelectorManager.java", 831); - stack[4] = new StackTraceElement( - "org.mortbay.io.nio.SelectorManager$SelectSet", "doSelect", - "SelectorManager.java", 709); - stack[5] = new StackTraceElement("org.mortbay.io.nio.SelectorManager", - "doSelect", "SelectorManager.java", 192); - stack[6] = new StackTraceElement( - "org.mortbay.jetty.nio.SelectChannelConnector", "accept", - "SelectChannelConnector.java", 124); - stack[7] = new StackTraceElement( - "org.mortbay.jetty.AbstractConnector$Acceptor", "run", - "AbstractConnector.java", 708); - stack[8] = new StackTraceElement( - "org.mortbay.thread.QueuedThreadPool$PoolThread", "run", - "QueuedThreadPool.java", 582); - - return stack; - } - - /* - * java.io.IOException: Broken pipe at - * sun.nio.ch.EPollArrayWrapper.interrupt(Native Method) at - * sun.nio.ch.EPollArrayWrapper.interrupt(EPollArrayWrapper.java:256) at - * sun.nio.ch.EPollSelectorImpl.wakeup(EPollSelectorImpl.java:175) at - * org.mortbay - * .io.nio.SelectorManager$SelectSet.wakeup(SelectorManager.java:831) at - * org.mortbay - * .io.nio.SelectChannelEndPoint.updateKey(SelectChannelEndPoint.java:335) at - * org - * .mortbay.io.nio.SelectChannelEndPoint.blockWritable(SelectChannelEndPoint - * .java:278) at - * org.mortbay.jetty.AbstractGenerator$Output.blockForOutput(AbstractGenerator - * .java:545) at - * org.mortbay.jetty.AbstractGenerator$Output.flush(AbstractGenerator - * .java:572) at - * org.mortbay.jetty.HttpConnection$Output.flush(HttpConnection.java:1012) at - * org - * .mortbay.jetty.AbstractGenerator$Output.write(AbstractGenerator.java:651)at - * org - * .mortbay.jetty.AbstractGenerator$Output.write(AbstractGenerator.java:580) - * at - */ - private StackTraceElement[] constructStackTraceTwo() { - StackTraceElement[] stack = new StackTraceElement[11]; - stack[0] = new StackTraceElement("sun.nio.ch.EPollArrayWrapper", - "interrupt", "", -2); - stack[1] = new StackTraceElement("sun.nio.ch.EPollArrayWrapper", - "interrupt", "EPollArrayWrapper.java", 256); - stack[2] = new StackTraceElement("sun.nio.ch.EPollSelectorImpl", "wakeup", - "EPollSelectorImpl.java", 175); - stack[3] = new StackTraceElement( - "org.mortbay.io.nio.SelectorManager$SelectSet", "wakeup", - "SelectorManager.java", 831); - stack[4] = new StackTraceElement( - "org.mortbay.io.nio.SelectChannelEndPoint", "updateKey", - "SelectChannelEndPoint.java", 335); - stack[5] = new StackTraceElement( - "org.mortbay.io.nio.SelectChannelEndPoint", "blockWritable", - "SelectChannelEndPoint.java", 278); - stack[6] = new StackTraceElement( - "org.mortbay.jetty.AbstractGenerator$Output", "blockForOutput", - "AbstractGenerator.java", 545); - stack[7] = new StackTraceElement( - "org.mortbay.jetty.AbstractGenerator$Output", "flush", - "AbstractGenerator.java", 572); - stack[8] = new StackTraceElement("org.mortbay.jetty.HttpConnection$Output", - "flush", "HttpConnection.java", 1012); - stack[9] = new 
StackTraceElement( - "org.mortbay.jetty.AbstractGenerator$Output", "write", - "AbstractGenerator.java", 651); - stack[10] = new StackTraceElement( - "org.mortbay.jetty.AbstractGenerator$Output", "write", - "AbstractGenerator.java", 580); - - return stack; - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestShuffleJobToken.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestShuffleJobToken.java deleted file mode 100644 index 70722dced24..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestShuffleJobToken.java +++ /dev/null @@ -1,154 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - - -import static org.junit.Assert.fail; - -import java.io.File; -import java.io.IOException; -import java.net.MalformedURLException; -import java.net.URL; -import java.net.URLConnection; -import java.security.GeneralSecurityException; - -import javax.crypto.SecretKey; - -import org.apache.hadoop.http.HttpServer; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapreduce.security.SecureShuffleUtils; -import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier; -import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager; -import org.apache.hadoop.security.token.Token; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import static org.junit.Assert.assertTrue; - -public class TestShuffleJobToken { - private static HttpServer server; - private static URL baseUrl; - private static File dir; - private static final String JOB_ID = "job_20091117075357176_0001"; - private static final String BAD_JOB_ID = "job_20091117075357176_0002"; - - // create fake url - private URL getMapOutputURL(String host) throws MalformedURLException { - // Get the base url - StringBuffer url = new StringBuffer(host); - url.append("mapOutput?"); - url.append("job=" + JOB_ID + "&"); - url.append("reduce=0&"); - url.append("map=attempt"); - - return new URL(url.toString()); - } - - @Before - public void setUp() throws Exception { - dir = new File(System.getProperty("build.webapps", "build/webapps") + "/test"); - System.out.println("dir="+dir.getAbsolutePath()); - if(!dir.exists()) { - assertTrue(dir.mkdirs()); - } - server = new HttpServer("test", "0.0.0.0", 0, true); - server.addServlet("shuffle", "/mapOutput", TaskTracker.MapOutputServlet.class); - server.start(); - int port = server.getPort(); - baseUrl = new URL("http://localhost:" + port + "/"); - } - - @After - public void tearDown() throws Exception { - if(dir.exists()) - dir.delete(); - if(server!=null) - server.stop(); - } - - - /** - * try positive and negative case with invalid urlHash - */ - @Test - public void 
testInvalidJobToken() - throws IOException, GeneralSecurityException { - - URL url = getMapOutputURL(baseUrl.toString()); - String enc_str = SecureShuffleUtils.buildMsgFrom(url); - URLConnection connectionGood = url.openConnection(); - - TaskTracker tt = new TaskTracker(); - JobTokenSecretManager jtSecretManager = new JobTokenSecretManager(); - // create fake TaskTracker - needed for keys storage - JobTokenIdentifier identifier = new JobTokenIdentifier(new Text(JOB_ID)); - Token jt = new Token(identifier, - jtSecretManager); - SecretKey tokenSecret = JobTokenSecretManager.createSecretKey(jt.getPassword()); - addJobToken(tt, JOB_ID, jt); // fake id - server.setAttribute("task.tracker", tt); - - // encode the url - String urlHashGood = SecureShuffleUtils.generateHash(enc_str.getBytes(), tokenSecret); // valid hash - - // another the key - JobTokenIdentifier badIdentifier = new JobTokenIdentifier(new Text(BAD_JOB_ID)); - Token badToken = new Token(badIdentifier, - jtSecretManager); - SecretKey badSecret = JobTokenSecretManager.createSecretKey(badToken.getPassword()); - String urlHashBad = SecureShuffleUtils.generateHash(enc_str.getBytes(), badSecret); // invalid hash - - // put url hash into http header - connectionGood.addRequestProperty(SecureShuffleUtils.HTTP_HEADER_URL_HASH, urlHashGood); - - // valid url hash should not fail with security error - try { - connectionGood.getInputStream(); - } catch (IOException ie) { - String msg = ie.getLocalizedMessage(); - if(msg.contains("Server returned HTTP response code: 401 for URL:")) { - fail("securtity failure with valid urlHash:"+ie); - } - System.out.println("valid urlhash passed validation"); - } - // invalid url hash - URLConnection connectionBad = url.openConnection(); - connectionBad.addRequestProperty(SecureShuffleUtils.HTTP_HEADER_URL_HASH, urlHashBad); - - try { - connectionBad.getInputStream(); - fail("Connection should've failed because of invalid urlHash"); - } catch (IOException ie) { - String msg = ie.getLocalizedMessage(); - if(!msg.contains("Server returned HTTP response code: 401 for URL:")) { - fail("connection failed with other then validation error:"+ie); - } - System.out.println("validation worked, failed with:"+ie); - } - } - /*Note that this method is there for a unit testcase (TestShuffleJobToken)*/ - void addJobToken(TaskTracker tt, String jobIdStr, Token token) { - JobID jobId = JobID.forName(jobIdStr); - TaskTracker.RunningJob rJob = new TaskTracker.RunningJob(jobId); - synchronized (tt.runningJobs) { - tt.runningJobs.put(jobId, rJob); - } - tt.getJobTokenSecretManager().addTokenForJob(jobIdStr, token); - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSpeculativeExecution.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSpeculativeExecution.java deleted file mode 100644 index f7eefe9649b..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSpeculativeExecution.java +++ /dev/null @@ -1,333 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
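For orientation, the client half of the handshake the deleted TestShuffleJobToken drives can be sketched as follows, using only the SecureShuffleUtils and JobTokenSecretManager calls that appear in the test; the servlet side recomputes the hash from its stored job-token secret and, as the negative case above shows, answers HTTP 401 when verification fails.

import java.io.IOException;
import java.net.URL;
import java.net.URLConnection;
import java.security.GeneralSecurityException;

import javax.crypto.SecretKey;

import org.apache.hadoop.mapreduce.security.SecureShuffleUtils;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.security.token.Token;

class ShuffleFetchSketch {
  /** Opens a mapOutput connection carrying the URL hash the servlet expects. */
  static URLConnection openSignedConnection(URL mapOutputUrl,
      Token<JobTokenIdentifier> jobToken)
      throws IOException, GeneralSecurityException {
    SecretKey secret =
        JobTokenSecretManager.createSecretKey(jobToken.getPassword());
    String canonical = SecureShuffleUtils.buildMsgFrom(mapOutputUrl);
    String urlHash =
        SecureShuffleUtils.generateHash(canonical.getBytes(), secret);
    URLConnection conn = mapOutputUrl.openConnection();
    conn.addRequestProperty(SecureShuffleUtils.HTTP_HEADER_URL_HASH, urlHash);
    return conn;  // a wrong or missing hash surfaces as a 401 on getInputStream()
  }
}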
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.IOException; - -import junit.extensions.TestSetup; -import junit.framework.Test; -import junit.framework.TestCase; -import junit.framework.TestSuite; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -import org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobInProgress; -import org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobTracker; -import org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobTrackerMetricsInst; -import org.apache.hadoop.mapred.UtilsForTests.FakeClock; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; - -public class TestSpeculativeExecution extends TestCase { - - FakeJobInProgress job; - static FakeJobTracker jobTracker; - static class SpecFakeClock extends FakeClock { - long SPECULATIVE_LAG = TaskInProgress.SPECULATIVE_LAG; - public void advanceBySpeculativeLag() { - time += SPECULATIVE_LAG; - } - }; - static SpecFakeClock clock; - static final Log LOG = LogFactory.getLog(TestSpeculativeExecution.class); - private static FakeJobTrackerMetricsInst fakeInst; - - - static String trackers[] = new String[] {"tracker_tracker1:1000", - "tracker_tracker2:1000", "tracker_tracker3:1000", - "tracker_tracker4:1000", "tracker_tracker5:1000"}; - - public static Test suite() { - TestSetup setup = - new TestSetup(new TestSuite(TestSpeculativeExecution.class)) { - protected void setUp() throws Exception { - JobConf conf = new JobConf(); - conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:0"); - conf.set(JTConfig.JT_HTTP_ADDRESS, "0.0.0.0:0"); - conf.set(JTConfig.JT_INSTRUMENTATION, - FakeJobTrackerMetricsInst.class.getName()); - jobTracker = new FakeJobTracker(conf, (clock = new SpecFakeClock()), - trackers); - fakeInst = (FakeJobTrackerMetricsInst) jobTracker.getInstrumentation(); - for (String tracker : trackers) { - FakeObjectUtilities.establishFirstContact(jobTracker, tracker); - } - } - protected void tearDown() throws Exception { - //delete the build/test/logs/ dir - } - }; - return setup; - } - - public void testRunningTaskCountWithSpeculation() throws IOException { - TaskAttemptID[] taskAttemptID = new TaskAttemptID[8]; - JobConf conf = new JobConf(); - conf.setSpeculativeExecution(true); - conf.setNumMapTasks(3); - conf.setNumReduceTasks(3); - conf.setFloat(JobContext.SPECULATIVE_SLOWTASK_THRESHOLD, 0.5f); - FakeJobInProgress job = new FakeJobInProgress(conf, jobTracker); - job.initTasks(); - - //Check for runningMap counts first - //schedule maps - taskAttemptID[0] = job.findMapTask(trackers[0]); - taskAttemptID[1] = job.findMapTask(trackers[1]); - taskAttemptID[2] = job.findMapTask(trackers[2]); - - clock.advance(5000); - job.finishTask(taskAttemptID[0]); - clock.advance(1000); - job.finishTask(taskAttemptID[1]); - clock.advanceBySpeculativeLag(); - - //we should get a speculative task now - taskAttemptID[3] = job.findMapTask(trackers[3]); - int oldRunningMap = job.runningMaps(); - LOG.info("No of running maps before fail was " + oldRunningMap); - job.failTask(taskAttemptID[2]); - assertEquals( - "Running maps count should be updated from " + 
oldRunningMap + " to " + - (oldRunningMap - 1), job.runningMaps(), oldRunningMap - 1); - LOG.info(" Job running maps after fail " + job.runningMaps()); - - clock.advance(5000); - job.finishTask(taskAttemptID[3]); - - //check for runningReduce count. - taskAttemptID[4] = job.findReduceTask(trackers[0]); - taskAttemptID[5] = job.findReduceTask(trackers[1]); - taskAttemptID[6] = job.findReduceTask(trackers[2]); - - clock.advance(5000); - job.finishTask(taskAttemptID[4]); - clock.advance(1000); - job.finishTask(taskAttemptID[5]); - - clock.advanceBySpeculativeLag(); - taskAttemptID[7] = job.findReduceTask(trackers[4]); - - int oldRunningReduces = job.runningReduces(); - job.failTask(taskAttemptID[6]); - LOG.info( - " No of running Reduces before fail " + oldRunningReduces); - LOG.info( - " No of runing reduces after fail " + job.runningReduces()); - assertEquals( - "Running reduces count should be updated from " + oldRunningReduces + - " to " + (oldRunningReduces - 1), job.runningReduces(), - oldRunningReduces - 1); - // Verify total speculative tasks by jobtracker instrumentation - assertEquals("Total speculative maps", 1, fakeInst.numSpeculativeMaps); - assertEquals("Total speculative reduces", 1, - fakeInst.numSpeculativeReduces); - LOG.info("Total speculative maps = " + fakeInst.numSpeculativeMaps); - LOG.info("Total speculative reduces = " + fakeInst.numSpeculativeReduces); - - job.finishTask(taskAttemptID[7]); - } - - public void testIsSlowTracker() throws IOException { - TaskAttemptID[] taskAttemptID = new TaskAttemptID[20]; - JobConf conf = new JobConf(); - conf.setSpeculativeExecution(true); - conf.setNumMapTasks(10); - conf.setNumReduceTasks(0); - FakeJobInProgress job = new FakeJobInProgress(conf, jobTracker); - job.initTasks(); - //schedule some tasks - taskAttemptID[0] = job.findMapTask(trackers[0]); - taskAttemptID[1] = job.findMapTask(trackers[0]); - taskAttemptID[2] = job.findMapTask(trackers[0]); - taskAttemptID[3] = job.findMapTask(trackers[1]); - taskAttemptID[4] = job.findMapTask(trackers[1]); - taskAttemptID[5] = job.findMapTask(trackers[1]); - taskAttemptID[6] = job.findMapTask(trackers[2]); - taskAttemptID[7] = job.findMapTask(trackers[2]); - taskAttemptID[8] = job.findMapTask(trackers[2]); - clock.advance(1000); - //Some tasks finish in 1 second (on trackers[0]) - job.finishTask(taskAttemptID[0]); - job.finishTask(taskAttemptID[1]); - job.finishTask(taskAttemptID[2]); - clock.advance(1000); - //Some tasks finish in 2 second (on trackers[1]) - job.finishTask(taskAttemptID[3]); - job.finishTask(taskAttemptID[4]); - job.finishTask(taskAttemptID[5]); - assertEquals("Tracker "+ trackers[0] + " expected to be not slow ", - job.isSlowTracker(trackers[0]), false); - clock.advance(100000); - //After a long time, some tasks finished on trackers[2] - job.finishTask(taskAttemptID[6]); - job.finishTask(taskAttemptID[7]); - job.finishTask(taskAttemptID[8]); - assertEquals("Tracker "+ trackers[2] + " expected to be slow ", - job.isSlowTracker(trackers[2]), true); - // Verify total speculative tasks by jobtracker instrumentation - assertEquals("Total speculative maps", 1, fakeInst.numSpeculativeMaps); - assertEquals("Total speculative reduces", 1, - fakeInst.numSpeculativeReduces); - LOG.info("Total speculative maps = " + fakeInst.numSpeculativeMaps); - LOG.info("Total speculative reduces = " + fakeInst.numSpeculativeReduces); - } - - public void testTaskToSpeculate() throws IOException { - TaskAttemptID[] taskAttemptID = new TaskAttemptID[6]; - JobConf conf = new JobConf(); - 
conf.setSpeculativeExecution(true); - conf.setNumMapTasks(5); - conf.setNumReduceTasks(5); - conf.setFloat(JobContext.SPECULATIVE_SLOWTASK_THRESHOLD, 0.5f); - FakeJobInProgress job = new FakeJobInProgress(conf, jobTracker); - job.initTasks(); - //schedule maps - taskAttemptID[0] = job.findReduceTask(trackers[0]); - taskAttemptID[1] = job.findReduceTask(trackers[1]); - taskAttemptID[2] = job.findReduceTask(trackers[2]); - taskAttemptID[3] = job.findReduceTask(trackers[3]); - taskAttemptID[4] = job.findReduceTask(trackers[3]); - clock.advance(5000); - job.finishTask(taskAttemptID[0]); - clock.advance(1000); - job.finishTask(taskAttemptID[1]); - clock.advance(20000); - clock.advanceBySpeculativeLag(); - //we should get a speculative task now - taskAttemptID[5] = job.findReduceTask(trackers[4]); - assertEquals(taskAttemptID[5].getTaskID().getId(),2); - clock.advance(5000); - job.finishTask(taskAttemptID[5]); - - taskAttemptID[5] = job.findReduceTask(trackers[4]); - assertEquals(taskAttemptID[5].getTaskID().getId(),3); - - // Verify total speculative tasks by jobtracker instrumentation - assertEquals("Total speculative maps", 1, fakeInst.numSpeculativeMaps); - assertEquals("Total speculative reduces", 3, - fakeInst.numSpeculativeReduces); - LOG.info("Total speculative maps = " + fakeInst.numSpeculativeMaps); - LOG.info("Total speculative reduces = " + fakeInst.numSpeculativeReduces); - } - - /* - * Tests the fact that we choose tasks with lesser progress - * among the possible candidates for speculation - */ - public void testTaskLATEScheduling() throws IOException { - TaskAttemptID[] taskAttemptID = new TaskAttemptID[20]; - JobConf conf = new JobConf(); - conf.setSpeculativeExecution(true); - conf.setNumMapTasks(5); - conf.setNumReduceTasks(0); - conf.setFloat(JobContext.SPECULATIVE_SLOWTASK_THRESHOLD, 0.5f); - FakeJobInProgress job = new FakeJobInProgress(conf, jobTracker); - job.initTasks(); - - taskAttemptID[0] = job.findMapTask(trackers[0]); - taskAttemptID[1] = job.findMapTask(trackers[1]); - taskAttemptID[2] = job.findMapTask(trackers[2]); - taskAttemptID[3] = job.findMapTask(trackers[3]); - clock.advance(2000); - job.finishTask(taskAttemptID[0]); - job.finishTask(taskAttemptID[1]); - job.finishTask(taskAttemptID[2]); - clock.advance(250000); - taskAttemptID[4] = job.findMapTask(trackers[3]); - clock.advanceBySpeculativeLag(); - //by doing the above clock adjustments, we bring the progress rate of - //taskID 3 lower than 4. For taskID 3, the rate is 85/317000 - //and for taskID 4, the rate is 20/65000. But when we ask for a spec task - //now, we should get back taskID 4 (since that is expected to complete - //later than taskID 3). 
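//  Worked arithmetic for the comment above (the figures are the test's own).
//  Estimated time remaining = (1 - progress) / rate, with rate in progress
//  per millisecond:
//    task 3: rate = 0.85 / 317000 ~ 2.7e-6  ->  remaining ~ 0.15 / 2.7e-6 ~  56 s
//    task 4: rate = 0.20 /  65000 ~ 3.1e-6  ->  remaining ~ 0.80 / 3.1e-6 ~ 260 s
//  Task 4's projected finish is far later, so it is the one handed out for
//  speculation, which is what the assertion on taskAttemptID[5] verifies.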
- job.progressMade(taskAttemptID[3], 0.85f); - job.progressMade(taskAttemptID[4], 0.20f); - taskAttemptID[5] = job.findMapTask(trackers[4]); - assertEquals(taskAttemptID[5].getTaskID().getId(),4); - // Verify total speculative tasks by jobtracker instrumentation - assertEquals("Total speculative maps", 2, fakeInst.numSpeculativeMaps); - assertEquals("Total speculative reduces", 3, - fakeInst.numSpeculativeReduces); - LOG.info("Total speculative maps = " + fakeInst.numSpeculativeMaps); - LOG.info("Total speculative reduces = " + fakeInst.numSpeculativeReduces); - } - - /* - * Tests the fact that we only launch a limited number of speculative tasks, - * even though we have a lot of tasks in RUNNING state - */ - public void testAtSpeculativeCap() throws IOException { - //The expr which is evaluated for determining whether - //atSpeculativeCap should - //return true or false is - //(#speculative-tasks < max (10, 0.01*#slots, 0.1*#running-tasks) - - //Tests the fact that the max tasks launched is 0.1 * #running-tasks - assertEquals(speculativeCap(1200,800,20), 40); - //Tests the fact that the max tasks launched is 10 - assertEquals(speculativeCap(1200,1150,20), 10); - //Tests the fact that the max tasks launched is 0.01 * #slots - assertEquals(speculativeCap(1200,1150,4000), 20); - // Verify total speculative tasks by jobtracker instrumentation - assertEquals("Total speculative maps", 72, fakeInst.numSpeculativeMaps); - assertEquals("Total speculative reduces", 3, - fakeInst.numSpeculativeReduces); - LOG.info("Total speculative maps = " + fakeInst.numSpeculativeMaps); - LOG.info("Total speculative reduces = " + fakeInst.numSpeculativeReduces); - } - - private int speculativeCap(int totalTasks, int numEarlyComplete, int slots) - throws IOException { - TaskAttemptID[] taskAttemptID = new TaskAttemptID[1500]; - JobConf conf = new JobConf(); - conf.setSpeculativeExecution(true); - conf.setNumMapTasks(totalTasks); - conf.setNumReduceTasks(0); - jobTracker.setNumSlots(slots); - FakeJobInProgress job = new FakeJobInProgress(conf, jobTracker); - job.initTasks(); - int i; - for (i = 0; i < totalTasks; i++) { - taskAttemptID[i] = job.findMapTask(trackers[0]); - } - clock.advance(5000); - for (i = 0; i < numEarlyComplete; i++) { - job.finishTask(taskAttemptID[i]); - } - - clock.advanceBySpeculativeLag(); - - for (i = numEarlyComplete; i < totalTasks; i++) { - job.progressMade(taskAttemptID[i], 0.85f); - } - clock.advance(50000); - for (i = 0; i < (totalTasks - numEarlyComplete); i++) { - taskAttemptID[i] = job.findMapTask(trackers[1]); - clock.advance(2000); - if (taskAttemptID[i] != null) { - //add some good progress constantly for the different - //task-attempts so that - //the tasktracker doesn't get into the slow trackers category - job.progressMade(taskAttemptID[i], 0.99f); - } else { - break; - } - } - return i; - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java deleted file mode 100644 index 18792dc7a34..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSubmitJob.java +++ /dev/null @@ -1,263 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
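Read literally, the cap expression quoted in the comment of testAtSpeculativeCap above is the following predicate. This is a paraphrase of that comment, not the JobInProgress source, and how #slots and #running-tasks are counted there may differ (for example, per task type):

class SpeculativeCapSketch {
  // Hypothetical helper mirroring the comment's expression only.
  static boolean underSpeculativeCap(int speculativeTasks,
                                     int runningTasks, int slots) {
    double cap = Math.max(10.0,
        Math.max(0.01 * slots, 0.1 * runningTasks));
    return speculativeTasks < cap;
  }
}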
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.IOException; -import java.net.URI; -import java.security.PrivilegedExceptionAction; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.mapreduce.SleepJob; -import org.apache.hadoop.mapreduce.protocol.ClientProtocol; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.ToolRunner; -import org.junit.Test; - -/** - * Test job submission. This test checks if - * - basic : job submission via jobclient - * - cleanup : job client crashes while submitting - * - invalid job config - * - invalid memory config - * - */ -public class TestSubmitJob { - static final Log LOG = LogFactory.getLog(TestSubmitJob.class); - - private static Path TEST_DIR = - new Path(System.getProperty("test.build.data","/tmp"), - "job-submission-testing"); - - - /** - * Test to verify that jobs with invalid memory requirements are killed at the - * JT. 
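Two flavours of "invalid" show up in the test body that follows, and both can be sketched as plain JobConf setups using the same deprecated setters the test itself uses (the values mirror the test's cluster limits of 3 GB per map and 4 GB per reduce):

import org.apache.hadoop.mapred.JobConf;

class InvalidMemoryJobSketch {
  // Over-limit request: 4 GB for maps exceeds the cluster's
  // JT_MAX_MAPMEMORY_MB of 3 GB, so the submission is rejected with
  // "Exceeds the cluster's max-memory-limit."
  static JobConf overLimit(JobConf clusterConf) {
    JobConf job = new JobConf(clusterConf);
    job.setMemoryForMapTask(4 * 1024L);     // MB
    job.setMemoryForReduceTask(1 * 1024L);  // MB
    return job;
  }

  // Partially specified request: only reduce memory is set while map memory
  // stays disabled, so the submission is rejected with
  // "Invalid job requirements."
  static JobConf partiallySpecified(JobConf clusterConf) {
    JobConf job = new JobConf(clusterConf);
    job.setMemoryForReduceTask(1 * 1024L);  // MB
    return job;
  }
}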
- * - * @throws Exception - */ - @SuppressWarnings("deprecation") - @Test - public void testJobWithInvalidMemoryReqs() throws Exception { - MiniMRCluster mrCluster = null; - try { - JobConf jtConf = new JobConf(); - jtConf.setLong(MRConfig.MAPMEMORY_MB, 1 * 1024L); - jtConf.setLong(MRConfig.REDUCEMEMORY_MB, 2 * 1024L); - jtConf.setLong(JTConfig.JT_MAX_MAPMEMORY_MB, 3 * 1024L); - jtConf.setLong(JTConfig.JT_MAX_REDUCEMEMORY_MB, 4 * 1024L); - - mrCluster = new MiniMRCluster(0, "file:///", 0, null, null, jtConf); - - JobConf clusterConf = mrCluster.createJobConf(); - - // No map-memory configuration - JobConf jobConf = new JobConf(clusterConf); - jobConf.setMemoryForReduceTask(1 * 1024L); - runJobAndVerifyFailure(jobConf, JobConf.DISABLED_MEMORY_LIMIT, 1 * 1024L, - "Invalid job requirements."); - - // No reduce-memory configuration - jobConf = new JobConf(clusterConf); - jobConf.setMemoryForMapTask(1 * 1024L); - runJobAndVerifyFailure(jobConf, 1 * 1024L, JobConf.DISABLED_MEMORY_LIMIT, - "Invalid job requirements."); - - // Invalid map-memory configuration - jobConf = new JobConf(clusterConf); - jobConf.setMemoryForMapTask(4 * 1024L); - jobConf.setMemoryForReduceTask(1 * 1024L); - runJobAndVerifyFailure(jobConf, 4 * 1024L, 1 * 1024L, - "Exceeds the cluster's max-memory-limit."); - - // No reduce-memory configuration - jobConf = new JobConf(clusterConf); - jobConf.setMemoryForMapTask(1 * 1024L); - jobConf.setMemoryForReduceTask(5 * 1024L); - runJobAndVerifyFailure(jobConf, 1 * 1024L, 5 * 1024L, - "Exceeds the cluster's max-memory-limit."); - } finally { - if (mrCluster != null) - mrCluster.shutdown(); - } - } - - @SuppressWarnings("deprecation") - private void runJobAndVerifyFailure(JobConf jobConf, long memForMapTasks, - long memForReduceTasks, String expectedMsg) - throws Exception, - IOException { - String[] args = { "-m", "0", "-r", "0", "-mt", "0", "-rt", "0" }; - boolean throwsException = false; - String msg = null; - try { - ToolRunner.run(jobConf, new SleepJob(), args); - } catch (RemoteException re) { - throwsException = true; - msg = re.unwrapRemoteException().getMessage(); - } - assertTrue(throwsException); - assertNotNull(msg); - - String overallExpectedMsg = - "(" + memForMapTasks + " memForMapTasks " + memForReduceTasks - + " memForReduceTasks): " + expectedMsg; - assertTrue("Observed message - " + msg - + " - doesn't contain expected message - " + overallExpectedMsg, msg - .contains(overallExpectedMsg)); - } - - @SuppressWarnings("deprecation") - static ClientProtocol getJobSubmitClient(JobConf conf, - UserGroupInformation ugi) - throws IOException { - return (ClientProtocol) RPC.getProxy(ClientProtocol.class, - ClientProtocol.versionID, JobTracker.getAddress(conf), ugi, - conf, NetUtils.getSocketFactory(conf, ClientProtocol.class)); - } - - /** - * Submit a job and check if the files are accessible to other users. 
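The access check at the heart of that test reduces to impersonating a second user and expecting the listing to be denied. A compact sketch of the pattern, where UserGroupInformation.createRemoteUser stands in for the test's createUGI helper and dir is whichever of the system or staging paths is being probed:

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

class StagingDirAccessSketch {
  /** Returns true only if the other user can list the directory (it should not). */
  static boolean otherUserCanList(final Configuration conf, final Path dir)
      throws IOException, InterruptedException {
    UserGroupInformation other =
        UserGroupInformation.createRemoteUser("some-other-user");
    return other.doAs(new PrivilegedExceptionAction<Boolean>() {
      public Boolean run() {
        try {
          FileSystem.get(conf).listStatus(dir);
          return true;                  // unexpected for mapred.system.dir or ~/.staging
        } catch (IOException ioe) {
          return false;                 // expected: "Permission denied"
        }
      }
    });
  }
}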
- */ - @SuppressWarnings("deprecation") - @Test - public void testSecureJobExecution() throws Exception { - LOG.info("Testing secure job submission/execution"); - MiniMRCluster mr = null; - Configuration conf = new Configuration(); - final MiniDFSCluster dfs = new MiniDFSCluster(conf, 1, true, null); - try { - FileSystem fs = - TestMiniMRWithDFSWithDistinctUsers.DFS_UGI.doAs(new PrivilegedExceptionAction() { - public FileSystem run() throws IOException { - return dfs.getFileSystem(); - } - }); - TestMiniMRWithDFSWithDistinctUsers.mkdir(fs, "/user", "mapred", "mapred", (short)01777); - TestMiniMRWithDFSWithDistinctUsers.mkdir(fs, "/mapred", "mapred", "mapred", (short)01777); - TestMiniMRWithDFSWithDistinctUsers.mkdir(fs, conf.get(JTConfig.JT_STAGING_AREA_ROOT), - "mapred", "mapred", (short)01777); - - UserGroupInformation MR_UGI = UserGroupInformation.getLoginUser(); - mr = new MiniMRCluster(0, 0, 1, dfs.getFileSystem().getUri().toString(), - 1, null, null, MR_UGI); - JobTracker jt = mr.getJobTrackerRunner().getJobTracker(); - - // cleanup - dfs.getFileSystem().delete(TEST_DIR, true); - - final Path mapSignalFile = new Path(TEST_DIR, "map-signal"); - final Path reduceSignalFile = new Path(TEST_DIR, "reduce-signal"); - - // create a ugi for user 1 - UserGroupInformation user1 = - TestMiniMRWithDFSWithDistinctUsers.createUGI("user1", false); - Path inDir = new Path("/user/input"); - Path outDir = new Path("/user/output"); - final JobConf job = mr.createJobConf(); - - UtilsForTests.configureWaitingJobConf(job, inDir, outDir, 2, 0, - "test-submit-job", mapSignalFile.toString(), - reduceSignalFile.toString()); - job.set(UtilsForTests.getTaskSignalParameter(true), - mapSignalFile.toString()); - job.set(UtilsForTests.getTaskSignalParameter(false), - reduceSignalFile.toString()); - LOG.info("Submit job as the actual user (" + user1.getUserName() + ")"); - final JobClient jClient = - user1.doAs(new PrivilegedExceptionAction() { - public JobClient run() throws IOException { - return new JobClient(job); - } - }); - RunningJob rJob = user1.doAs(new PrivilegedExceptionAction() { - public RunningJob run() throws IOException { - return jClient.submitJob(job); - } - }); - JobID id = rJob.getID(); - LOG.info("Running job " + id); - - // create user2 - UserGroupInformation user2 = - TestMiniMRWithDFSWithDistinctUsers.createUGI("user2", false); - final JobConf conf_other = mr.createJobConf(); - - FileSystem fs2 = user2.doAs(new PrivilegedExceptionAction() { - @Override - public FileSystem run() throws Exception { - return FileSystem.get(conf_other); - } - }); - - // try accessing mapred.system.dir/jobid/* - try { - Path path = new Path(jt.getSystemDir()); - LOG.info("Try listing the mapred-system-dir as the user (" - + user2.getUserName() + ")"); - fs2.listStatus(path); - fail("JobTracker system dir is accessible to others"); - } catch (IOException ioe) { - assertTrue(ioe.toString(), - ioe.toString().contains("Permission denied")); - } - // try accessing ~/.staging/jobid/* - JobInProgress jip = jt.getJob(id); - Path jobSubmitDirpath = - new Path(jip.getJobConf().get("mapreduce.job.dir")); - try { - LOG.info("Try accessing the job folder for job " + id + " as the user (" - + user2.getUserName() + ")"); - fs2.listStatus(jobSubmitDirpath); - fail("User's staging folder is accessible to others"); - } catch (IOException ioe) { - assertTrue(ioe.toString(), - ioe.toString().contains("Permission denied")); - } - UtilsForTests.signalTasks(dfs, fs, true, mapSignalFile.toString(), - reduceSignalFile.toString()); - // 
wait for job to be done - UtilsForTests.waitTillDone(jClient); - - // check if the staging area is cleaned up - LOG.info("Check if job submit dir is cleanup or not"); - assertFalse(fs.exists(jobSubmitDirpath)); - } finally { - if (mr != null) { - mr.shutdown(); - } - if (dfs != null) { - dfs.shutdown(); - } - } - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskChildOptsParsing.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskChildOptsParsing.java deleted file mode 100644 index d952ab83464..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskChildOptsParsing.java +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import static org.junit.Assert.*; - -import org.junit.Test; - -public class TestTaskChildOptsParsing { - - @SuppressWarnings("deprecation") - private static final TaskAttemptID TASK_ID = new TaskAttemptID(); - private static final String[] EXPECTED_RESULTS = new String[]{"-Dfoo=bar", "-Dbaz=biz"}; - - private void performTest(String input) { - String[] result = TaskRunner.parseChildJavaOpts(input, TASK_ID); - assertArrayEquals(EXPECTED_RESULTS, result); - } - - @Test - public void testParseChildJavaOptsLeadingSpace() { - performTest(" -Dfoo=bar -Dbaz=biz"); - } - - @Test - public void testParseChildJavaOptsTrailingSpace() { - performTest("-Dfoo=bar -Dbaz=biz "); - } - - @Test - public void testParseChildJavaOptsOneSpace() { - performTest("-Dfoo=bar -Dbaz=biz"); - } - - @Test - public void testParseChildJavaOptsMulitpleSpaces() { - performTest("-Dfoo=bar -Dbaz=biz"); - } - - @Test - public void testParseChildJavaOptsOneTab() { - performTest("-Dfoo=bar\t-Dbaz=biz"); - } - - @Test - public void testParseChildJavaOptsMultipleTabs() { - performTest("-Dfoo=bar\t\t-Dbaz=biz"); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskFail.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskFail.java deleted file mode 100644 index 5357e2ee422..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskFail.java +++ /dev/null @@ -1,288 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
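Taken together, the six cases in the deleted TestTaskChildOptsParsing above pin down a simple contract: the child opts string is trimmed and split on any run of whitespace (spaces or tabs). A sketch of just that splitting step; the real TaskRunner.parseChildJavaOpts also receives the attempt id, presumably for @taskid@ substitution, which is not reproduced here:

class ChildJavaOptsSketch {
  // Splitting rule implied by the deleted tests; hypothetical helper name.
  //   " -Dfoo=bar\t-Dbaz=biz "  ->  { "-Dfoo=bar", "-Dbaz=biz" }
  static String[] splitChildJavaOpts(String opts) {
    return opts.trim().split("\\s+");
  }
}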
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.OutputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.net.URL; -import java.net.HttpURLConnection; - -import junit.framework.TestCase; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.io.IntWritable; -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapred.lib.IdentityReducer; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.apache.hadoop.mapreduce.MapReduceTestUtil; -import org.apache.hadoop.mapreduce.TaskType; - -public class TestTaskFail extends TestCase { - - private static final Log LOG = LogFactory.getLog( - TestTaskFail.class); - - private static String taskLog = "Task attempt log"; - static String cleanupLog = "cleanup attempt log"; - - public static class MapperClass extends MapReduceBase - implements Mapper { - String taskid; - public void configure(JobConf job) { - taskid = job.get(JobContext.TASK_ATTEMPT_ID); - } - public void map (LongWritable key, Text value, - OutputCollector output, - Reporter reporter) throws IOException { - System.err.println(taskLog); - assertFalse(Boolean.getBoolean(System - .getProperty("hadoop.tasklog.iscleanup"))); - if (taskid.endsWith("_0")) { - throw new IOException(); - } else if (taskid.endsWith("_1")) { - System.exit(-1); - } else if (taskid.endsWith("_2")) { - throw new Error(); - } - } - } - - static class CommitterWithLogs extends FileOutputCommitter { - public void abortTask(TaskAttemptContext context) throws IOException { - System.err.println(cleanupLog); - String attemptId = System.getProperty("hadoop.tasklog.taskid"); - assertNotNull(attemptId); - if (attemptId.endsWith("_0")) { - assertFalse(Boolean.getBoolean(System - .getProperty("hadoop.tasklog.iscleanup"))); - } else { - assertTrue(Boolean.getBoolean(System - .getProperty("hadoop.tasklog.iscleanup"))); - } - super.abortTask(context); - } - } - - static class CommitterWithFailTaskCleanup extends FileOutputCommitter { - public void abortTask(TaskAttemptContext context) throws IOException { - System.err.println(cleanupLog); - System.exit(-1); - } - } - - static class CommitterWithFailTaskCleanup2 extends FileOutputCommitter { - public void abortTask(TaskAttemptContext context) throws IOException { - System.err.println(cleanupLog); - throw new IOException(); - } - } - - /** access a url, ignoring some IOException such as the page does not exist */ - static int getHttpStatusCode(String urlstring, String userName, - String method) throws IOException { - LOG.info("Accessing " + urlstring + " as user " + userName); - URL url = new URL(urlstring + "&user.name=" + userName); - HttpURLConnection connection = (HttpURLConnection)url.openConnection(); - connection.setRequestMethod(method); - if (method.equals("POST")) { - String encodedData = 
"action=kill&user.name=" + userName; - connection.setRequestProperty("Content-Type", - "application/x-www-form-urlencoded"); - connection.setRequestProperty("Content-Length", - Integer.toString(encodedData.length())); - connection.setDoOutput(true); - - OutputStream os = connection.getOutputStream(); - os.write(encodedData.getBytes()); - } - connection.connect(); - - return connection.getResponseCode(); - } - - public RunningJob launchJob(JobConf conf, - Path inDir, - Path outDir, - String input) - throws IOException { - // set up the input file system and write input text. - FileSystem inFs = inDir.getFileSystem(conf); - FileSystem outFs = outDir.getFileSystem(conf); - outFs.delete(outDir, true); - if (!inFs.mkdirs(inDir)) { - throw new IOException("Mkdirs failed to create " + inDir.toString()); - } - { - // write input into input file - DataOutputStream file = inFs.create(new Path(inDir, "part-0")); - file.writeBytes(input); - file.close(); - } - - // configure the mapred Job - conf.setMapperClass(MapperClass.class); - conf.setReducerClass(IdentityReducer.class); - conf.setNumReduceTasks(0); - FileInputFormat.setInputPaths(conf, inDir); - FileOutputFormat.setOutputPath(conf, outDir); - conf.setSpeculativeExecution(false); - String TEST_ROOT_DIR = new Path(System.getProperty("test.build.data", - "/tmp")).toString().replace(' ', '+'); - conf.set("test.build.data", TEST_ROOT_DIR); - // return the RunningJob handle. - return new JobClient(conf).submitJob(conf); - } - - private void validateAttempt(TaskInProgress tip, TaskAttemptID attemptId, - TaskStatus ts, boolean isCleanup, - boolean containsCleanupLog, JobTracker jt) - throws IOException { - assertEquals(isCleanup, tip.isCleanupAttempt(attemptId)); - assertTrue(ts != null); - assertEquals(TaskStatus.State.FAILED, ts.getRunState()); - // validate tasklogs for task attempt - String log = MapReduceTestUtil.readTaskLog( - TaskLog.LogName.STDERR, attemptId, false); - assertTrue(log.contains(taskLog)); - // access the logs from web url - TaskTrackerStatus ttStatus = jt.getTaskTracker( - tip.machineWhereTaskRan(attemptId)).getStatus(); - String tasklogUrl = TaskLogServlet.getTaskLogUrl("localhost", - String.valueOf(ttStatus.getHttpPort()), attemptId.toString()) + - "&filter=STDERR"; - assertEquals(HttpURLConnection.HTTP_OK, - getHttpStatusCode(tasklogUrl, tip.getUser(), "GET")); - if (containsCleanupLog) { - // validate task logs: tasklog should contain both task logs - // and cleanup logs - assertTrue(log.contains(cleanupLog)); - } - if (isCleanup) { - // validate tasklogs for cleanup attempt - log = MapReduceTestUtil.readTaskLog( - TaskLog.LogName.STDERR, attemptId, true); - assertTrue(log.contains(cleanupLog)); - // access the cleanup attempt's logs from web url - ttStatus = jt.getTaskTracker(tip.machineWhereCleanupRan(attemptId)) - .getStatus(); - String cleanupTasklogUrl = TaskLogServlet.getTaskLogUrl("localhost", - String.valueOf(ttStatus.getHttpPort()), attemptId.toString()) - + "&filter=STDERR&cleanup=true"; - assertEquals(HttpURLConnection.HTTP_OK, - getHttpStatusCode(cleanupTasklogUrl, tip.getUser(), "GET")); - - // Task-cleanup task should not be scheduled on the node that the task just failed - if (jt.taskTrackers().size() >= 2) { - String trackerRanTask = tip.machineWhereTaskRan(attemptId); - String trackerRanCleanupTask = tip.machineWhereCleanupRan(attemptId); - assertFalse(trackerRanTask.equals(trackerRanCleanupTask)); - } - } - } - - private void validateJob(RunningJob job, JobTracker jt, boolean cleanupNeeded) - throws 
IOException { - assertEquals(JobStatus.SUCCEEDED, job.getJobState()); - - JobID jobId = job.getID(); - // construct the task id of first map task - // this should not be cleanup attempt since the first attempt - // fails with an exception - TaskAttemptID attemptId = - new TaskAttemptID(new TaskID(jobId, TaskType.MAP, 0), 0); - TaskInProgress tip = jt.getTip(attemptId.getTaskID()); - TaskStatus ts = jt.getTaskStatus(attemptId); - // task logs will contain cleanup message because the task is failed by - // throwing IOException - validateAttempt(tip, attemptId, ts, false, true, jt); - - attemptId = new TaskAttemptID(new TaskID(jobId, TaskType.MAP, 0), 1); - // this should be cleanup attempt since the second attempt fails - // with System.exit - ts = jt.getTaskStatus(attemptId); - validateAttempt(tip, attemptId, ts, cleanupNeeded, false, jt); - - attemptId = new TaskAttemptID(new TaskID(jobId, TaskType.MAP, 0), 2); - // this should be cleanup attempt since the third attempt fails - // with Error - ts = jt.getTaskStatus(attemptId); - validateAttempt(tip, attemptId, ts, cleanupNeeded, false, jt); - } - - public void testWithDFS() throws IOException { - MiniDFSCluster dfs = null; - MiniMRCluster mr = null; - FileSystem fileSys = null; - try { - final int taskTrackers = 4; - - Configuration conf = new Configuration(); - dfs = new MiniDFSCluster(conf, 4, true, null); - fileSys = dfs.getFileSystem(); - mr = new MiniMRCluster(taskTrackers, fileSys.getUri().toString(), 1); - JobTracker jt = mr.getJobTrackerRunner().getJobTracker(); - final Path inDir = new Path("./input"); - final Path outDir = new Path("./output"); - String input = "The quick brown fox\nhas many silly\nred fox sox\n"; - // launch job with fail tasks - JobConf jobConf = mr.createJobConf(); - // turn down the completion poll interval from the 5 second default - // for better test performance. 
- jobConf.set(Job.COMPLETION_POLL_INTERVAL_KEY, "50"); - jobConf.setOutputCommitter(CommitterWithLogs.class); - RunningJob rJob = launchJob(jobConf, inDir, outDir, input); - rJob.waitForCompletion(); - validateJob(rJob, jt, true); - // launch job with fail tasks and fail-cleanups - fileSys.delete(outDir, true); - jobConf.setOutputCommitter(CommitterWithFailTaskCleanup.class); - rJob = launchJob(jobConf, inDir, outDir, input); - rJob.waitForCompletion(); - validateJob(rJob, jt, true); - fileSys.delete(outDir, true); - jobConf.setOutputCommitter(CommitterWithFailTaskCleanup2.class); - rJob = launchJob(jobConf, inDir, outDir, input); - rJob.waitForCompletion(); - validateJob(rJob, jt, true); - // launch job with task-cleanup switched off - fileSys.delete(outDir, true); - jobConf.setOutputCommitter(CommitterWithFailTaskCleanup.class); - jobConf.setBoolean(MRJobConfig.TASK_CLEANUP_NEEDED, false); - rJob = launchJob(jobConf, inDir, outDir, input); - rJob.waitForCompletion(); - validateJob(rJob, jt, false); - } finally { - if (dfs != null) { dfs.shutdown(); } - if (mr != null) { mr.shutdown(); } - } - } - - public static void main(String[] argv) throws Exception { - TestTaskFail td = new TestTaskFail(); - td.testWithDFS(); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskLauncher.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskLauncher.java deleted file mode 100644 index f455bcb944e..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskLauncher.java +++ /dev/null @@ -1,123 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import static org.junit.Assert.*; - -import java.io.IOException; -import java.util.LinkedHashMap; - -import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; -import org.apache.hadoop.mapred.TaskTracker.TaskLauncher; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; -import org.junit.Test; - -/** - * Tests {@link TaskLauncher} - * - */ -public class TestTaskLauncher { - private static int expectedLaunchAttemptId = 1; - - private static class MyTaskTracker extends TaskTracker { - // override startNewTask just to set the runState, - // not to launch the task really - @Override - void startNewTask(TaskInProgress tip) { - assertEquals(expectedLaunchAttemptId, tip.getTask().getTaskID().getId()); - tip.getStatus().setRunState(TaskStatus.State.RUNNING); - } - } - - /** - * Tests the case "task waiting to be launched is killed externally". - * - * Launches a task which will wait for ever to get slots. Kill the - * task and see if launcher is able to come out of the wait and pickup a - * another task. 
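The launcher tests that follow all wait on a condition with the same bounded polling shape, 300 probes at 100 ms apiece (roughly 30 seconds). Factored out as a hypothetical helper, the pattern is:

class BoundedWaitSketch {
  /** Hypothetical single-method condition; not a Hadoop type. */
  interface Condition { boolean holds(); }

  /** e.g. waitFor(c, 300, 100) gives the ~30 second ceiling used in the tests. */
  static boolean waitFor(Condition condition, int attempts, long sleepMillis)
      throws InterruptedException {
    for (int i = 0; i < attempts; i++) {
      if (condition.holds()) {
        return true;
      }
      Thread.sleep(sleepMillis);
    }
    return condition.holds();
  }
}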
- * - * @throws IOException - */ - @Test - public void testExternalKillForLaunchTask() throws IOException { - // setup a TaskTracker - JobConf ttConf = new JobConf(); - ttConf.setInt(TTConfig.TT_MAP_SLOTS, 4); - TaskTracker tt = new MyTaskTracker(); - tt.runningTasks = new LinkedHashMap(); - tt.setConf(ttConf); - tt.setIndexCache(new IndexCache(ttConf)); - tt.setTaskMemoryManagerEnabledFlag(); - - // Set up TaskTracker instrumentation - tt.setTaskTrackerInstrumentation( - TaskTracker.createInstrumentation(tt, tt.getJobConf())); - - // start map-task launcher with four slots - TaskLauncher mapLauncher = tt.new TaskLauncher(TaskType.MAP, 4); - mapLauncher.start(); - - // launch a task which requires five slots - String jtId = "test"; - TaskAttemptID attemptID = new TaskAttemptID(jtId, 1, TaskType.MAP, 0, 0); - Task task = new MapTask(null, attemptID, 0, null, 5); - mapLauncher.addToTaskQueue(new LaunchTaskAction(task)); - // verify that task is added to runningTasks - TaskInProgress killTip = tt.runningTasks.get(attemptID); - assertNotNull(killTip); - - // wait for a while for launcher to pick up the task - // this loop waits atmost for 30 seconds - for (int i = 0; i < 300; i++) { - if (mapLauncher.getNumWaitingTasksToLaunch() == 0) { - break; - } - UtilsForTests.waitFor(100); - } - assertEquals("Launcher didnt pick up the task " + attemptID + "to launch", - 0, mapLauncher.getNumWaitingTasksToLaunch()); - - // Now, that launcher has picked up the task, it waits until all five slots - // are available. i.e. it waits for-ever - // lets kill the task so that map launcher comes out - tt.processKillTaskAction(new KillTaskAction(attemptID)); - assertEquals(TaskStatus.State.KILLED, killTip.getRunState()); - - // launch another attempt which requires only one slot - TaskAttemptID runningAttemptID = new TaskAttemptID(jtId, 1, TaskType.MAP, - 0, expectedLaunchAttemptId); - mapLauncher.addToTaskQueue(new LaunchTaskAction(new MapTask(null, - runningAttemptID, 0, null, 1))); - TaskInProgress runningTip = tt.runningTasks.get(runningAttemptID); - assertNotNull(runningTip); - - // wait for a while for the task to be launched - // this loop waits at most for 30 seconds - for (int i = 0; i < 300; i++) { - if (runningTip.getRunState().equals(TaskStatus.State.RUNNING)) { - break; - } - UtilsForTests.waitFor(100); - } - - // verify that the task went to running - assertEquals(TaskStatus.State.RUNNING, runningTip.getRunState()); - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskLauncherThreaded.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskLauncherThreaded.java deleted file mode 100644 index 21378f16ddb..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskLauncherThreaded.java +++ /dev/null @@ -1,165 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import static org.junit.Assert.*; -import static org.mockito.Mockito.*; - -import java.io.IOException; -import java.util.LinkedHashMap; -import java.util.TreeMap; - -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; -import org.apache.hadoop.mapred.TaskTracker.TaskLauncher; -import org.apache.hadoop.mapred.TaskTracker.RunningJob; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.security.UserGroupInformation; -import org.junit.Test; - -/** - * Tests {@link TaskLauncherThreaded} - */ -public class TestTaskLauncherThreaded { - private static int jobLocalizedCount = 0; - private static int jobLaunchCount = 0; - private static boolean quitWaiting = false; - private static boolean firstJobStarted = false; - private static boolean firstJobFinished = false; - - private static class MyTaskTracker extends TaskTracker { - - // stub out functions called from startNewTask - @Override - RunningJob localizeJob(TaskInProgress tip) - throws IOException, InterruptedException { - if (firstJobStarted == false) { - firstJobStarted = true; - while (quitWaiting == false) { - Thread.sleep(100); - } - firstJobFinished = true; - } - // mock out a RunningJob - RunningJob rjob = mock(RunningJob.class); - when(rjob.getJobConf()).thenReturn(new JobConf()); - jobLocalizedCount++; - - return rjob; - } - - @Override - protected void launchTaskForJob(TaskInProgress tip, JobConf jobConf, - UserGroupInformation ugi) throws IOException { - jobLaunchCount++; - } - } - - /** - * Tests the case "task localizing doesn't block other tasks". - * - * Launches one task that simulates a task doing large localization, - * then starts a second task and verifies that second task is not - * blocked waiting behind the first task. 
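Stripped of the TaskTracker plumbing, the property asserted by the test below is that a slow per-job localization must not stall the launcher's queue. One generic way to obtain that property (an illustration only, not the actual TaskTracker or TaskLauncher design) is to hand localization to a worker pool:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class NonBlockingLocalizationSketch {
  private final ExecutorService localizers = Executors.newCachedThreadPool();

  // The launcher thread queues the slow localize-then-start work and returns
  // immediately, so a second task is never stuck behind the first.
  void launch(Runnable localizeAndStart) {
    localizers.submit(localizeAndStart);
  }
}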
- * - * @throws IOException - */ - @Test - public void testLocalizationNotBlockingOtherTasks() throws IOException { - // setup a TaskTracker - JobConf ttConf = new JobConf(); - ttConf.setInt("mapred.tasktracker.map.tasks.maximum", 4); - TaskTracker tt = new MyTaskTracker(); - - tt.runningJobs = new TreeMap(); - tt.runningTasks = new LinkedHashMap(); - tt.setIndexCache(new IndexCache(ttConf)); - tt.setTaskMemoryManagerEnabledFlag(); - - // start map-task launcher with four slots - TaskLauncher mapLauncher = tt.new TaskLauncher(TaskType.MAP, 4); - mapLauncher.start(); - - // launch a task which simulates large localization - String jtId = "test"; - TaskAttemptID attemptID = new TaskAttemptID(jtId, 1, true, 0, 0); - Task task = new MapTask(null, attemptID, 0, null, 2); - mapLauncher.addToTaskQueue(new LaunchTaskAction(task)); - // verify that task is added to runningTasks - TaskInProgress runningTip = tt.runningTasks.get(attemptID); - assertNotNull(runningTip); - - // wait for a while for the first task to start initializing - // this loop waits at most for 30 seconds - for (int i = 0; i < 300; i++) { - if (firstJobStarted == true) { - break; - } - UtilsForTests.waitFor(100); - } - - // Now start a second task and make sure it doesn't wait while first one initializes - String secondjtId = "test2"; - TaskAttemptID secondAttemptID = new TaskAttemptID(secondjtId, 1, true, 0, 0); - Task secondTask = new MapTask(null, secondAttemptID, 0, null, 2); - mapLauncher.addToTaskQueue(new LaunchTaskAction(secondTask)); - // verify that task is added to runningTasks - TaskInProgress secondRunningTip = tt.runningTasks.get(secondAttemptID); - assertNotNull(secondRunningTip); - - // wait for a while for the second task to be launched - // this loop waits at most for 30 seconds - for (int i = 0; i < 300; i++) { - if (jobLaunchCount > 0) { - break; - } - UtilsForTests.waitFor(100); - } - - assertEquals("Second task didn't run or both ran", 1, jobLocalizedCount); - assertEquals("second task didn't try to launch", 1, jobLaunchCount); - assertFalse("Second task didn't finish first task initializing", firstJobFinished); - - // tell first task to stop waiting - quitWaiting = true; - - // wait for a while for the first task finishes initializing - // this loop waits at most for 30 seconds - for (int i = 0; i < 300; i++) { - if (firstJobFinished == true) { - break; - } - UtilsForTests.waitFor(100); - } - assertTrue("First task didn't finish initializing", firstJobFinished); - - // wait for a while for the first task finishes - // this loop waits at most for 30 seconds - for (int i = 0; i < 300; i++) { - if (jobLaunchCount > 1) { - break; - } - UtilsForTests.waitFor(100); - } - assertEquals("Both tasks didn't run", 2, jobLocalizedCount); - assertEquals("First task didn't try to launch", 2, jobLaunchCount); - - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskLimits.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskLimits.java deleted file mode 100644 index c8ecac75374..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskLimits.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import junit.framework.TestCase; -import java.io.IOException; - -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; - -/** - * A JUnit test to test configured task limits. - */ -public class TestTaskLimits extends TestCase { - - static void runTest(int maxTasks, int numMaps, int numReds, - boolean shouldFail) throws Exception { - JobConf conf = new JobConf(); - conf.setInt(JTConfig.JT_TASKS_PER_JOB, maxTasks); - conf.set(JTConfig.JT_IPC_HANDLER_COUNT, "1"); - MiniMRCluster mr = new MiniMRCluster(0, "file:///", 1, null, null, conf); - JobTracker jt = mr.getJobTrackerRunner().getJobTracker(); - JobConf jc = mr.createJobConf(); - jc.setNumMapTasks(numMaps); - jc.setNumReduceTasks(numReds); - JobInProgress jip = new JobInProgress(new JobID(), jc, jt); - boolean failed = false; - try { - jip.checkTaskLimits(); - } catch (IOException e) { - failed = true; - } - assertEquals(shouldFail, failed); - mr.shutdown(); - } - - public void testBeyondLimits() throws Exception { - // Max tasks is 4, Requested is 8, shouldFail = true - runTest(4, 8, 0, true); - } - - public void testTaskWithinLimits() throws Exception { - // Max tasks is 4, requested is 4, shouldFail = false - runTest(4, 4, 0, false); - } - - - public void testTaskWithoutLimits() throws Exception { - // No task limit, requested is 16, shouldFail = false - runTest(-1, 8, 8, false); - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskLogServlet.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskLogServlet.java deleted file mode 100644 index a61276cff22..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskLogServlet.java +++ /dev/null @@ -1,160 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import static org.junit.Assert.*; - -import java.io.File; -import java.io.FileWriter; -import java.io.IOException; - -import javax.servlet.http.HttpServletResponse; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FileUtil; -import org.junit.Before; -import org.junit.Test; -import org.mortbay.jetty.testing.HttpTester; -import org.mortbay.jetty.testing.ServletTester; - -public class TestTaskLogServlet { - private static final Log LOG = LogFactory.getLog(TestTaskLogServlet.class); - private ServletTester tester; - - @Before - public void setup() throws Exception { - tester = new ServletTester(); - tester.setContextPath("/"); - tester.addServlet(TaskLogServlet.class, "/tasklog"); - tester.start(); - } - - @Test - public void testMissingParameters() throws Exception { - HttpTester request = new HttpTester(); - request.setMethod("GET"); - request.setURI("/tasklog"); - request.setVersion("HTTP/1.0"); - - HttpTester response = new HttpTester(); - response.parse(tester.getResponses(request.generate())); - - assertEquals(400,response.getStatus()); - } - - private void setupValidLogs(String attemptIdStr) throws IOException { - TaskAttemptID attemptId = TaskAttemptID.forName(attemptIdStr); - File logDir = TaskLog.getAttemptDir(attemptId, false); - FileUtil.fullyDelete(logDir); - logDir.mkdirs(); - assertTrue(logDir.exists()); - - // Now make the logs with some HTML in the output - TaskLog.syncLogs(logDir.getAbsolutePath(), attemptId, false); - makeLog(new File(logDir, "stderr"), "this is stderr"); - makeLog(new File(logDir, "stdout"), "this is stdout"); - makeLog(new File(logDir, "syslog"), "this is syslog"); - TaskLog.syncLogs(logDir.getAbsolutePath(), attemptId, false); - } - - @Test - public void testHtmlLogs() throws Exception { - String attemptIdStr = "attempt_123_0001_m_000001_0"; - setupValidLogs(attemptIdStr); - - HttpTester request = new HttpTester(); - request.setMethod("GET"); - request.setURI("/tasklog?attemptid=" + attemptIdStr); - request.setVersion("HTTP/1.0"); - - // Make sure all the contents show up and properly escaped - HttpTester response = doRequest(request); - assertEquals(HttpServletResponse.SC_OK, response.getStatus()); - assertEquals("text/html; charset=utf-8", response.getHeader("content-type")); - assertTrue(response.getContent().contains("<b>this is stderr")); - assertTrue(response.getContent().contains("<b>this is stdout")); - assertTrue(response.getContent().contains("<b>this is syslog")); - - // Only read a small chunk of each file <***b>thi***s - // (should still be escaped) - request.setURI("/tasklog?attemptid=" + attemptIdStr - + "&start=1&end=6"); - response = doRequest(request); - assertEquals(HttpServletResponse.SC_OK, response.getStatus()); - assertEquals("text/html; charset=utf-8", response.getHeader("content-type")); - assertFalse(response.getContent().contains("<b")); - assertFalse(response.getContent().contains("this is")); - assertTrue(response.getContent().contains("b>thi")); - } - - @Test - public void testPlaintextLogs() throws Exception { - String attemptIdStr = "attempt_123_0001_m_000001_0"; - setupValidLogs(attemptIdStr); - - HttpTester request = new HttpTester(); - request.setMethod("GET"); - request.setURI("/tasklog?plaintext=true&attemptid=" + attemptIdStr); - request.setVersion("HTTP/1.0"); - - // Make sure all the contents show up and properly escaped - HttpTester response = doRequest(request); - // Bad request because we require a 
'filter' - assertEquals(HttpServletResponse.SC_BAD_REQUEST, response.getStatus()); - - // Try again with filter - request.setURI("/tasklog?plaintext=true&filter=stdout&attemptid=" + attemptIdStr); - response = doRequest(request); - - // Response should be text/plain, not be escaped - assertEquals("text/plain; charset=utf-8", response.getHeader("content-type")); - assertEquals("this is stdout", response.getContent()); - - // Test range request - request.setURI("/tasklog?plaintext=true&filter=stdout" + - "&attemptid=" + attemptIdStr + - "&start=1&end=6"); - response = doRequest(request); - - // Response should be text/plain, not be escaped - assertEquals("text/plain; charset=utf-8", response.getHeader("content-type")); - assertEquals("b>thi", response.getContent()); - } - - private HttpTester doRequest(HttpTester request) throws Exception { - String reqStr = request.generate(); - LOG.info("Testing request: " + reqStr); - String respStr = tester.getResponses(reqStr); - LOG.info("Response: " + respStr); - HttpTester response = new HttpTester(); - response.parse(respStr); - return response; - } - - private void makeLog(File f, String contents) throws IOException { - LOG.info("Creating log at " + f); - FileWriter fw = new FileWriter(f); - try { - fw.write(contents); - } finally { - fw.close(); - } - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskOutputSize.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskOutputSize.java deleted file mode 100644 index 65ae794cee0..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskOutputSize.java +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import java.io.File; - -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.MapReduceTestUtil; -import org.apache.hadoop.mapreduce.TaskCompletionEvent; -import org.junit.After; -import org.junit.Test; -import static org.junit.Assert.*; - -public class TestTaskOutputSize { - private static Path rootDir = new Path(System.getProperty("test.build.data", - "/tmp"), "test"); - - @After - public void tearDown() { - FileUtil.fullyDelete(new File(rootDir.toString())); - } - - @Test - public void testTaskOutputSize() throws Exception { - MiniMRCluster mr = new MiniMRCluster(1, "file:///", 1); - Path inDir = new Path(rootDir, "input"); - Path outDir = new Path(rootDir, "output"); - Job job = MapReduceTestUtil.createJob(mr.createJobConf(), inDir, outDir, 1, 1); - job.waitForCompletion(true); - assertTrue("Job failed", job.isSuccessful()); - JobTracker jt = mr.getJobTrackerRunner().getJobTracker(); - for (TaskCompletionEvent tce : job.getTaskCompletionEvents(0, 100)) { - TaskStatus ts = jt.getTaskStatus(TaskAttemptID.downgrade(tce - .getTaskAttemptId())); - if (tce.isMapTask()) { - assertTrue( - "map output size is not found for " + tce.getTaskAttemptId(), ts - .getOutputSize() > 0); - } else { - assertEquals("task output size not expected for " - + tce.getTaskAttemptId(), -1, ts.getOutputSize()); - } - } - - // test output sizes for job with no reduces - job = MapReduceTestUtil.createJob(mr.createJobConf(), inDir, outDir, 1, 0); - job.waitForCompletion(true); - assertTrue("Job failed", job.isSuccessful()); - for (TaskCompletionEvent tce : job.getTaskCompletionEvents(0, 100)) { - TaskStatus ts = jt.getTaskStatus(TaskAttemptID.downgrade(tce - .getTaskAttemptId())); - assertEquals("task output size not expected for " - + tce.getTaskAttemptId(), -1, ts.getOutputSize()); - } - - // test output sizes for failed job - job = MapReduceTestUtil.createFailJob(mr.createJobConf(), outDir, inDir); - job.waitForCompletion(true); - assertFalse("Job not failed", job.isSuccessful()); - for (TaskCompletionEvent tce : job.getTaskCompletionEvents(0, 100)) { - TaskStatus ts = jt.getTaskStatus(TaskAttemptID.downgrade(tce - .getTaskAttemptId())); - assertEquals("task output size not expected for " - + tce.getTaskAttemptId(), -1, ts.getOutputSize()); - } - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerBlacklisting.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerBlacklisting.java deleted file mode 100644 index 36483e44b41..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerBlacklisting.java +++ /dev/null @@ -1,504 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.EnumSet; -import java.util.HashMap; -import java.util.List; -import java.util.Set; -import java.util.Map.Entry; - -import javax.security.auth.login.LoginException; - -import junit.extensions.TestSetup; -import junit.framework.Test; -import junit.framework.TestCase; -import junit.framework.TestSuite; - -import org.apache.hadoop.mapred.ClusterStatus.BlackListInfo; -import org.apache.hadoop.mapred.JobTracker.ReasonForBlackListing; -import org.apache.hadoop.mapred.TaskTrackerStatus.TaskTrackerHealthStatus; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; - -public class TestTaskTrackerBlacklisting extends TestCase { - - static String trackers[] = new String[] { "tracker_tracker1:1000", - "tracker_tracker2:1000", "tracker_tracker3:1000" }; - - static String hosts[] = new String[] { "tracker1", "tracker2", "tracker3" }; - - private static FakeJobTracker jobTracker; - - private static FakeJobTrackerClock clock; - - private static short responseId; - - private static final Set nodeUnHealthyReasonSet = - EnumSet.of(ReasonForBlackListing.NODE_UNHEALTHY); - - private static final Set exceedsFailuresReasonSet = - EnumSet.of(ReasonForBlackListing.EXCEEDING_FAILURES); - - private static final Set - unhealthyAndExceedsFailure = EnumSet.of( - ReasonForBlackListing.NODE_UNHEALTHY, - ReasonForBlackListing.EXCEEDING_FAILURES); - - // Add extra millisecond where timer granularity is too coarse - private static final long aDay = 24 * 60 * 60 * 1000 + 1; - - private static class FakeJobTrackerClock extends Clock { - boolean jumpADay = false; - - @Override - long getTime() { - if (!jumpADay) { - return super.getTime(); - } else { - long now = super.getTime(); - return now + aDay; - } - } - } - - static class FakeJobTracker extends - org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobTracker { - - FakeJobTracker(JobConf conf, Clock clock, String[] tts) throws IOException, - InterruptedException, LoginException { - super(conf, clock, tts); - } - - @Override - synchronized void finalizeJob(JobInProgress job) { - List blackListedTrackers = job.getBlackListedTrackers(); - for (String tracker : blackListedTrackers) { - incrementFaults(tracker); - } - } - } - - static class FakeJobInProgress extends - org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobInProgress { - HashMap trackerToFailureMap; - - FakeJobInProgress(JobConf jobConf, JobTracker tracker) throws IOException { - super(jobConf, tracker); - // initObjects(tracker, numMaps, numReduces); - trackerToFailureMap = new HashMap(); - } - - public void failTask(TaskAttemptID taskId) { - super.failTask(taskId); - TaskInProgress tip = jobtracker.taskidToTIPMap.get(taskId); - addFailuresToTrackers(tip.machineWhereTaskRan(taskId)); - } - - public void addFailuresToTrackers(String trackerName) { - Integer numOfFailures = trackerToFailureMap.get(trackerName); - if (numOfFailures == null) { - numOfFailures = 0; - } - trackerToFailureMap.put(trackerName, numOfFailures + 1); - } - - public List getBlackListedTrackers() { - ArrayList blackListedTrackers = new ArrayList(); - for (Entry entry : trackerToFailureMap.entrySet()) { - Integer failures = entry.getValue(); - String tracker = entry.getKey(); 
- if (failures.intValue() >= this.getJobConf() - .getMaxTaskFailuresPerTracker()) { - blackListedTrackers.add(JobInProgress - .convertTrackerNameToHostName(tracker)); - } - } - return blackListedTrackers; - } - } - - public static Test suite() { - TestSetup setup = - new TestSetup(new TestSuite(TestTaskTrackerBlacklisting.class)) { - protected void setUp() throws Exception { - JobConf conf = new JobConf(); - conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:0"); - conf.set(JTConfig.JT_HTTP_ADDRESS, "0.0.0.0:0"); - conf.setInt(JTConfig.JT_MAX_TRACKER_BLACKLISTS, 1); - - jobTracker = - new FakeJobTracker(conf, (clock = new FakeJobTrackerClock()), - trackers); - sendHeartBeat(null, true); - } - protected void tearDown() throws Exception { - //delete the build/test/logs/ dir - } - }; - return setup; - } - - private static void sendHeartBeat(TaskTrackerHealthStatus status, - boolean initialContact) - throws IOException { - for (String tracker : trackers) { - TaskTrackerStatus tts = new TaskTrackerStatus(tracker, JobInProgress - .convertTrackerNameToHostName(tracker)); - if (status != null) { - TaskTrackerHealthStatus healthStatus = tts.getHealthStatus(); - healthStatus.setNodeHealthy(status.isNodeHealthy()); - healthStatus.setHealthReport(status.getHealthReport()); - healthStatus.setLastReported(status.getLastReported()); - } - jobTracker.heartbeat(tts, false, initialContact, - false, responseId); - } - responseId++; - } - - public void testTrackerBlacklistingForJobFailures() throws Exception { - runBlackListingJob(jobTracker, trackers); - assertEquals("Tracker 1 not blacklisted", jobTracker - .getBlacklistedTrackerCount(), 1); - checkReasonForBlackListing(hosts[0], exceedsFailuresReasonSet); - clock.jumpADay = true; - sendHeartBeat(null, false); - assertEquals("Tracker 1 still blacklisted after a day", 0, jobTracker - .getBlacklistedTrackerCount()); - //Cleanup the blacklisted trackers. - //Tracker is black listed due to failure count, so clock has to be - //forwarded by a day. - clock.jumpADay = false; - } - - public void testNodeHealthBlackListing() throws Exception { - TaskTrackerHealthStatus status = getUnhealthyNodeStatus("ERROR"); - //Blacklist tracker due to node health failures. - sendHeartBeat(status, false); - for (String host : hosts) { - checkReasonForBlackListing(host, nodeUnHealthyReasonSet); - } - status.setNodeHealthy(true); - status.setLastReported(System.currentTimeMillis()); - status.setHealthReport(""); - //white list tracker so the further test cases can be - //using trackers. - sendHeartBeat(status, false); - assertEquals("Trackers still blacklisted after healthy report", 0, - jobTracker.getBlacklistedTrackerCount()); - } - - - /** - * Test case to check if the task tracker node health failure statistics - * is populated correctly. - * - * We check the since start property and assume that other properties would - * be populated in a correct manner. - */ - public void testTaskTrackerNodeHealthFailureStatistics() throws Exception { - //populate previous failure count, as the job tracker is bought up only - //once in setup of test cases to run all node health blacklist stuff. 
- int failureCount = getFailureCountSinceStart(jobTracker, trackers[0]); - sendHeartBeat(null, false); - for(String tracker: trackers) { - assertEquals("Failure count updated wrongly for tracker : " + tracker, - failureCount, getFailureCountSinceStart(jobTracker, tracker)); - } - - TaskTrackerHealthStatus status = getUnhealthyNodeStatus("ERROR"); - sendHeartBeat(status, false); - //When the node fails due to health check, the statistics is - //incremented. - failureCount++; - for(String tracker: trackers) { - assertEquals("Failure count updated wrongly for tracker : " + tracker, - failureCount, getFailureCountSinceStart(jobTracker, tracker)); - } - //even if the node reports unhealthy in next status update we dont - //increment it. We increment the statistics if the node goes back to - //healthy and then becomes unhealthy. - sendHeartBeat(status, false); - for(String tracker: trackers) { - assertEquals("Failure count updated wrongly for tracker : " + tracker, - failureCount, getFailureCountSinceStart(jobTracker, tracker)); - } - //make nodes all healthy, but the failure statistics should be - //carried forward. - sendHeartBeat(null, false); - for(String tracker: trackers) { - assertEquals("Failure count updated wrongly for tracker : " + tracker, - failureCount, getFailureCountSinceStart(jobTracker, tracker)); - } - } - - private int getFailureCountSinceStart(JobTracker jt, String tracker) { - JobTrackerStatistics jtStats = jt.getStatistics(); - StatisticsCollector collector = jtStats.collector; - collector.update(); - return jtStats.getTaskTrackerStat(tracker).healthCheckFailedStat - .getValues().get(StatisticsCollector.SINCE_START).getValue(); - } - - public void testBlackListingWithFailuresAndHealthStatus() throws Exception { - runBlackListingJob(jobTracker, trackers); - assertEquals("Tracker 1 not blacklisted", 1, - jobTracker.getBlacklistedTrackerCount()); - checkReasonForBlackListing(hosts[0], exceedsFailuresReasonSet); - TaskTrackerHealthStatus status = getUnhealthyNodeStatus("ERROR"); - - sendHeartBeat(status, false); - - assertEquals("All trackers not blacklisted", 3, - jobTracker.getBlacklistedTrackerCount()); - checkReasonForBlackListing(hosts[0], unhealthyAndExceedsFailure); - checkReasonForBlackListing(hosts[1], nodeUnHealthyReasonSet); - checkReasonForBlackListing(hosts[2], nodeUnHealthyReasonSet); - - clock.jumpADay = true; - sendHeartBeat(status, false); - - assertEquals("All trackers not blacklisted", 3, - jobTracker.getBlacklistedTrackerCount()); - - for (String host : hosts) { - checkReasonForBlackListing(host, nodeUnHealthyReasonSet); - } - //clear blacklisted trackers due to node health reasons. - sendHeartBeat(null, false); - - assertEquals("All trackers not white listed", 0, - jobTracker.getBlacklistedTrackerCount()); - //Clear the blacklisted trackers due to failures. 
- clock.jumpADay = false; - } - - public void testBlacklistingReasonString() throws Exception { - String error = "ERROR"; - String error1 = "ERROR1"; - TaskTrackerHealthStatus status = getUnhealthyNodeStatus(error); - sendHeartBeat(status, false); - - assertEquals("All trackers not blacklisted", 3, - jobTracker.getBlacklistedTrackerCount()); - - checkReasonForBlackListing(hosts[0], nodeUnHealthyReasonSet); - checkReasonForBlackListing(hosts[1], nodeUnHealthyReasonSet); - checkReasonForBlackListing(hosts[2], nodeUnHealthyReasonSet); - for (int i = 0; i < hosts.length; i++) { - //Replace new line as we are adding new line - //in getFaultReport - assertEquals("Blacklisting reason string not correct for host " + i, - error, - jobTracker.getFaultReport(hosts[i]).replace("\n", "")); - } - status.setNodeHealthy(false); - status.setLastReported(System.currentTimeMillis()); - status.setHealthReport(error1); - sendHeartBeat(status, false); - checkReasonForBlackListing(hosts[0], nodeUnHealthyReasonSet); - checkReasonForBlackListing(hosts[1], nodeUnHealthyReasonSet); - checkReasonForBlackListing(hosts[2], nodeUnHealthyReasonSet); - for (int i = 0; i < hosts.length; i++) { - //Replace new line as we are adding new line - //in getFaultReport - assertEquals("Blacklisting reason string not correct for host " + i, - error1, - jobTracker.getFaultReport(hosts[i]).replace("\n", "")); - } - //clear the blacklisted trackers with node health reasons. - sendHeartBeat(null, false); - } - - private TaskTrackerHealthStatus getUnhealthyNodeStatus(String error) { - TaskTrackerHealthStatus status = new TaskTrackerHealthStatus(); - status.setNodeHealthy(false); - status.setLastReported(System.currentTimeMillis()); - status.setHealthReport(error); - return status; - } - - public void testBlackListingWithTrackerReservation() throws Exception { - JobConf conf = new JobConf(); - conf.setNumMapTasks(1); - conf.setNumReduceTasks(1); - FakeJobInProgress job = new FakeJobInProgress(conf, jobTracker); - TaskTracker tt1 = jobTracker.getTaskTracker(trackers[0]); - TaskTracker tt2 = jobTracker.getTaskTracker(trackers[1]); - tt1.reserveSlots(TaskType.MAP, job, 1); - tt1.reserveSlots(TaskType.REDUCE, job, 1); - tt2.reserveSlots(TaskType.MAP, job, 1); - tt2.reserveSlots(TaskType.REDUCE, job, 1); - assertEquals("Tracker 1 not reserved for the job 1", 2, job - .getNumReservedTaskTrackersForMaps()); - assertEquals("Tracker 1 not reserved for the job 1", 2, job - .getNumReservedTaskTrackersForReduces()); - runBlackListingJob(jobTracker, trackers); - assertEquals("Tracker 1 not unreserved for the job 1", 1, job - .getNumReservedTaskTrackersForMaps()); - assertEquals("Tracker 1 not unreserved for the job 1", 1, job - .getNumReservedTaskTrackersForReduces()); - assertEquals("Tracker 1 not blacklisted", 1, jobTracker - .getBlacklistedTrackerCount()); - checkReasonForBlackListing(hosts[0], exceedsFailuresReasonSet); - - TaskTrackerHealthStatus status = getUnhealthyNodeStatus("ERROR"); - sendHeartBeat(status, false); - assertEquals("All trackers not blacklisted", 3, - jobTracker.getBlacklistedTrackerCount()); - - checkReasonForBlackListing(hosts[0], unhealthyAndExceedsFailure); - checkReasonForBlackListing(hosts[1], nodeUnHealthyReasonSet); - checkReasonForBlackListing(hosts[2], nodeUnHealthyReasonSet); - - assertEquals("Tracker 1 not unreserved for the job 1", 0, job - .getNumReservedTaskTrackersForMaps()); - assertEquals("Tracker 1 not unreserved for the job 1", 0, job - .getNumReservedTaskTrackersForReduces()); - //white list all trackers 
for health reasons and failure counts - clock.jumpADay = true; - sendHeartBeat(null, false); - } - - /** - * Test case to test if the cluster status is populated with the right - * blacklist information, which would be used by the {@link JobClient} to - * display information on the Command Line interface. - * - */ - public void testClusterStatusBlacklistedReason() throws Exception { - String error = "ERROR"; - String errorWithNewLines = "ERROR\nERROR"; - String expectedErrorReport = "ERROR:ERROR"; - // Create an unhealthy tracker health status. - Collection blackListedTrackerInfo = jobTracker - .getBlackListedTrackers(); - - assertTrue("The blacklisted tracker nodes is not empty.", - blackListedTrackerInfo.isEmpty()); - - TaskTrackerHealthStatus status = getUnhealthyNodeStatus(errorWithNewLines); - // make all tracker unhealthy - sendHeartBeat(status, false); - assertEquals("All trackers not blacklisted", 3, jobTracker - .getBlacklistedTrackerCount()); - // Verify the new method .getBlackListedTracker() which is - // used by the ClusterStatus to set the list of blacklisted - // tracker. - blackListedTrackerInfo = jobTracker.getBlackListedTrackers(); - - // Check if all the black listed tracker information is obtained - // in new method. - assertEquals("Blacklist tracker info does not contain all trackers", 3, - blackListedTrackerInfo.size()); - // verify all the trackers are blacklisted for health reasons. - // Also check the health report. - for (BlackListInfo bi : blackListedTrackerInfo) { - assertEquals("Tracker not blacklisted for health reason", - ReasonForBlackListing.NODE_UNHEALTHY.toString().trim(), bi - .getReasonForBlackListing().trim()); - assertTrue("Tracker blacklist report does not match", - bi.toString().endsWith(expectedErrorReport)); - } - // reset the tracker health status back to normal. - sendHeartBeat(null, false); - runBlackListingJob(jobTracker, trackers); - sendHeartBeat(status, false); - blackListedTrackerInfo = jobTracker.getBlackListedTrackers(); - for (BlackListInfo bi : blackListedTrackerInfo) { - if (bi.getTrackerName().equals(trackers[0])) { - assertTrue( - "Reason for blacklisting of tracker 1 does not contain Unhealthy reasons", - bi.getReasonForBlackListing().contains( - ReasonForBlackListing.NODE_UNHEALTHY.toString().trim())); - assertTrue( - "Reason for blacklisting of tracker 1 does not contain Unhealthy reasons", - bi.getReasonForBlackListing().contains( - ReasonForBlackListing.EXCEEDING_FAILURES.toString().trim())); - assertTrue("Blacklist failure does not contain failure report string", - bi.getBlackListReport().contains("failures on the tracker")); - } else { - assertEquals("Tracker not blacklisted for health reason", - ReasonForBlackListing.NODE_UNHEALTHY.toString().trim(), bi - .getReasonForBlackListing().trim()); - } - assertTrue("Tracker blacklist report does not match", bi - .getBlackListReport().trim().contains(error)); - } - clock.jumpADay = true; - sendHeartBeat(null, false); - } - - /** - * Runs a job which blacklists the first of the tracker - * which is passed to the method. - * - * @param jobTracker JobTracker instance - * @param trackers array of trackers, the method would blacklist - * first element of the array - * @return A job in progress object. 
- * @throws Exception - */ - static FakeJobInProgress runBlackListingJob(JobTracker jobTracker, - String[] trackers) throws Exception { - TaskAttemptID[] taskAttemptID = new TaskAttemptID[3]; - JobConf conf = new JobConf(); - conf.setSpeculativeExecution(false); - conf.setNumMapTasks(0); - conf.setNumReduceTasks(5); - conf.set(JobContext.REDUCE_FAILURES_MAXPERCENT, ".70"); - conf.setBoolean(JobContext.SETUP_CLEANUP_NEEDED, false); - conf.setMaxTaskFailuresPerTracker(1); - FakeJobInProgress job = new FakeJobInProgress(conf, jobTracker); - job.setClusterSize(trackers.length); - job.initTasks(); - - taskAttemptID[0] = job.findReduceTask(trackers[0]); - taskAttemptID[1] = job.findReduceTask(trackers[1]); - taskAttemptID[2] = job.findReduceTask(trackers[2]); - job.finishTask(taskAttemptID[1]); - job.finishTask(taskAttemptID[2]); - job.failTask(taskAttemptID[0]); - - taskAttemptID[0] = job.findReduceTask(trackers[0]); - job.failTask(taskAttemptID[0]); - - taskAttemptID[0] = job.findReduceTask(trackers[1]); - job.finishTask(taskAttemptID[0]); - taskAttemptID[0] = job.findReduceTask(trackers[1]); - taskAttemptID[1] = job.findReduceTask(trackers[2]); - job.finishTask(taskAttemptID[0]); - job.finishTask(taskAttemptID[1]); - - jobTracker.finalizeJob(job); - return job; - } - - private void checkReasonForBlackListing(String host, - Set reasonsForBlackListing) { - Set rfbs = jobTracker.getReasonForBlackList(host); - assertEquals("Reasons for blacklisting of " + host + " does not match", - reasonsForBlackListing, rfbs); - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerInstrumentation.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerInstrumentation.java deleted file mode 100644 index 4aea9055314..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerInstrumentation.java +++ /dev/null @@ -1,121 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import java.io.File; -import java.io.IOException; -import java.util.List; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.junit.Test; -import static org.junit.Assert.*; - -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; - -public class TestTaskTrackerInstrumentation { - private static final Log LOG = LogFactory.getLog( - TestTaskTrackerInstrumentation.class); - - @Test - public void testCreateInstrumentationWithSingleClass() { - // Check that if only a single instrumentation class is given, - // that class is used directly - JobConf conf = new JobConf(); - conf.set(TTConfig.TT_INSTRUMENTATION, - DummyTaskTrackerInstrumentation.class.getName()); - TaskTracker tracker = new TaskTracker(); - TaskTrackerInstrumentation inst = - TaskTracker.createInstrumentation(tracker, conf); - assertEquals(DummyTaskTrackerInstrumentation.class.getName(), - inst.getClass().getName()); - } - - @Test - public void testCreateInstrumentationWithMultipleClasses() { - // Set up configuration to create two dummy instrumentation objects - JobConf conf = new JobConf(); - String dummyClass = DummyTaskTrackerInstrumentation.class.getName(); - String classList = dummyClass + "," + dummyClass; - conf.set(TTConfig.TT_INSTRUMENTATION, classList); - TaskTracker tracker = new TaskTracker(); - - // Check that a composite instrumentation object is created - TaskTrackerInstrumentation inst = - TaskTracker.createInstrumentation(tracker, conf); - assertEquals(CompositeTaskTrackerInstrumentation.class.getName(), - inst.getClass().getName()); - - // Check that each member of the composite is a dummy instrumentation - CompositeTaskTrackerInstrumentation comp = - (CompositeTaskTrackerInstrumentation) inst; - List insts = comp.getInstrumentations(); - assertEquals(2, insts.size()); - assertEquals(DummyTaskTrackerInstrumentation.class.getName(), - insts.get(0).getClass().getName()); - assertEquals(DummyTaskTrackerInstrumentation.class.getName(), - insts.get(1).getClass().getName()); - } - - @Test - public void testCreateInstrumentationWithDefaultClass() { - // Check that if no instrumentation class is given, the default - // class (TaskTrackerMetricsInst) is used. - JobConf conf = new JobConf(); - TaskTracker tracker = new TaskTracker(); - tracker.setConf(conf); // Needed to avoid NullPointerExcepton in - // TaskTrackerMetricsInst constructor - TaskTrackerInstrumentation inst = - TaskTracker.createInstrumentation(tracker, conf); - assertEquals(TaskTrackerMetricsInst.class.getName(), - inst.getClass().getName()); - } - - @Test - public void testCreateInstrumentationWithEmptyParam() { - // Check that if an empty string is given, the default instrumentation - // class (TaskTrackerMetricsInst) is used. An error message should also - // be written to the log, but we do not capture that. - JobConf conf = new JobConf(); - conf.set(TTConfig.TT_INSTRUMENTATION, ""); - TaskTracker tracker = new TaskTracker(); - tracker.setConf(conf); // Needed to avoid NullPointerExcepton in - // TaskTrackerMetricsInst constructor - TaskTrackerInstrumentation inst = - TaskTracker.createInstrumentation(tracker, conf); - assertEquals(TaskTrackerMetricsInst.class.getName(), - inst.getClass().getName()); - } - - @Test - public void testCreateInstrumentationWithInvalidParam() { - // Check that if an invalid class list is given, the default - // instrumentation class (TaskTrackerMetricsInst) is used. 
An error - // should also be written to the log, but we do not capture that. - JobConf conf = new JobConf(); - conf.set(TTConfig.TT_INSTRUMENTATION, "XYZ,ZZY"); - TaskTracker tracker = new TaskTracker(); - tracker.setConf(conf); // Needed to avoid NullPointerExcepton in - // TaskTrackerMetricsInst constructor - TaskTrackerInstrumentation inst = - TaskTracker.createInstrumentation(tracker, conf); - assertEquals(TaskTrackerMetricsInst.class.getName(), - inst.getClass().getName()); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java deleted file mode 100644 index d1a2d054f64..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java +++ /dev/null @@ -1,1075 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.File; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.PrintWriter; -import java.util.LinkedHashMap; -import java.util.TreeMap; -import java.util.jar.JarOutputStream; -import java.util.zip.ZipEntry; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.LocalDirAllocator; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.mapreduce.security.TokenCache; -import org.apache.hadoop.mapreduce.server.tasktracker.Localizer; -import org.apache.hadoop.mapreduce.util.MRAsyncDiskService; - -import static org.apache.hadoop.mapred.QueueManager.toFullPropertyName; -import org.apache.hadoop.security.Credentials; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.Shell; -import org.apache.hadoop.mapred.JvmManager.JvmEnv; -import org.apache.hadoop.mapred.TaskController.JobInitializationContext; -import org.apache.hadoop.mapred.TaskController.TaskControllerContext; -import org.apache.hadoop.mapred.TaskTracker.RunningJob; -import org.apache.hadoop.mapred.TaskTracker.TaskInProgress; -import org.apache.hadoop.mapred.UtilsForTests.InlineCleanupQueue; - -import junit.framework.TestCase; - -/** - * Test to verify localization of a job and localization of a task on a - * TaskTracker. 
- * - */ -public class TestTaskTrackerLocalization extends TestCase { - - private static File TEST_ROOT_DIR = - new File(System.getProperty("test.build.data", "/tmp")); - private File ROOT_MAPRED_LOCAL_DIR; - private File HADOOP_LOG_DIR; - private static File PERMISSION_SCRIPT_DIR; - private static File PERMISSION_SCRIPT_FILE; - private static final String PERMISSION_SCRIPT_CONTENT = "ls -l -d $1 | " + - "awk '{print $1\":\"$3\":\"$4}'"; - - private int numLocalDirs = 6; - private static final Log LOG = - LogFactory.getLog(TestTaskTrackerLocalization.class); - - protected TaskTracker tracker; - protected UserGroupInformation taskTrackerUGI; - protected TaskController taskController; - protected JobConf trackerFConf; - private JobConf localizedJobConf; - protected JobID jobId; - protected TaskAttemptID taskId; - protected Task task; - protected String[] localDirs; - protected static LocalDirAllocator lDirAlloc = - new LocalDirAllocator(MRConfig.LOCAL_DIR); - protected Path attemptWorkDir; - protected File[] attemptLogFiles; - protected JobConf localizedTaskConf; - private TaskInProgress tip; - private JobConf jobConf; - private File jobConfFile; - - /** - * Dummy method in this base class. Only derived classes will define this - * method for checking if a test can be run. - */ - protected boolean canRun() { - return true; - } - - @Override - protected void setUp() - throws Exception { - if (!canRun()) { - return; - } - TEST_ROOT_DIR = - new File(System.getProperty("test.build.data", "/tmp"), getClass() - .getSimpleName()); - if (!TEST_ROOT_DIR.exists()) { - TEST_ROOT_DIR.mkdirs(); - } - - ROOT_MAPRED_LOCAL_DIR = new File(TEST_ROOT_DIR, "mapred/local"); - ROOT_MAPRED_LOCAL_DIR.mkdirs(); - - HADOOP_LOG_DIR = new File(TEST_ROOT_DIR, "logs"); - HADOOP_LOG_DIR.mkdir(); - System.setProperty("hadoop.log.dir", HADOOP_LOG_DIR.getAbsolutePath()); - - trackerFConf = new JobConf(); - - trackerFConf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///"); - localDirs = new String[numLocalDirs]; - for (int i = 0; i < numLocalDirs; i++) { - localDirs[i] = new File(ROOT_MAPRED_LOCAL_DIR, "0_" + i).getPath(); - } - trackerFConf.setStrings(MRConfig.LOCAL_DIR, localDirs); - trackerFConf.setBoolean(MRConfig.MR_ACLS_ENABLED, true); - - // Create the job configuration file. Same as trackerConf in this test. - jobConf = new JobConf(trackerFConf); - // Set job view ACLs in conf sothat validation of contents of jobACLsFile - // can be done against this value. Have both users and groups - String jobViewACLs = "user1,user2, group1,group2"; - jobConf.set(MRJobConfig.JOB_ACL_VIEW_JOB, jobViewACLs); - - jobConf.setInt(MRJobConfig.USER_LOG_RETAIN_HOURS, 0); - jobConf.setUser(getJobOwner().getShortUserName()); - - String queue = "default"; - // set job queue name in job conf - jobConf.setQueueName(queue); - // Set queue admins acl in job conf similar to what JobClient does so that - // it goes into job conf also. - jobConf.set(toFullPropertyName(queue, - QueueACL.ADMINISTER_JOBS.getAclName()), - "qAdmin1,qAdmin2 qAdminsGroup1,qAdminsGroup2"); - - Job job = Job.getInstance(jobConf); - String jtIdentifier = "200907202331"; - jobId = new JobID(jtIdentifier, 1); - - // JobClient uploads the job jar to the file system and sets it in the - // jobConf. - uploadJobJar(job); - - // JobClient uploads the jobConf to the file system. 
- jobConfFile = uploadJobConf(job.getConfiguration()); - - // create jobTokens file - uploadJobTokensFile(); - - taskTrackerUGI = UserGroupInformation.getCurrentUser(); - startTracker(); - - // Set up the task to be localized - taskId = - new TaskAttemptID(jtIdentifier, jobId.getId(), TaskType.MAP, 1, 0); - createTask(); - - // mimic register task - // create the tip - tip = tracker.new TaskInProgress(task, trackerFConf); - } - - private void startTracker() throws IOException { - // Set up the TaskTracker - tracker = new TaskTracker(); - tracker.setConf(trackerFConf); - tracker.setTaskLogCleanupThread(new UserLogCleaner(trackerFConf)); - initializeTracker(); - } - - private void initializeTracker() throws IOException { - tracker.setIndexCache(new IndexCache(trackerFConf)); - tracker.setTaskMemoryManagerEnabledFlag(); - - // for test case system FS is the local FS - tracker.systemFS = FileSystem.getLocal(trackerFConf); - tracker.setLocalFileSystem(tracker.systemFS); - tracker.systemDirectory = new Path(TEST_ROOT_DIR.getAbsolutePath()); - - tracker.runningTasks = new LinkedHashMap(); - tracker.runningJobs = new TreeMap(); - tracker.setAsyncDiskService(new MRAsyncDiskService(trackerFConf)); - tracker.getAsyncDiskService().cleanupAllVolumes(); - - // Set up TaskTracker instrumentation - tracker.setTaskTrackerInstrumentation( - TaskTracker.createInstrumentation(tracker, trackerFConf)); - - // setup task controller - taskController = createTaskController(); - taskController.setConf(trackerFConf); - taskController.setup(); - tracker.setTaskController(taskController); - tracker.setLocalizer(new Localizer(tracker.getLocalFileSystem(), localDirs, - taskController)); - } - - protected TaskController createTaskController() { - return new DefaultTaskController(); - } - - private void createTask() - throws IOException { - task = new MapTask(jobConfFile.toURI().toString(), taskId, 1, null, 1); - task.setConf(jobConf); // Set conf. Set user name in particular. 
- task.setUser(jobConf.getUser()); - } - - protected UserGroupInformation getJobOwner() throws IOException { - return UserGroupInformation.getCurrentUser(); - } - - /** - * static block setting up the permission script which would be used by the - * checkFilePermissions - */ - static { - PERMISSION_SCRIPT_DIR = new File(TEST_ROOT_DIR, "permission_script_dir"); - PERMISSION_SCRIPT_FILE = new File(PERMISSION_SCRIPT_DIR, "getperms.sh"); - - if(PERMISSION_SCRIPT_FILE.exists()) { - PERMISSION_SCRIPT_FILE.delete(); - } - - if(PERMISSION_SCRIPT_DIR.exists()) { - PERMISSION_SCRIPT_DIR.delete(); - } - - PERMISSION_SCRIPT_DIR.mkdir(); - - try { - PrintWriter writer = new PrintWriter(PERMISSION_SCRIPT_FILE); - writer.write(PERMISSION_SCRIPT_CONTENT); - writer.close(); - } catch (FileNotFoundException fe) { - fail(); - } - PERMISSION_SCRIPT_FILE.setExecutable(true, true); - } - - /** - * @param job - * @throws IOException - * @throws FileNotFoundException - */ - private void uploadJobJar(Job job) - throws IOException, - FileNotFoundException { - File jobJarFile = new File(TEST_ROOT_DIR, "jobjar-on-dfs.jar"); - JarOutputStream jstream = - new JarOutputStream(new FileOutputStream(jobJarFile)); - ZipEntry ze = new ZipEntry("lib/lib1.jar"); - jstream.putNextEntry(ze); - jstream.closeEntry(); - ze = new ZipEntry("lib/lib2.jar"); - jstream.putNextEntry(ze); - jstream.closeEntry(); - jstream.finish(); - jstream.close(); - job.setJar(jobJarFile.toURI().toString()); - } - - /** - * @param conf - * @return - * @throws FileNotFoundException - * @throws IOException - */ - protected File uploadJobConf(Configuration conf) - throws FileNotFoundException, - IOException { - File jobConfFile = new File(TEST_ROOT_DIR, "jobconf-on-dfs.xml"); - FileOutputStream out = new FileOutputStream(jobConfFile); - conf.writeXml(out); - out.close(); - return jobConfFile; - } - - /** - * create fake JobTokens file - * @return - * @throws IOException - */ - protected void uploadJobTokensFile() throws IOException { - - File dir = new File(TEST_ROOT_DIR, jobId.toString()); - if(!dir.exists()) - assertTrue("faild to create dir="+dir.getAbsolutePath(), dir.mkdirs()); - // writing empty file, we don't need the keys for this test - new Credentials().writeTokenStorageFile(new Path("file:///" + dir, - TokenCache.JOB_TOKEN_HDFS_FILE), new Configuration()); - } - - @Override - protected void tearDown() - throws Exception { - if (!canRun()) { - return; - } - FileUtil.fullyDelete(TEST_ROOT_DIR); - } - - protected static String[] getFilePermissionAttrs(String path) - throws IOException { - String[] command = {"bash",PERMISSION_SCRIPT_FILE.getAbsolutePath(), path}; - String output=Shell.execCommand(command); - return output.split(":|\n"); - } - - - /** - * Utility method to check permission of a given path. Requires the permission - * script directory to be setup in order to call. 
- * - * - * @param path - * @param expectedPermissions - * @param expectedOwnerUser - * @param expectedOwnerGroup - * @throws IOException - */ - static void checkFilePermissions(String path, String expectedPermissions, - String expectedOwnerUser, String expectedOwnerGroup) - throws IOException { - String[] attrs = getFilePermissionAttrs(path); - assertTrue("File attrs length is not 3 but " + attrs.length, - attrs.length == 3); - assertTrue("Path " + path + " has the permissions " + attrs[0] - + " instead of the expected " + expectedPermissions, attrs[0] - .equals(expectedPermissions)); - assertTrue("Path " + path + " is user owned not by " + expectedOwnerUser - + " but by " + attrs[1], attrs[1].equals(expectedOwnerUser)); - assertTrue("Path " + path + " is group owned not by " + expectedOwnerGroup - + " but by " + attrs[2], attrs[2].equals(expectedOwnerGroup)); - } - - /** - * Verify the task-controller's setup functionality - * - * @throws IOException - */ - public void testTaskControllerSetup() - throws IOException { - if (!canRun()) { - return; - } - // Task-controller is already set up in the test's setup method. Now verify. - for (String localDir : localDirs) { - - // Verify the local-dir itself. - File lDir = new File(localDir); - assertTrue("localDir " + lDir + " doesn't exists!", lDir.exists()); - checkFilePermissions(lDir.getAbsolutePath(), "drwxr-xr-x", task - .getUser(), taskTrackerUGI.getGroupNames()[0]); - } - - // Verify the pemissions on the userlogs dir - File taskLog = TaskLog.getUserLogDir(); - checkFilePermissions(taskLog.getAbsolutePath(), "drwxr-xr-x", task - .getUser(), taskTrackerUGI.getGroupNames()[0]); - } - - /** - * Test the localization of a user on the TT. - * - * @throws IOException - */ - public void testUserLocalization() - throws IOException { - if (!canRun()) { - return; - } - // /////////// The main method being tested - tracker.getLocalizer().initializeUserDirs(task.getUser()); - // /////////// - - // Check the directory structure and permissions - checkUserLocalization(); - - // For the sake of testing re-entrancy of initializeUserDirs(), we remove - // the user directories now and make sure that further calls of the method - // don't create directories any more. - for (String dir : localDirs) { - File userDir = new File(dir, TaskTracker.getUserDir(task.getUser())); - if (!FileUtil.fullyDelete(userDir)) { - throw new IOException("Uanble to delete " + userDir); - } - } - - // Now call the method again. - tracker.getLocalizer().initializeUserDirs(task.getUser()); - - // Files should not be created now and so shouldn't be there anymore. 
- for (String dir : localDirs) { - File userDir = new File(dir, TaskTracker.getUserDir(task.getUser())); - assertFalse("Unexpectedly, user-dir " + userDir.getAbsolutePath() - + " exists!", userDir.exists()); - } - } - - protected void checkUserLocalization() - throws IOException { - for (String dir : localDirs) { - - File localDir = new File(dir); - assertTrue(MRConfig.LOCAL_DIR + localDir + " isn'task created!", - localDir.exists()); - - File taskTrackerSubDir = new File(localDir, TaskTracker.SUBDIR); - assertTrue("taskTracker sub-dir in the local-dir " + localDir - + "is not created!", taskTrackerSubDir.exists()); - - File userDir = new File(taskTrackerSubDir, task.getUser()); - assertTrue("user-dir in taskTrackerSubdir " + taskTrackerSubDir - + "is not created!", userDir.exists()); - checkFilePermissions(userDir.getAbsolutePath(), "drwx------", task - .getUser(), taskTrackerUGI.getGroupNames()[0]); - - File jobCache = new File(userDir, TaskTracker.JOBCACHE); - assertTrue("jobcache in the userDir " + userDir + " isn't created!", - jobCache.exists()); - checkFilePermissions(jobCache.getAbsolutePath(), "drwx------", task - .getUser(), taskTrackerUGI.getGroupNames()[0]); - - // Verify the distributed cache dir. - File distributedCacheDir = - new File(localDir, TaskTracker - .getPrivateDistributedCacheDir(task.getUser())); - assertTrue("distributed cache dir " + distributedCacheDir - + " doesn't exists!", distributedCacheDir.exists()); - checkFilePermissions(distributedCacheDir.getAbsolutePath(), - "drwx------", task.getUser(), taskTrackerUGI.getGroupNames()[0]); - } - } - - /** - * Test job localization on a TT. Tests localization of job.xml, job.jar and - * corresponding setting of configuration. Also test - * {@link TaskController#initializeJob(JobInitializationContext)} - * - * @throws IOException - */ - public void testJobLocalization() - throws Exception { - if (!canRun()) { - return; - } - TaskTracker.RunningJob rjob = tracker.localizeJob(tip); - localizedJobConf = rjob.getJobConf(); - - checkJobLocalization(); - } - - /** - * Test that, if the job log dir can't be created, the job will fail - * during localization rather than at the time when the task itself - * tries to write into it. 
- */ - public void testJobLocalizationFailsIfLogDirUnwritable() - throws Exception { - if (!canRun()) { - return; - } - - File logDir = TaskLog.getJobDir(jobId); - File logDirParent = logDir.getParentFile(); - - try { - assertTrue(logDirParent.mkdirs() || logDirParent.isDirectory()); - FileUtil.fullyDelete(logDir); - FileUtil.chmod(logDirParent.getAbsolutePath(), "000"); - - tracker.localizeJob(tip); - fail("No exception"); - } catch (IOException ioe) { - LOG.info("Got exception", ioe); - assertTrue(ioe.getMessage().contains("Could not create job user log")); - } finally { - // Put it back just to be safe - FileUtil.chmod(logDirParent.getAbsolutePath(), "755"); - } - } - - protected void checkJobLocalization() - throws IOException { - // Check the directory structure - for (String dir : localDirs) { - - File localDir = new File(dir); - File taskTrackerSubDir = new File(localDir, TaskTracker.SUBDIR); - File userDir = new File(taskTrackerSubDir, task.getUser()); - File jobCache = new File(userDir, TaskTracker.JOBCACHE); - - File jobDir = new File(jobCache, jobId.toString()); - assertTrue("job-dir in " + jobCache + " isn't created!", jobDir.exists()); - - // check the private permissions on the job directory - checkFilePermissions(jobDir.getAbsolutePath(), "drwx------", task - .getUser(), taskTrackerUGI.getGroupNames()[0]); - } - - // check the localization of job.xml - assertTrue("job.xml is not localized on this TaskTracker!!", lDirAlloc - .getLocalPathToRead(TaskTracker.getLocalJobConfFile(task.getUser(), - jobId.toString()), trackerFConf) != null); - - // check the localization of job.jar - Path jarFileLocalized = - lDirAlloc.getLocalPathToRead(TaskTracker.getJobJarFile(task.getUser(), - jobId.toString()), trackerFConf); - assertTrue("job.jar is not localized on this TaskTracker!!", - jarFileLocalized != null); - assertTrue("lib/lib1.jar is not unjarred on this TaskTracker!!", new File( - jarFileLocalized.getParent() + Path.SEPARATOR + "lib/lib1.jar") - .exists()); - assertTrue("lib/lib2.jar is not unjarred on this TaskTracker!!", new File( - jarFileLocalized.getParent() + Path.SEPARATOR + "lib/lib2.jar") - .exists()); - - // check the creation of job work directory - assertTrue("job-work dir is not created on this TaskTracker!!", lDirAlloc - .getLocalPathToRead(TaskTracker.getJobWorkDir(task.getUser(), jobId - .toString()), trackerFConf) != null); - - // Check the setting of mapreduce.job.local.dir and job.jar which will eventually be - // used by the user's task - boolean jobLocalDirFlag = false, mapredJarFlag = false; - String localizedJobLocalDir = - localizedJobConf.get(TaskTracker.JOB_LOCAL_DIR); - String localizedJobJar = localizedJobConf.getJar(); - for (String localDir : localizedJobConf.getStrings(MRConfig.LOCAL_DIR)) { - if (localizedJobLocalDir.equals(localDir + Path.SEPARATOR - + TaskTracker.getJobWorkDir(task.getUser(), jobId.toString()))) { - jobLocalDirFlag = true; - } - if (localizedJobJar.equals(localDir + Path.SEPARATOR - + TaskTracker.getJobJarFile(task.getUser(), jobId.toString()))) { - mapredJarFlag = true; - } - } - assertTrue(TaskTracker.JOB_LOCAL_DIR - + " is not set properly to the target users directory : " - + localizedJobLocalDir, jobLocalDirFlag); - assertTrue( - "mapreduce.job.jar is not set properly to the target users directory : " - + localizedJobJar, mapredJarFlag); - - // check job user-log directory permissions - File jobLogDir = TaskLog.getJobDir(jobId); - assertTrue("job log directory " + jobLogDir + " does not exist!", jobLogDir - .exists()); - 
checkFilePermissions(jobLogDir.toString(), "drwx------", task.getUser(), - taskTrackerUGI.getGroupNames()[0]); - - // Make sure that the job ACLs file job-acls.xml exists in job userlog dir - File jobACLsFile = new File(jobLogDir, TaskTracker.jobACLsFile); - assertTrue("JobACLsFile is missing in the job userlog dir " + jobLogDir, - jobACLsFile.exists()); - - // With default task controller, the job-acls.xml file is owned by TT and - // permissions are 700 - checkFilePermissions(jobACLsFile.getAbsolutePath(), "-rw-------", - taskTrackerUGI.getShortUserName(), taskTrackerUGI.getGroupNames()[0]); - - validateJobACLsFileContent(); - } - - // Validate the contents of jobACLsFile ( i.e. user name, job-view-acl, queue - // name and queue-admins-acl ). - protected void validateJobACLsFileContent() { - JobConf jobACLsConf = TaskLogServlet.getConfFromJobACLsFile(jobId); - - assertTrue(jobACLsConf.get("user.name").equals( - localizedJobConf.getUser())); - assertTrue(jobACLsConf.get(MRJobConfig.JOB_ACL_VIEW_JOB). - equals(localizedJobConf.get(MRJobConfig.JOB_ACL_VIEW_JOB))); - - String queue = localizedJobConf.getQueueName(); - assertTrue(queue.equalsIgnoreCase(jobACLsConf.getQueueName())); - - String qACLName = toFullPropertyName(queue, - QueueACL.ADMINISTER_JOBS.getAclName()); - assertTrue(jobACLsConf.get(qACLName).equals( - localizedJobConf.get(qACLName))); - } - - /** - * Test task localization on a TT. - * - * @throws IOException - */ - public void testTaskLocalization() - throws Exception { - if (!canRun()) { - return; - } - TaskTracker.RunningJob rjob = tracker.localizeJob(tip); - localizedJobConf = rjob.getJobConf(); - initializeTask(); - - checkTaskLocalization(); - } - - private void initializeTask() throws IOException { - tip.setJobConf(localizedJobConf); - - // ////////// The central method being tested - tip.localizeTask(task); - // ////////// - - // check the functionality of localizeTask - for (String dir : trackerFConf.getStrings(MRConfig.LOCAL_DIR)) { - File attemptDir = - new File(dir, TaskTracker.getLocalTaskDir(task.getUser(), jobId - .toString(), taskId.toString(), task.isTaskCleanupTask())); - assertTrue("attempt-dir " + attemptDir + " in localDir " + dir - + " is not created!!", attemptDir.exists()); - } - - attemptWorkDir = - lDirAlloc.getLocalPathToRead(TaskTracker.getTaskWorkDir( - task.getUser(), task.getJobID().toString(), task.getTaskID() - .toString(), task.isTaskCleanupTask()), trackerFConf); - assertTrue("atttempt work dir for " + taskId.toString() - + " is not created in any of the configured dirs!!", - attemptWorkDir != null); - - TaskRunner runner = new MapTaskRunner(tip, tracker, tip.getJobConf()); - tip.setTaskRunner(runner); - - // /////// Few more methods being tested - runner.setupChildTaskConfiguration(lDirAlloc); - TaskRunner.createChildTmpDir(new File(attemptWorkDir.toUri().getPath()), - localizedJobConf); - attemptLogFiles = runner.prepareLogFiles(task.getTaskID(), - task.isTaskCleanupTask()); - - // Make sure the task-conf file is created - Path localTaskFile = - lDirAlloc.getLocalPathToRead(TaskTracker.getTaskConfFile(task - .getUser(), task.getJobID().toString(), task.getTaskID() - .toString(), task.isTaskCleanupTask()), trackerFConf); - assertTrue("Task conf file " + localTaskFile.toString() - + " is not created!!", new File(localTaskFile.toUri().getPath()) - .exists()); - - // /////// One more method being tested. This happens in child space. 
- localizedTaskConf = new JobConf(localTaskFile); - TaskRunner.setupChildMapredLocalDirs(task, localizedTaskConf); - // /////// - - // Initialize task via TaskController - TaskControllerContext taskContext = - new TaskController.TaskControllerContext(); - taskContext.env = - new JvmEnv(null, null, null, null, -1, new File(localizedJobConf - .get(TaskTracker.JOB_LOCAL_DIR)), null, localizedJobConf); - taskContext.task = task; - // /////////// The method being tested - taskController.initializeTask(taskContext); - // /////////// - } - - protected void checkTaskLocalization() - throws IOException { - // Make sure that the mapreduce.cluster.local.dir is sandboxed - for (String childMapredLocalDir : localizedTaskConf - .getStrings(MRConfig.LOCAL_DIR)) { - assertTrue("Local dir " + childMapredLocalDir + " is not sandboxed !!", - childMapredLocalDir.endsWith(TaskTracker.getLocalTaskDir(task - .getUser(), jobId.toString(), taskId.toString(), - task.isTaskCleanupTask()))); - } - - // Make sure task task.getJobFile is changed and pointed correctly. - assertTrue(task.getJobFile().endsWith( - TaskTracker.getTaskConfFile(task.getUser(), jobId.toString(), taskId - .toString(), task.isTaskCleanupTask()))); - - // Make sure that the tmp directories are created - assertTrue("tmp dir is not created in workDir " - + attemptWorkDir.toUri().getPath(), new File(attemptWorkDir.toUri() - .getPath(), "tmp").exists()); - - // Make sure that the logs are setup properly - File logDir = TaskLog.getAttemptDir(taskId, task.isTaskCleanupTask()); - assertTrue("task's log dir " + logDir.toString() + " doesn't exist!", - logDir.exists()); - checkFilePermissions(logDir.getAbsolutePath(), "drwx------", task - .getUser(), taskTrackerUGI.getGroupNames()[0]); - - File expectedStdout = new File(logDir, TaskLog.LogName.STDOUT.toString()); - assertTrue("stdout log file is improper. Expected : " - + expectedStdout.toString() + " Observed : " - + attemptLogFiles[0].toString(), expectedStdout.toString().equals( - attemptLogFiles[0].toString())); - File expectedStderr = - new File(logDir, Path.SEPARATOR + TaskLog.LogName.STDERR.toString()); - assertTrue("stderr log file is improper. Expected : " - + expectedStderr.toString() + " Observed : " - + attemptLogFiles[1].toString(), expectedStderr.toString().equals( - attemptLogFiles[1].toString())); - } - - /** - * Create a file in the given dir and set permissions r_xr_xr_x sothat no one - * can delete it directly(without doing chmod). 
- * Creates dir/subDir and dir/subDir/file - */ - static void createFileAndSetPermissions(JobConf jobConf, Path dir) - throws IOException { - Path subDir = new Path(dir, "subDir"); - FileSystem fs = FileSystem.getLocal(jobConf); - fs.mkdirs(subDir); - Path p = new Path(subDir, "file"); - java.io.DataOutputStream out = fs.create(p); - out.writeBytes("dummy input"); - out.close(); - // no write permission for subDir and subDir/file - try { - int ret = 0; - if((ret = FileUtil.chmod(subDir.toUri().getPath(), "a=rx", true)) != 0) { - LOG.warn("chmod failed for " + subDir + ";retVal=" + ret); - } - } catch(InterruptedException e) { - LOG.warn("Interrupted while doing chmod for " + subDir); - } - } - - /** - * Validates the removal of $taskid and $tasid/work under mapred-local-dir - * in cases where those directories cannot be deleted without adding - * write permission to the newly created directories under $taskid and - * $taskid/work - * Also see createFileAndSetPermissions for details - */ - void validateRemoveTaskFiles(boolean needCleanup, boolean jvmReuse, - TaskInProgress tip) throws IOException { - // create files and set permissions 555. Verify if task controller sets - // the permissions for TT to delete the taskDir or workDir - String dir = (!needCleanup || jvmReuse) ? - TaskTracker.getTaskWorkDir(task.getUser(), task.getJobID().toString(), - taskId.toString(), task.isTaskCleanupTask()) - : TaskTracker.getLocalTaskDir(task.getUser(), task.getJobID().toString(), - taskId.toString(), task.isTaskCleanupTask()); - - Path[] paths = tracker.getLocalFiles(localizedJobConf, dir); - assertTrue("No paths found", paths.length > 0); - for (Path p : paths) { - if (tracker.getLocalFileSystem().exists(p)) { - createFileAndSetPermissions(localizedJobConf, p); - } - } - - InlineCleanupQueue cleanupQueue = new InlineCleanupQueue(); - tracker.setCleanupThread(cleanupQueue); - - tip.removeTaskFiles(needCleanup, taskId); - - if (jvmReuse) { - // work dir should still exist and cleanup queue should be empty - assertTrue("cleanup queue is not empty after removeTaskFiles() in case " - + "of jvm reuse.", cleanupQueue.isQueueEmpty()); - boolean workDirExists = false; - for (Path p : paths) { - if (tracker.getLocalFileSystem().exists(p)) { - workDirExists = true; - } - } - assertTrue("work dir does not exist in case of jvm reuse", workDirExists); - - // now try to delete the work dir and verify that there are no stale paths - JvmManager.deleteWorkDir(tracker, task); - } - - assertTrue("Some task files are not deleted!! Number of stale paths is " - + cleanupQueue.stalePaths.size(), cleanupQueue.stalePaths.size() == 0); - } - - /** - * Validates if task cleanup is done properly for a succeeded task - * @throws IOException - */ - public void testTaskFilesRemoval() - throws Exception { - if (!canRun()) { - return; - } - testTaskFilesRemoval(false, false);// no needCleanup; no jvmReuse - } - - /** - * Validates if task cleanup is done properly for a task that is not succeeded - * @throws IOException - */ - public void testFailedTaskFilesRemoval() - throws Exception { - if (!canRun()) { - return; - } - testTaskFilesRemoval(true, false);// needCleanup; no jvmReuse - - // initialize a cleanupAttempt for the task. - task.setTaskCleanupTask(); - // localize task cleanup attempt - initializeTask(); - checkTaskLocalization(); - - // verify the cleanup of cleanup attempt. 
- testTaskFilesRemoval(true, false);// needCleanup; no jvmReuse - } - - /** - * Validates if task cleanup is done properly for a succeeded task - * @throws IOException - */ - public void testTaskFilesRemovalWithJvmUse() - throws Exception { - if (!canRun()) { - return; - } - testTaskFilesRemoval(false, true);// no needCleanup; jvmReuse - } - - /** - * Validates if task cleanup is done properly - */ - private void testTaskFilesRemoval(boolean needCleanup, boolean jvmReuse) - throws Exception { - // Localize job and localize task. - TaskTracker.RunningJob rjob = tracker.localizeJob(tip); - localizedJobConf = rjob.getJobConf(); - if (jvmReuse) { - localizedJobConf.setNumTasksToExecutePerJvm(2); - } - initializeTask(); - - // TODO: Let the task run and create files. - - // create files and set permissions 555. Verify if task controller sets - // the permissions for TT to delete the task dir or work dir properly - validateRemoveTaskFiles(needCleanup, jvmReuse, tip); - } - - /** - * Test userlogs cleanup. - * - * @throws IOException - */ - private void verifyUserLogsRemoval() - throws IOException { - // verify user logs cleanup - File jobUserLogDir = TaskLog.getJobDir(jobId); - // Logs should be there before cleanup. - assertTrue("Userlogs dir " + jobUserLogDir + " is not present as expected!!", - jobUserLogDir.exists()); - tracker.purgeJob(new KillJobAction(jobId)); - tracker.getTaskLogCleanupThread().processCompletedJobs(); - - // Logs should be gone after cleanup. - assertFalse("Userlogs dir " + jobUserLogDir + " is not deleted as expected!!", - jobUserLogDir.exists()); - } - - /** - * Test job cleanup by doing the following - * - create files with no write permissions to TT under job-work-dir - * - create files with no write permissions to TT under task-work-dir - */ - public void testJobFilesRemoval() throws IOException, InterruptedException { - if (!canRun()) { - return; - } - - LOG.info("Running testJobCleanup()"); - // Localize job and localize task. - TaskTracker.RunningJob rjob = tracker.localizeJob(tip); - localizedJobConf = rjob.getJobConf(); - - // Set an inline cleanup queue - InlineCleanupQueue cleanupQueue = new InlineCleanupQueue(); - tracker.setCleanupThread(cleanupQueue); - - // Create a file in job's work-dir with 555 - String jobWorkDir = - TaskTracker.getJobWorkDir(task.getUser(), task.getJobID().toString()); - Path[] jPaths = tracker.getLocalFiles(localizedJobConf, jobWorkDir); - assertTrue("No paths found for job", jPaths.length > 0); - for (Path p : jPaths) { - if (tracker.getLocalFileSystem().exists(p)) { - createFileAndSetPermissions(localizedJobConf, p); - } - } - - // Initialize task dirs - tip.setJobConf(localizedJobConf); - tip.localizeTask(task); - - // Create a file in task local dir with 555 - // this is to simply test the case where the jvm reuse is enabled and some - // files in task-attempt-local-dir are left behind to be cleaned up when the - // job finishes. 
- String taskLocalDir = - TaskTracker.getLocalTaskDir(task.getUser(), task.getJobID().toString(), - task.getTaskID().toString(), false); - Path[] tPaths = tracker.getLocalFiles(localizedJobConf, taskLocalDir); - assertTrue("No paths found for task", tPaths.length > 0); - for (Path p : tPaths) { - if (tracker.getLocalFileSystem().exists(p)) { - createFileAndSetPermissions(localizedJobConf, p); - } - } - - // remove the job work dir - tracker.removeJobFiles(task.getUser(), task.getJobID()); - - // check the task-local-dir - boolean tLocalDirExists = false; - for (Path p : tPaths) { - if (tracker.getLocalFileSystem().exists(p)) { - tLocalDirExists = true; - } - } - assertFalse("Task " + task.getTaskID() + " local dir exists after cleanup", - tLocalDirExists); - - // Verify that the TaskTracker (via the task-controller) cleans up the dirs. - // check the job-work-dir - boolean jWorkDirExists = false; - for (Path p : jPaths) { - if (tracker.getLocalFileSystem().exists(p)) { - jWorkDirExists = true; - } - } - assertFalse("Job " + task.getJobID() + " work dir exists after cleanup", - jWorkDirExists); - // Test userlogs cleanup. - verifyUserLogsRemoval(); - - // Check that the empty $mapred.local.dir/taskTracker/$user dirs are still - // there. - for (String localDir : localDirs) { - Path userDir = - new Path(localDir, TaskTracker.getUserDir(task.getUser())); - assertTrue("User directory " + userDir + " is not present!!", - tracker.getLocalFileSystem().exists(userDir)); - } - } - - /** - * Tests TaskTracker restart after the localization. - * - * This tests the following steps: - * - * Localize Job, initialize a task. - * Then restart the Tracker. - * launch a cleanup attempt for the task. - * - * @throws IOException - * @throws InterruptedException - */ - public void testTrackerRestart() throws IOException, InterruptedException { - if (!canRun()) { - return; - } - - // Localize job and localize task. - TaskTracker.RunningJob rjob = tracker.localizeJob(tip); - localizedJobConf = rjob.getJobConf(); - initializeTask(); - - // imitate tracker restart - startTracker(); - - // create a task cleanup attempt - createTask(); - task.setTaskCleanupTask(); - // register task - tip = tracker.new TaskInProgress(task, trackerFConf); - - // localize the job again. - rjob = tracker.localizeJob(tip); - localizedJobConf = rjob.getJobConf(); - checkJobLocalization(); - - // localize task cleanup attempt - initializeTask(); - checkTaskLocalization(); - } - - /** - * Tests TaskTracker re-init after the localization. - * - * This tests the following steps: - * - * Localize Job, initialize a task. - * Then reinit the Tracker. - * launch a cleanup attempt for the task. - * - * @throws IOException - * @throws InterruptedException - */ - public void testTrackerReinit() throws IOException, InterruptedException { - if (!canRun()) { - return; - } - - // Localize job and localize task. - TaskTracker.RunningJob rjob = tracker.localizeJob(tip); - localizedJobConf = rjob.getJobConf(); - initializeTask(); - - // imitate tracker reinit - initializeTracker(); - - // create a task cleanup attempt - createTask(); - task.setTaskCleanupTask(); - // register task - tip = tracker.new TaskInProgress(task, trackerFConf); - - // localize the job again. - rjob = tracker.localizeJob(tip); - localizedJobConf = rjob.getJobConf(); - checkJobLocalization(); - - // localize task cleanup attempt - initializeTask(); - checkTaskLocalization(); - } - - /** - * Localizes a cleanup task and validates permissions. 
- * - * @throws InterruptedException - * @throws IOException - */ - public void testCleanupTaskLocalization() throws IOException, - InterruptedException { - if (!canRun()) { - return; - } - - task.setTaskCleanupTask(); - // register task - tip = tracker.new TaskInProgress(task, trackerFConf); - - // localize the job. - RunningJob rjob = tracker.localizeJob(tip); - localizedJobConf = rjob.getJobConf(); - checkJobLocalization(); - - // localize task cleanup attempt - initializeTask(); - checkTaskLocalization(); - - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerMemoryManager.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerMemoryManager.java deleted file mode 100644 index 39ba02fd5e7..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerMemoryManager.java +++ /dev/null @@ -1,606 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.regex.Pattern; -import java.util.regex.Matcher; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; -import org.apache.hadoop.mapreduce.util.LinuxResourceCalculatorPlugin; -import org.apache.hadoop.mapreduce.util.ProcfsBasedProcessTree; -import org.apache.hadoop.mapreduce.SleepJob; -import org.apache.hadoop.mapreduce.util.TestProcfsBasedProcessTree; -import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.ToolRunner; - -import org.junit.After; -import org.junit.Ignore; -import org.junit.Test; -import static org.junit.Assert.*; - -/** - * Test class to verify memory management of tasks. - */ -public class TestTaskTrackerMemoryManager { - - private static final Log LOG = - LogFactory.getLog(TestTaskTrackerMemoryManager.class); - private static String TEST_ROOT_DIR = new Path(System.getProperty( - "test.build.data", "/tmp")).toString().replace(' ', '+'); - - private MiniMRCluster miniMRCluster; - - private String taskOverLimitPatternString = - "TaskTree \\[pid=[0-9]*,tipID=.*\\] is running beyond.*memory-limits. " - + "Current usage : [0-9]*bytes. Limit : %sbytes. 
Killing task."; - - private void startCluster(JobConf conf) - throws Exception { - conf.set(JTConfig.JT_IPC_HANDLER_COUNT, "1"); - conf.set(TTConfig.TT_MAP_SLOTS, "1"); - conf.set(TTConfig.TT_REDUCE_SLOTS, "1"); - conf.set(TTConfig.TT_SLEEP_TIME_BEFORE_SIG_KILL, "0"); - miniMRCluster = new MiniMRCluster(1, "file:///", 1, null, null, conf); - } - - @After - public void tearDown() { - if (miniMRCluster != null) { - miniMRCluster.shutdown(); - } - } - - private int runSleepJob(JobConf conf) throws Exception { - String[] args = { "-m", "3", "-r", "1", "-mt", "3000", "-rt", "1000" }; - return ToolRunner.run(conf, new SleepJob(), args); - } - - private void runAndCheckSuccessfulJob(JobConf conf) - throws IOException { - Pattern taskOverLimitPattern = - Pattern.compile(String.format(taskOverLimitPatternString, "[0-9]*")); - Matcher mat = null; - - // Start the job. - int ret; - try { - ret = runSleepJob(conf); - } catch (Exception e) { - ret = 1; - } - - // Job has to succeed - assertTrue(ret == 0); - - JobClient jClient = new JobClient(conf); - JobStatus[] jStatus = jClient.getAllJobs(); - JobStatus js = jStatus[0]; // Our only job - RunningJob rj = jClient.getJob(js.getJobID()); - - // All events - TaskCompletionEvent[] taskComplEvents = rj.getTaskCompletionEvents(0); - - for (TaskCompletionEvent tce : taskComplEvents) { - String[] diagnostics = - rj.getTaskDiagnostics(tce.getTaskAttemptId()); - - if (diagnostics != null) { - for (String str : diagnostics) { - mat = taskOverLimitPattern.matcher(str); - // The error pattern shouldn't be there in any TIP's diagnostics - assertFalse(mat.find()); - } - } - } - } - - private boolean isProcfsBasedTreeAvailable() { - try { - if (!ProcfsBasedProcessTree.isAvailable()) { - LOG.info("Currently ProcessTree has only one implementation " - + "ProcfsBasedProcessTree, which is not available on this " - + "system. Not testing"); - return false; - } - } catch (Exception e) { - LOG.info(StringUtils.stringifyException(e)); - return false; - } - return true; - } - - /** - * Test for verifying that nothing is killed when memory management is - * disabled on the TT, even when the tasks run over their limits. - * - * @throws Exception - */ - @Test - public void testTTLimitsDisabled() - throws Exception { - // Run the test only if memory management is enabled - if (!isProcfsBasedTreeAvailable()) { - return; - } - - // Task-memory management disabled by default. - startCluster(new JobConf()); - long PER_TASK_LIMIT = 1L; // Doesn't matter how low. - JobConf conf = miniMRCluster.createJobConf(); - conf.setMemoryForMapTask(PER_TASK_LIMIT); - conf.setMemoryForReduceTask(PER_TASK_LIMIT); - runAndCheckSuccessfulJob(conf); - } - - /** - * Test for verifying that tasks within limits, with the cumulative usage also - * under TT's limits succeed. - * - * @throws Exception - */ - @Test - public void testTasksWithinLimits() - throws Exception { - // Run the test only if memory management is enabled - if (!isProcfsBasedTreeAvailable()) { - return; - } - - // Large so that sleepjob goes through and fits total TT usage - long PER_TASK_LIMIT = 2 * 1024L; - - // Start cluster with proper configuration. 
- JobConf fConf = new JobConf(); - fConf.setLong(MRConfig.MAPMEMORY_MB, 2 * 1024L); - fConf.setLong(MRConfig.REDUCEMEMORY_MB, 2 * 1024L); - // Reserve only 1 mb of the memory on TaskTrackers - fConf.setLong(TTConfig.TT_RESERVED_PHYSCIALMEMORY_MB, 1L); - startCluster(new JobConf()); - - JobConf conf = new JobConf(miniMRCluster.createJobConf()); - conf.setMemoryForMapTask(PER_TASK_LIMIT); - conf.setMemoryForReduceTask(PER_TASK_LIMIT); - // Set task physical memory limits - conf.setLong(MRJobConfig.MAP_MEMORY_PHYSICAL_MB, PER_TASK_LIMIT); - conf.setLong(MRJobConfig.REDUCE_MEMORY_PHYSICAL_MB, PER_TASK_LIMIT); - runAndCheckSuccessfulJob(conf); - } - - /** - * Test for verifying that tasks that go beyond limits get killed. - * - * @throws Exception - */ - @Ignore("Intermittent, unexpected task success causes test to fail.") - @Test - public void testTasksBeyondLimits() - throws Exception { - - // Run the test only if memory management is enabled - if (!isProcfsBasedTreeAvailable()) { - return; - } - - // Start cluster with proper configuration. - JobConf fConf = new JobConf(); - // very small value, so that no task escapes to successful completion. - fConf.setInt(TTConfig.TT_MEMORY_MANAGER_MONITORING_INTERVAL, 100); - fConf.setLong(MRConfig.MAPMEMORY_MB, 2 * 1024); - fConf.setLong(MRConfig.REDUCEMEMORY_MB, 2 * 1024); - startCluster(fConf); - runJobExceedingMemoryLimit(false); - } - - /** - * Test for verifying that tasks that go beyond physical limits get killed. - * - * @throws Exception - */ - @Ignore("Intermittent, unexpected task success causes test to fail.") - @Test - public void testTasksBeyondPhysicalLimits() - throws Exception { - - // Run the test only if memory management is enabled - if (!isProcfsBasedTreeAvailable()) { - return; - } - - // Start cluster with proper configuration. - JobConf fConf = new JobConf(); - // very small value, so that no task escapes to successful completion. - fConf.setInt(TTConfig.TT_MEMORY_MANAGER_MONITORING_INTERVAL, 100); - // Reserve only 1 mb of the memory on TaskTrackers - fConf.setLong(TTConfig.TT_RESERVED_PHYSCIALMEMORY_MB, 1L); - startCluster(fConf); - runJobExceedingMemoryLimit(true); - } - - /** - * Runs tests with tasks beyond limit and using old configuration values for - * the TaskTracker. - * - * @throws Exception - */ - @Ignore("Intermittent, unexpected task success causes test to fail.") - @Test - public void testTaskMemoryMonitoringWithDeprecatedConfiguration () - throws Exception { - - // Run the test only if memory management is enabled - if (!isProcfsBasedTreeAvailable()) { - return; - } - // Start cluster with proper configuration. - JobConf fConf = new JobConf(); - // very small value, so that no task escapes to successful completion. - fConf.setInt(TTConfig.TT_MEMORY_MANAGER_MONITORING_INTERVAL, 100); - //set old values, max vm property per task and upper limit on the tasks - //vm - //setting the default maximum vmem property to 2 GB - fConf.setLong(JobConf.MAPRED_TASK_DEFAULT_MAXVMEM_PROPERTY, - (2L * 1024L * 1024L * 1024L)); - fConf.setLong(JobConf.UPPER_LIMIT_ON_TASK_VMEM_PROPERTY, - (3L * 1024L * 1024L * 1024L)); - startCluster(fConf); - runJobExceedingMemoryLimit(false); - } - - /** - * Runs a job which should fail the when run by the memory monitor. - * - * @param doPhysicalMemory If it is true, use physical memory limit. - * Otherwise use virtual memory limit. 
- * @throws IOException - */ - private void runJobExceedingMemoryLimit(boolean doPhysicalMemory) - throws IOException { - long PER_TASK_LIMIT = 1L; // Low enough to kill off sleepJob tasks. - - Pattern taskOverLimitPattern = - Pattern.compile(String.format(taskOverLimitPatternString, String - .valueOf(PER_TASK_LIMIT*1024*1024L))); - Matcher mat = null; - - // Set up job. - JobConf conf = new JobConf(miniMRCluster.createJobConf()); - if (doPhysicalMemory) { - conf.setLong(MRJobConfig.MAP_MEMORY_PHYSICAL_MB, PER_TASK_LIMIT); - conf.setLong(MRJobConfig.REDUCE_MEMORY_PHYSICAL_MB, PER_TASK_LIMIT); - } else { - conf.setMemoryForMapTask(PER_TASK_LIMIT); - conf.setMemoryForReduceTask(PER_TASK_LIMIT); - } - conf.setMaxMapAttempts(1); - conf.setMaxReduceAttempts(1); - - // Start the job. - int ret = 0; - try { - ret = runSleepJob(conf); - } catch (Exception e) { - ret = 1; - } - - // Job has to fail - assertTrue(ret != 0); - - JobClient jClient = new JobClient(conf); - JobStatus[] jStatus = jClient.getAllJobs(); - JobStatus js = jStatus[0]; // Our only job - RunningJob rj = jClient.getJob(js.getJobID()); - - // All events - TaskCompletionEvent[] taskComplEvents = rj.getTaskCompletionEvents(0); - - for (TaskCompletionEvent tce : taskComplEvents) { - // Every task HAS to fail - assertTrue("Failure expected, task: " + tce.getTaskStatus(), - tce.getTaskStatus() == TaskCompletionEvent.Status.TIPFAILED || - tce.getTaskStatus() == TaskCompletionEvent.Status.FAILED); - - String[] diagnostics = - rj.getTaskDiagnostics(tce.getTaskAttemptId()); - - // Every task HAS to spit out the out-of-memory errors - assertNotNull(diagnostics); - - for (String str : diagnostics) { - mat = taskOverLimitPattern.matcher(str); - // Every task HAS to spit out the out-of-memory errors in the same - // format. And these are the only diagnostic messages. - assertTrue(mat.find()); - } - } - } - - /** - * Test for verifying that tasks causing cumulative usage to go beyond TT's - * limit get killed even though they all are under individual limits. Memory - * management for tasks with disabled task-limits also traverses the same - * code-path, so we don't need a separate testTaskLimitsDisabled. - * - * @throws Exception - */ - @Test - public void testTasksCumulativelyExceedingTTLimits() - throws Exception { - - // Run the test only if memory management is enabled - if (!isProcfsBasedTreeAvailable()) { - return; - } - - // Large enough for SleepJob Tasks. - long PER_TASK_LIMIT = 100 * 1024L; - - // Start cluster with proper configuration. - JobConf fConf = new JobConf(); - fConf.setLong(MRConfig.MAPMEMORY_MB, - 1L); - fConf.setLong( - MRConfig.REDUCEMEMORY_MB, 1L); - - // Because of the above, the total tt limit is 2mb - long TASK_TRACKER_LIMIT = 2 * 1024 * 1024L; - - // very small value, so that no task escapes to successful completion. - fConf.setInt(TTConfig.TT_MEMORY_MANAGER_MONITORING_INTERVAL, 100); - - startCluster(fConf); - - Pattern taskOverLimitPattern = - Pattern.compile(String.format(taskOverLimitPatternString, String - .valueOf(PER_TASK_LIMIT))); - - Pattern trackerOverLimitPattern = - Pattern - .compile("Killing one of the least progress tasks - .*, as " - + "the cumulative memory usage of all the tasks on the TaskTracker" - + " exceeds virtual memory limit " + TASK_TRACKER_LIMIT + "."); - Matcher mat = null; - - // Set up job. 
- JobConf conf = new JobConf(miniMRCluster.createJobConf()); - conf.setMemoryForMapTask(PER_TASK_LIMIT); - conf.setMemoryForReduceTask(PER_TASK_LIMIT); - - JobClient jClient = new JobClient(conf); - SleepJob sleepJob = new SleepJob(); - sleepJob.setConf(conf); - // Start the job - Job job = sleepJob.createJob(1, 1, 5000, 1, 1000, 1); - job.submit(); - boolean TTOverFlowMsgPresent = false; - while (true) { - List allTaskReports = new ArrayList(); - allTaskReports.addAll(Arrays.asList(jClient - .getSetupTaskReports(JobID.downgrade(job.getJobID())))); - allTaskReports.addAll(Arrays.asList(jClient - .getMapTaskReports(JobID.downgrade(job.getJobID())))); - for (TaskReport tr : allTaskReports) { - String[] diag = tr.getDiagnostics(); - for (String str : diag) { - mat = taskOverLimitPattern.matcher(str); - assertFalse(mat.find()); - mat = trackerOverLimitPattern.matcher(str); - if (mat.find()) { - TTOverFlowMsgPresent = true; - } - } - } - if (TTOverFlowMsgPresent) { - break; - } - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - // nothing - } - } - // If it comes here without a test-timeout, it means there was a task that - // was killed because of crossing cumulative TT limit. - - // Test succeeded, kill the job. - job.killJob(); - } - - /** - * Test to verify the check for whether a process tree is over limit or not. - * @throws IOException if there was a problem setting up the - * fake procfs directories or files. - */ - @Test - public void testProcessTreeLimits() throws IOException { - - // set up a dummy proc file system - File procfsRootDir = new File(TEST_ROOT_DIR, "proc"); - String[] pids = { "100", "200", "300", "400", "500", "600", "700" }; - try { - TestProcfsBasedProcessTree.setupProcfsRootDir(procfsRootDir); - - // create pid dirs. - TestProcfsBasedProcessTree.setupPidDirs(procfsRootDir, pids); - - // create process infos. - TestProcfsBasedProcessTree.ProcessStatInfo[] procs = - new TestProcfsBasedProcessTree.ProcessStatInfo[7]; - - // assume pids 100, 500 are in 1 tree - // 200,300,400 are in another - // 600,700 are in a third - procs[0] = new TestProcfsBasedProcessTree.ProcessStatInfo( - new String[] {"100", "proc1", "1", "100", "100", "100000"}); - procs[1] = new TestProcfsBasedProcessTree.ProcessStatInfo( - new String[] {"200", "proc2", "1", "200", "200", "200000"}); - procs[2] = new TestProcfsBasedProcessTree.ProcessStatInfo( - new String[] {"300", "proc3", "200", "200", "200", "300000"}); - procs[3] = new TestProcfsBasedProcessTree.ProcessStatInfo( - new String[] {"400", "proc4", "200", "200", "200", "400000"}); - procs[4] = new TestProcfsBasedProcessTree.ProcessStatInfo( - new String[] {"500", "proc5", "100", "100", "100", "1500000"}); - procs[5] = new TestProcfsBasedProcessTree.ProcessStatInfo( - new String[] {"600", "proc6", "1", "600", "600", "100000"}); - procs[6] = new TestProcfsBasedProcessTree.ProcessStatInfo( - new String[] {"700", "proc7", "600", "600", "600", "100000"}); - // write stat files. - TestProcfsBasedProcessTree.writeStatFiles(procfsRootDir, pids, procs); - - // vmem limit - long limit = 700000; - - // Create TaskMemoryMonitorThread - TaskMemoryManagerThread test = new TaskMemoryManagerThread(1000000L, - 5000L); - // create process trees - // tree rooted at 100 is over limit immediately, as it is - // twice over the mem limit. 
- ProcfsBasedProcessTree pTree = new ProcfsBasedProcessTree( - "100", true, 100L, - procfsRootDir.getAbsolutePath()); - pTree.getProcessTree(); - assertTrue("tree rooted at 100 should be over limit " + - "after first iteration.", - test.isProcessTreeOverLimit(pTree, "dummyId", limit)); - - // the tree rooted at 200 is initially below limit. - pTree = new ProcfsBasedProcessTree("200", true, 100L, - procfsRootDir.getAbsolutePath()); - pTree.getProcessTree(); - assertFalse("tree rooted at 200 shouldn't be over limit " + - "after one iteration.", - test.isProcessTreeOverLimit(pTree, "dummyId", limit)); - // second iteration - now the tree has been over limit twice, - // hence it should be declared over limit. - pTree.getProcessTree(); - assertTrue("tree rooted at 200 should be over limit after 2 iterations", - test.isProcessTreeOverLimit(pTree, "dummyId", limit)); - - // the tree rooted at 600 is never over limit. - pTree = new ProcfsBasedProcessTree("600", true, 100L, - procfsRootDir.getAbsolutePath()); - pTree.getProcessTree(); - assertFalse("tree rooted at 600 should never be over limit.", - test.isProcessTreeOverLimit(pTree, "dummyId", limit)); - - // another iteration does not make any difference. - pTree.getProcessTree(); - assertFalse("tree rooted at 600 should never be over limit.", - test.isProcessTreeOverLimit(pTree, "dummyId", limit)); - } finally { - FileUtil.fullyDelete(procfsRootDir); - } - } - - /** - * Test for verifying that tasks causing cumulative usage of physical memory - * to go beyond TT's limit get killed. - * - * @throws Exception - */ - @Test - public void testTasksCumulativelyExceedingTTPhysicalLimits() - throws Exception { - - // Run the test only if memory management is enabled - if (!isProcfsBasedTreeAvailable()) { - return; - } - - // Start cluster with proper configuration. - JobConf fConf = new JobConf(); - - // very small value, so that no task escapes to successful completion. - fConf.setInt(TTConfig.TT_MEMORY_MANAGER_MONITORING_INTERVAL, 100); - - // reserve all memory on TT so that the job will exceed memory limits - LinuxResourceCalculatorPlugin memoryCalculatorPlugin = - new LinuxResourceCalculatorPlugin(); - long totalPhysicalMemory = memoryCalculatorPlugin.getPhysicalMemorySize(); - long reservedPhysicalMemory = totalPhysicalMemory / (1024 * 1024) + 1; - fConf.setLong(TTConfig.TT_RESERVED_PHYSCIALMEMORY_MB, - reservedPhysicalMemory); - long maxRssMemoryAllowedForAllTasks = totalPhysicalMemory - - reservedPhysicalMemory * 1024 * 1024L; - Pattern physicalMemoryOverLimitPattern = Pattern.compile( - "Killing one of the memory-consuming tasks - .*" - + ", as the cumulative RSS memory usage of all the tasks on " - + "the TaskTracker exceeds physical memory limit " - + maxRssMemoryAllowedForAllTasks + "."); - - startCluster(fConf); - Matcher mat = null; - - // Set up job. 
- JobConf conf = new JobConf(miniMRCluster.createJobConf()); - // Set per task physical memory limits to be a higher value - conf.setLong(MRJobConfig.MAP_MEMORY_PHYSICAL_MB, 2 * 1024L); - conf.setLong(MRJobConfig.REDUCE_MEMORY_PHYSICAL_MB, 2 * 1024L); - JobClient jClient = new JobClient(conf); - SleepJob sleepJob = new SleepJob(); - sleepJob.setConf(conf); - // Start the job - Job job = sleepJob.createJob(1, 1, 100000, 1, 100000, 1); - job.submit(); - boolean TTOverFlowMsgPresent = false; - while (true) { - List allTaskReports = new ArrayList(); - allTaskReports.addAll(Arrays.asList(jClient - .getSetupTaskReports(JobID.downgrade(job.getJobID())))); - allTaskReports.addAll(Arrays.asList(jClient - .getMapTaskReports(JobID.downgrade(job.getJobID())))); - for (TaskReport tr : allTaskReports) { - String[] diag = tr.getDiagnostics(); - for (String str : diag) { - mat = physicalMemoryOverLimitPattern.matcher(str); - if (mat.find()) { - TTOverFlowMsgPresent = true; - } - } - } - if (TTOverFlowMsgPresent) { - break; - } - assertFalse("Job should not finish successfully", job.isSuccessful()); - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - // nothing - } - } - // If it comes here without a test-timeout, it means there was a task that - // was killed because of crossing cumulative TT limit. - - // Test succeeded, kill the job. - job.killJob(); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerSlotManagement.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerSlotManagement.java deleted file mode 100644 index 3bfbc681844..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTaskTrackerSlotManagement.java +++ /dev/null @@ -1,115 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.File; -import java.net.URI; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapreduce.Cluster; -import org.apache.hadoop.mapreduce.ClusterMetrics; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.JobContext; -import org.apache.hadoop.mapreduce.MapReduceTestUtil; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -/** - * Regression test for MAPREDUCE-913 - */ -public class TestTaskTrackerSlotManagement { - - private static final Path TEST_DIR = new Path(System.getProperty( - "test.build.data", "/tmp"), "tt_slots"); - private static final String CACHE_FILE_PATH = new Path(TEST_DIR, "test.txt") - .toString(); - - /** - * Test-setup. Create the cache-file. 
- * - * @throws Exception - */ - @Before - public void setUp() throws Exception { - new File(TEST_DIR.toString()).mkdirs(); - File myFile = new File(CACHE_FILE_PATH); - myFile.createNewFile(); - } - - /** - * Test-cleanup. Remove the cache-file. - * - * @throws Exception - */ - @After - public void tearDown() throws Exception { - File myFile = new File(CACHE_FILE_PATH); - myFile.delete(); - new File(TEST_DIR.toString()).delete(); - } - - /** - * Test case to test addition of free slot when the job fails localization due - * to cache file being modified after the job has started running. - * - * @throws Exception - */ - @Test - public void testFreeingOfTaskSlots() throws Exception { - // Start a cluster with no task tracker. - MiniMRCluster mrCluster = new MiniMRCluster(0, "file:///", 1); - Configuration conf = mrCluster.createJobConf(); - Cluster cluster = new Cluster(conf); - // set the debug script so that TT tries to launch the debug - // script for failed tasks. - conf.set(JobContext.MAP_DEBUG_SCRIPT, "/bin/echo"); - conf.set(JobContext.REDUCE_DEBUG_SCRIPT, "/bin/echo"); - Job j = MapReduceTestUtil.createJob(conf, new Path(TEST_DIR, "in"), - new Path(TEST_DIR, "out"), 0, 0); - // Add the local filed created to the cache files of the job - j.addCacheFile(new URI(CACHE_FILE_PATH)); - j.setMaxMapAttempts(1); - j.setMaxReduceAttempts(1); - // Submit the job and return immediately. - // Job submit now takes care setting the last - // modified time of the cache file. - j.submit(); - // Look up the file and modify the modification time. - File myFile = new File(CACHE_FILE_PATH); - myFile.setLastModified(0L); - // Start up the task tracker after the time has been changed. - mrCluster.startTaskTracker(null, null, 0, 1); - // Now wait for the job to fail. - j.waitForCompletion(false); - Assert.assertFalse("Job successfully completed.", j.isSuccessful()); - - ClusterMetrics metrics = cluster.getClusterStatus(); - // validate number of slots in JobTracker - Assert.assertEquals(0, metrics.getOccupiedMapSlots()); - Assert.assertEquals(0, metrics.getOccupiedReduceSlots()); - - // validate number of slots in TaskTracker - TaskTracker tt = mrCluster.getTaskTrackerRunner(0).getTaskTracker(); - Assert.assertEquals(metrics.getMapSlotCapacity(), tt.getFreeSlots(true)); - Assert.assertEquals(metrics.getReduceSlotCapacity(), tt.getFreeSlots(false)); - - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTrackerBlacklistAcrossJobs.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTrackerBlacklistAcrossJobs.java deleted file mode 100644 index c4b93ee9d83..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTrackerBlacklistAcrossJobs.java +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.IOException; - -import junit.framework.TestCase; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.io.NullWritable; -import org.apache.hadoop.mapred.lib.NullOutputFormat; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; - -public class TestTrackerBlacklistAcrossJobs extends TestCase { - private static final String hosts[] = new String[] { - "host1.rack.com", "host2.rack.com", "host3.rack.com" - }; - - public static class FailOnHostMapper extends MapReduceBase - implements Mapper { - String hostname = ""; - - public void configure(JobConf job) { - this.hostname = job.get(TTConfig.TT_HOST_NAME); - } - - public void map(NullWritable key, NullWritable value, - OutputCollector output, - Reporter reporter) - throws IOException { - if (this.hostname.equals(hosts[0])) { - // fail here - throw new IOException("failing on host: " + hosts[0]); - } - } - } - - public void testBlacklistAcrossJobs() throws IOException { - MiniMRCluster mr = null; - FileSystem fileSys = null; - Configuration conf = new Configuration(); - fileSys = FileSystem.get(conf); - // start mr cluster - JobConf jtConf = new JobConf(); - jtConf.setInt(JTConfig.JT_MAX_TRACKER_BLACKLISTS, 1); - - mr = new MiniMRCluster(3, fileSys.getUri().toString(), - 1, null, hosts, jtConf); - - // setup job configuration - JobConf mrConf = mr.createJobConf(); - JobConf job = new JobConf(mrConf); - job.setInt(JobContext.MAX_TASK_FAILURES_PER_TRACKER, 1); - job.setNumMapTasks(6); - job.setNumReduceTasks(0); - job.setMapperClass(FailOnHostMapper.class); - job.setMapOutputKeyClass(NullWritable.class); - job.setMapOutputValueClass(NullWritable.class); - job.setOutputFormat(NullOutputFormat.class); - job.setInputFormat(TestReduceFetchFromPartialMem.FakeIF.class); - - // run the job - JobClient jc = new JobClient(mrConf); - RunningJob running = JobClient.runJob(job); - assertEquals("Job failed", JobStatus.SUCCEEDED, running.getJobState()); - assertEquals("Did not blacklist the host", 1, - jc.getClusterStatus().getBlacklistedTrackers()); - assertEquals("Fault count should be 1", 1, mr.getFaultCount(hosts[0])); - - // run the same job once again - // there should be no change in blacklist count - running = JobClient.runJob(job); - assertEquals("Job failed", JobStatus.SUCCEEDED, running.getJobState()); - assertEquals("Didn't blacklist the host", 1, - jc.getClusterStatus().getBlacklistedTrackers()); - assertEquals("Fault count should be 1", 1, mr.getFaultCount(hosts[0])); - - if (fileSys != null) { fileSys.close(); } - if (mr!= null) { mr.shutdown(); } - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTrackerReservation.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTrackerReservation.java deleted file mode 100644 index 254a88a6513..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestTrackerReservation.java +++ /dev/null @@ -1,282 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.IOException; -import java.util.ArrayList; - -import javax.security.auth.login.LoginException; - -import org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobInProgress; -import org.apache.hadoop.mapreduce.ClusterMetrics; -import org.apache.hadoop.mapreduce.TaskType; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; - -import junit.extensions.TestSetup; -import junit.framework.Test; -import junit.framework.TestCase; -import junit.framework.TestSuite; - -public class TestTrackerReservation extends TestCase { - - static String[] trackers = new String[] { "tracker_tracker1:1000", - "tracker_tracker2:1000", "tracker_tracker3:1000" }; - private static FakeJobTracker jobTracker; - - private static class FakeJobTracker extends - org.apache.hadoop.mapred.FakeObjectUtilities.FakeJobTracker { - - FakeJobTracker(JobConf conf, Clock clock, String[] tts) throws IOException, - InterruptedException, LoginException { - super(conf, clock, tts); - } - - @Override - synchronized void finalizeJob(JobInProgress job) { - // Do nothing - } - } - - - public static Test suite() { - TestSetup setup = new TestSetup(new TestSuite(TestTrackerReservation.class)) { - protected void setUp() throws Exception { - JobConf conf = new JobConf(); - conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:0"); - conf.set(JTConfig.JT_HTTP_ADDRESS, "0.0.0.0:0"); - jobTracker = new FakeJobTracker(conf, new Clock(), trackers); - for (String tracker : trackers) { - FakeObjectUtilities.establishFirstContact(jobTracker, tracker); - } - } - - protected void tearDown() throws Exception { - } - }; - return setup; - } - - /** - * Test case to test if task tracker reservation. - *

    - *
- * 1. Run a cluster with 3 trackers.
- * 2. Submit a job which reserves all the slots in two trackers.
- * 3. Run the job on another tracker which has no reservations.
- * 4. Finish the job and observe the reservations are successfully canceled.
- * - * @throws Exception - */ - public void testTaskTrackerReservation() throws Exception { - JobConf conf = new JobConf(); - - conf.setNumMapTasks(1); - conf.setNumReduceTasks(1); - conf.setSpeculativeExecution(false); - - conf.setBoolean(JobContext.SETUP_CLEANUP_NEEDED, false); - - //Set task tracker objects for reservation. - TaskTracker tt1 = jobTracker.getTaskTracker(trackers[0]); - TaskTracker tt2 = jobTracker.getTaskTracker(trackers[1]); - TaskTracker tt3 = jobTracker.getTaskTracker(trackers[2]); - TaskTrackerStatus status1 = new TaskTrackerStatus( - trackers[0],JobInProgress.convertTrackerNameToHostName( - trackers[0]),0,new ArrayList(), 0, 2, 2); - TaskTrackerStatus status2 = new TaskTrackerStatus( - trackers[1],JobInProgress.convertTrackerNameToHostName( - trackers[1]),0,new ArrayList(), 0, 2, 2); - TaskTrackerStatus status3 = new TaskTrackerStatus( - trackers[1],JobInProgress.convertTrackerNameToHostName( - trackers[1]),0,new ArrayList(), 0, 2, 2); - tt1.setStatus(status1); - tt2.setStatus(status2); - tt3.setStatus(status3); - - FakeJobInProgress fjob = new FakeJobInProgress(conf, jobTracker); - fjob.setClusterSize(3); - fjob.initTasks(); - - tt1.reserveSlots(TaskType.MAP, fjob, 2); - tt1.reserveSlots(TaskType.REDUCE, fjob, 2); - tt3.reserveSlots(TaskType.MAP, fjob, 2); - tt3.reserveSlots(TaskType.REDUCE, fjob, 2); - - assertEquals("Trackers not reserved for the job : maps", - 2, fjob.getNumReservedTaskTrackersForMaps()); - assertEquals("Trackers not reserved for the job : reduces", - 2, fjob.getNumReservedTaskTrackersForReduces()); - ClusterMetrics metrics = jobTracker.getClusterMetrics(); - assertEquals("reserved map slots do not match", - 4, metrics.getReservedMapSlots()); - assertEquals("reserved reduce slots do not match", - 4, metrics.getReservedReduceSlots()); - - TaskAttemptID mTid = fjob.findMapTask(trackers[1]); - TaskAttemptID rTid = fjob.findReduceTask(trackers[1]); - - fjob.finishTask(mTid); - fjob.finishTask(rTid); - - assertEquals("Job didnt complete successfully complete", fjob.getStatus() - .getRunState(), JobStatus.SUCCEEDED); - - assertEquals("Reservation for the job not released: Maps", - 0, fjob.getNumReservedTaskTrackersForMaps()); - assertEquals("Reservation for the job not released : Reduces", - 0, fjob.getNumReservedTaskTrackersForReduces()); - metrics = jobTracker.getClusterMetrics(); - assertEquals("reserved map slots do not match", - 0, metrics.getReservedMapSlots()); - assertEquals("reserved reduce slots do not match", - 0, metrics.getReservedReduceSlots()); - } - - /** - * Test case to check task tracker reservation for a job which - * has a job blacklisted tracker. - *
    - *
- * 1. Run a job which fails on one of the trackers.
- * 2. Check if the job succeeds and has no reservation.
- * - * @throws Exception - */ - - public void testTrackerReservationWithJobBlackListedTracker() throws Exception { - FakeJobInProgress job = TestTaskTrackerBlacklisting.runBlackListingJob( - jobTracker, trackers); - assertEquals("Job has no blacklisted trackers", 1, job - .getBlackListedTrackers().size()); - assertTrue("Tracker 1 not blacklisted for the job", job - .getBlackListedTrackers().contains( - JobInProgress.convertTrackerNameToHostName(trackers[0]))); - assertEquals("Job didnt complete successfully complete", job.getStatus() - .getRunState(), JobStatus.SUCCEEDED); - assertEquals("Reservation for the job not released: Maps", - 0, job.getNumReservedTaskTrackersForMaps()); - assertEquals("Reservation for the job not released : Reduces", - 0, job.getNumReservedTaskTrackersForReduces()); - ClusterMetrics metrics = jobTracker.getClusterMetrics(); - assertEquals("reserved map slots do not match", - 0, metrics.getReservedMapSlots()); - assertEquals("reserved reduce slots do not match", - 0, metrics.getReservedReduceSlots()); - } - - /** - * Test case to check if the job reservation is handled properly if the - * job has a reservation on a black listed tracker. - * - * @throws Exception - */ - public void testReservationOnBlacklistedTracker() throws Exception { - TaskAttemptID[] taskAttemptID = new TaskAttemptID[3]; - JobConf conf = new JobConf(); - conf.setSpeculativeExecution(false); - conf.setNumMapTasks(2); - conf.setNumReduceTasks(2); - conf.set(JobContext.REDUCE_FAILURES_MAXPERCENT, ".70"); - conf.set(JobContext.MAP_FAILURES_MAX_PERCENT, ".70"); - conf.setBoolean(JobContext.SETUP_CLEANUP_NEEDED, false); - conf.setMaxTaskFailuresPerTracker(1); - FakeJobInProgress job = new FakeJobInProgress(conf, jobTracker); - job.setClusterSize(trackers.length); - job.initTasks(); - - TaskTracker tt1 = jobTracker.getTaskTracker(trackers[0]); - TaskTracker tt2 = jobTracker.getTaskTracker(trackers[1]); - TaskTracker tt3 = jobTracker.getTaskTracker(trackers[2]); - TaskTrackerStatus status1 = new TaskTrackerStatus( - trackers[0],JobInProgress.convertTrackerNameToHostName( - trackers[0]),0,new ArrayList(), 0, 2, 2); - TaskTrackerStatus status2 = new TaskTrackerStatus( - trackers[1],JobInProgress.convertTrackerNameToHostName( - trackers[1]),0,new ArrayList(), 0, 2, 2); - TaskTrackerStatus status3 = new TaskTrackerStatus( - trackers[1],JobInProgress.convertTrackerNameToHostName( - trackers[1]),0,new ArrayList(), 0, 2, 2); - tt1.setStatus(status1); - tt2.setStatus(status2); - tt3.setStatus(status3); - - tt1.reserveSlots(TaskType.MAP, job, 2); - tt1.reserveSlots(TaskType.REDUCE, job, 2); - tt3.reserveSlots(TaskType.MAP, job, 2); - tt3.reserveSlots(TaskType.REDUCE, job, 2); - - assertEquals("Trackers not reserved for the job : maps", - 2, job.getNumReservedTaskTrackersForMaps()); - assertEquals("Trackers not reserved for the job : reduces", - 2, job.getNumReservedTaskTrackersForReduces()); - ClusterMetrics metrics = jobTracker.getClusterMetrics(); - assertEquals("reserved map slots do not match", - 4, metrics.getReservedMapSlots()); - assertEquals("reserved reduce slots do not match", - 4, metrics.getReservedReduceSlots()); - - /* - * FakeJobInProgress.findMapTask does not handle - * task failures. So working around it by failing - * reduce and blacklisting tracker. - * Then finish the map task later. - */ - TaskAttemptID mTid = job.findMapTask(trackers[0]); - TaskAttemptID rTid = job.findReduceTask(trackers[0]); - //Task should blacklist the tasktracker. 
- job.failTask(rTid); - - assertEquals("Tracker 0 not blacklisted for the job", 1, - job.getBlackListedTrackers().size()); - assertEquals("Extra Trackers reserved for the job : maps", - 1, job.getNumReservedTaskTrackersForMaps()); - assertEquals("Extra Trackers reserved for the job : reduces", - 1, job.getNumReservedTaskTrackersForReduces()); - metrics = jobTracker.getClusterMetrics(); - assertEquals("reserved map slots do not match", - 2, metrics.getReservedMapSlots()); - assertEquals("reserved reduce slots do not match", - 2, metrics.getReservedReduceSlots()); - - //Finish the map task on the tracker 1. Finishing it here to work - //around bug in the FakeJobInProgress object - job.finishTask(mTid); - mTid = job.findMapTask(trackers[1]); - rTid = job.findReduceTask(trackers[1]); - job.finishTask(mTid); - job.finishTask(rTid); - rTid = job.findReduceTask(trackers[1]); - job.finishTask(rTid); - assertEquals("Job didnt complete successfully complete", job.getStatus() - .getRunState(), JobStatus.SUCCEEDED); - assertEquals("Trackers not unreserved for the job : maps", - 0, job.getNumReservedTaskTrackersForMaps()); - assertEquals("Trackers not unreserved for the job : reduces", - 0, job.getNumReservedTaskTrackersForReduces()); - metrics = jobTracker.getClusterMetrics(); - assertEquals("reserved map slots do not match", - 0, metrics.getReservedMapSlots()); - assertEquals("reserved reduce slots do not match", - 0, metrics.getReservedReduceSlots()); - } -} - \ No newline at end of file diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestUserLogCleanup.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestUserLogCleanup.java deleted file mode 100644 index 1592025d062..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestUserLogCleanup.java +++ /dev/null @@ -1,288 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import java.io.File; -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.mapred.UtilsForTests.FakeClock; -import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.apache.hadoop.mapreduce.server.tasktracker.Localizer; -import org.apache.hadoop.mapreduce.util.MRAsyncDiskService; - -import static org.junit.Assert.*; - -import org.junit.After; -import org.junit.Test; - -public class TestUserLogCleanup { - private static String jtid = "test"; - private static long ONE_HOUR = 1000 * 60 * 60; - private Localizer localizer; - private UserLogCleaner taskLogCleanupThread; - private TaskTracker tt; - private FakeClock myClock = new FakeClock(); - private JobID jobid1 = new JobID(jtid, 1); - private JobID jobid2 = new JobID(jtid, 2); - private JobID jobid3 = new JobID(jtid, 3); - private JobID jobid4 = new JobID(jtid, 4); - private File foo = new File(TaskLog.getUserLogDir(), "foo"); - private File bar = new File(TaskLog.getUserLogDir(), "bar"); - - public TestUserLogCleanup() throws IOException { - Configuration conf = new Configuration(); - localizer = new Localizer(FileSystem.get(conf), conf - .getStrings(MRConfig.LOCAL_DIR), new DefaultTaskController()); - taskLogCleanupThread = new UserLogCleaner(conf); - taskLogCleanupThread.setClock(myClock); - tt = new TaskTracker(); - tt.setConf(new JobConf(conf)); - tt.setLocalizer(localizer); - tt.setTaskLogCleanupThread(taskLogCleanupThread); - } - - @After - public void tearDown() { - FileUtil.fullyDelete(TaskLog.getUserLogDir()); - } - - private File localizeJob(JobID jobid) throws IOException { - File jobUserlog = TaskLog.getJobDir(jobid); - - JobConf conf = new JobConf(); - // localize job log directory - tt.initializeJobLogDir(jobid, conf); - assertTrue(jobUserlog + " directory is not created.", jobUserlog.exists()); - return jobUserlog; - } - - private void jobFinished(JobID jobid, int logRetainHours) { - Configuration jobconf = new Configuration(); - jobconf.setInt(MRJobConfig.USER_LOG_RETAIN_HOURS, logRetainHours); - taskLogCleanupThread.markJobLogsForDeletion(myClock.getTime(), jobconf, - jobid); - } - - /** - * Tests job user-log directory deletion. - * - * Adds two jobs for log deletion. One with one hour retain hours, other with - * two retain hours. After an hour, - * TaskLogCleanupThread.processCompletedJobs() call, - * makes sure job with 1hr retain hours is removed and other is retained. - * After one more hour, job with 2hr retain hours is also removed. 
- * - * @throws IOException - */ - @Test - public void testJobLogCleanup() throws IOException { - File jobUserlog1 = localizeJob(jobid1); - File jobUserlog2 = localizeJob(jobid2); - - // add job user log directory for deletion, with 2 hours for deletion - jobFinished(jobid1, 2); - - // add the job for deletion with one hour as retain hours - jobFinished(jobid2, 1); - - // remove old logs and see jobid1 is not removed and jobid2 is removed - myClock.advance(ONE_HOUR); - taskLogCleanupThread.processCompletedJobs(); - assertTrue(jobUserlog1 + " got deleted", jobUserlog1.exists()); - assertFalse(jobUserlog2 + " still exists.", jobUserlog2.exists()); - - myClock.advance(ONE_HOUR); - // remove old logs and see jobid1 is removed now - taskLogCleanupThread.processCompletedJobs(); - assertFalse(jobUserlog1 + " still exists.", jobUserlog1.exists()); - } - - /** - * Tests user-log directory cleanup on a TT re-init with 3 hours as log - * retain hours for tracker. - * - * Adds job1 deletion before the re-init with 2 hour retain hours. - * Adds job2 for which there are no tasks/killJobAction after the re-init. - * Adds job3 for which there is localizeJob followed by killJobAction - * with 3 hours as retain hours. - * Adds job4 for which there are some tasks after the re-init. - * - * @throws IOException - */ - @Test - public void testUserLogCleanup() throws IOException { - File jobUserlog1 = localizeJob(jobid1); - File jobUserlog2 = localizeJob(jobid2); - File jobUserlog3 = localizeJob(jobid3); - File jobUserlog4 = localizeJob(jobid4); - // create a some files/dirs in userlog - foo.mkdirs(); - bar.createNewFile(); - - // add the jobid1 for deletion with retainhours = 2 - jobFinished(jobid1, 2); - - // time is now 1. - myClock.advance(ONE_HOUR); - - // mimic TaskTracker reinit - // clear userlog directory - // job directories will be added with 3 hours as retain hours. They will be - // deleted at time 4. - Configuration conf = new Configuration(); - conf.setInt(MRJobConfig.USER_LOG_RETAIN_HOURS, 3); - taskLogCleanupThread.clearOldUserLogs(conf); - assertFalse(foo.exists()); - assertFalse(bar.exists()); - assertTrue(jobUserlog1.exists()); - assertTrue(jobUserlog2.exists()); - assertTrue(jobUserlog3.exists()); - assertTrue(jobUserlog4.exists()); - assertTrue(new File(TaskLog.getUserLogDir(), MRAsyncDiskService.TOBEDELETED) - .exists()); - - myClock.advance(ONE_HOUR); - // time is now 2. - taskLogCleanupThread.processCompletedJobs(); - assertFalse(jobUserlog1.exists()); - assertTrue(jobUserlog2.exists()); - assertTrue(jobUserlog3.exists()); - assertTrue(jobUserlog4.exists()); - - // mimic localizeJob followed KillJobAction for jobid3 - // add the job for deletion with retainhours = 3. - // jobid3 should be deleted at time 5. - jobUserlog3 = localizeJob(jobid3); - jobFinished(jobid3, 3); - - // mimic localizeJob for jobid4 - jobUserlog4 = localizeJob(jobid4); - - // do cleanup - myClock.advance(2 * ONE_HOUR); - // time is now 4. - taskLogCleanupThread.processCompletedJobs(); - - // jobid2 will be deleted - assertFalse(jobUserlog1.exists()); - assertFalse(jobUserlog2.exists()); - assertTrue(jobUserlog3.exists()); - assertTrue(jobUserlog4.exists()); - - myClock.advance(ONE_HOUR); - // time is now 5. - // do cleanup again - taskLogCleanupThread.processCompletedJobs(); - - // jobid3 will be deleted - assertFalse(jobUserlog1.exists()); - assertFalse(jobUserlog2.exists()); - assertFalse(jobUserlog3.exists()); - assertTrue(jobUserlog4.exists()); - } - - /** - * Tests user-log directory cleanup on a TT restart. 
- * - * Adds job1 deletion before the restart with 2 hour retain hours. - * Adds job2 for which there are no tasks/killJobAction after the restart. - * Adds job3 for which there is localizeJob followed by killJobAction after - * the restart with 3 hours retain hours. - * Adds job4 for which there are some tasks after the restart. - * - * @throws IOException - */ - @Test - public void testUserLogCleanupAfterRestart() throws IOException { - File jobUserlog1 = localizeJob(jobid1); - File jobUserlog2 = localizeJob(jobid2); - File jobUserlog3 = localizeJob(jobid3); - File jobUserlog4 = localizeJob(jobid4); - // create a some files/dirs in userlog - foo.mkdirs(); - bar.createNewFile(); - - // add the jobid1 for deletion with retainhours = 2 - jobFinished(jobid1, 2); - - // time is now 1. - myClock.advance(ONE_HOUR); - - // mimic TaskTracker restart - // clear userlog directory - // job directories will be added with 3 hours as retain hours. - Configuration conf = new Configuration(); - conf.setInt(MRJobConfig.USER_LOG_RETAIN_HOURS, 3); - taskLogCleanupThread = new UserLogCleaner(conf); - myClock = new FakeClock(); // clock is reset. - taskLogCleanupThread.setClock(myClock); - taskLogCleanupThread.clearOldUserLogs(conf); - tt.setTaskLogCleanupThread(taskLogCleanupThread); - assertFalse(foo.exists()); - assertFalse(bar.exists()); - assertTrue(jobUserlog1.exists()); - assertTrue(jobUserlog2.exists()); - assertTrue(jobUserlog3.exists()); - assertTrue(jobUserlog4.exists()); - assertTrue(new File(TaskLog.getUserLogDir(), MRAsyncDiskService.TOBEDELETED) - .exists()); - - myClock.advance(ONE_HOUR); - // time is now 1. - taskLogCleanupThread.processCompletedJobs(); - assertTrue(jobUserlog1.exists()); - assertTrue(jobUserlog2.exists()); - assertTrue(jobUserlog3.exists()); - assertTrue(jobUserlog4.exists()); - - // mimic localizeJob followed KillJobAction for jobid3 - // add the job for deletion with retainhours = 3. - // jobid3 should be deleted at time 4. - jobUserlog3 = localizeJob(jobid3); - jobFinished(jobid3, 3); - - // mimic localizeJob for jobid4 - jobUserlog4 = localizeJob(jobid4); - - // do cleanup - myClock.advance(2 * ONE_HOUR); - // time is now 3. - taskLogCleanupThread.processCompletedJobs(); - - // jobid1 and jobid2 will be deleted - assertFalse(jobUserlog1.exists()); - assertFalse(jobUserlog2.exists()); - assertTrue(jobUserlog3.exists()); - assertTrue(jobUserlog4.exists()); - - myClock.advance(ONE_HOUR); - // time is now 4. - // do cleanup again - taskLogCleanupThread.processCompletedJobs(); - - // jobid3 will be deleted - assertFalse(jobUserlog1.exists()); - assertFalse(jobUserlog2.exists()); - assertFalse(jobUserlog3.exists()); - assertTrue(jobUserlog4.exists()); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java deleted file mode 100644 index 1c7e70c4a4d..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java +++ /dev/null @@ -1,792 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapred; - -import java.io.File; -import java.io.FileInputStream; -import java.io.DataOutputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.text.DecimalFormat; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Enumeration; -import java.util.Iterator; -import java.util.List; -import java.util.Properties; - -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.DFSTestUtil; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.io.BytesWritable; -import org.apache.hadoop.io.SequenceFile; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.WritableComparable; -import org.apache.hadoop.io.SequenceFile.CompressionType; -import org.apache.hadoop.mapred.SortValidator.RecordStatsChecker.NonSplitableSequenceFileInputFormat; -import org.apache.hadoop.mapred.lib.IdentityMapper; -import org.apache.hadoop.mapred.lib.IdentityReducer; -import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.util.StringUtils; - -import org.apache.commons.logging.Log; - -/** - * Utilities used in unit test. 
- * - */ -public class UtilsForTests { - - static final Log LOG = LogFactory.getLog(UtilsForTests.class); - - final static long KB = 1024L * 1; - final static long MB = 1024L * KB; - final static long GB = 1024L * MB; - final static long TB = 1024L * GB; - final static long PB = 1024L * TB; - final static Object waitLock = new Object(); - - static DecimalFormat dfm = new DecimalFormat("####.000"); - static DecimalFormat ifm = new DecimalFormat("###,###,###,###,###"); - - public static String dfmt(double d) { - return dfm.format(d); - } - - public static String ifmt(double d) { - return ifm.format(d); - } - - public static String formatBytes(long numBytes) { - StringBuffer buf = new StringBuffer(); - boolean bDetails = true; - double num = numBytes; - - if (numBytes < KB) { - buf.append(numBytes + " B"); - bDetails = false; - } else if (numBytes < MB) { - buf.append(dfmt(num / KB) + " KB"); - } else if (numBytes < GB) { - buf.append(dfmt(num / MB) + " MB"); - } else if (numBytes < TB) { - buf.append(dfmt(num / GB) + " GB"); - } else if (numBytes < PB) { - buf.append(dfmt(num / TB) + " TB"); - } else { - buf.append(dfmt(num / PB) + " PB"); - } - if (bDetails) { - buf.append(" (" + ifmt(numBytes) + " bytes)"); - } - return buf.toString(); - } - - public static String formatBytes2(long numBytes) { - StringBuffer buf = new StringBuffer(); - long u = 0; - if (numBytes >= TB) { - u = numBytes / TB; - numBytes -= u * TB; - buf.append(u + " TB "); - } - if (numBytes >= GB) { - u = numBytes / GB; - numBytes -= u * GB; - buf.append(u + " GB "); - } - if (numBytes >= MB) { - u = numBytes / MB; - numBytes -= u * MB; - buf.append(u + " MB "); - } - if (numBytes >= KB) { - u = numBytes / KB; - numBytes -= u * KB; - buf.append(u + " KB "); - } - buf.append(u + " B"); //even if zero - return buf.toString(); - } - - static final String regexpSpecials = "[]()?*+|.!^-\\~@"; - - public static String regexpEscape(String plain) { - StringBuffer buf = new StringBuffer(); - char[] ch = plain.toCharArray(); - int csup = ch.length; - for (int c = 0; c < csup; c++) { - if (regexpSpecials.indexOf(ch[c]) != -1) { - buf.append("\\"); - } - buf.append(ch[c]); - } - return buf.toString(); - } - - public static String safeGetCanonicalPath(File f) { - try { - String s = f.getCanonicalPath(); - return (s == null) ? 
f.toString() : s; - } catch (IOException io) { - return f.toString(); - } - } - - public static String slurp(File f) throws IOException { - int len = (int) f.length(); - byte[] buf = new byte[len]; - FileInputStream in = new FileInputStream(f); - String contents = null; - try { - in.read(buf, 0, len); - contents = new String(buf, "UTF-8"); - } finally { - in.close(); - } - return contents; - } - - public static String slurpHadoop(Path p, FileSystem fs) throws IOException { - int len = (int) fs.getFileStatus(p).getLen(); - byte[] buf = new byte[len]; - InputStream in = fs.open(p); - String contents = null; - try { - in.read(buf, 0, len); - contents = new String(buf, "UTF-8"); - } finally { - in.close(); - } - return contents; - } - - public static String rjustify(String s, int width) { - if (s == null) s = "null"; - if (width > s.length()) { - s = getSpace(width - s.length()) + s; - } - return s; - } - - public static String ljustify(String s, int width) { - if (s == null) s = "null"; - if (width > s.length()) { - s = s + getSpace(width - s.length()); - } - return s; - } - - static char[] space; - static { - space = new char[300]; - Arrays.fill(space, '\u0020'); - } - - public static String getSpace(int len) { - if (len > space.length) { - space = new char[Math.max(len, 2 * space.length)]; - Arrays.fill(space, '\u0020'); - } - return new String(space, 0, len); - } - - /** - * Gets job status from the jobtracker given the jobclient and the job id - */ - static JobStatus getJobStatus(JobClient jc, JobID id) throws IOException { - JobStatus[] statuses = jc.getAllJobs(); - for (JobStatus jobStatus : statuses) { - if (jobStatus.getJobID().equals(id)) { - return jobStatus; - } - } - return null; - } - - /** - * A utility that waits for specified amount of time - */ - public static void waitFor(long duration) { - try { - synchronized (waitLock) { - waitLock.wait(duration); - } - } catch (InterruptedException ie) {} - } - - /** - * Wait for the jobtracker to be RUNNING. - */ - static void waitForJobTracker(JobClient jobClient) { - while (true) { - try { - ClusterStatus status = jobClient.getClusterStatus(); - while (status.getJobTrackerStatus() != JobTrackerStatus.RUNNING) { - waitFor(100); - status = jobClient.getClusterStatus(); - } - break; // means that the jt is ready - } catch (IOException ioe) {} - } - } - - /** - * Waits until all the jobs at the jobtracker complete. 
- */ - static void waitTillDone(JobClient jobClient) throws IOException { - // Wait for the last job to complete - while (true) { - boolean shouldWait = false; - for (JobStatus jobStatuses : jobClient.getAllJobs()) { - if (jobStatuses.getRunState() != JobStatus.SUCCEEDED - && jobStatuses.getRunState() != JobStatus.FAILED - && jobStatuses.getRunState() != JobStatus.KILLED) { - shouldWait = true; - break; - } - } - if (shouldWait) { - waitFor(100); - } else { - break; - } - } - } - - /** - * Configure a waiting job - */ - static void configureWaitingJobConf(JobConf jobConf, Path inDir, - Path outputPath, int numMaps, int numRed, - String jobName, String mapSignalFilename, - String redSignalFilename) - throws IOException { - jobConf.setJobName(jobName); - jobConf.setInputFormat(NonSplitableSequenceFileInputFormat.class); - jobConf.setOutputFormat(SequenceFileOutputFormat.class); - FileInputFormat.setInputPaths(jobConf, inDir); - FileOutputFormat.setOutputPath(jobConf, outputPath); - jobConf.setMapperClass(UtilsForTests.HalfWaitingMapper.class); - jobConf.setReducerClass(IdentityReducer.class); - jobConf.setOutputKeyClass(BytesWritable.class); - jobConf.setOutputValueClass(BytesWritable.class); - jobConf.setInputFormat(RandomInputFormat.class); - jobConf.setNumMapTasks(numMaps); - jobConf.setNumReduceTasks(numRed); - jobConf.setJar("build/test/mapred/testjar/testjob.jar"); - jobConf.set(getTaskSignalParameter(true), mapSignalFilename); - jobConf.set(getTaskSignalParameter(false), redSignalFilename); - } - - /** - * Commonly used map and reduce classes - */ - - /** - * Map is a Mapper that just waits for a file to be created on the dfs. The - * file creation is a signal to the mappers and hence acts as a waiting job. - */ - - static class WaitingMapper - extends MapReduceBase - implements Mapper { - - FileSystem fs = null; - Path signal; - int id = 0; - int totalMaps = 0; - - /** - * Checks if the map task needs to wait. By default all the maps will wait. - * This method needs to be overridden to make a custom waiting mapper. - */ - public boolean shouldWait(int id) { - return true; - } - - /** - * Returns a signal file on which the map task should wait. By default all - * the maps wait on a single file passed as test.mapred.map.waiting.target. - * This method needs to be overridden to make a custom waiting mapper - */ - public Path getSignalFile(int id) { - return signal; - } - - /** The waiting function. The map exits once it gets a signal. Here the - * signal is the file existence. 
- */ - public void map(WritableComparable key, Writable val, - OutputCollector output, - Reporter reporter) - throws IOException { - if (shouldWait(id)) { - if (fs != null) { - while (!fs.exists(getSignalFile(id))) { - try { - reporter.progress(); - synchronized (this) { - this.wait(1000); // wait for 1 sec - } - } catch (InterruptedException ie) { - System.out.println("Interrupted while the map was waiting for " - + " the signal."); - break; - } - } - } else { - throw new IOException("Could not get the DFS!!"); - } - } - } - - public void configure(JobConf conf) { - try { - String taskId = conf.get(JobContext.TASK_ATTEMPT_ID); - id = Integer.parseInt(taskId.split("_")[4]); - totalMaps = Integer.parseInt(conf.get(JobContext.NUM_MAPS)); - fs = FileSystem.get(conf); - signal = new Path(conf.get(getTaskSignalParameter(true))); - } catch (IOException ioe) { - System.out.println("Got an exception while obtaining the filesystem"); - } - } - } - - /** Only the later half of the maps wait for the signal while the rest - * complete immediately. - */ - static class HalfWaitingMapper extends WaitingMapper { - @Override - public boolean shouldWait(int id) { - return id >= (totalMaps / 2); - } - } - - /** - * Reduce that just waits for a file to be created on the dfs. The - * file creation is a signal to the reduce. - */ - - static class WaitingReducer extends MapReduceBase - implements Reducer { - - FileSystem fs = null; - Path signal; - - /** The waiting function. The reduce exits once it gets a signal. Here the - * signal is the file existence. - */ - public void reduce(WritableComparable key, Iterator val, - OutputCollector output, - Reporter reporter) - throws IOException { - if (fs != null) { - while (!fs.exists(signal)) { - try { - reporter.progress(); - synchronized (this) { - this.wait(1000); // wait for 1 sec - } - } catch (InterruptedException ie) { - System.out.println("Interrupted while the map was waiting for the" - + " signal."); - break; - } - } - } else { - throw new IOException("Could not get the DFS!!"); - } - } - - public void configure(JobConf conf) { - try { - fs = FileSystem.get(conf); - signal = new Path(conf.get(getTaskSignalParameter(false))); - } catch (IOException ioe) { - System.out.println("Got an exception while obtaining the filesystem"); - } - } - } - - static String getTaskSignalParameter(boolean isMap) { - return isMap - ? "test.mapred.map.waiting.target" - : "test.mapred.reduce.waiting.target"; - } - - /** - * Signal the maps/reduces to start. - */ - static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys, - String mapSignalFile, - String reduceSignalFile, int replication) - throws Exception { - writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(mapSignalFile), - (short)replication); - writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(reduceSignalFile), - (short)replication); - } - - /** - * Signal the maps/reduces to start. - */ - static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys, - boolean isMap, String mapSignalFile, - String reduceSignalFile) - throws Exception { - // signal the maps to complete - writeFile(dfs.getNameNode(), fileSys.getConf(), - isMap - ? 
new Path(mapSignalFile) - : new Path(reduceSignalFile), (short)1); - } - - static String getSignalFile(Path dir) { - return (new Path(dir, "signal")).toString(); - } - - static String getMapSignalFile(Path dir) { - return (new Path(dir, "map-signal")).toString(); - } - - static String getReduceSignalFile(Path dir) { - return (new Path(dir, "reduce-signal")).toString(); - } - - static void writeFile(NameNode namenode, Configuration conf, Path name, - short replication) throws Exception { - FileSystem fileSys = FileSystem.get(conf); - SequenceFile.Writer writer = - SequenceFile.createWriter(fileSys, conf, name, - BytesWritable.class, BytesWritable.class, - CompressionType.NONE); - writer.append(new BytesWritable(), new BytesWritable()); - writer.close(); - fileSys.setReplication(name, replication); - DFSTestUtil.waitReplication(fileSys, name, replication); - } - - // Input formats - /** - * A custom input format that creates virtual inputs of a single string - * for each map. - */ - public static class RandomInputFormat implements InputFormat { - - public InputSplit[] getSplits(JobConf job, - int numSplits) throws IOException { - InputSplit[] result = new InputSplit[numSplits]; - Path outDir = FileOutputFormat.getOutputPath(job); - for(int i=0; i < result.length; ++i) { - result[i] = new FileSplit(new Path(outDir, "dummy-split-" + i), - 0, 1, (String[])null); - } - return result; - } - - static class RandomRecordReader implements RecordReader { - Path name; - public RandomRecordReader(Path p) { - name = p; - } - public boolean next(Text key, Text value) { - if (name != null) { - key.set(name.getName()); - name = null; - return true; - } - return false; - } - public Text createKey() { - return new Text(); - } - public Text createValue() { - return new Text(); - } - public long getPos() { - return 0; - } - public void close() {} - public float getProgress() { - return 0.0f; - } - } - - public RecordReader getRecordReader(InputSplit split, - JobConf job, - Reporter reporter) - throws IOException { - return new RandomRecordReader(((FileSplit) split).getPath()); - } - } - - // Start a job and return its RunningJob object - static RunningJob runJob(JobConf conf, Path inDir, Path outDir) - throws IOException { - return runJob(conf, inDir, outDir, conf.getNumMapTasks(), conf.getNumReduceTasks()); - } - - // Start a job and return its RunningJob object - static RunningJob runJob(JobConf conf, Path inDir, Path outDir, int numMaps, - int numReds) throws IOException { - - String input = "The quick brown fox\n" + "has many silly\n" - + "red fox sox\n"; - - // submit the job and wait for it to complete - return runJob(conf, inDir, outDir, numMaps, numReds, input); - } - - // Start a job with the specified input and return its RunningJob object - static RunningJob runJob(JobConf conf, Path inDir, Path outDir, int numMaps, - int numReds, String input) throws IOException { - FileSystem fs = FileSystem.get(conf); - if (fs.exists(outDir)) { - fs.delete(outDir, true); - } - if (!fs.exists(inDir)) { - fs.mkdirs(inDir); - } - - for (int i = 0; i < numMaps; ++i) { - DataOutputStream file = fs.create(new Path(inDir, "part-" + i)); - file.writeBytes(input); - file.close(); - } - - conf.setInputFormat(TextInputFormat.class); - conf.setOutputKeyClass(LongWritable.class); - conf.setOutputValueClass(Text.class); - - FileInputFormat.setInputPaths(conf, inDir); - FileOutputFormat.setOutputPath(conf, outDir); - conf.setNumMapTasks(numMaps); - conf.setNumReduceTasks(numReds); - - JobClient jobClient = new JobClient(conf); - 
RunningJob job = jobClient.submitJob(conf); - - return job; - } - - // Run a job that will be succeeded and wait until it completes - public static RunningJob runJobSucceed(JobConf conf, Path inDir, Path outDir) - throws IOException { - conf.setJobName("test-job-succeed"); - conf.setMapperClass(IdentityMapper.class); - conf.setReducerClass(IdentityReducer.class); - - RunningJob job = UtilsForTests.runJob(conf, inDir, outDir); - while (!job.isComplete()) { - try { - Thread.sleep(100); - } catch (InterruptedException e) { - break; - } - } - - return job; - } - - // Run a job that will be failed and wait until it completes - public static RunningJob runJobFail(JobConf conf, Path inDir, Path outDir) - throws IOException { - conf.setJobName("test-job-fail"); - conf.setMapperClass(FailMapper.class); - conf.setReducerClass(IdentityReducer.class); - conf.setMaxMapAttempts(1); - - RunningJob job = UtilsForTests.runJob(conf, inDir, outDir); - while (!job.isComplete()) { - try { - Thread.sleep(100); - } catch (InterruptedException e) { - break; - } - } - - return job; - } - - // Run a job that will be killed and wait until it completes - public static RunningJob runJobKill(JobConf conf, Path inDir, Path outDir) - throws IOException { - - conf.setJobName("test-job-kill"); - conf.setMapperClass(KillMapper.class); - conf.setReducerClass(IdentityReducer.class); - - RunningJob job = UtilsForTests.runJob(conf, inDir, outDir); - while (job.getJobState() != JobStatus.RUNNING) { - try { - Thread.sleep(100); - } catch (InterruptedException e) { - break; - } - } - job.killJob(); - while (job.cleanupProgress() == 0.0f) { - try { - Thread.sleep(10); - } catch (InterruptedException ie) { - break; - } - } - - return job; - } - - /** - * Cleans up files/dirs inline. CleanupQueue deletes in a separate thread - * asynchronously. - */ - public static class InlineCleanupQueue extends CleanupQueue { - List stalePaths = new ArrayList(); - - public InlineCleanupQueue() { - // do nothing - } - - @Override - public void addToQueue(PathDeletionContext... contexts) { - // delete paths in-line - for (PathDeletionContext context : contexts) { - try { - if (!deletePath(context)) { - LOG.warn("Stale path " + context.fullPath); - stalePaths.add(context.fullPath); - } - } catch (IOException e) { - LOG.warn("Caught exception while deleting path " - + context.fullPath); - LOG.info(StringUtils.stringifyException(e)); - stalePaths.add(context.fullPath); - } - } - } - } - - static class FakeClock extends Clock { - long time = 0; - - public void advance(long millis) { - time += millis; - } - - @Override - long getTime() { - return time; - } - } - // Mapper that fails - static class FailMapper extends MapReduceBase implements - Mapper { - - public void map(WritableComparable key, Writable value, - OutputCollector out, Reporter reporter) - throws IOException { - //NOTE- the next line is required for the TestDebugScript test to succeed - System.err.println("failing map"); - throw new RuntimeException("failing map"); - } - } - - // Mapper that sleeps for a long time. 
- // Used for running a job that will be killed - static class KillMapper extends MapReduceBase implements - Mapper { - - public void map(WritableComparable key, Writable value, - OutputCollector out, Reporter reporter) - throws IOException { - - try { - Thread.sleep(1000000); - } catch (InterruptedException e) { - // Do nothing - } - } - } - - static void setUpConfigFile(Properties confProps, File configFile) - throws IOException { - Configuration config = new Configuration(false); - FileOutputStream fos = new FileOutputStream(configFile); - - for (Enumeration e = confProps.propertyNames(); e.hasMoreElements();) { - String key = (String) e.nextElement(); - config.set(key, confProps.getProperty(key)); - } - - config.writeXml(fos); - fos.close(); - } - - static JobTracker getJobTracker() { - JobTracker jt = new JobTracker(); - return jt; - } - - /** - * This creates a file in the dfs - * @param dfs FileSystem Local File System where file needs to be picked - * @param URIPATH Path dfs path where file needs to be copied - * @param permission FsPermission File permission - * @return returns the DataOutputStream - */ - public static DataOutputStream - createTmpFileDFS(FileSystem dfs, Path URIPATH, - FsPermission permission, String input) throws Exception { - //Creating the path with the file - DataOutputStream file = - FileSystem.create(dfs, URIPATH, permission); - file.writeBytes(input); - file.close(); - return file; - } - - /** - * This formats the long tasktracker name to just the FQDN - * @param taskTrackerLong String The long format of the tasktracker string - * @return String The FQDN of the tasktracker - * @throws Exception - */ - public static String getFQDNofTT (String taskTrackerLong) throws Exception { - //Getting the exact FQDN of the tasktracker from the tasktracker string. - String[] firstSplit = taskTrackerLong.split("_"); - String tmpOutput = firstSplit[1]; - String[] secondSplit = tmpOutput.split(":"); - String tmpTaskTracker = secondSplit[0]; - return tmpTaskTracker; - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/WordCount.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/WordCount.java deleted file mode 100644 index 60d29001924..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/WordCount.java +++ /dev/null @@ -1,159 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.mapred; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.StringTokenizer; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.IntWritable; -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapred.FileInputFormat; -import org.apache.hadoop.mapred.FileOutputFormat; -import org.apache.hadoop.mapred.JobClient; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.MapReduceBase; -import org.apache.hadoop.mapred.Mapper; -import org.apache.hadoop.mapred.OutputCollector; -import org.apache.hadoop.mapred.Reducer; -import org.apache.hadoop.mapred.Reporter; -import org.apache.hadoop.util.Tool; -import org.apache.hadoop.util.ToolRunner; - -/** - * This is an example Hadoop Map/Reduce application. - * It reads the text input files, breaks each line into words - * and counts them. The output is a locally sorted list of words and the - * count of how often they occurred. - * - * To run: bin/hadoop jar build/hadoop-examples.jar wordcount - * [-m maps] [-r reduces] in-dir out-dir - */ -public class WordCount extends Configured implements Tool { - - /** - * Counts the words in each line. - * For each line of input, break the line into words and emit them as - * (word, 1). - */ - public static class MapClass extends MapReduceBase - implements Mapper { - - private final static IntWritable one = new IntWritable(1); - private Text word = new Text(); - - public void map(LongWritable key, Text value, - OutputCollector output, - Reporter reporter) throws IOException { - String line = value.toString(); - StringTokenizer itr = new StringTokenizer(line); - while (itr.hasMoreTokens()) { - word.set(itr.nextToken()); - output.collect(word, one); - } - } - } - - /** - * A reducer class that just emits the sum of the input values. - */ - public static class Reduce extends MapReduceBase - implements Reducer { - - public void reduce(Text key, Iterator values, - OutputCollector output, - Reporter reporter) throws IOException { - int sum = 0; - while (values.hasNext()) { - sum += values.next().get(); - } - output.collect(key, new IntWritable(sum)); - } - } - - static int printUsage() { - System.out.println("wordcount [-m ] [-r ] "); - ToolRunner.printGenericCommandUsage(System.out); - return -1; - } - - /** - * The main driver for word count map/reduce program. - * Invoke this method to submit the map/reduce job. - * @throws IOException When there is communication problems with the - * job tracker. 
- */ - public int run(String[] args) throws Exception { - JobConf conf = new JobConf(getConf(), WordCount.class); - conf.setJobName("wordcount"); - - // the keys are words (strings) - conf.setOutputKeyClass(Text.class); - // the values are counts (ints) - conf.setOutputValueClass(IntWritable.class); - - conf.setMapperClass(MapClass.class); - conf.setCombinerClass(Reduce.class); - conf.setReducerClass(Reduce.class); - - List other_args = new ArrayList(); - for(int i=0; i < args.length; ++i) { - try { - if ("-m".equals(args[i])) { - conf.setNumMapTasks(Integer.parseInt(args[++i])); - } else if ("-r".equals(args[i])) { - conf.setNumReduceTasks(Integer.parseInt(args[++i])); - } else { - other_args.add(args[i]); - } - } catch (NumberFormatException except) { - System.out.println("ERROR: Integer expected instead of " + args[i]); - return printUsage(); - } catch (ArrayIndexOutOfBoundsException except) { - System.out.println("ERROR: Required parameter missing from " + - args[i-1]); - return printUsage(); - } - } - // Make sure there are exactly 2 parameters left. - if (other_args.size() != 2) { - System.out.println("ERROR: Wrong number of parameters: " + - other_args.size() + " instead of 2."); - return printUsage(); - } - FileInputFormat.setInputPaths(conf, other_args.get(0)); - FileOutputFormat.setOutputPath(conf, new Path(other_args.get(1))); - - JobClient.runJob(conf); - return 0; - } - - - public static void main(String[] args) throws Exception { - int res = ToolRunner.run(new Configuration(), new WordCount(), args); - System.exit(res); - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/concat.bz2 b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/concat.bz2 deleted file mode 100644 index f31fb0c32bb..00000000000 Binary files a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/concat.bz2 and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/concat.gz b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/concat.gz deleted file mode 100644 index 53d5a07fcae..00000000000 Binary files a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/concat.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/jobcontrol/JobControlTestUtils.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/jobcontrol/JobControlTestUtils.java deleted file mode 100644 index d160de5db61..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/jobcontrol/JobControlTestUtils.java +++ /dev/null @@ -1,154 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.mapred.jobcontrol; - -import java.io.IOException; -import java.text.NumberFormat; -import java.util.Iterator; -import java.util.List; -import java.util.Random; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapred.FileInputFormat; -import org.apache.hadoop.mapred.FileOutputFormat; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.MapReduceBase; -import org.apache.hadoop.mapred.Mapper; -import org.apache.hadoop.mapred.OutputCollector; -import org.apache.hadoop.mapred.Reducer; -import org.apache.hadoop.mapred.Reporter; - -/** - * Utility methods used in various Job Control unit tests. - */ -public class JobControlTestUtils { - - static private Random rand = new Random(); - - private static NumberFormat idFormat = NumberFormat.getInstance(); - - static { - idFormat.setMinimumIntegerDigits(4); - idFormat.setGroupingUsed(false); - } - - /** - * Cleans the data from the passed Path in the passed FileSystem. - * - * @param fs FileSystem to delete data from. - * @param dirPath Path to be deleted. - * @throws IOException If an error occurs cleaning the data. - */ - static void cleanData(FileSystem fs, Path dirPath) throws IOException { - fs.delete(dirPath, true); - } - - /** - * Generates a string of random digits. - * - * @return A random string. - */ - private static String generateRandomWord() { - return idFormat.format(rand.nextLong()); - } - - /** - * Generates a line of random text. - * - * @return A line of random text. - */ - private static String generateRandomLine() { - long r = rand.nextLong() % 7; - long n = r + 20; - StringBuffer sb = new StringBuffer(); - for (int i = 0; i < n; i++) { - sb.append(generateRandomWord()).append(" "); - } - sb.append("\n"); - return sb.toString(); - } - - /** - * Generates data that can be used for Job Control tests. - * - * @param fs FileSystem to create data in. - * @param dirPath Path to create the data in. - * @throws IOException If an error occurs creating the data. - */ - static void generateData(FileSystem fs, Path dirPath) throws IOException { - FSDataOutputStream out = fs.create(new Path(dirPath, "data.txt")); - for (int i = 0; i < 10000; i++) { - String line = generateRandomLine(); - out.write(line.getBytes("UTF-8")); - } - out.close(); - } - - /** - * Creates a simple copy job. - * - * @param indirs List of input directories. - * @param outdir Output directory. - * @return JobConf initialised for a simple copy job. - * @throws Exception If an error occurs creating job configuration. - */ - static JobConf createCopyJob(List indirs, Path outdir) throws Exception { - - Configuration defaults = new Configuration(); - JobConf theJob = new JobConf(defaults, TestJobControl.class); - theJob.setJobName("DataMoveJob"); - - FileInputFormat.setInputPaths(theJob, indirs.toArray(new Path[0])); - theJob.setMapperClass(DataCopy.class); - FileOutputFormat.setOutputPath(theJob, outdir); - theJob.setOutputKeyClass(Text.class); - theJob.setOutputValueClass(Text.class); - theJob.setReducerClass(DataCopy.class); - theJob.setNumMapTasks(12); - theJob.setNumReduceTasks(4); - return theJob; - } - - /** - * Simple Mapper and Reducer implementation which copies data it reads in. 
- */ - public static class DataCopy extends MapReduceBase implements - Mapper, Reducer { - public void map(LongWritable key, Text value, OutputCollector output, - Reporter reporter) throws IOException { - output.collect(new Text(key.toString()), value); - } - - public void reduce(Text key, Iterator values, - OutputCollector output, Reporter reporter) - throws IOException { - Text dumbKey = new Text(""); - while (values.hasNext()) { - Text data = values.next(); - output.collect(dumbKey, data); - } - } - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/test.jar b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/test.jar deleted file mode 100644 index 07539b138db..00000000000 Binary files a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/test.jar and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/test.tar b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/test.tar deleted file mode 100644 index b06a0aeaa4a..00000000000 Binary files a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/test.tar and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/test.tar.gz b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/test.tar.gz deleted file mode 100644 index 68974aefcc7..00000000000 Binary files a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/test.tar.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/test.tgz b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/test.tgz deleted file mode 100644 index 699dbc9642e..00000000000 Binary files a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/test.tgz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/test.txt b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/test.txt deleted file mode 100644 index 36703f52e13..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/test.txt +++ /dev/null @@ -1 +0,0 @@ -This is a test file used for testing caching jars, zip and normal files. 
diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/test.zip b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/test.zip deleted file mode 100644 index 3cf9cc1c3db..00000000000 Binary files a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/test.zip and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/testCompressThenConcat.txt.bz2 b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/testCompressThenConcat.txt.bz2 deleted file mode 100644 index a21c0e2c10b..00000000000 Binary files a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/testCompressThenConcat.txt.bz2 and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/testCompressThenConcat.txt.gz b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/testCompressThenConcat.txt.gz deleted file mode 100644 index 75e5f8c7f74..00000000000 Binary files a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/testCompressThenConcat.txt.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/testConcatThenCompress.txt.bz2 b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/testConcatThenCompress.txt.bz2 deleted file mode 100644 index 5983e52cc03..00000000000 Binary files a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/testConcatThenCompress.txt.bz2 and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/testConcatThenCompress.txt.gz b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/testConcatThenCompress.txt.gz deleted file mode 100644 index 6e8eaa56f78..00000000000 Binary files a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/testConcatThenCompress.txt.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/testscript.txt b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/testscript.txt deleted file mode 100644 index a9dd99db5d2..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/testscript.txt +++ /dev/null @@ -1,2 +0,0 @@ -echo 'Test Script' -cat $2 diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/TestJobACLs.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/TestJobACLs.java deleted file mode 100644 index de2fcc4a730..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/TestJobACLs.java +++ /dev/null @@ -1,463 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.mapreduce; - -import java.io.IOException; -import java.security.PrivilegedExceptionAction; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.JobInProgress; -import org.apache.hadoop.mapred.JobTracker; -import org.apache.hadoop.mapred.MiniMRCluster; -import org.apache.hadoop.mapred.Operation; -import static org.apache.hadoop.mapred.QueueManagerTestUtils.*; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.security.UserGroupInformation; -import org.junit.Before; -import org.junit.Test; -import org.junit.After; -import static org.junit.Assert.fail; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -/** - * Verify the job-ACLs - * - */ -public class TestJobACLs { - - static final Log LOG = LogFactory.getLog(TestJobACLs.class); - - private MiniMRCluster mr = null; - - private static final Path TEST_DIR = - new Path(System.getProperty("test.build.data", "/tmp"), - TestJobACLs.class.getCanonicalName() + Path.SEPARATOR - + "completed-job-store"); - - private String jobSubmitter = "jobSubmitter"; - private String viewColleague = "viewColleague"; - private String modifyColleague = "modifyColleague"; - private String qAdmin = "qAdmin"; - - /** - * Start the cluster before running the actual test. - * - * @throws IOException - */ - @Before - public void setup() throws Exception { - // Start the cluster - startCluster(false); - } - - private void startCluster(boolean reStart) throws Exception { - - // Configure job queues - String[] queueNames = {"default"}; - createQueuesConfigFile(queueNames, - new String[] { jobSubmitter }, new String[] { qAdmin }); - - JobConf conf = new JobConf(); - - // Enable queue and job level authorization - conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true); - - // Enable CompletedJobStore - FileSystem fs = FileSystem.getLocal(conf); - if (!reStart) { - fs.delete(TEST_DIR, true); - } - conf.set(JTConfig.JT_PERSIST_JOBSTATUS_DIR, - fs.makeQualified(TEST_DIR).toString()); - conf.setBoolean(JTConfig.JT_PERSIST_JOBSTATUS, true); - conf.set(JTConfig.JT_PERSIST_JOBSTATUS_HOURS, "1"); - - UserGroupInformation MR_UGI = UserGroupInformation.getLoginUser(); - mr = new MiniMRCluster(0, 0, 1, "file:///", 1, null, null, MR_UGI, conf); - } - - /** - * Kill the cluster after the test is done. - */ - @After - public void tearDown() { - deleteQueuesConfigFile(); - if (mr != null) { - mr.shutdown(); - } - } - - /** - * Test view-job-acl, modify-job-acl and acl persistence to the - * completed-jobs-store. - * - * @throws IOException - * @throws InterruptedException - * @throws ClassNotFoundException - */ - @Test - public void testACLS() throws Exception { - verifyACLViewJob(); - verifyACLModifyJob(modifyColleague); - verifyACLModifyJob(qAdmin); - verifyACLPersistence(); - } - - /** - * Verify JobContext.JOB_ACL_VIEW_JOB - * - * @throws IOException - * @throws InterruptedException - */ - private void verifyACLViewJob() throws IOException, InterruptedException { - - // Set the job up. 
- final Configuration myConf = mr.createJobConf(); - myConf.set(MRJobConfig.JOB_ACL_VIEW_JOB, viewColleague); - - // Submit the job as user1 - Job job = submitJobAsUser(myConf, jobSubmitter); - - final JobID jobId = job.getJobID(); - - // Try operations as an unauthorized user. - verifyViewJobAsUnauthorizedUser(myConf, jobId, modifyColleague); - - // Try operations as an authorized user, who is part of view-job-acl. - verifyViewJobAsAuthorizedUser(myConf, jobId, viewColleague); - - // Try operations as an authorized user, who is a queue administrator. - verifyViewJobAsAuthorizedUser(myConf, jobId, qAdmin); - - // Clean up the job - job.killJob(); - } - - /** - * Submits a sleep job with 1 map task that runs for a long time(60 sec) and - * wait for the job to go into RUNNING state. - * @param clusterConf - * @param user the jobOwner - * @return Job that is started - * @throws IOException - * @throws InterruptedException - */ - private Job submitJobAsUser(final Configuration clusterConf, String user) - throws IOException, InterruptedException { - UserGroupInformation ugi = - UserGroupInformation.createUserForTesting(user, new String[] {}); - Job job = (Job) ugi.doAs(new PrivilegedExceptionAction() { - @Override - public Object run() throws Exception { - SleepJob sleepJob = new SleepJob(); - sleepJob.setConf(clusterConf); - // Disable setup/cleanup tasks at the job level - sleepJob.getConf().setBoolean(MRJobConfig.SETUP_CLEANUP_NEEDED, false); - Job myJob = sleepJob.createJob(1, 0, 60000, 1, 1, 1); - myJob.submit(); - return myJob; - } - }); - - // Make the job go into RUNNING state by forceful initialization. - JobTracker jt = mr.getJobTrackerRunner().getJobTracker(); - JobInProgress jip = - jt.getJob(org.apache.hadoop.mapred.JobID.downgrade(job.getJobID())); - jt.initJob(jip); - - return job; - } - - private void verifyViewJobAsAuthorizedUser(final Configuration myConf, - final JobID jobId, String authorizedUser) throws IOException, - InterruptedException { - UserGroupInformation authorizedUGI = - UserGroupInformation.createUserForTesting(authorizedUser, - new String[] {}); - authorizedUGI.doAs(new PrivilegedExceptionAction() { - @SuppressWarnings("null") - @Override - public Object run() throws Exception { - Job myJob = null; - try { - Cluster cluster = new Cluster(myConf); - myJob = cluster.getJob(jobId); - } catch (Exception e) { - fail("Exception .." + e); - } - - assertNotNull("Job " + jobId + " is not known to the JobTracker!", - myJob); - - // Tests authorization with getCounters - try { - myJob.getCounters(); - } catch (IOException ioe) { - fail("Unexpected.. exception.. " + ioe); - } - - // Tests authorization with getTaskReports - try { - myJob.getTaskReports(TaskType.JOB_CLEANUP); - } catch (IOException ioe) { - fail("Unexpected.. exception.. " + ioe); - } - - return null; - } - }); - } - - private void verifyViewJobAsUnauthorizedUser(final Configuration myConf, - final JobID jobId, String unauthorizedUser) throws IOException, - InterruptedException { - UserGroupInformation unauthorizedUGI = - UserGroupInformation.createUserForTesting(unauthorizedUser, - new String[] {}); - unauthorizedUGI.doAs(new PrivilegedExceptionAction() { - @SuppressWarnings("null") - @Override - public Object run() { - Job myJob = null; - try { - Cluster cluster = new Cluster(myConf); - myJob = cluster.getJob(jobId); - } catch (Exception e) { - fail("Exception .." 
+ e); - } - - assertNotNull("Job " + jobId + " is not known to the JobTracker!", - myJob); - - // Tests authorization failure with getCounters - try { - myJob.getCounters(); - fail("AccessControlException expected.."); - } catch (IOException ioe) { - assertTrue(ioe.getMessage().contains( - " cannot perform operation " + JobACL.VIEW_JOB)); - } catch (InterruptedException e) { - fail("Exception .. interrupted.." + e); - } - - // Tests authorization failure with getTaskReports - try { - myJob.getTaskReports(TaskType.JOB_SETUP); - fail("AccessControlException expected.."); - } catch (IOException ioe) { - assertTrue(ioe.getMessage().contains( - " cannot perform operation " + JobACL.VIEW_JOB)); - } catch (InterruptedException e) { - fail("Exception .. interrupted.." + e); - } - - return null; - } - }); - } - - /** - * Verify MRConfig.Job_ACL_MODIFY_JOB - * - * @throws IOException - * @throws InterruptedException - * @throws ClassNotFoundException - */ - private void verifyACLModifyJob(String authorizedUser) throws IOException, - InterruptedException, ClassNotFoundException { - - // Set the job up. - final Configuration myConf = mr.createJobConf(); - myConf.set(MRJobConfig.JOB_ACL_MODIFY_JOB, modifyColleague); - - // Submit the job as user1 - Job job = submitJobAsUser(myConf, jobSubmitter); - - final JobID jobId = job.getJobID(); - - // Try operations as an unauthorized user. - verifyModifyJobAsUnauthorizedUser(myConf, jobId, viewColleague); - - // Try operations as an authorized user. - verifyModifyJobAsAuthorizedUser(myConf, jobId, authorizedUser); - } - - private void verifyModifyJobAsAuthorizedUser( - final Configuration clusterConf, final JobID jobId, - String authorizedUser) throws IOException, InterruptedException { - UserGroupInformation authorizedUGI = - UserGroupInformation.createUserForTesting(authorizedUser, - new String[] {}); - authorizedUGI.doAs(new PrivilegedExceptionAction() { - @SuppressWarnings("null") - @Override - public Object run() throws Exception { - Job myJob = null; - try { - Cluster cluster = new Cluster(clusterConf); - myJob = cluster.getJob(jobId); - } catch (Exception e) { - fail("Exception .." + e); - } - - assertNotNull("Job " + jobId + " is not known to the JobTracker!", - myJob); - - // Test authorization success with setJobPriority - try { - myJob.setPriority(JobPriority.HIGH); - assertEquals(myJob.getPriority(), JobPriority.HIGH); - } catch (IOException ioe) { - fail("Unexpected.. exception.. " + ioe); - } - - // Test authorization success with killJob - try { - myJob.killJob(); - } catch (IOException ioe) { - fail("Unexpected.. exception.. " + ioe); - } - - return null; - } - }); - } - - private void verifyModifyJobAsUnauthorizedUser( - final Configuration clusterConf, final JobID jobId, - String unauthorizedUser) throws IOException, InterruptedException { - UserGroupInformation unauthorizedUGI = - UserGroupInformation.createUserForTesting(unauthorizedUser, - new String[] {}); - unauthorizedUGI.doAs(new PrivilegedExceptionAction() { - @SuppressWarnings("null") - @Override - public Object run() { - Job myJob = null; - try { - Cluster cluster = new Cluster(clusterConf); - myJob = cluster.getJob(jobId); - } catch (Exception e) { - fail("Exception .." 
+ e); - } - - assertNotNull("Job " + jobId + " is not known to the JobTracker!", - myJob); - - // Tests authorization failure with killJob - try { - myJob.killJob(); - fail("AccessControlException expected.."); - } catch (IOException ioe) { - assertTrue(ioe.getMessage().contains( - " cannot perform operation " + Operation.KILL_JOB)); - } catch (InterruptedException e) { - fail("Exception .. interrupted.." + e); - } - - // Tests authorization failure with setJobPriority - try { - myJob.setPriority(JobPriority.HIGH); - fail("AccessControlException expected.."); - } catch (IOException ioe) { - assertTrue(ioe.getMessage().contains( - " cannot perform operation " + Operation.SET_JOB_PRIORITY)); - } catch (InterruptedException e) { - fail("Exception .. interrupted.." + e); - } - - return null; - } - }); - } - - private void verifyACLPersistence() throws Exception { - - // Set the job up. - final Configuration myConf = mr.createJobConf(); - myConf.set(MRJobConfig.JOB_ACL_VIEW_JOB, viewColleague + " group2"); - - // Submit the job as user1 - Job job = submitJobAsUser(myConf, jobSubmitter); - - final JobID jobId = job.getJobID(); - - // Kill the job and wait till it is actually killed so that it is written to - // CompletedJobStore - job.killJob(); - while (job.getJobState() != JobStatus.State.KILLED) { - LOG.info("Waiting for the job to be killed successfully.."); - Thread.sleep(200); - } - - // Now kill the cluster, so that the job is 'forgotten' - tearDown(); - - // Re-start the cluster - startCluster(true); - - final Configuration myNewJobConf = mr.createJobConf(); - // Now verify view-job works off CompletedJobStore - verifyViewJobAsAuthorizedUser(myNewJobConf, jobId, viewColleague); - verifyViewJobAsAuthorizedUser(myNewJobConf, jobId, qAdmin); - - // Only JobCounters is persisted on the JobStore. So test counters only. - UserGroupInformation unauthorizedUGI = - UserGroupInformation.createUserForTesting( - modifyColleague, new String[] {}); - - unauthorizedUGI.doAs(new PrivilegedExceptionAction() { - @SuppressWarnings("null") - @Override - public Object run() { - Job myJob = null; - try { - Cluster cluster = new Cluster(myNewJobConf); - myJob = cluster.getJob(jobId); - } catch (Exception e) { - fail("Exception .." + e); - } - - assertNotNull("Job " + jobId + " is not known to the JobTracker!", - myJob); - - // Tests authorization failure with getCounters - try { - myJob.getCounters(); - fail("AccessControlException expected.."); - } catch (IOException ioe) { - assertTrue(ioe.getMessage().contains( - " cannot perform operation " + Operation.VIEW_JOB_COUNTERS)); - } catch (InterruptedException e) { - fail("Exception .. interrupted.." + e); - } - - return null; - } - }); - - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/TestMapReduceLocal.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/TestMapReduceLocal.java deleted file mode 100644 index 638f5f27988..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/TestMapReduceLocal.java +++ /dev/null @@ -1,196 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapreduce; - -import java.io.BufferedReader; -import java.io.DataInputStream; -import java.io.DataOutputStream; -import java.io.IOException; -import java.io.InputStreamReader; - -import junit.framework.TestCase; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.examples.MultiFileWordCount; -import org.apache.hadoop.examples.WordCount; -import org.apache.hadoop.examples.WordCount.IntSumReducer; -import org.apache.hadoop.examples.WordCount.TokenizerMapper; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.IntWritable; -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapred.MiniMRCluster; -import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; -import org.apache.hadoop.mapreduce.lib.input.FileInputFormatCounter; -import org.apache.hadoop.mapreduce.lib.input.LineRecordReader; -import org.apache.hadoop.mapreduce.lib.input.TextInputFormat; -import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; -import org.apache.hadoop.util.ToolRunner; - -/** - * A JUnit test to test min map-reduce cluster with local file system. - */ -public class TestMapReduceLocal extends TestCase { - private static Path TEST_ROOT_DIR = - new Path(System.getProperty("test.build.data","/tmp")); - private static Configuration conf = new Configuration(); - private static FileSystem localFs; - static { - try { - localFs = FileSystem.getLocal(conf); - } catch (IOException io) { - throw new RuntimeException("problem getting local fs", io); - } - } - - public static Path writeFile(String name, String data) throws IOException { - Path file = new Path(TEST_ROOT_DIR + "/" + name); - localFs.delete(file, false); - DataOutputStream f = localFs.create(file); - f.write(data.getBytes()); - f.close(); - return file; - } - - public static String readFile(String name) throws IOException { - DataInputStream f = localFs.open(new Path(TEST_ROOT_DIR + "/" + name)); - BufferedReader b = new BufferedReader(new InputStreamReader(f)); - StringBuilder result = new StringBuilder(); - String line = b.readLine(); - while (line != null) { - result.append(line); - result.append('\n'); - line = b.readLine(); - } - b.close(); - return result.toString(); - } - - public void testWithLocal() throws Exception { - MiniMRCluster mr = null; - try { - mr = new MiniMRCluster(2, "file:///", 3); - Configuration conf = mr.createJobConf(); - runWordCount(conf); - runMultiFileWordCount(conf); - } finally { - if (mr != null) { mr.shutdown(); } - } - } - - public static class TrackingTextInputFormat extends TextInputFormat { - - public static class MonoProgressRecordReader extends LineRecordReader { - private float last = 0.0f; - private boolean progressCalled = false; - @Override - public float getProgress() throws IOException { - progressCalled = true; - final float ret = super.getProgress(); - assertTrue("getProgress decreased", ret >= last); - last = ret; - return ret; - } - @Override - public synchronized void close() throws IOException { - assertTrue("getProgress never 
called", progressCalled); - super.close(); - } - } - - @Override - public RecordReader createRecordReader( - InputSplit split, TaskAttemptContext context) { - return new MonoProgressRecordReader(); - } - } - - private void runWordCount(Configuration conf - ) throws IOException, - InterruptedException, - ClassNotFoundException { - final String COUNTER_GROUP = "org.apache.hadoop.mapreduce.TaskCounter"; - localFs.delete(new Path(TEST_ROOT_DIR + "/in"), true); - localFs.delete(new Path(TEST_ROOT_DIR + "/out"), true); - writeFile("in/part1", "this is a test\nof word count test\ntest\n"); - writeFile("in/part2", "more test"); - Job job = Job.getInstance(conf, "word count"); - job.setJarByClass(WordCount.class); - job.setMapperClass(TokenizerMapper.class); - job.setCombinerClass(IntSumReducer.class); - job.setReducerClass(IntSumReducer.class); - job.setOutputKeyClass(Text.class); - job.setOutputValueClass(IntWritable.class); - job.setInputFormatClass(TrackingTextInputFormat.class); - FileInputFormat.addInputPath(job, new Path(TEST_ROOT_DIR + "/in")); - FileOutputFormat.setOutputPath(job, new Path(TEST_ROOT_DIR + "/out")); - assertTrue(job.waitForCompletion(false)); - String out = readFile("out/part-r-00000"); - System.out.println(out); - assertEquals("a\t1\ncount\t1\nis\t1\nmore\t1\nof\t1\ntest\t4\nthis\t1\nword\t1\n", - out); - Counters ctrs = job.getCounters(); - System.out.println("Counters: " + ctrs); - long mapIn = ctrs.findCounter(FileInputFormatCounter.BYTES_READ).getValue(); - assertTrue(mapIn != 0); - long combineIn = ctrs.findCounter(COUNTER_GROUP, - "COMBINE_INPUT_RECORDS").getValue(); - long combineOut = ctrs.findCounter(COUNTER_GROUP, - "COMBINE_OUTPUT_RECORDS").getValue(); - long reduceIn = ctrs.findCounter(COUNTER_GROUP, - "REDUCE_INPUT_RECORDS").getValue(); - long mapOut = ctrs.findCounter(COUNTER_GROUP, - "MAP_OUTPUT_RECORDS").getValue(); - long reduceOut = ctrs.findCounter(COUNTER_GROUP, - "REDUCE_OUTPUT_RECORDS").getValue(); - long reduceGrps = ctrs.findCounter(COUNTER_GROUP, - "REDUCE_INPUT_GROUPS").getValue(); - long mergedMapOutputs = ctrs.findCounter(COUNTER_GROUP, - "MERGED_MAP_OUTPUTS").getValue(); - long shuffledMaps = ctrs.findCounter(COUNTER_GROUP, - "SHUFFLED_MAPS").getValue(); - assertEquals("map out = combine in", mapOut, combineIn); - assertEquals("combine out = reduce in", combineOut, reduceIn); - assertTrue("combine in > combine out", combineIn > combineOut); - assertEquals("reduce groups = reduce out", reduceGrps, reduceOut); - assertEquals("Mismatch in mergedMapOutputs", mergedMapOutputs, 2); - assertEquals("Mismatch in shuffledMaps", shuffledMaps, 2); - String group = "Random Group"; - CounterGroup ctrGrp = ctrs.getGroup(group); - assertEquals(0, ctrGrp.size()); - } - - public void runMultiFileWordCount(Configuration conf) throws Exception { - localFs.delete(new Path(TEST_ROOT_DIR + "/in"), true); - localFs.delete(new Path(TEST_ROOT_DIR + "/out"), true); - writeFile("in/part1", "this is a test\nof " + - "multi file word count test\ntest\n"); - writeFile("in/part2", "more test"); - - int ret = ToolRunner.run(conf, new MultiFileWordCount(), - new String[] {TEST_ROOT_DIR + "/in", TEST_ROOT_DIR + "/out"}); - assertTrue("MultiFileWordCount failed", ret == 0); - String out = readFile("out/part-r-00000"); - System.out.println(out); - assertEquals("a\t1\ncount\t1\nfile\t1\nis\t1\n" + - "more\t1\nmulti\t1\nof\t1\ntest\t4\nthis\t1\nword\t1\n", out); - } - -} diff --git 
a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/filecache/TestTrackerDistributedCacheManager.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/filecache/TestTrackerDistributedCacheManager.java deleted file mode 100644 index 8f44754f5ef..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/filecache/TestTrackerDistributedCacheManager.java +++ /dev/null @@ -1,1019 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapreduce.filecache; - -import java.io.File; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.security.PrivilegedExceptionAction; -import java.util.Random; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; - -import javax.security.auth.login.LoginException; - -import junit.framework.TestCase; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapred.DefaultTaskController; -import org.apache.hadoop.mapred.TaskController; -import org.apache.hadoop.mapred.TaskTracker; -import org.apache.hadoop.mapreduce.Cluster; -import org.apache.hadoop.mapreduce.Job; -import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.mapreduce.MRJobConfig; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.LocalDirAllocator; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.RawLocalFileSystem; -import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.mapreduce.filecache.TaskDistributedCacheManager; -import org.apache.hadoop.mapreduce.filecache.TrackerDistributedCacheManager; -import org.apache.hadoop.mapreduce.filecache.TrackerDistributedCacheManager.CacheStatus; -import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.ReflectionUtils; -import org.mortbay.log.Log; - -import org.mockito.Matchers; -import static org.mockito.Mockito.*; - -public class TestTrackerDistributedCacheManager extends TestCase { - - protected String TEST_ROOT_DIR = - new File(System.getProperty("test.build.data", "/tmp"), - TestTrackerDistributedCacheManager.class.getSimpleName()) - .getAbsolutePath(); - - protected File ROOT_MAPRED_LOCAL_DIR; - protected int numLocalDirs = 6; - - private static final int TEST_FILE_SIZE = 4 * 1024; // 4K - private static final int LOCAL_CACHE_LIMIT = 5 * 1024; //5K - private static final int LOCAL_CACHE_SUBDIR = 2; - protected Configuration conf; - protected Path 
firstCacheFile; - protected Path secondCacheFile; - private FileSystem fs; - - protected LocalDirAllocator localDirAllocator = - new LocalDirAllocator(MRConfig.LOCAL_DIR); - protected TaskController taskController; - - @Override - protected void setUp() throws IOException,InterruptedException { - - // Prepare the tests' root dir - File TEST_ROOT = new File(TEST_ROOT_DIR); - if (!TEST_ROOT.exists()) { - TEST_ROOT.mkdirs(); - } - - conf = new Configuration(); - conf.set(FileSystem.FS_DEFAULT_NAME_KEY, "file:///"); - fs = FileSystem.get(conf); - - // This test suite will fail if any ancestor directory of the - // test directory is not world-searchable (ie +x). - // We prefer to fail the test in an obvious manner up front - // during setUp() rather than in a subtle way later. - assertTrue("Test root directory " + TEST_ROOT + " and all of its " + - "parent directories must have a+x permissions", - ClientDistributedCacheManager.ancestorsHaveExecutePermissions( - fs, new Path(TEST_ROOT.toString()))); - - // Prepare the tests' mapred-local-dir - ROOT_MAPRED_LOCAL_DIR = new File(TEST_ROOT_DIR, "mapred/local"); - ROOT_MAPRED_LOCAL_DIR.mkdirs(); - - String []localDirs = new String[numLocalDirs]; - for (int i = 0; i < numLocalDirs; i++) { - File localDir = new File(ROOT_MAPRED_LOCAL_DIR, "0_" + i); - localDirs[i] = localDir.getPath(); - localDir.mkdir(); - } - - conf.setStrings(MRConfig.LOCAL_DIR, localDirs); - Class taskControllerClass = conf.getClass( - TTConfig.TT_TASK_CONTROLLER, DefaultTaskController.class, - TaskController.class); - taskController = (TaskController) ReflectionUtils.newInstance( - taskControllerClass, conf); - - // setup permissions for mapred local dir - taskController.setup(); - - // Create the temporary cache files to be used in the tests. - firstCacheFile = new Path(TEST_ROOT_DIR, "firstcachefile"); - secondCacheFile = new Path(TEST_ROOT_DIR, "secondcachefile"); - createPrivateTempFile(firstCacheFile); - createPrivateTempFile(secondCacheFile); - } - - protected void refreshConf(Configuration conf) throws IOException { - taskController.setConf(conf); - taskController.setup(); - } - - /** - * Whether the test can run on the machine - * - * @return true if test can run on the machine, false otherwise - */ - protected boolean canRun() { - return true; - } - - /** - * This is the typical flow for using the DistributedCache classes. - * - * @throws IOException - * @throws LoginException - */ - public void testManagerFlow() throws IOException, LoginException { - if (!canRun()) { - return; - } - - // ****** Imitate JobClient code - // Configures a task/job with both a regular file and a "classpath" file. - Configuration subConf = new Configuration(conf); - String userName = getJobOwnerName(); - subConf.set(MRJobConfig.USER_NAME, userName); - DistributedCache.addCacheFile(firstCacheFile.toUri(), subConf); - DistributedCache.addFileToClassPath(secondCacheFile, subConf); - ClientDistributedCacheManager.determineTimestamps(subConf); - ClientDistributedCacheManager.determineCacheVisibilities(subConf); - // ****** End of imitating JobClient code - - Path jobFile = new Path(TEST_ROOT_DIR, "job.xml"); - FileOutputStream os = new FileOutputStream(new File(jobFile.toString())); - subConf.writeXml(os); - os.close(); - - // ****** Imitate TaskRunner code. 
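For reference, the JobClient-side cache setup imitated above survives in the MRv2 Job API; a minimal sketch under that assumption (the paths are hypothetical placeholders, not values taken from the removed test):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;

public class CacheClientSketch {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "cache-example");
    // A regular cache file, localized for each task.
    job.addCacheFile(new URI("hdfs:///tmp/firstcachefile"));
    // A "classpath" file: cached and also added to the task classpath.
    job.addFileToClassPath(new Path("/tmp/secondcachefile"));
    // Timestamps and visibilities are determined during job submission in MRv2,
    // where the removed test invoked ClientDistributedCacheManager directly.
  }
}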
- TrackerDistributedCacheManager manager = - new TrackerDistributedCacheManager(conf, taskController); - TaskDistributedCacheManager handle = - manager.newTaskDistributedCacheManager(subConf); - assertNull(null, DistributedCache.getLocalCacheFiles(subConf)); - File workDir = new File(new Path(TEST_ROOT_DIR, "workdir").toString()); - handle.setup(localDirAllocator, workDir, TaskTracker - .getPrivateDistributedCacheDir(userName), - TaskTracker.getPublicDistributedCacheDir()); - // ****** End of imitating TaskRunner code - - Path[] localCacheFiles = DistributedCache.getLocalCacheFiles(subConf); - assertNotNull(null, localCacheFiles); - assertEquals(2, localCacheFiles.length); - Path cachedFirstFile = localCacheFiles[0]; - Path cachedSecondFile = localCacheFiles[1]; - assertFileLengthEquals(firstCacheFile, cachedFirstFile); - assertFalse("Paths should be different.", - firstCacheFile.equals(cachedFirstFile)); - - assertEquals(1, handle.getClassPaths().size()); - assertEquals(cachedSecondFile.toString(), handle.getClassPaths().get(0)); - - checkFilePermissions(localCacheFiles); - - // Cleanup - handle.release(); - manager.purgeCache(); - assertFalse(pathToFile(cachedFirstFile).exists()); - } - - /** - * This DistributedCacheManager fails in localizing firstCacheFile. - */ - public class FakeTrackerDistributedCacheManager extends - TrackerDistributedCacheManager { - public FakeTrackerDistributedCacheManager(Configuration conf) - throws IOException { - super(conf, taskController); - } - - @Override - Path localizeCache(Configuration conf, URI cache, long confFileStamp, - CacheStatus cacheStatus, boolean isArchive, boolean isPublic) - throws IOException { - if (cache.equals(firstCacheFile.toUri())) { - throw new IOException("fake fail"); - } - return super.localizeCache(conf, cache, confFileStamp, cacheStatus, - isArchive, isPublic); - } - } - - public void testReferenceCount() throws IOException, LoginException, - URISyntaxException, InterruptedException { - if (!canRun()) { - return; - } - TrackerDistributedCacheManager manager = - new FakeTrackerDistributedCacheManager(conf); - String userName = getJobOwnerName(); - File workDir = new File(new Path(TEST_ROOT_DIR, "workdir").toString()); - - // Configures a job with a regular file - Job job1 = Job.getInstance(conf); - job1.setUser(userName); - job1.addCacheFile(secondCacheFile.toUri()); - Configuration conf1 = job1.getConfiguration(); - ClientDistributedCacheManager.determineTimestamps(conf1); - ClientDistributedCacheManager.determineCacheVisibilities(conf1); - - // Task localizing for first job - TaskDistributedCacheManager handle = manager - .newTaskDistributedCacheManager(conf1); - handle.setup(localDirAllocator, workDir, TaskTracker - .getPrivateDistributedCacheDir(userName), - TaskTracker.getPublicDistributedCacheDir()); - handle.release(); - for (TaskDistributedCacheManager.CacheFile c : handle.getCacheFiles()) { - assertEquals(0, manager.getReferenceCount(c.uri, conf1, c.timestamp, - c.owner, false)); - } - - Path thirdCacheFile = new Path(TEST_ROOT_DIR, "thirdcachefile"); - createPrivateTempFile(thirdCacheFile); - - // Configures another job with three regular files. 
- Job job2 = Job.getInstance(conf); - job2.setUser(userName); - // add a file that would get failed to localize - job2.addCacheFile(firstCacheFile.toUri()); - // add a file that is already localized by different job - job2.addCacheFile(secondCacheFile.toUri()); - // add a file that is never localized - job2.addCacheFile(thirdCacheFile.toUri()); - Configuration conf2 = job2.getConfiguration(); - ClientDistributedCacheManager.determineTimestamps(conf2); - ClientDistributedCacheManager.determineCacheVisibilities(conf2); - - // Task localizing for second job - // localization for the "firstCacheFile" will fail. - handle = manager.newTaskDistributedCacheManager(conf2); - Throwable th = null; - try { - handle.setup(localDirAllocator, workDir, TaskTracker - .getPrivateDistributedCacheDir(userName), - TaskTracker.getPublicDistributedCacheDir()); - } catch (IOException e) { - th = e; - Log.info("Exception during setup", e); - } - assertNotNull(th); - assertTrue(th.getMessage().contains("fake fail")); - handle.release(); - th = null; - for (TaskDistributedCacheManager.CacheFile c : handle.getCacheFiles()) { - try { - assertEquals(0, manager.getReferenceCount(c.uri, conf2, c.timestamp, - c.owner, false)); - } catch (IOException ie) { - th = ie; - Log.info("Exception getting reference count for " + c.uri, ie); - } - } - assertNotNull(th); - assertTrue(th.getMessage().contains(thirdCacheFile.getName())); - fs.delete(thirdCacheFile, false); - } - - /** - * Tests that localization of distributed cache file happens in the desired - * directory - * @throws IOException - * @throws LoginException - */ - public void testPublicPrivateCache() - throws IOException, LoginException, InterruptedException { - if (!canRun()) { - return; - } - checkLocalizedPath(true); - checkLocalizedPath(false); - } - - public void testPrivateCacheForMultipleUsers() - throws IOException, LoginException, InterruptedException{ - if (!canRun()) { - return; - } - // Try to initialize the distributed cache for the same file on the - // HDFS, for two different users. - // First initialize as the user running the test, then as some other user. - // Although the same cache file is used in both, the localization - // should happen twice. - - UserGroupInformation ugi = UserGroupInformation.getLoginUser(); - Path p = ugi.doAs(new PrivilegedExceptionAction() { - public Path run() - throws IOException, LoginException, InterruptedException { - return checkLocalizedPath(false); - } - }); - String distCacheDir = TaskTracker.getPrivateDistributedCacheDir( - ugi.getShortUserName()); - assertTrue("Cache file didn't get localized in the expected directory. " + - "Expected localization to happen within " + - ROOT_MAPRED_LOCAL_DIR + "/" + distCacheDir + - ", but was localized at " + - p, p.toString().contains(distCacheDir)); - - ugi = UserGroupInformation.createRemoteUser("fooUserInMachine"); - p = ugi.doAs(new PrivilegedExceptionAction() { - public Path run() - throws IOException, LoginException, InterruptedException { - return checkLocalizedPath(false); - } - }); - distCacheDir = TaskTracker.getPrivateDistributedCacheDir( - ugi.getShortUserName()); - assertTrue("Cache file didn't get localized in the expected directory. 
" + - "Expected localization to happen within " + - ROOT_MAPRED_LOCAL_DIR + "/" + distCacheDir + - ", but was localized at " + - p, p.toString().contains(distCacheDir)); - - } - - private Path checkLocalizedPath(boolean visibility) - throws IOException, LoginException, InterruptedException { - TrackerDistributedCacheManager manager = - new TrackerDistributedCacheManager(conf, taskController); - String userName = getJobOwnerName(); - File workDir = new File(TEST_ROOT_DIR, "workdir"); - Path cacheFile = new Path(TEST_ROOT_DIR, "fourthcachefile"); - if (visibility) { - createPublicTempFile(cacheFile); - } else { - createPrivateTempFile(cacheFile); - } - - Job job1 = Job.getInstance(conf); - job1.setUser(userName); - job1.addCacheFile(cacheFile.toUri()); - Configuration conf1 = job1.getConfiguration(); - ClientDistributedCacheManager.determineTimestamps(conf1); - ClientDistributedCacheManager.determineCacheVisibilities(conf1); - - // Task localizing for job - TaskDistributedCacheManager handle = manager - .newTaskDistributedCacheManager(conf1); - handle.setup(localDirAllocator, workDir, TaskTracker - .getPrivateDistributedCacheDir(userName), - TaskTracker.getPublicDistributedCacheDir()); - TaskDistributedCacheManager.CacheFile c = handle.getCacheFiles().get(0); - String distCacheDir; - if (visibility) { - distCacheDir = TaskTracker.getPublicDistributedCacheDir(); - } else { - distCacheDir = TaskTracker.getPrivateDistributedCacheDir(userName); - } - Path localizedPath = - manager.getLocalCache(cacheFile.toUri(), conf1, distCacheDir, - fs.getFileStatus(cacheFile), false, - c.timestamp, new Path(TEST_ROOT_DIR), false, - visibility); - assertTrue("Cache file didn't get localized in the expected directory. " + - "Expected localization to happen within " + - ROOT_MAPRED_LOCAL_DIR + "/" + distCacheDir + - ", but was localized at " + - localizedPath, localizedPath.toString().contains(distCacheDir)); - if (visibility) { - checkPublicFilePermissions(new Path[]{localizedPath}); - } else { - checkFilePermissions(new Path[]{localizedPath}); - } - return localizedPath; - } - - /** - * Check proper permissions on the cache files - * - * @param localCacheFiles - * @throws IOException - */ - protected void checkFilePermissions(Path[] localCacheFiles) - throws IOException { - // All the files should have executable permissions on them. - for (Path p : localCacheFiles) { - assertTrue("Cache file is not executable!", new File(p - .toUri().getPath()).canExecute()); - } - } - - /** - * Check permissions on the public cache files - * - * @param localCacheFiles - * @throws IOException - */ - private void checkPublicFilePermissions(Path[] localCacheFiles) - throws IOException { - checkPublicFilePermissions(fs, localCacheFiles); - } - - /** - * Verify the permissions for a file localized as a public distributed - * cache file - * @param fs The Local FileSystem used to get the permissions - * @param localCacheFiles The list of files whose permissions should be - * verified. 
- * @throws IOException - */ - public static void checkPublicFilePermissions(FileSystem fs, - Path[] localCacheFiles) throws IOException { - // All the files should have read and executable permissions for others - for (Path p : localCacheFiles) { - FsPermission perm = fs.getFileStatus(p).getPermission(); - assertTrue("cache file is not readable / executable by owner: perm=" - + perm.getUserAction(), perm.getUserAction() - .implies(FsAction.READ_EXECUTE)); - assertTrue("cache file is not readable / executable by group: perm=" - + perm.getGroupAction(), perm.getGroupAction() - .implies(FsAction.READ_EXECUTE)); - assertTrue("cache file is not readable / executable by others: perm=" - + perm.getOtherAction(), perm.getOtherAction() - .implies(FsAction.READ_EXECUTE)); - } - } - - /** - * Verify the ownership for files localized as a public distributed cache - * file. - * @param fs The Local FileSystem used to get the ownership - * @param localCacheFiles THe list of files whose ownership should be - * verified - * @param owner The owner of the files - * @param group The group owner of the files. - * @throws IOException - */ - public static void checkPublicFileOwnership(FileSystem fs, - Path[] localCacheFiles, String owner, String group) - throws IOException { - for (Path p: localCacheFiles) { - assertEquals(owner, fs.getFileStatus(p).getOwner()); - assertEquals(group, fs.getFileStatus(p).getGroup()); - } - } - - public static class MyTrackerDistributedCacheManager - extends TrackerDistributedCacheManager { - - public Throwable caught = null; - public CountDownLatch done = new CountDownLatch(1); - - public MyTrackerDistributedCacheManager(Configuration conf, - TaskController controller) throws IOException { - super(conf, controller); - this.baseDirManager = new TrackerDistributedCacheManager.BaseDirManager() { - - @Override - public void checkAndCleanup() throws IOException { - throw new RuntimeException("This is a test!!!!"); - } - }; - - this.cleanupThread = new TestCleanupThread(conf); - } - - class TestCleanupThread extends TrackerDistributedCacheManager.CleanupThread { - - public TestCleanupThread(Configuration conf) { - super(conf); - } - - @Override - protected void exitTaskTracker(Throwable t) { - caught = t; - this.stopRunning(); - done.countDown(); - } - } - } - - public void testRuntimeExceptionInCleanup() throws Exception { - if(!canRun()) { - return; - } - - Configuration conf2 = new Configuration(conf); - conf2.set("mapred.local.dir", ROOT_MAPRED_LOCAL_DIR.toString()); - conf2.setLong("local.cache.size", LOCAL_CACHE_LIMIT); - conf2.setLong("mapreduce.tasktracker.distributedcache.checkperiod", 0); // 0 ms (Don't sleep) - - refreshConf(conf2); - MyTrackerDistributedCacheManager manager = - new MyTrackerDistributedCacheManager(conf2, taskController); - manager.startCleanupThread(); - - assertTrue(manager.done.await(200l, TimeUnit.MILLISECONDS)); - assertNotNull(manager.caught); - assertTrue(manager.caught instanceof RuntimeException); - } - - protected String getJobOwnerName() throws IOException { - return UserGroupInformation.getCurrentUser().getUserName(); - } - - private long getFileStamp(Path file) throws IOException { - FileStatus fileStatus = fs.getFileStatus(file); - return fileStatus.getModificationTime(); - } - - public static final long CACHE_DELETE_PERIOD_MS = 100l; - - /** test delete cache */ - public void testLRUDeleteCache() throws Exception { - if (!canRun()) { - return; - } - // This test needs MRConfig.LOCAL_DIR to be single directory - // instead of four, because it 
assumes that both - // firstcachefile and secondcachefile will be localized on same directory - // so that second localization triggers deleteCache. - // If MRConfig.LOCAL_DIR is four directories, second localization might not - // trigger deleteCache, if it is localized in different directory. - Configuration conf2 = new Configuration(conf); - conf2.set(MRConfig.LOCAL_DIR, ROOT_MAPRED_LOCAL_DIR.toString()); - //Make it larger then expected - conf2.setLong(TTConfig.TT_LOCAL_CACHE_SIZE, 21 * 1024l); - conf2.setLong(TTConfig.TT_LOCAL_CACHE_SUBDIRS_LIMIT, 3); - //The goal is to get down to 15.75K and 2 dirs - conf2.setFloat(TTConfig.TT_LOCAL_CACHE_KEEP_AROUND_PCT, 0.75f); - conf2.setLong(TTConfig.TT_DISTRIBUTED_CACHE_CHECK_PERIOD, CACHE_DELETE_PERIOD_MS); - refreshConf(conf2); - TrackerDistributedCacheManager manager = - new TrackerDistributedCacheManager(conf2, taskController); - manager.startCleanupThread(); - FileSystem localfs = FileSystem.getLocal(conf2); - String userName = getJobOwnerName(); - conf2.set(MRJobConfig.USER_NAME, userName); - - //Here we are testing the LRU. In this case we will add in 4 cache entries - // 2 of them are 8k each and 2 of them are very small. We want to verify - // That they are deleted in LRU order. - // So what we will do is add in the two large files first, 1 then 2, and - // then one of the small ones 3. We will then release them in opposite - // order 3, 2, 1. - // - // Finally we will add in the last small file. This last file should push - // us over the 3 entry limit to trigger a cleanup. So LRU order is 3, 2, 1 - // And we will only delete 2 entries so that should leave 1 un touched - // but 3 and 2 deleted - - Path thirdCacheFile = new Path(TEST_ROOT_DIR, "thirdcachefile"); - Path fourthCacheFile = new Path(TEST_ROOT_DIR, "fourthcachefile"); - // Adding two more small files, so it triggers the number of sub directory - // limit but does not trigger the file size limit. - createTempFile(thirdCacheFile, 1); - createTempFile(fourthCacheFile, 1); - - Path firstLocalCache = manager.getLocalCache(firstCacheFile.toUri(), conf2, - TaskTracker.getPrivateDistributedCacheDir(userName), - fs.getFileStatus(firstCacheFile), false, - getFileStamp(firstCacheFile), new Path(TEST_ROOT_DIR), false, false); - - Path secondLocalCache = manager.getLocalCache(secondCacheFile.toUri(), conf2, - TaskTracker.getPrivateDistributedCacheDir(userName), - fs.getFileStatus(secondCacheFile), false, - getFileStamp(secondCacheFile), new Path(TEST_ROOT_DIR), false, false); - - Path thirdLocalCache = manager.getLocalCache(thirdCacheFile.toUri(), conf2, - TaskTracker.getPrivateDistributedCacheDir(userName), - fs.getFileStatus(thirdCacheFile), false, - getFileStamp(thirdCacheFile), new Path(TEST_ROOT_DIR), false, false); - - manager.releaseCache(thirdCacheFile.toUri(), conf2, - getFileStamp(thirdCacheFile), - TrackerDistributedCacheManager.getLocalizedCacheOwner(false), false); - - manager.releaseCache(secondCacheFile.toUri(), conf2, - getFileStamp(secondCacheFile), - TrackerDistributedCacheManager.getLocalizedCacheOwner(false), false); - - manager.releaseCache(firstCacheFile.toUri(), conf2, - getFileStamp(firstCacheFile), - TrackerDistributedCacheManager.getLocalizedCacheOwner(false), false); - - - // Getting the fourth cache will make the number of sub directories becomes - // 4 which is greater than 3. So the released cache will be deleted. 
- manager.getLocalCache(fourthCacheFile.toUri(), conf2, - TaskTracker.getPrivateDistributedCacheDir(userName), - fs.getFileStatus(fourthCacheFile), false, - getFileStamp(fourthCacheFile), new Path(TEST_ROOT_DIR), false, false); - - checkCacheDeletion(localfs, secondLocalCache, "DistributedCache failed " + - "deleting second cache LRU order"); - - checkCacheDeletion(localfs, thirdLocalCache, - "DistributedCache failed deleting third" + - " cache LRU order."); - - checkCacheNOTDeletion(localfs, firstLocalCache, "DistributedCache failed " + - "Deleted first cache LRU order."); - - checkCacheNOTDeletion(localfs, fourthCacheFile, "DistributedCache failed " + - "Deleted fourth cache LRU order."); - // Clean up the files created in this test - new File(thirdCacheFile.toString()).delete(); - new File(fourthCacheFile.toString()).delete(); - manager.stopCleanupThread(); - } - - public void testSameNameFileArchiveCache() throws IOException, - URISyntaxException, InterruptedException { - if (!canRun()) { - return; - } - TrackerDistributedCacheManager manager = - spy(new TrackerDistributedCacheManager(conf, taskController)); - URI rsrc = new URI("file://foo/bar/yak"); - Path cacheDir = new Path("file:///localcache"); - Path archivePath = new Path(cacheDir, "archive"); - Path filePath = new Path(cacheDir, "file"); - doReturn(archivePath).when(manager).localizeCache(eq(conf), eq(rsrc), - anyLong(), Matchers. anyObject(), eq(true), anyBoolean()); - doReturn(filePath).when(manager).localizeCache(eq(conf), eq(rsrc), - anyLong(), Matchers. anyObject(), eq(false), anyBoolean()); - // could fail, but check match instead - doNothing().when(manager).checkCacheStatusValidity( - Matchers. anyObject(), eq(rsrc), anyLong(), - Matchers. anyObject(), Matchers. anyObject(), - anyBoolean()); - // localizeCache initializes mtime of cached rsrc; set to uninitialized val - doReturn(-1L).when(manager).checkStampSinceJobStarted( - Matchers. anyObject(), - Matchers. anyObject(), eq(rsrc), anyLong(), - Matchers. anyObject(), Matchers. anyObject()); - doReturn(-1L).when(manager).getTimestamp( - Matchers. anyObject(), eq(rsrc)); - FileStatus rsrcStatus = mock(FileStatus.class); - when(rsrcStatus.getLen()).thenReturn(4344L); - - Path localizedPathForFile = - manager.getLocalCache(rsrc, conf, "sub", rsrcStatus, false, 20L, - new Path("file:///tmp"), false, true); - Path localizedPathForArchive = - manager.getLocalCache(rsrc, conf, "sub", rsrcStatus, true, 20L, - new Path("file:///tmp"), false, true); - assertNotSame("File and Archive resolve to the same path: " - + localizedPathForFile + ". Should differ.", localizedPathForFile, - localizedPathForArchive); - } - - /** test delete cache */ - public void testDeleteCache() throws Exception { - if (!canRun()) { - return; - } - // This test needs MRConfig.LOCAL_DIR to be single directory - // instead of four, because it assumes that both - // firstcachefile and secondcachefile will be localized on same directory - // so that second localization triggers deleteCache. - // If MRConfig.LOCAL_DIR is four directories, second localization might not - // trigger deleteCache, if it is localized in different directory. 
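The eviction rule that testLRUDeleteCache and testDeleteCache exercise can be summarized with a small toy model; this is purely illustrative plain Java, not the TrackerDistributedCacheManager implementation:

import java.util.ArrayDeque;
import java.util.Deque;

// Toy model of the cleanup behaviour the tests rely on: when either the total
// cached size or the number of cache subdirectories exceeds its limit, entries
// that have been released are deleted in least-recently-released order.
class ToyCacheDirManager {
  private final long sizeLimit;
  private final int subdirLimit;
  private long totalSize;
  private int entries;
  private final Deque<Long> releasedSizes = new ArrayDeque<>(); // oldest first

  ToyCacheDirManager(long sizeLimit, int subdirLimit) {
    this.sizeLimit = sizeLimit;
    this.subdirLimit = subdirLimit;
  }

  void localize(long size) { totalSize += size; entries++; }
  void release(long size)  { releasedSizes.addLast(size); }

  // Called periodically by a cleanup thread in the real implementation.
  void checkAndCleanup() {
    while ((totalSize > sizeLimit || entries > subdirLimit)
        && !releasedSizes.isEmpty()) {
      totalSize -= releasedSizes.removeFirst(); // evict oldest released entry
      entries--;
    }
  }
}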
- Configuration conf2 = new Configuration(conf); - conf2.set(MRConfig.LOCAL_DIR, ROOT_MAPRED_LOCAL_DIR.toString()); - conf2.setLong(TTConfig.TT_LOCAL_CACHE_SIZE, LOCAL_CACHE_LIMIT); - conf2.setLong(TTConfig.TT_LOCAL_CACHE_SUBDIRS_LIMIT, LOCAL_CACHE_SUBDIR); - conf2.setLong(TTConfig.TT_DISTRIBUTED_CACHE_CHECK_PERIOD, CACHE_DELETE_PERIOD_MS); - refreshConf(conf2); - TrackerDistributedCacheManager manager = - new TrackerDistributedCacheManager(conf2, taskController); - manager.startCleanupThread(); - FileSystem localfs = FileSystem.getLocal(conf2); - String userName = getJobOwnerName(); - conf2.set(MRJobConfig.USER_NAME, userName); - - // We first test the size limit - Path localCache = manager.getLocalCache(firstCacheFile.toUri(), conf2, - TaskTracker.getPrivateDistributedCacheDir(userName), - fs.getFileStatus(firstCacheFile), false, - getFileStamp(firstCacheFile), new Path(TEST_ROOT_DIR), false, false); - manager.releaseCache(firstCacheFile.toUri(), conf2, - getFileStamp(firstCacheFile), - TrackerDistributedCacheManager.getLocalizedCacheOwner(false), false); - //in above code,localized a file of size 4K and then release the cache - // which will cause the cache be deleted when the limit goes out. - // The below code localize another cache which's designed to - //sweep away the first cache. - manager.getLocalCache(secondCacheFile.toUri(), conf2, - TaskTracker.getPrivateDistributedCacheDir(userName), - fs.getFileStatus(secondCacheFile), false, - getFileStamp(secondCacheFile), new Path(TEST_ROOT_DIR), false, false); - checkCacheDeletion(localfs, localCache, "DistributedCache failed " + - "deleting old cache when the cache store is full."); - // Now we test the number of sub directories limit - // Create the temporary cache files to be used in the tests. - Path thirdCacheFile = new Path(TEST_ROOT_DIR, "thirdcachefile"); - Path fourthCacheFile = new Path(TEST_ROOT_DIR, "fourthcachefile"); - // Adding two more small files, so it triggers the number of sub directory - // limit but does not trigger the file size limit. - createTempFile(thirdCacheFile, 1); - createTempFile(fourthCacheFile, 1); - Path thirdLocalCache = manager.getLocalCache(thirdCacheFile.toUri(), conf2, - TaskTracker.getPrivateDistributedCacheDir(userName), - fs.getFileStatus(thirdCacheFile), false, - getFileStamp(thirdCacheFile), new Path(TEST_ROOT_DIR), false, false); - // Release the third cache so that it can be deleted while sweeping - manager.releaseCache(thirdCacheFile.toUri(), conf2, - getFileStamp(thirdCacheFile), - TrackerDistributedCacheManager.getLocalizedCacheOwner(false), false); - // Getting the fourth cache will make the number of sub directories becomes - // 3 which is greater than 2. So the released cache will be deleted. - manager.getLocalCache(fourthCacheFile.toUri(), conf2, - TaskTracker.getPrivateDistributedCacheDir(userName), - fs.getFileStatus(fourthCacheFile), false, - getFileStamp(fourthCacheFile), new Path(TEST_ROOT_DIR), false, false); - checkCacheDeletion(localfs, thirdLocalCache, - "DistributedCache failed deleting old" + - " cache when the cache exceeds the number of sub directories limit."); - // Clean up the files created in this test - new File(thirdCacheFile.toString()).delete(); - new File(fourthCacheFile.toString()).delete(); - manager.stopCleanupThread(); - } - - /** - * Do a simple check to see if the file has NOT been deleted. 
- */ - private void checkCacheNOTDeletion(FileSystem fs, Path cache, String msg) - throws Exception { - TimeUnit.MILLISECONDS.sleep(3 * CACHE_DELETE_PERIOD_MS); - assertTrue(msg, fs.exists(cache)); - } - - /** - * Periodically checks if a file is there, return if the file is no longer - * there. Fails the test if a files is there for 30 seconds. - */ - private void checkCacheDeletion(FileSystem fs, Path cache, String msg) - throws Exception { - // Check every 100ms to see if the cache is deleted - boolean cacheExists = true; - for (int i = 0; i < 300; i++) { - if (!fs.exists(cache)) { - cacheExists = false; - break; - } - TimeUnit.MILLISECONDS.sleep(CACHE_DELETE_PERIOD_MS); - } - // If the cache is still there after 5 minutes, test fails. - assertFalse(msg, cacheExists); - } - - public void testFileSystemOtherThanDefault() throws Exception { - if (!canRun()) { - return; - } - TrackerDistributedCacheManager manager = - new TrackerDistributedCacheManager(conf, taskController); - conf.set("fs.fakefile.impl", conf.get("fs.file.impl")); - String userName = getJobOwnerName(); - conf.set(MRJobConfig.USER_NAME, userName); - Path fileToCache = new Path("fakefile:///" - + firstCacheFile.toUri().getPath()); - Path result = manager.getLocalCache(fileToCache.toUri(), conf, - TaskTracker.getPrivateDistributedCacheDir(userName), - fs.getFileStatus(firstCacheFile), false, - getFileStamp(firstCacheFile), - new Path(TEST_ROOT_DIR), false, false); - assertNotNull("DistributedCache cached file on non-default filesystem.", - result); - } - - static void createTempFile(Path p) throws IOException { - createTempFile(p, TEST_FILE_SIZE); - } - - static void createTempFile(Path p, int size) throws IOException { - File f = new File(p.toString()); - FileOutputStream os = new FileOutputStream(f); - byte[] toWrite = new byte[size]; - new Random().nextBytes(toWrite); - os.write(toWrite); - os.close(); - FileSystem.LOG.info("created: " + p + ", size=" + size); - } - - static void createPublicTempFile(Path p) - throws IOException, InterruptedException { - createTempFile(p); - FileUtil.chmod(p.toString(), "0777",true); - } - - static void createPrivateTempFile(Path p) - throws IOException, InterruptedException { - createTempFile(p); - FileUtil.chmod(p.toString(), "0770",true); - } - - @Override - protected void tearDown() throws IOException { - new File(firstCacheFile.toString()).delete(); - new File(secondCacheFile.toString()).delete(); - FileUtil.fullyDelete(new File(TEST_ROOT_DIR)); - } - - protected void assertFileLengthEquals(Path a, Path b) - throws FileNotFoundException { - assertEquals("File sizes mismatch.", - pathToFile(a).length(), pathToFile(b).length()); - } - - protected File pathToFile(Path p) { - return new File(p.toString()); - } - - public static class FakeFileSystem extends RawLocalFileSystem { - private long increment = 0; - public FakeFileSystem() { - super(); - } - - public FileStatus getFileStatus(Path p) throws IOException { - File f = pathToFile(p); - return new FileStatus(f.length(), f.isDirectory(), 1, 128, - f.lastModified() + increment, makeQualified(new Path(f.getPath()))); - } - - void advanceClock(long millis) { - increment += millis; - } - } - - public void testFreshness() throws Exception { - if (!canRun()) { - return; - } - Configuration myConf = new Configuration(conf); - myConf.set(FileSystem.FS_DEFAULT_NAME_KEY, "refresh:///"); - myConf.setClass("fs.refresh.impl", FakeFileSystem.class, FileSystem.class); - String userName = getJobOwnerName(); - - TrackerDistributedCacheManager manager = 
- new TrackerDistributedCacheManager(myConf, taskController); - // ****** Imitate JobClient code - // Configures a task/job with both a regular file and a "classpath" file. - Configuration subConf = new Configuration(myConf); - subConf.set(MRJobConfig.USER_NAME, userName); - DistributedCache.addCacheFile(firstCacheFile.toUri(), subConf); - ClientDistributedCacheManager.determineTimestamps(subConf); - ClientDistributedCacheManager.determineCacheVisibilities(subConf); - // ****** End of imitating JobClient code - - // ****** Imitate TaskRunner code. - TaskDistributedCacheManager handle = - manager.newTaskDistributedCacheManager(subConf); - assertNull(null, DistributedCache.getLocalCacheFiles(subConf)); - File workDir = new File(new Path(TEST_ROOT_DIR, "workdir").toString()); - handle.setup(localDirAllocator, workDir, TaskTracker - .getPrivateDistributedCacheDir(userName), - TaskTracker.getPublicDistributedCacheDir()); - // ****** End of imitating TaskRunner code - - Path[] localCacheFiles = DistributedCache.getLocalCacheFiles(subConf); - assertNotNull(null, localCacheFiles); - assertEquals(1, localCacheFiles.length); - Path cachedFirstFile = localCacheFiles[0]; - assertFileLengthEquals(firstCacheFile, cachedFirstFile); - assertFalse("Paths should be different.", - firstCacheFile.equals(cachedFirstFile)); - // release - handle.release(); - - // change the file timestamp - FileSystem fs = FileSystem.get(myConf); - ((FakeFileSystem)fs).advanceClock(1); - - // running a task of the same job - Throwable th = null; - try { - handle.setup(localDirAllocator, workDir, TaskTracker - .getPrivateDistributedCacheDir(userName), TaskTracker.getPublicDistributedCacheDir()); - } catch (IOException ie) { - th = ie; - } - assertNotNull("Throwable is null", th); - assertTrue("Exception message does not match", - th.getMessage().contains("has changed on HDFS since job started")); - // release - handle.release(); - - // running a task of the same job on another TaskTracker which has never - // initialized the cache - TrackerDistributedCacheManager manager2 = - new TrackerDistributedCacheManager(myConf, taskController); - TaskDistributedCacheManager handle2 = - manager2.newTaskDistributedCacheManager(subConf); - File workDir2 = new File(new Path(TEST_ROOT_DIR, "workdir2").toString()); - th = null; - try { - handle2.setup(localDirAllocator, workDir2, TaskTracker - .getPrivateDistributedCacheDir(userName), - TaskTracker.getPublicDistributedCacheDir()); - } catch (IOException ie) { - th = ie; - } - assertNotNull("Throwable is null", th); - assertTrue("Exception message does not match", - th.getMessage().contains("has changed on HDFS since job started")); - // release - handle.release(); - - // submit another job - Configuration subConf2 = new Configuration(myConf); - subConf2.set(MRJobConfig.USER_NAME, userName); - DistributedCache.addCacheFile(firstCacheFile.toUri(), subConf2); - ClientDistributedCacheManager.determineTimestamps(subConf2); - ClientDistributedCacheManager.determineCacheVisibilities(subConf2); - - handle = - manager.newTaskDistributedCacheManager(subConf2); - handle.setup(localDirAllocator, workDir, TaskTracker - .getPrivateDistributedCacheDir(userName), TaskTracker.getPublicDistributedCacheDir()); - Path[] localCacheFiles2 = DistributedCache.getLocalCacheFiles(subConf2); - assertNotNull(null, localCacheFiles2); - assertEquals(1, localCacheFiles2.length); - Path cachedFirstFile2 = localCacheFiles2[0]; - assertFileLengthEquals(firstCacheFile, cachedFirstFile2); - assertFalse("Paths should be 
different.", - firstCacheFile.equals(cachedFirstFile2)); - - // assert that two localizations point to different paths - assertFalse("two jobs with different timestamps did not localize" + - " in different paths", cachedFirstFile.equals(cachedFirstFile2)); - // release - handle.release(); - } - - /** - * Localize a file. After localization is complete, create a file, "myFile", - * under the directory where the file is localized and ensure that it has - * permissions different from what is set by default. Then, localize another - * file. Verify that "myFile" has the right permissions. - * @throws Exception - */ - public void testCustomPermissions() throws Exception { - if (!canRun()) { - return; - } - String userName = getJobOwnerName(); - conf.set(MRJobConfig.USER_NAME, userName); - TrackerDistributedCacheManager manager = - new TrackerDistributedCacheManager(conf, taskController); - FileSystem localfs = FileSystem.getLocal(conf); - - Path[] localCache = new Path[2]; - localCache[0] = manager.getLocalCache(firstCacheFile.toUri(), conf, - TaskTracker.getPrivateDistributedCacheDir(userName), - fs.getFileStatus(firstCacheFile), false, - getFileStamp(firstCacheFile), new Path(TEST_ROOT_DIR), false, false); - FsPermission myPermission = new FsPermission((short)0600); - Path myFile = new Path(localCache[0].getParent(), "myfile.txt"); - if (FileSystem.create(localfs, myFile, myPermission) == null) { - throw new IOException("Could not create " + myFile); - } - try { - localCache[1] = manager.getLocalCache(secondCacheFile.toUri(), conf, - TaskTracker.getPrivateDistributedCacheDir(userName), - fs.getFileStatus(secondCacheFile), false, - getFileStamp(secondCacheFile), new Path(TEST_ROOT_DIR), false, - false); - FileStatus stat = localfs.getFileStatus(myFile); - assertTrue(stat.getPermission().equals(myPermission)); - // validate permissions of localized files. - checkFilePermissions(localCache); - } finally { - localfs.delete(myFile, false); - } - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEvents.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEvents.java deleted file mode 100644 index c297ac06e3f..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEvents.java +++ /dev/null @@ -1,163 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapreduce.jobhistory; - -import junit.framework.TestCase; - -import org.apache.hadoop.mapred.TaskStatus; -import org.apache.hadoop.mapreduce.Counters; -import org.apache.hadoop.mapreduce.TaskAttemptID; -import org.apache.hadoop.mapreduce.TaskType; - -/** - * Test various jobhistory events - */ -public class TestJobHistoryEvents extends TestCase { - static final int[][] NULL_SPLITS_ARRAY - = new int[org.apache.hadoop.tools.rumen.LoggedTaskAttempt.SplitVectorKind.values().length][]; - - static { - for (int i = 0; i < NULL_SPLITS_ARRAY.length; ++i) { - NULL_SPLITS_ARRAY[i] = new int[0]; - } - } - - /** - * Test {@link TaskAttemptStartedEvent} for various task types. - */ - private static void testAttemptStartedEventForTypes(EventType expected, - TaskAttemptID id, - TaskType[] types) { - for (TaskType t : types) { - TaskAttemptStartedEvent tase = - new TaskAttemptStartedEvent(id, t, 0L, "", 0, -1); - assertEquals(expected, tase.getEventType()); - } - } - - /** - * Test {@link TaskAttemptStartedEvent}. - */ - public void testTaskAttemptStartedEvent() { - EventType expected = EventType.MAP_ATTEMPT_STARTED; - TaskAttemptID fakeId = new TaskAttemptID("1234", 1, TaskType.MAP, 1, 1); - - // check the events for job-setup, job-cleanup and map task-types - testAttemptStartedEventForTypes(expected, fakeId, - new TaskType[] {TaskType.JOB_SETUP, - TaskType.JOB_CLEANUP, - TaskType.MAP}); - - expected = EventType.REDUCE_ATTEMPT_STARTED; - fakeId = new TaskAttemptID("1234", 1, TaskType.REDUCE, 1, 1); - - // check the events for job-setup, job-cleanup and reduce task-types - testAttemptStartedEventForTypes(expected, fakeId, - new TaskType[] {TaskType.JOB_SETUP, - TaskType.JOB_CLEANUP, - TaskType.REDUCE}); - } - - /** - * Test {@link TaskAttemptUnsuccessfulCompletionEvent} for various task types. - */ - private static void testFailedKilledEventsForTypes(EventType expected, - TaskAttemptID id, - TaskType[] types, - String state) { - for (TaskType t : types) { - TaskAttemptUnsuccessfulCompletionEvent tauce = - new TaskAttemptUnsuccessfulCompletionEvent - (id, t, state, 0L, "", -1, "", "", NULL_SPLITS_ARRAY); - assertEquals(expected, tauce.getEventType()); - } - } - - /** - * Test {@link TaskAttemptUnsuccessfulCompletionEvent} for killed/failed task. 
- */ - public void testTaskAttemptUnsuccessfulCompletionEvent() { - TaskAttemptID fakeId = new TaskAttemptID("1234", 1, TaskType.MAP, 1, 1); - - // check killed events for job-setup, job-cleanup and map task-types - testFailedKilledEventsForTypes(EventType.MAP_ATTEMPT_KILLED, fakeId, - new TaskType[] {TaskType.JOB_SETUP, - TaskType.JOB_CLEANUP, - TaskType.MAP}, - TaskStatus.State.KILLED.toString()); - // check failed events for job-setup, job-cleanup and map task-types - testFailedKilledEventsForTypes(EventType.MAP_ATTEMPT_FAILED, fakeId, - new TaskType[] {TaskType.JOB_SETUP, - TaskType.JOB_CLEANUP, - TaskType.MAP}, - TaskStatus.State.FAILED.toString()); - - fakeId = new TaskAttemptID("1234", 1, TaskType.REDUCE, 1, 1); - - // check killed events for job-setup, job-cleanup and reduce task-types - testFailedKilledEventsForTypes(EventType.REDUCE_ATTEMPT_KILLED, fakeId, - new TaskType[] {TaskType.JOB_SETUP, - TaskType.JOB_CLEANUP, - TaskType.REDUCE}, - TaskStatus.State.KILLED.toString()); - // check failed events for job-setup, job-cleanup and reduce task-types - testFailedKilledEventsForTypes(EventType.REDUCE_ATTEMPT_FAILED, fakeId, - new TaskType[] {TaskType.JOB_SETUP, - TaskType.JOB_CLEANUP, - TaskType.REDUCE}, - TaskStatus.State.FAILED.toString()); - } - - /** - * Test {@link TaskAttemptFinishedEvent} for various task types. - */ - private static void testFinishedEventsForTypes(EventType expected, - TaskAttemptID id, - TaskType[] types) { - for (TaskType t : types) { - TaskAttemptFinishedEvent tafe = - new TaskAttemptFinishedEvent(id, t, - TaskStatus.State.SUCCEEDED.toString(), 0L, "", "", "", - new Counters()); - assertEquals(expected, tafe.getEventType()); - } - } - - /** - * Test {@link TaskAttemptFinishedEvent} for finished task. - */ - public void testTaskAttemptFinishedEvent() { - EventType expected = EventType.MAP_ATTEMPT_FINISHED; - TaskAttemptID fakeId = new TaskAttemptID("1234", 1, TaskType.MAP, 1, 1); - - // check the events for job-setup, job-cleanup and map task-types - testFinishedEventsForTypes(expected, fakeId, - new TaskType[] {TaskType.JOB_SETUP, - TaskType.JOB_CLEANUP, - TaskType.MAP}); - - expected = EventType.REDUCE_ATTEMPT_FINISHED; - fakeId = new TaskAttemptID("1234", 1, TaskType.REDUCE, 1, 1); - - // check the events for job-setup, job-cleanup and reduce task-types - testFinishedEventsForTypes(expected, fakeId, - new TaskType[] {TaskType.JOB_SETUP, - TaskType.JOB_CLEANUP, - TaskType.REDUCE}); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCacheOldApi.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCacheOldApi.java deleted file mode 100644 index 521316fa18e..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCacheOldApi.java +++ /dev/null @@ -1,296 +0,0 @@ -/** Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.mapreduce.security; - - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; - -import java.io.File; -import java.io.IOException; -import java.net.URI; -import java.security.NoSuchAlgorithmException; -import java.util.Collection; -import java.util.HashMap; -import java.util.Iterator; -import java.util.Map; - -import javax.crypto.KeyGenerator; -import javax.crypto.spec.SecretKeySpec; - -import org.apache.commons.codec.binary.Base64; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; -import org.apache.hadoop.io.IntWritable; -import org.apache.hadoop.io.NullWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapred.EmptyInputFormat; -import org.apache.hadoop.mapred.JobClient; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.JobTracker; -import org.apache.hadoop.mapred.Mapper; -import org.apache.hadoop.mapred.MiniMRCluster; -import org.apache.hadoop.mapred.OutputCollector; -import org.apache.hadoop.mapred.Partitioner; -import org.apache.hadoop.mapred.Reducer; -import org.apache.hadoop.mapred.Reporter; -import org.apache.hadoop.mapred.lib.NullOutputFormat; -import org.apache.hadoop.mapreduce.JobContext; -import org.apache.hadoop.security.Credentials; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.util.Tool; -import org.apache.hadoop.util.ToolRunner; -import org.codehaus.jackson.map.ObjectMapper; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - - -@SuppressWarnings("deprecation") -public class TestTokenCacheOldApi { - private static final int NUM_OF_KEYS = 10; - - // my sleep class - adds check for tokenCache - static class MyDummyJob extends Configured implements Tool, - Mapper, - Reducer, - Partitioner { - Credentials ts; - - public void configure(JobConf job) { - } - - /** - * attempts to access tokenCache as from client - */ - public void map(IntWritable key, IntWritable value, - OutputCollector output, Reporter reporter) - throws IOException { - // get token storage and a key - byte[] key1 = ts.getSecretKey(new Text("alias1")); - Collection> dts = ts.getAllTokens(); - int dts_size = 0; - if(dts != null) - dts_size = dts.size(); - - if(dts.size() != 2) { // one job token and one delegation token - throw new RuntimeException("tokens are not available"); // fail the test - } - - if(key1 == null || ts == null || ts.numberOfSecretKeys() != NUM_OF_KEYS) { - throw new RuntimeException("secret keys are not available"); // fail the test - } - - output.collect(new IntWritable(1), NullWritable.get()); - } - - public JobConf setupJobConf() { - - JobConf job = new JobConf(getConf(), MyDummyJob.class); - job.setNumMapTasks(1); - job.setNumReduceTasks(1); - 
job.setMapperClass(MyDummyJob.class); - job.setMapOutputKeyClass(IntWritable.class); - job.setMapOutputValueClass(NullWritable.class); - job.setReducerClass(MyDummyJob.class); - job.setOutputFormat(NullOutputFormat.class); - job.setInputFormat(EmptyInputFormat.class); - job.setPartitionerClass(MyDummyJob.class); - job.setSpeculativeExecution(false); - job.setJobName("Sleep job"); - populateTokens(job); - return job; - } - - private void populateTokens(JobConf job) { - // Credentials in the job will not have delegation tokens - // because security is disabled. Fetch delegation tokens - // and populate the credential in the job. - try { - Credentials ts = job.getCredentials(); - Path p1 = new Path("file1"); - p1 = p1.getFileSystem(job).makeQualified(p1); - Credentials cred = new Credentials(); - TokenCache.obtainTokensForNamenodesInternal(cred, new Path[] { p1 }, - job); - for (Token t : cred.getAllTokens()) { - ts.addToken(new Text("Hdfs"), t); - } - } catch (IOException e) { - Assert.fail("Exception " + e); - } - } - - public void close() throws IOException { - } - - public void reduce(IntWritable key, Iterator values, - OutputCollector output, Reporter reporter) - throws IOException { - return; - } - - public int getPartition(IntWritable key, NullWritable value, - int numPartitions) { - return key.get() % numPartitions; - } - - public int run(String[] args) throws Exception { - JobConf job = setupJobConf(); - JobClient.runJob(job); - return 0; - } - } - - private static MiniMRCluster mrCluster; - private static MiniDFSCluster dfsCluster; - private static final Path TEST_DIR = - new Path(System.getProperty("test.build.data","/tmp"), "sleepTest"); - private static final Path tokenFileName = new Path(TEST_DIR, "tokenFile.json"); - private static int numSlaves = 1; - private static JobConf jConf; - private static ObjectMapper mapper = new ObjectMapper(); - private static Path p1; - private static Path p2; - - @BeforeClass - public static void setUp() throws Exception { - Configuration conf = new Configuration(); - dfsCluster = new MiniDFSCluster(conf, numSlaves, true, null); - jConf = new JobConf(conf); - mrCluster = new MiniMRCluster(0, 0, numSlaves, - dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null, - jConf); - - createTokenFileJson(); - verifySecretKeysInJSONFile(); - NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads(); - FileSystem fs = dfsCluster.getFileSystem(); - - p1 = new Path("file1"); - p2 = new Path("file2"); - p1 = fs.makeQualified(p1); - } - - @AfterClass - public static void tearDown() throws Exception { - if(mrCluster != null) - mrCluster.shutdown(); - mrCluster = null; - if(dfsCluster != null) - dfsCluster.shutdown(); - dfsCluster = null; - } - - // create jason file and put some keys into it.. 
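The JSON secret-key file consumed via -tokenCacheFile can be produced with standard APIs; a hedged sketch using jackson-databind and java.util.Base64 instead of the org.codehaus.jackson and commons-codec classes the removed test imported (file name and key count are arbitrary):

import java.io.File;
import java.util.Base64;
import java.util.HashMap;
import java.util.Map;
import javax.crypto.KeyGenerator;
import com.fasterxml.jackson.databind.ObjectMapper;

public class TokenFileSketch {
  public static void main(String[] args) throws Exception {
    Map<String, String> keys = new HashMap<>();
    KeyGenerator kg = KeyGenerator.getInstance("HmacSHA1");
    for (int i = 0; i < 10; i++) {
      byte[] secret = kg.generateKey().getEncoded();
      keys.put("alias" + i, Base64.getEncoder().encodeToString(secret));
    }
    // Persist the alias -> key map as JSON, the shape the test file expects.
    new ObjectMapper().writeValue(new File("/tmp/tokenFile.json"), keys);
  }
}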
- private static void createTokenFileJson() throws IOException { - Map map = new HashMap(); - - try { - KeyGenerator kg = KeyGenerator.getInstance("HmacSHA1"); - for(int i=0; i map; - map = mapper.readValue(new File(tokenFileName.toString()), Map.class); - assertEquals("didn't read JSON correctly", map.size(), NUM_OF_KEYS); - } - - /** - * run a distributed job and verify that TokenCache is available - * @throws IOException - */ - @Test - public void testTokenCache() throws IOException { - // make sure JT starts - jConf = mrCluster.createJobConf(); - - // provide namenodes names for the job to get the delegation tokens for - //String nnUri = dfsCluster.getNameNode().getUri(namenode).toString(); - NameNode nn = dfsCluster.getNameNode(); - URI nnUri = NameNode.getUri(nn.getNameNodeAddress()); - jConf.set(JobContext.JOB_NAMENODES, nnUri + "," + nnUri.toString()); - // job tracker principle id.. - jConf.set(JobTracker.JT_USER_NAME, "jt_id"); - - // using argument to pass the file name - String[] args = { - "-tokenCacheFile", tokenFileName.toString(), - "-m", "1", "-r", "1", "-mt", "1", "-rt", "1" - }; - - int res = -1; - try { - res = ToolRunner.run(jConf, new MyDummyJob(), args); - } catch (Exception e) { - System.out.println("Job failed with" + e.getLocalizedMessage()); - e.printStackTrace(System.out); - Assert.fail("Job failed"); - } - assertEquals("dist job res is not 0", res, 0); - } - - /** - * run a local job and verify that TokenCache is available - * @throws NoSuchAlgorithmException - * @throws IOException - */ - @Test - public void testLocalJobTokenCache() throws NoSuchAlgorithmException, IOException { - // this is local job - String[] args = {"-m", "1", "-r", "1", "-mt", "1", "-rt", "1"}; - jConf.set("mapreduce.job.credentials.json", tokenFileName.toString()); - - int res = -1; - try { - res = ToolRunner.run(jConf, new MyDummyJob(), args); - } catch (Exception e) { - System.out.println("Job failed with" + e.getLocalizedMessage()); - e.printStackTrace(System.out); - fail("local Job failed"); - } - assertEquals("local job res is not 0", res, 0); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/record/TestRecordMR.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/record/TestRecordMR.java deleted file mode 100644 index 8dfe18efcf8..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/record/TestRecordMR.java +++ /dev/null @@ -1,467 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.record; - -import org.apache.hadoop.mapred.*; -import org.apache.hadoop.fs.*; -import org.apache.hadoop.io.*; -import org.apache.hadoop.io.SequenceFile.CompressionType; -import org.apache.hadoop.conf.*; -import junit.framework.TestCase; -import java.io.*; -import java.util.*; - - -/********************************************************** - * MapredLoadTest generates a bunch of work that exercises - * a Hadoop Map-Reduce system (and DFS, too). It goes through - * the following steps: - * - * 1) Take inputs 'range' and 'counts'. - * 2) Generate 'counts' random integers between 0 and range-1. - * 3) Create a file that lists each integer between 0 and range-1, - * and lists the number of times that integer was generated. - * 4) Emit a (very large) file that contains all the integers - * in the order generated. - * 5) After the file has been generated, read it back and count - * how many times each int was generated. - * 6) Compare this big count-map against the original one. If - * they match, then SUCCESS! Otherwise, FAILURE! - * - * OK, that's how we can think about it. What are the map-reduce - * steps that get the job done? - * - * 1) In a non-mapred thread, take the inputs 'range' and 'counts'. - * 2) In a non-mapread thread, generate the answer-key and write to disk. - * 3) In a mapred job, divide the answer key into K jobs. - * 4) A mapred 'generator' task consists of K map jobs. Each reads - * an individual "sub-key", and generates integers according to - * to it (though with a random ordering). - * 5) The generator's reduce task agglomerates all of those files - * into a single one. - * 6) A mapred 'reader' task consists of M map jobs. The output - * file is cut into M pieces. Each of the M jobs counts the - * individual ints in its chunk and creates a map of all seen ints. - * 7) A mapred job integrates all the count files into a single one. - * - **********************************************************/ -public class TestRecordMR extends TestCase { - /** - * Modified to make it a junit test. - * The RandomGen Job does the actual work of creating - * a huge file of assorted numbers. It receives instructions - * as to how many times each number should be counted. Then - * it emits those numbers in a crazy order. - * - * The map() function takes a key/val pair that describes - * a value-to-be-emitted (the key) and how many times it - * should be emitted (the value), aka "numtimes". map() then - * emits a series of intermediate key/val pairs. It emits - * 'numtimes' of these. The key is a random number and the - * value is the 'value-to-be-emitted'. - * - * The system collates and merges these pairs according to - * the random number. reduce() function takes in a key/value - * pair that consists of a crazy random number and a series - * of values that should be emitted. The random number key - * is now dropped, and reduce() emits a pair for every intermediate value. - * The emitted key is an intermediate value. The emitted value - * is just a blank string. Thus, we've created a huge file - * of numbers in random order, but where each number appears - * as many times as we were instructed. 
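The description above reduces to a simple trick: the map side emits each value once per requested repetition, tagged with a random key, so the shuffle's sort on that key scrambles the order; the reduce side then throws the random key away and re-emits the values. A minimal sketch of that pattern with plain Writable types and the old org.apache.hadoop.mapred API (the class and type names are illustrative, not the generated record types used in this test):

import java.io.IOException;
import java.util.Iterator;
import java.util.Random;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

public class RandomOrderSketch {

  /** Emits 'count' copies of 'value', each under a fresh random key. */
  public static class ScatterMapper
      implements Mapper<IntWritable, IntWritable, IntWritable, Text> {
    private final Random r = new Random();
    public void configure(JobConf job) { }
    public void map(IntWritable value, IntWritable count,
        OutputCollector<IntWritable, Text> out, Reporter reporter)
        throws IOException {
      for (int i = 0; i < count.get(); i++) {
        // nextInt(bound) keeps the key non-negative without Math.abs corner cases.
        out.collect(new IntWritable(r.nextInt(Integer.MAX_VALUE)),
            new Text(Integer.toString(value.get())));
      }
    }
    public void close() { }
  }

  /** Drops the random key and re-emits every value in shuffled order. */
  public static class GatherReducer
      implements Reducer<IntWritable, Text, IntWritable, Text> {
    public void configure(JobConf job) { }
    public void reduce(IntWritable randomKey, Iterator<Text> values,
        OutputCollector<IntWritable, Text> out, Reporter reporter)
        throws IOException {
      while (values.hasNext()) {
        out.collect(new IntWritable(Integer.parseInt(values.next().toString())),
            new Text(""));
      }
    }
    public void close() { }
  }
}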
- */ - static public class RandomGenMapper implements Mapper { - Random r = new Random(); - public void configure(JobConf job) { - } - - public void map(RecInt key, - RecInt val, - OutputCollector out, - Reporter reporter) throws IOException { - int randomVal = key.getData(); - int randomCount = val.getData(); - - for (int i = 0; i < randomCount; i++) { - out.collect(new RecInt(Math.abs(r.nextInt())), - new RecString(Integer.toString(randomVal))); - } - } - public void close() { - } - } - /** - */ - static public class RandomGenReducer implements Reducer { - public void configure(JobConf job) { - } - - public void reduce(RecInt key, - Iterator it, - OutputCollector out, - Reporter reporter) throws IOException { - int keyint = key.getData(); - while (it.hasNext()) { - String val = it.next().getData(); - out.collect(new RecInt(Integer.parseInt(val)), - new RecString("")); - } - } - public void close() { - } - } - - /** - * The RandomCheck Job does a lot of our work. It takes - * in a num/string keyspace, and transforms it into a - * key/count(int) keyspace. - * - * The map() function just emits a num/1 pair for every - * num/string input pair. - * - * The reduce() function sums up all the 1s that were - * emitted for a single key. It then emits the key/total - * pair. - * - * This is used to regenerate the random number "answer key". - * Each key here is a random number, and the count is the - * number of times the number was emitted. - */ - static public class RandomCheckMapper implements Mapper { - public void configure(JobConf job) { - } - - public void map(RecInt key, - RecString val, - OutputCollector out, - Reporter reporter) throws IOException { - int pos = key.getData(); - String str = val.getData(); - out.collect(new RecInt(pos), new RecString("1")); - } - public void close() { - } - } - /** - */ - static public class RandomCheckReducer implements Reducer { - public void configure(JobConf job) { - } - - public void reduce(RecInt key, - Iterator it, - OutputCollector out, - Reporter reporter) throws IOException { - int keyint = key.getData(); - int count = 0; - while (it.hasNext()) { - it.next(); - count++; - } - out.collect(new RecInt(keyint), new RecString(Integer.toString(count))); - } - public void close() { - } - } - - /** - * The Merge Job is a really simple one. It takes in - * an int/int key-value set, and emits the same set. - * But it merges identical keys by adding their values. - * - * Thus, the map() function is just the identity function - * and reduce() just sums. Nothing to see here! 
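Because the merge stage described above is just an identity map followed by a summing reduce, the reducer collapses to a few lines; the stock org.apache.hadoop.mapred.lib.LongSumReducer provides the same summing pattern for LongWritable counts. A hedged sketch with IntWritable counts (the class name is illustrative):

import java.io.IOException;
import java.util.Iterator;

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;

/** Merges identical keys by adding their counts and emitting one total. */
public class IntSumReducerSketch
    implements Reducer<IntWritable, IntWritable, IntWritable, IntWritable> {
  public void configure(JobConf job) { }
  public void reduce(IntWritable key, Iterator<IntWritable> values,
      OutputCollector<IntWritable, IntWritable> out, Reporter reporter)
      throws IOException {
    int total = 0;
    while (values.hasNext()) {
      total += values.next().get();
    }
    out.collect(key, new IntWritable(total));
  }
  public void close() { }
}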
- */ - static public class MergeMapper implements Mapper { - public void configure(JobConf job) { - } - - public void map(RecInt key, - RecString val, - OutputCollector out, - Reporter reporter) throws IOException { - int keyint = key.getData(); - String valstr = val.getData(); - out.collect(new RecInt(keyint), new RecInt(Integer.parseInt(valstr))); - } - public void close() { - } - } - static public class MergeReducer implements Reducer { - public void configure(JobConf job) { - } - - public void reduce(RecInt key, - Iterator it, - OutputCollector out, - Reporter reporter) throws IOException { - int keyint = key.getData(); - int total = 0; - while (it.hasNext()) { - total += it.next().getData(); - } - out.collect(new RecInt(keyint), new RecInt(total)); - } - public void close() { - } - } - - private static int range = 10; - private static int counts = 100; - private static Random r = new Random(); - private static Configuration conf = new Configuration(); - - public void testMapred() throws Exception { - launch(); - } - - /** - * - */ - public static void launch() throws Exception { - // - // Generate distribution of ints. This is the answer key. - // - int countsToGo = counts; - int dist[] = new int[range]; - for (int i = 0; i < range; i++) { - double avgInts = (1.0 * countsToGo) / (range - i); - dist[i] = (int) Math.max(0, Math.round(avgInts + (Math.sqrt(avgInts) * r.nextGaussian()))); - countsToGo -= dist[i]; - } - if (countsToGo > 0) { - dist[dist.length-1] += countsToGo; - } - - // - // Write the answer key to a file. - // - FileSystem fs = FileSystem.get(conf); - Path testdir = new Path("mapred.loadtest"); - if (!fs.mkdirs(testdir)) { - throw new IOException("Mkdirs failed to create directory " + testdir.toString()); - } - - Path randomIns = new Path(testdir, "genins"); - if (!fs.mkdirs(randomIns)) { - throw new IOException("Mkdirs failed to create directory " + randomIns.toString()); - } - - Path answerkey = new Path(randomIns, "answer.key"); - SequenceFile.Writer out = SequenceFile.createWriter(fs, conf, - answerkey, RecInt.class, RecInt.class, - CompressionType.NONE); - try { - for (int i = 0; i < range; i++) { - RecInt k = new RecInt(); - RecInt v = new RecInt(); - k.setData(i); - v.setData(dist[i]); - out.append(k, v); - } - } finally { - out.close(); - } - - // - // Now we need to generate the random numbers according to - // the above distribution. - // - // We create a lot of map tasks, each of which takes at least - // one "line" of the distribution. (That is, a certain number - // X is to be generated Y number of times.) - // - // A map task emits Y key/val pairs. The val is X. The key - // is a randomly-generated number. - // - // The reduce task gets its input sorted by key. That is, sorted - // in random order. It then emits a single line of text that - // for the given values. It does not emit the key. - // - // Because there's just one reduce task, we emit a single big - // file of random numbers. 
- // - Path randomOuts = new Path(testdir, "genouts"); - fs.delete(randomOuts, true); - - - JobConf genJob = new JobConf(conf, TestRecordMR.class); - FileInputFormat.setInputPaths(genJob, randomIns); - genJob.setInputFormat(SequenceFileInputFormat.class); - genJob.setMapperClass(RandomGenMapper.class); - - FileOutputFormat.setOutputPath(genJob, randomOuts); - genJob.setOutputKeyClass(RecInt.class); - genJob.setOutputValueClass(RecString.class); - genJob.setOutputFormat(SequenceFileOutputFormat.class); - genJob.setReducerClass(RandomGenReducer.class); - genJob.setNumReduceTasks(1); - - JobClient.runJob(genJob); - - // - // Next, we read the big file in and regenerate the - // original map. It's split into a number of parts. - // (That number is 'intermediateReduces'.) - // - // We have many map tasks, each of which read at least one - // of the output numbers. For each number read in, the - // map task emits a key/value pair where the key is the - // number and the value is "1". - // - // We have a single reduce task, which receives its input - // sorted by the key emitted above. For each key, there will - // be a certain number of "1" values. The reduce task sums - // these values to compute how many times the given key was - // emitted. - // - // The reduce task then emits a key/val pair where the key - // is the number in question, and the value is the number of - // times the key was emitted. This is the same format as the - // original answer key (except that numbers emitted zero times - // will not appear in the regenerated key.) The answer set - // is split into a number of pieces. A final MapReduce job - // will merge them. - // - // There's not really a need to go to 10 reduces here - // instead of 1. But we want to test what happens when - // you have multiple reduces at once. - // - int intermediateReduces = 10; - Path intermediateOuts = new Path(testdir, "intermediateouts"); - fs.delete(intermediateOuts, true); - JobConf checkJob = new JobConf(conf, TestRecordMR.class); - FileInputFormat.setInputPaths(checkJob, randomOuts); - checkJob.setInputFormat(SequenceFileInputFormat.class); - checkJob.setMapperClass(RandomCheckMapper.class); - - FileOutputFormat.setOutputPath(checkJob, intermediateOuts); - checkJob.setOutputKeyClass(RecInt.class); - checkJob.setOutputValueClass(RecString.class); - checkJob.setOutputFormat(SequenceFileOutputFormat.class); - checkJob.setReducerClass(RandomCheckReducer.class); - checkJob.setNumReduceTasks(intermediateReduces); - - JobClient.runJob(checkJob); - - // - // OK, now we take the output from the last job and - // merge it down to a single file. The map() and reduce() - // functions don't really do anything except reemit tuples. - // But by having a single reduce task here, we end up merging - // all the files. - // - Path finalOuts = new Path(testdir, "finalouts"); - fs.delete(finalOuts, true); - JobConf mergeJob = new JobConf(conf, TestRecordMR.class); - FileInputFormat.setInputPaths(mergeJob, intermediateOuts); - mergeJob.setInputFormat(SequenceFileInputFormat.class); - mergeJob.setMapperClass(MergeMapper.class); - - FileOutputFormat.setOutputPath(mergeJob, finalOuts); - mergeJob.setOutputKeyClass(RecInt.class); - mergeJob.setOutputValueClass(RecInt.class); - mergeJob.setOutputFormat(SequenceFileOutputFormat.class); - mergeJob.setReducerClass(MergeReducer.class); - mergeJob.setNumReduceTasks(1); - - JobClient.runJob(mergeJob); - - - // - // Finally, we compare the reconstructed answer key with the - // original one. 
Remember, we need to ignore zero-count items - // in the original key. - // - boolean success = true; - Path recomputedkey = new Path(finalOuts, "part-00000"); - SequenceFile.Reader in = new SequenceFile.Reader(fs, recomputedkey, conf); - int totalseen = 0; - try { - RecInt key = new RecInt(); - RecInt val = new RecInt(); - for (int i = 0; i < range; i++) { - if (dist[i] == 0) { - continue; - } - if (!in.next(key, val)) { - System.err.println("Cannot read entry " + i); - success = false; - break; - } else { - if (!((key.getData() == i) && (val.getData() == dist[i]))) { - System.err.println("Mismatch! Pos=" + key.getData() + ", i=" + i + ", val=" + val.getData() + ", dist[i]=" + dist[i]); - success = false; - } - totalseen += val.getData(); - } - } - if (success) { - if (in.next(key, val)) { - System.err.println("Unnecessary lines in recomputed key!"); - success = false; - } - } - } finally { - in.close(); - } - int originalTotal = 0; - for (int i = 0; i < dist.length; i++) { - originalTotal += dist[i]; - } - System.out.println("Original sum: " + originalTotal); - System.out.println("Recomputed sum: " + totalseen); - - // - // Write to "results" whether the test succeeded or not. - // - Path resultFile = new Path(testdir, "results"); - BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(fs.create(resultFile))); - try { - bw.write("Success=" + success + "\n"); - System.out.println("Success=" + success); - } finally { - bw.close(); - } - fs.delete(testdir, true); - } - - /** - * Launches all the tasks in order. - */ - public static void main(String[] argv) throws Exception { - if (argv.length < 2) { - System.err.println("Usage: TestRecordMR "); - System.err.println(); - System.err.println("Note: a good test will have a value that is substantially larger than the "); - return; - } - - int i = 0; - int range = Integer.parseInt(argv[i++]); - int counts = Integer.parseInt(argv[i++]); - launch(); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/record/TestRecordWritable.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/record/TestRecordWritable.java deleted file mode 100644 index 69d532ccf49..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/record/TestRecordWritable.java +++ /dev/null @@ -1,114 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.record; - -import java.io.*; -import java.util.*; -import junit.framework.TestCase; - -import org.apache.hadoop.fs.*; -import org.apache.hadoop.io.*; -import org.apache.hadoop.conf.*; -import org.apache.commons.logging.*; -import org.apache.hadoop.mapred.InputSplit; -import org.apache.hadoop.mapred.InputFormat; -import org.apache.hadoop.mapred.FileInputFormat; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.RecordReader; -import org.apache.hadoop.mapred.Reporter; -import org.apache.hadoop.mapred.SequenceFileInputFormat; - -public class TestRecordWritable extends TestCase { - private static final Log LOG = FileInputFormat.LOG; - - private static int MAX_LENGTH = 10000; - private static Configuration conf = new Configuration(); - - public void testFormat() throws Exception { - JobConf job = new JobConf(conf); - FileSystem fs = FileSystem.getLocal(conf); - Path dir = new Path(System.getProperty("test.build.data",".") + "/mapred"); - Path file = new Path(dir, "test.seq"); - - int seed = new Random().nextInt(); - //LOG.info("seed = "+seed); - Random random = new Random(seed); - - fs.delete(dir, true); - - FileInputFormat.setInputPaths(job, dir); - - // for a variety of lengths - for (int length = 0; length < MAX_LENGTH; - length+= random.nextInt(MAX_LENGTH/10)+1) { - - // create a file with length entries - SequenceFile.Writer writer = - new SequenceFile.Writer(fs, conf, file, - RecInt.class, RecBuffer.class); - try { - for (int i = 0; i < length; i++) { - RecInt key = new RecInt(); - key.setData(i); - byte[] data = new byte[random.nextInt(10)]; - random.nextBytes(data); - RecBuffer value = new RecBuffer(); - value.setData(new Buffer(data)); - writer.append(key, value); - } - } finally { - writer.close(); - } - - // try splitting the file in a variety of sizes - InputFormat format = - new SequenceFileInputFormat(); - RecInt key = new RecInt(); - RecBuffer value = new RecBuffer(); - for (int i = 0; i < 3; i++) { - int numSplits = - random.nextInt(MAX_LENGTH/(SequenceFile.SYNC_INTERVAL/20))+1; - InputSplit[] splits = format.getSplits(job, numSplits); - - // check each split - BitSet bits = new BitSet(length); - for (int j = 0; j < splits.length; j++) { - RecordReader reader = - format.getRecordReader(splits[j], job, Reporter.NULL); - try { - int count = 0; - while (reader.next(key, value)) { - assertFalse("Key in multiple partitions.", bits.get(key.getData())); - bits.set(key.getData()); - count++; - } - } finally { - reader.close(); - } - } - assertEquals("Some keys in no partition.", length, bits.cardinality()); - } - - } - } - - public static void main(String[] args) throws Exception { - new TestRecordWritable().testFormat(); - } -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/security/TestMapredGroupMappingServiceRefresh.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/security/TestMapredGroupMappingServiceRefresh.java deleted file mode 100644 index 8b45220332b..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/security/TestMapredGroupMappingServiceRefresh.java +++ /dev/null @@ -1,271 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.security; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.fail; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - - -import java.io.File; -import java.io.FileNotFoundException; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.PrintWriter; -import java.net.URI; -import java.net.URL; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeys; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.hdfs.tools.DFSAdmin; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.MiniMRCluster; -import org.apache.hadoop.mapred.tools.MRAdmin; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.security.authorize.AuthorizationException; -import org.apache.hadoop.security.authorize.ProxyUsers; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -public class TestMapredGroupMappingServiceRefresh { - private MiniDFSCluster cluster; - JobConf config; - private static long groupRefreshTimeoutSec = 2; - private String tempResource = null; - private static final Log LOG = LogFactory - .getLog(TestMapredGroupMappingServiceRefresh.class); - - public static class MockUnixGroupsMapping implements GroupMappingServiceProvider { - private int i=0; - - @Override - public List getGroups(String user) throws IOException { - String g1 = user + (10 * i + 1); - String g2 = user + (10 * i + 2); - List l = new ArrayList(2); - l.add(g1); - l.add(g2); - i++; - return l; - } - - @Override - public void cacheGroupsRefresh() throws IOException { - } - - @Override - public void cacheGroupsAdd(List groups) throws IOException { - } - } - - @Before - public void setUp() throws Exception { - config = new JobConf(new Configuration()); - - config.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, - TestMapredGroupMappingServiceRefresh.MockUnixGroupsMapping.class, - GroupMappingServiceProvider.class); - config.setLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, - groupRefreshTimeoutSec); - - LOG.info("GROUP MAPPING class name=" + - config.getClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, - ShellBasedUnixGroupsMapping.class,GroupMappingServiceProvider.class). 
- getName()); - - Groups.getUserToGroupsMappingService(config); - String namenodeUrl = "hdfs://localhost:" + "0"; - FileSystem.setDefaultUri(config, namenodeUrl); - - cluster = new MiniDFSCluster(0, config, 1, true, true, true, null, null, - null, null); - cluster.waitActive(); - URI uri = cluster.getURI(0); - - MiniMRCluster miniMRCluster = new MiniMRCluster(0, uri.toString() , - 3, null, null, config); - - config.set(JTConfig.JT_IPC_ADDRESS, "localhost:"+miniMRCluster.getJobTrackerPort()); - ProxyUsers.refreshSuperUserGroupsConfiguration(config); - } - - @After - public void tearDown() throws Exception { - if(cluster!=null) { - cluster.shutdown(); - } - if(tempResource!=null) { - File f = new File(tempResource); - f.delete(); - } - } - - @Test - public void testGroupMappingRefresh() throws Exception { - MRAdmin admin = new MRAdmin(config); - String [] args = new String[] { "-refreshUserToGroupsMappings" }; - - Groups groups = Groups.getUserToGroupsMappingService(config); - String user = UserGroupInformation.getLoginUser().getShortUserName(); - System.out.println("first attempt:"); - List g1 = groups.getGroups(user); - String [] str_groups = new String [g1.size()]; - g1.toArray(str_groups); - System.out.println(Arrays.toString(str_groups)); - - System.out.println("second attempt, should be same:"); - List g2 = groups.getGroups(user); - g2.toArray(str_groups); - System.out.println(Arrays.toString(str_groups)); - for(int i=0; i g3 = groups.getGroups(user); - g3.toArray(str_groups); - System.out.println(Arrays.toString(str_groups)); - for(int i=0; i g4 = groups.getGroups(user); - g4.toArray(str_groups); - System.out.println(Arrays.toString(str_groups)); - for(int i=0; i"+ - "" + keyGroup + ""+groups+"" + - "" + keyHosts + ""+hosts+"" + - ""; - PrintWriter writer = new PrintWriter(new FileOutputStream(tempResource)); - writer.println(newResource); - writer.close(); - - Configuration.addDefaultResource(rsrcName); - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/security/authorize/HadoopPolicyProvider.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/security/authorize/HadoopPolicyProvider.java deleted file mode 100644 index 0e3cf4f791e..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/security/authorize/HadoopPolicyProvider.java +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.security.authorize; - -import org.apache.hadoop.hdfs.HDFSPolicyProvider; -import org.apache.hadoop.mapred.MapReducePolicyProvider; - -public class HadoopPolicyProvider extends PolicyProvider { - - @Override - public Service[] getServices() { - Service[] hdfsServices = new HDFSPolicyProvider().getServices(); - Service[] mrServices = new MapReducePolicyProvider().getServices(); - - Service[] hadoopServices = - new Service[hdfsServices.length + mrServices.length]; - System.arraycopy(hdfsServices, 0, hadoopServices, 0, hdfsServices.length); - System.arraycopy(mrServices, 0, hadoopServices, hdfsServices.length, - mrServices.length); - - return hadoopServices; - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java deleted file mode 100644 index 38b0aee9ecb..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java +++ /dev/null @@ -1,190 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.security.authorize; - -import java.io.File; -import java.io.FileWriter; -import java.io.IOException; -import java.security.PrivilegedExceptionAction; -import java.util.Set; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.HDFSPolicyProvider; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; -import org.apache.hadoop.hdfs.tools.DFSAdmin; -import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.MiniMRCluster; -import org.apache.hadoop.mapred.TestMiniMRWithDFS; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.util.StringUtils; - -import junit.framework.TestCase; - -public class TestServiceLevelAuthorization extends TestCase { - public void testServiceLevelAuthorization() throws Exception { - MiniDFSCluster dfs = null; - MiniMRCluster mr = null; - FileSystem fileSys = null; - try { - final int slaves = 4; - - // Turn on service-level authorization - Configuration conf = new Configuration(); - conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, - HadoopPolicyProvider.class, PolicyProvider.class); - conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, - true); - - // Start the mini clusters - dfs = new MiniDFSCluster(conf, slaves, true, null); - - // Ensure that the protocols authorized on the name node are only the HDFS protocols. - Set> protocolsWithAcls = NameNodeAdapter.getRpcServer(dfs.getNameNode()) - .getServiceAuthorizationManager().getProtocolsWithAcls(); - Service[] hdfsServices = new HDFSPolicyProvider().getServices(); - for (Service service : hdfsServices) { - if (!protocolsWithAcls.contains(service.getProtocol())) - fail("service authorization manager has no entry for protocol " + service.getProtocol()); - } - if (hdfsServices.length != protocolsWithAcls.size()) - fail("there should be an entry for every HDFS service in the protocols with ACLs map"); - - fileSys = dfs.getFileSystem(); - JobConf mrConf = new JobConf(conf); - mr = new MiniMRCluster(slaves, fileSys.getUri().toString(), 1, - null, null, mrConf); - - // Ensure that the protocols configured for the name node did not change - // when the MR cluster was started. 
- protocolsWithAcls = NameNodeAdapter.getRpcServer(dfs.getNameNode()) - .getServiceAuthorizationManager().getProtocolsWithAcls(); - hdfsServices = new HDFSPolicyProvider().getServices(); - for (Service service : hdfsServices) { - if (!protocolsWithAcls.contains(service.getProtocol())) - fail("service authorization manager has no entry for protocol " + service.getProtocol()); - } - if (hdfsServices.length != protocolsWithAcls.size()) - fail("there should be an entry for every HDFS service in the protocols with ACLs map"); - - // make cleanup inline sothat validation of existence of these directories - // can be done - mr.setInlineCleanupThreads(); - - // Run examples - TestMiniMRWithDFS.runPI(mr, mr.createJobConf(mrConf)); - TestMiniMRWithDFS.runWordCount(mr, mr.createJobConf(mrConf)); - } finally { - if (dfs != null) { dfs.shutdown(); } - if (mr != null) { mr.shutdown(); - } - } - } - - private static final String DUMMY_ACL = "nouser nogroup"; - private static final String UNKNOWN_USER = "dev,null"; - - private void rewriteHadoopPolicyFile(File policyFile) throws IOException { - FileWriter fos = new FileWriter(policyFile); - PolicyProvider policyProvider = new HDFSPolicyProvider(); - fos.write("\n"); - for (Service service : policyProvider.getServices()) { - String key = service.getServiceKey(); - String value ="*"; - if (key.equals("security.refresh.policy.protocol.acl")) { - value = DUMMY_ACL; - } - fos.write(""+ key + "" + value + - "\n"); - System.err.println(""+ key + "" + value + - "\n"); - } - fos.write("\n"); - fos.close(); - } - - private void refreshPolicy(Configuration conf) throws IOException { - DFSAdmin dfsAdmin = new DFSAdmin(conf); - dfsAdmin.refreshServiceAcl(); - } - - public void testRefresh() throws Exception { - MiniDFSCluster dfs = null; - try { - final int slaves = 4; - - // Turn on service-level authorization - final Configuration conf = new Configuration(); - conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, - HDFSPolicyProvider.class, PolicyProvider.class); - conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, - true); - - // Start the mini dfs cluster - dfs = new MiniDFSCluster(conf, slaves, true, null); - - // Refresh the service level authorization policy - refreshPolicy(conf); - - // Simulate an 'edit' of hadoop-policy.xml - String confDir = System.getProperty("test.build.extraconf", - "build/test/extraconf"); - String HADOOP_POLICY_FILE = System.getProperty("hadoop.policy.file"); - File policyFile = new File(confDir, HADOOP_POLICY_FILE); - String policyFileCopy = HADOOP_POLICY_FILE + ".orig"; - FileUtil.copy(policyFile, FileSystem.getLocal(conf), // first save original - new Path(confDir, policyFileCopy), false, conf); - rewriteHadoopPolicyFile( // rewrite the file - new File(confDir, HADOOP_POLICY_FILE)); - - // Refresh the service level authorization policy - refreshPolicy(conf); - - // Refresh the service level authorization policy once again, - // this time it should fail! - try { - // Note: hadoop-policy.xml for tests has - // security.refresh.policy.protocol.acl = ${user.name} - UserGroupInformation unknownUser = - UserGroupInformation.createRemoteUser("unknown"); - unknownUser.doAs(new PrivilegedExceptionAction() { - public Void run() throws IOException { - refreshPolicy(conf); - return null; - } - }); - fail("Refresh of NameNode's policy file cannot be successful!"); - } catch (Exception re) { - System.out.println("Good, refresh worked... 
refresh failed with: " + - StringUtils.stringifyException(re)); - } finally { - // Reset to original hadoop-policy.xml - FileUtil.fullyDelete(new File(confDir, - HADOOP_POLICY_FILE)); - FileUtil.replaceFile(new File(confDir, policyFileCopy), new File(confDir, HADOOP_POLICY_FILE)); - } - } finally { - if (dfs != null) { dfs.shutdown(); } - } - } - -} diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/tools/TestHarFileSystem.java b/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/tools/TestHarFileSystem.java deleted file mode 100644 index a49bcf3e6fc..00000000000 --- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/tools/TestHarFileSystem.java +++ /dev/null @@ -1,427 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.tools; - -import java.io.IOException; -import java.net.URI; -import java.util.Iterator; - -import junit.framework.TestCase; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.BlockLocation; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FsShell; -import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.MiniDFSCluster; -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.Text; -import org.apache.hadoop.mapred.*; -import org.apache.hadoop.tools.HadoopArchives; -import org.apache.hadoop.util.ToolRunner; -import org.mortbay.log.Log; - -/** - * test the har file system - * create a har filesystem - * run fs commands - * and then run a map reduce job - */ -public class TestHarFileSystem extends TestCase { - private Path inputPath, inputrelPath; - private MiniDFSCluster dfscluster; - private MiniMRCluster mapred; - private FileSystem fs; - private Path filea, fileb, filec; - private Path archivePath; - - protected void setUp() throws Exception { - super.setUp(); - dfscluster = new MiniDFSCluster(new Configuration(), 2, true, null); - fs = dfscluster.getFileSystem(); - mapred = new MiniMRCluster(2, fs.getUri().toString(), 1); - inputPath = new Path(fs.getHomeDirectory(), "test"); - inputrelPath = new Path(fs.getHomeDirectory().toUri(). 
- getPath().substring(1), "test"); - filea = new Path(inputPath,"a"); - fileb = new Path(inputPath,"b"); - filec = new Path(inputPath,"c c"); - archivePath = new Path(fs.getHomeDirectory(), "tmp"); - fs.mkdirs(inputPath); - FSDataOutputStream out = fs.create(filea); - out.write("a".getBytes()); - out.close(); - out = fs.create(fileb); - out.write("b".getBytes()); - out.close(); - out = fs.create(filec); - out.write("c".getBytes()); - out.close(); - } - - protected void tearDown() throws Exception { - try { - if (mapred != null) { - mapred.shutdown(); - } - if (dfscluster != null) { - dfscluster.shutdown(); - } - } catch(Exception e) { - System.err.println(e); - } - super.tearDown(); - } - - static class TextMapperReducer implements Mapper, - Reducer { - - public void configure(JobConf conf) { - //do nothing - } - - public void map(LongWritable key, Text value, OutputCollector output, Reporter reporter) throws IOException { - output.collect(value, new Text("")); - } - - public void close() throws IOException { - // do nothing - } - - public void reduce(Text key, Iterator values, OutputCollector output, Reporter reporter) throws IOException { - while(values.hasNext()) { - values.next(); - output.collect(key, null); - } - } - } - - /* check bytes in the har output files */ - private void checkBytes(Path harPath, Configuration conf) throws IOException { - Path harFilea = new Path(harPath, "a"); - Path harFileb = new Path(harPath, "b"); - Path harFilec = new Path(harPath, "c c"); - FileSystem harFs = harFilea.getFileSystem(conf); - FSDataInputStream fin = harFs.open(harFilea); - byte[] b = new byte[4]; - int readBytes = fin.read(b); - fin.close(); - assertTrue("strings are equal ", (b[0] == "a".getBytes()[0])); - fin = harFs.open(harFileb); - fin.read(b); - fin.close(); - assertTrue("strings are equal ", (b[0] == "b".getBytes()[0])); - fin = harFs.open(harFilec); - fin.read(b); - fin.close(); - assertTrue("strings are equal ", (b[0] == "c".getBytes()[0])); - } - - private void checkProperties(Path harPath, Configuration conf) throws IOException { - Path harFilea = new Path(harPath, "a"); - Path harFileb = new Path(harPath, "b"); - Path harFilec = new Path(harPath, "c c"); - FileSystem harFs = harFilea.getFileSystem(conf); - - Path nonharFilea = new Path(inputPath, "a"); - Path nonharFileb = new Path(inputPath, "b"); - Path nonharFilec = new Path(inputPath, "c c"); - FileSystem nonharFs = nonharFilea.getFileSystem(conf); - - assertEquals("Modification times do not match for a", - harFs.getFileStatus(harFilea).getModificationTime(), - nonharFs.getFileStatus(nonharFilea).getModificationTime()); - - assertEquals("Modification times do not match for b", - harFs.getFileStatus(harFileb).getModificationTime(), - nonharFs.getFileStatus(nonharFileb).getModificationTime()); - - assertEquals("Modification times do not match for c", - harFs.getFileStatus(harFilec).getModificationTime(), - nonharFs.getFileStatus(nonharFilec).getModificationTime()); - } - - /** - * check if the block size of the part files is what we had specified - */ - private void checkBlockSize(FileSystem fs, Path finalPath, long blockSize) throws IOException { - FileStatus[] statuses = fs.globStatus(new Path(finalPath, "part-*")); - for (FileStatus status: statuses) { - assertTrue(status.getBlockSize() == blockSize); - } - } - - // test archives with a -p option - public void testRelativeArchives() throws Exception { - fs.delete(archivePath, true); - Configuration conf = mapred.createJobConf(); - HadoopArchives har = new 
HadoopArchives(conf); - - { - String[] args = new String[6]; - args[0] = "-archiveName"; - args[1] = "foo1.har"; - args[2] = "-p"; - args[3] = fs.getHomeDirectory().toString(); - args[4] = "test"; - args[5] = archivePath.toString(); - int ret = ToolRunner.run(har, args); - assertTrue("failed test", ret == 0); - Path finalPath = new Path(archivePath, "foo1.har"); - Path fsPath = new Path(inputPath.toUri().getPath()); - Path filePath = new Path(finalPath, "test"); - // make it a har path - Path harPath = new Path("har://" + filePath.toUri().getPath()); - assertTrue(fs.exists(new Path(finalPath, "_index"))); - assertTrue(fs.exists(new Path(finalPath, "_masterindex"))); - /*check for existence of only 1 part file, since part file size == 2GB */ - assertTrue(fs.exists(new Path(finalPath, "part-0"))); - assertTrue(!fs.exists(new Path(finalPath, "part-1"))); - assertTrue(!fs.exists(new Path(finalPath, "part-2"))); - assertTrue(!fs.exists(new Path(finalPath, "_logs"))); - FileStatus[] statuses = fs.listStatus(finalPath); - args = new String[2]; - args[0] = "-ls"; - args[1] = harPath.toString(); - FsShell shell = new FsShell(conf); - ret = ToolRunner.run(shell, args); - // fileb and filec - assertTrue(ret == 0); - checkBytes(harPath, conf); - checkProperties(harPath, conf); - /* check block size for path files */ - checkBlockSize(fs, finalPath, 512 * 1024 * 1024l); - } - - /** now try with different block size and part file size **/ - { - String[] args = new String[8]; - args[0] = "-Dhar.block.size=512"; - args[1] = "-Dhar.partfile.size=1"; - args[2] = "-archiveName"; - args[3] = "foo.har"; - args[4] = "-p"; - args[5] = fs.getHomeDirectory().toString(); - args[6] = "test"; - args[7] = archivePath.toString(); - int ret = ToolRunner.run(har, args); - assertTrue("failed test", ret == 0); - Path finalPath = new Path(archivePath, "foo.har"); - Path fsPath = new Path(inputPath.toUri().getPath()); - Path filePath = new Path(finalPath, "test"); - // make it a har path - Path harPath = new Path("har://" + filePath.toUri().getPath()); - assertTrue(fs.exists(new Path(finalPath, "_index"))); - assertTrue(fs.exists(new Path(finalPath, "_masterindex"))); - /*check for existence of 3 part files, since part file size == 1 */ - assertTrue(fs.exists(new Path(finalPath, "part-0"))); - assertTrue(fs.exists(new Path(finalPath, "part-1"))); - assertTrue(fs.exists(new Path(finalPath, "part-2"))); - assertTrue(!fs.exists(new Path(finalPath, "_logs"))); - FileStatus[] statuses = fs.listStatus(finalPath); - args = new String[2]; - args[0] = "-ls"; - args[1] = harPath.toString(); - FsShell shell = new FsShell(conf); - ret = ToolRunner.run(shell, args); - // fileb and filec - assertTrue(ret == 0); - checkBytes(harPath, conf); - checkProperties(harPath, conf); - checkBlockSize(fs, finalPath, 512); - } - } - - public void testArchivesWithMapred() throws Exception { - fs.delete(archivePath, true); - Configuration conf = mapred.createJobConf(); - HadoopArchives har = new HadoopArchives(conf); - String[] args = new String[4]; - - //check for destination not specfied - args[0] = "-archiveName"; - args[1] = "foo.har"; - args[2] = "-p"; - args[3] = "/"; - int ret = ToolRunner.run(har, args); - assertTrue(ret != 0); - args = new String[6]; - //check for wrong archiveName - args[0] = "-archiveName"; - args[1] = "/d/foo.har"; - args[2] = "-p"; - args[3] = "/"; - args[4] = inputrelPath.toString(); - args[5] = archivePath.toString(); - ret = ToolRunner.run(har, args); - assertTrue(ret != 0); - // se if dest is a file - args[1] = 
"foo.har"; - args[5] = filec.toString(); - ret = ToolRunner.run(har, args); - assertTrue(ret != 0); - //this is a valid run - args[0] = "-archiveName"; - args[1] = "foo.har"; - args[2] = "-p"; - args[3] = "/"; - args[4] = inputrelPath.toString(); - args[5] = archivePath.toString(); - ret = ToolRunner.run(har, args); - //checl for the existenece of the archive - assertTrue(ret == 0); - ///try running it again. it should not - // override the directory - ret = ToolRunner.run(har, args); - assertTrue(ret != 0); - Path finalPath = new Path(archivePath, "foo.har"); - Path fsPath = new Path(inputPath.toUri().getPath()); - String relative = fsPath.toString().substring(1); - Path filePath = new Path(finalPath, relative); - //make it a har path - URI uri = fs.getUri(); - Path harPath = new Path("har://" + "hdfs-" + uri.getHost() +":" + - uri.getPort() + filePath.toUri().getPath()); - assertTrue(fs.exists(new Path(finalPath, "_index"))); - assertTrue(fs.exists(new Path(finalPath, "_masterindex"))); - assertTrue(!fs.exists(new Path(finalPath, "_logs"))); - //creation tested - //check if the archive is same - // do ls and cat on all the files - - FsShell shell = new FsShell(conf); - args = new String[2]; - args[0] = "-ls"; - args[1] = harPath.toString(); - ret = ToolRunner.run(shell, args); - // ls should work. - assertTrue((ret == 0)); - //now check for contents of filea - // fileb and filec - Path harFilea = new Path(harPath, "a"); - Path harFileb = new Path(harPath, "b"); - Path harFilec = new Path(harPath, "c c"); - FileSystem harFs = harFilea.getFileSystem(conf); - FSDataInputStream fin = harFs.open(harFilea); - byte[] b = new byte[4]; - int readBytes = fin.read(b); - assertTrue("Empty read.", readBytes > 0); - fin.close(); - assertTrue("strings are equal ", (b[0] == "a".getBytes()[0])); - fin = harFs.open(harFileb); - readBytes = fin.read(b); - assertTrue("Empty read.", readBytes > 0); - fin.close(); - assertTrue("strings are equal ", (b[0] == "b".getBytes()[0])); - fin = harFs.open(harFilec); - readBytes = fin.read(b); - assertTrue("Empty read.", readBytes > 0); - fin.close(); - assertTrue("strings are equal ", (b[0] == "c".getBytes()[0])); - // ok all files match - // run a map reduce job - FileSystem fsHar = harPath.getFileSystem(conf); - FileStatus[] bla = fsHar.listStatus(harPath); - Path outdir = new Path(fs.getHomeDirectory(), "mapout"); - JobConf jobconf = mapred.createJobConf(); - FileInputFormat.addInputPath(jobconf, harPath); - jobconf.setInputFormat(TextInputFormat.class); - jobconf.setOutputFormat(TextOutputFormat.class); - FileOutputFormat.setOutputPath(jobconf, outdir); - jobconf.setMapperClass(TextMapperReducer.class); - jobconf.setMapOutputKeyClass(Text.class); - jobconf.setMapOutputValueClass(Text.class); - jobconf.setReducerClass(TextMapperReducer.class); - jobconf.setNumReduceTasks(1); - JobClient.runJob(jobconf); - args[1] = outdir.toString(); - ret = ToolRunner.run(shell, args); - - FileStatus[] status = fs.globStatus(new Path(outdir, "part*")); - Path reduceFile = status[0].getPath(); - FSDataInputStream reduceIn = fs.open(reduceFile); - b = new byte[6]; - readBytes = reduceIn.read(b); - assertTrue("Should read 6 bytes instead of "+readBytes+".", readBytes == 6); - //assuming all the 6 bytes were read. 
- Text readTxt = new Text(b); - assertTrue("a\nb\nc\n".equals(readTxt.toString())); - assertTrue("number of bytes left should be -1", reduceIn.read(b) == -1); - reduceIn.close(); - } - - public void testGetFileBlockLocations() throws Exception { - fs.delete(archivePath, true); - Configuration conf = mapred.createJobConf(); - HadoopArchives har = new HadoopArchives(conf); - String[] args = new String[8]; - args[0] = "-Dhar.block.size=512"; - args[1] = "-Dhar.partfile.size=1"; - args[2] = "-archiveName"; - args[3] = "foo bar.har"; - args[4] = "-p"; - args[5] = fs.getHomeDirectory().toString(); - args[6] = "test"; - args[7] = archivePath.toString(); - int ret = ToolRunner.run(har, args); - assertTrue("failed test", ret == 0); - Path finalPath = new Path(archivePath, "foo bar.har"); - Path fsPath = new Path(inputPath.toUri().getPath()); - Path filePath = new Path(finalPath, "test"); - Path filea = new Path(filePath, "a"); - // make it a har path - Path harPath = new Path("har://" + filea.toUri().getPath()); - FileSystem harFs = harPath.getFileSystem(conf); - FileStatus[] statuses = harFs.listStatus(filePath); - for (FileStatus status : statuses) { - BlockLocation[] locations = - harFs.getFileBlockLocations(status, 0, status.getLen()); - long lastOffset = 0; - assertEquals("Only one block location expected for files this small", - 1, locations.length); - assertEquals("Block location should start at offset 0", - 0, locations[0].getOffset()); - } - } - - public void testSpaces() throws Exception { - fs.delete(archivePath, true); - Configuration conf = mapred.createJobConf(); - HadoopArchives har = new HadoopArchives(conf); - String[] args = new String[6]; - args[0] = "-archiveName"; - args[1] = "foo bar.har"; - args[2] = "-p"; - args[3] = fs.getHomeDirectory().toString(); - args[4] = "test"; - args[5] = archivePath.toString(); - int ret = ToolRunner.run(har, args); - assertTrue("failed test", ret == 0); - Path finalPath = new Path(archivePath, "foo bar.har"); - Path fsPath = new Path(inputPath.toUri().getPath()); - Path filePath = new Path(finalPath, "test"); - // make it a har path - Path harPath = new Path("har://" + filePath.toUri().getPath()); - FileSystem harFs = harPath.getFileSystem(conf); - FileStatus[] statuses = harFs.listStatus(finalPath); - } -} diff --git a/hadoop-mapreduce-project/src/test/smoke-tests b/hadoop-mapreduce-project/src/test/smoke-tests deleted file mode 100644 index 44bcd791601..00000000000 --- a/hadoop-mapreduce-project/src/test/smoke-tests +++ /dev/null @@ -1,13 +0,0 @@ -**/TestMiniMRChildTask.java -**/TestMiniMRBringup.java -**/TestMiniMRDFSCaching.java -**/TestMiniMRDFSSort.java -**/TestMiniMRWithDFSWithDistinctUsers.java -**/TestLocalMRNotification.java -**/TestMapReduceLocal.java -**/TestReduceFetch.java -**/TestReduceTask.java -**/TestJobTrackerRestart.java -**/TestJobTrackerRestartWithLostTracker.java -**/TestJobTrackerSafeMode.java -**/TestChild.java diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/gold-minimal.json b/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/gold-minimal.json deleted file mode 100644 index c4076d8760f..00000000000 --- a/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/gold-minimal.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "minimum" : 12345, - "rankings" : [ { - "relativeRanking" : 0.25, - "datum" : 12345 - }, { - "relativeRanking" : 0.5, - "datum" : 2345678901 - }, { - "relativeRanking" : 0.75, - "datum" : 2345678902 - } ], - "maximum" : 23456789012, - "numberValues" : 5 
-} \ No newline at end of file diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/gold-one-value-many-repeats.json b/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/gold-one-value-many-repeats.json deleted file mode 100644 index 7cb38e3cb7f..00000000000 --- a/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/gold-one-value-many-repeats.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "minimum" : 23456789012, - "rankings" : [ { - "relativeRanking" : 0.25, - "datum" : 23456789012 - }, { - "relativeRanking" : 0.5, - "datum" : 23456789012 - }, { - "relativeRanking" : 0.75, - "datum" : 23456789012 - } ], - "maximum" : 23456789012, - "numberValues" : 64 -} \ No newline at end of file diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/gold-only-one-value.json b/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/gold-only-one-value.json deleted file mode 100644 index b24345acb5e..00000000000 --- a/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/gold-only-one-value.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "minimum" : 23456789012, - "rankings" : [ { - "relativeRanking" : 0.25, - "datum" : 23456789012 - }, { - "relativeRanking" : 0.5, - "datum" : 23456789012 - }, { - "relativeRanking" : 0.75, - "datum" : 23456789012 - } ], - "maximum" : 23456789012, - "numberValues" : 1 -} \ No newline at end of file diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/gold-three-values.json b/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/gold-three-values.json deleted file mode 100644 index 7722f796185..00000000000 --- a/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/gold-three-values.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "minimum" : 1, - "rankings" : [ { - "relativeRanking" : 0.25, - "datum" : 1 - }, { - "relativeRanking" : 0.5, - "datum" : 1 - }, { - "relativeRanking" : 0.75, - "datum" : 23456789012 - } ], - "maximum" : 234567890123, - "numberValues" : 3 -} \ No newline at end of file diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/input-minimal.json b/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/input-minimal.json deleted file mode 100644 index 97791213c9c..00000000000 --- a/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/input-minimal.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "data" : - [ - 12345, - 2345678901, - 23456789012, - 2345678902, - 23456789012 - ], - "percentiles" : - [ - 25, - 50, - 75 - ], - "scale" : 100 -} diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/input-one-value-many-repeats.json b/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/input-one-value-many-repeats.json deleted file mode 100644 index dba44e74345..00000000000 --- a/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/input-one-value-many-repeats.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "data" : - [ - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 
23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012, - 23456789012 - ], - "percentiles" : - [ - 25, - 50, - 75 - ], - "scale" : 100 -} diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/input-only-one-value.json b/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/input-only-one-value.json deleted file mode 100644 index f020be8b4ce..00000000000 --- a/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/input-only-one-value.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "data" : - [ - 23456789012 - ], - "percentiles" : - [ - 25, - 50, - 75 - ], - "scale" : 100 -} diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/input-three-values.json b/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/input-three-values.json deleted file mode 100644 index 5bc4e9548e1..00000000000 --- a/hadoop-mapreduce-project/src/test/tools/data/rumen/histogram-tests/input-three-values.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "data" : - [ - 1, - 23456789012, - 234567890123 - ], - "percentiles" : - [ - 25, - 50, - 75 - ], - "scale" : 100 -} diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_40864_conf.xml b/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_40864_conf.xml deleted file mode 100644 index ee86bd628a9..00000000000 --- a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_40864_conf.xml +++ /dev/null @@ -1,3 +0,0 @@ - -mapred.child.java.opts-Xmx1024M -Djava.io.tmpdir=./tmp - diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_40864_job_name-DAILY%2F20100210%5D.gz b/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_40864_job_name-DAILY%2F20100210%5D.gz deleted file mode 100644 index 0dcdf0e991a..00000000000 Binary files a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_40864_job_name-DAILY%2F20100210%5D.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_50510_conf.xml b/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_50510_conf.xml deleted file mode 100644 index 1a45cb9613c..00000000000 --- a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_50510_conf.xml +++ /dev/null @@ -1,3 +0,0 @@ - -mapred.child.java.opts-Xmx1024m -Djava.net.preferIPv4Stack=true - diff --git 
a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_50510_job_name-DAILY%2F20100208%5D.gz b/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_50510_job_name-DAILY%2F20100208%5D.gz deleted file mode 100644 index 15a89c6d41c..00000000000 Binary files a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/counters-format-test-logs/megacluster.megacorp.com_1265616107882_job_201002080801_50510_job_name-DAILY%2F20100208%5D.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/counters-test-trace.json.gz b/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/counters-test-trace.json.gz deleted file mode 100644 index 35d38054ab4..00000000000 Binary files a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/counters-test-trace.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/dispatch-sample-v20-jt-log.gz b/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/dispatch-sample-v20-jt-log.gz deleted file mode 100644 index c68f4238349..00000000000 Binary files a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/dispatch-sample-v20-jt-log.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/dispatch-topology-output.json.gz b/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/dispatch-topology-output.json.gz deleted file mode 100644 index 5f590deca97..00000000000 Binary files a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/dispatch-topology-output.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/dispatch-trace-output.json.gz b/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/dispatch-trace-output.json.gz deleted file mode 100644 index dc033d68bb1..00000000000 Binary files a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/dispatch-trace-output.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/folder-input-trace.json.gz b/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/folder-input-trace.json.gz deleted file mode 100644 index 62cd24f3045..00000000000 Binary files a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/folder-input-trace.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/goldFoldedTrace.json.gz b/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/goldFoldedTrace.json.gz deleted file mode 100644 index 0d3a3b83615..00000000000 Binary files a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/goldFoldedTrace.json.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/job-tracker-logs-topology-output b/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/job-tracker-logs-topology-output deleted file mode 100644 index 85da329b38b..00000000000 --- a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/job-tracker-logs-topology-output +++ /dev/null @@ -1,1768 +0,0 @@ -{ - "name" : "", - "children" : [ { - "name" : "194\\.6\\.133\\.192", - "children" : [ { - "name" 
: "cluster50213\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50235\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50226\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50228\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50217\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50214\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50231\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50223\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50232\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50204\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50206\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50203\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50205\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50210\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50208\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50218\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50225\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.117\\.128", - "children" : [ { - "name" : "cluster1209\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1205\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1235\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1239\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1200\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1227\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1212\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1223\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1217\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1207\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1228\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1218\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1221\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1215\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1226\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1236\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1232\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.134\\.64", - "children" : [ { - "name" : "cluster50317\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50283\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50292\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50291\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50294\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50285\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50300\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50281\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50311\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : 
"cluster50297\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50319\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50286\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50307\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50296\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50315\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50316\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50303\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.129\\.128", - "children" : [ { - "name" : "cluster1859\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1877\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1871\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1876\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1854\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1841\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1858\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1843\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1857\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1842\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1872\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1869\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1853\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1846\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1867\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.130\\.64", - "children" : [ { - "name" : "cluster1976\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1969\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1961\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1963\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1968\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1979\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1967\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1989\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1970\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1999\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.116\\.192", - "children" : [ { - "name" : "cluster1150\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1127\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1139\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1154\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1138\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1137\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1130\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1151\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1131\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1141\\.secondleveldomain\\.com", - 
"children" : null - }, { - "name" : "cluster1124\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1158\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1140\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1144\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1136\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1157\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1143\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.128\\.0", - "children" : [ { - "name" : "cluster1592\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1567\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1594\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1586\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1561\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1585\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1562\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1581\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1566\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1598\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1568\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1560\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1574\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1573\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1583\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1579\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.134\\.192", - "children" : [ { - "name" : "cluster50364\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50372\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50365\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50377\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50368\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50396\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50375\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50389\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50382\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.127\\.192", - "children" : [ { - "name" : "cluster1533\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1531\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1557\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1555\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1534\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1553\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1550\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1540\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1538\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1520\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : 
"cluster1559\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1535\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1525\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1529\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1551\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.133\\.64", - "children" : [ { - "name" : "cluster50124\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50159\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50144\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50145\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50133\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50120\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50132\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50130\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50142\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50147\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50156\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50125\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50141\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50152\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.117\\.192", - "children" : [ { - "name" : "cluster1250\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1276\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1248\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1246\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1251\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1259\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1261\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1260\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1243\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1256\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1272\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1274\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1245\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1249\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.116\\.64", - "children" : [ { - "name" : "cluster1041\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1075\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1042\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1078\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1072\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1053\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1056\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1064\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1055\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1070\\.secondleveldomain\\.com", - 
"children" : null - }, { - "name" : "cluster1061\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1059\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1040\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.133\\.128", - "children" : [ { - "name" : "cluster50171\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50195\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50161\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50191\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50174\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50185\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50177\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50166\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50173\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50170\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50189\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50179\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.126\\.0", - "children" : [ { - "name" : "cluster1283\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1295\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1302\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1294\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1310\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1305\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1299\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1281\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1288\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1289\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1314\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1315\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1316\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.116\\.128", - "children" : [ { - "name" : "cluster1107\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1118\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1080\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1093\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1102\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1104\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1097\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1087\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1095\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1110\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.135\\.64", - "children" : [ { - "name" : "cluster3071\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3079\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3068\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : 
"cluster3057\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3058\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3070\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3054\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3064\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3077\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3049\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3063\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3075\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3065\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3076\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3061\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3073\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3055\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.128\\.128", - "children" : [ { - "name" : "cluster50468\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50445\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50476\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50440\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50473\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50477\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50460\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50475\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50459\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50447\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50464\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50441\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50444\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.135\\.128", - "children" : [ { - "name" : "cluster3097\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3089\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3111\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3093\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3099\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3106\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3108\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3112\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3085\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3094\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3103\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3098\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3082\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3104\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3114\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.128\\.192", - "children" : [ { - "name" : "cluster50485\\.secondleveldomain\\.com", - 
"children" : null - }, { - "name" : "cluster50493\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50511\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50510\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50494\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50484\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50481\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50490\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50501\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50478\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50491\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50505\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50488\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50509\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50513\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.133\\.0", - "children" : [ { - "name" : "cluster50085\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50117\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50113\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50101\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50108\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50090\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.63\\.0", - "children" : [ { - "name" : "cluster1789\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1777\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1785\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1770\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1793\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1779\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1788\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1776\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1773\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1798\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1762\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1772\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1778\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1782\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1774\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1781\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1760\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1796\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1775\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1768\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1786\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1771\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.130\\.0", - "children" : [ { - "name" : 
"cluster1959\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1957\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1931\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1920\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1938\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1925\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1932\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1927\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1933\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1930\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1928\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1924\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1953\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1936\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.62\\.128", - "children" : [ { - "name" : "cluster1717\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1708\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1707\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1690\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1714\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1683\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1703\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1702\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1694\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1700\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1711\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1713\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1718\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.126\\.192", - "children" : [ { - "name" : "cluster1418\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1429\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1420\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1412\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1400\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1415\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1437\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1405\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1427\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.126\\.64", - "children" : [ { - "name" : "cluster1334\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1332\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1346\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1350\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1328\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1333\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1321\\.secondleveldomain\\.com", - "children" : 
null - }, { - "name" : "cluster1358\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1357\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1356\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.129\\.192", - "children" : [ { - "name" : "cluster1914\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1883\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1896\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1911\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1913\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1915\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1903\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1906\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1900\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1891\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1889\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1907\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1917\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.63\\.192", - "children" : [ { - "name" : "cluster1006\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1035\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1018\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1026\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1020\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1021\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1027\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1031\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1036\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1032\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1029\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1004\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1011\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1008\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1025\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1002\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1030\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1019\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1017\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1028\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.127\\.64", - "children" : [ { - "name" : "cluster1445\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1470\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1449\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1462\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1450\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1454\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : 
"cluster1466\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1465\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1474\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1444\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1448\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1463\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1457\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1447\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1455\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1442\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1479\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1467\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1446\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1440\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.132\\.128", - "children" : [ { - "name" : "cluster50034\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50011\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50023\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50025\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50024\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50021\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.129\\.64", - "children" : [ { - "name" : "cluster1800\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1809\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1816\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1819\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1813\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1806\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1803\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1835\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1822\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1807\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1823\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1832\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.134\\.128", - "children" : [ { - "name" : "cluster50359\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50326\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50348\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50346\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50325\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50342\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50352\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.126\\.128", - "children" : [ { - "name" : "cluster1383\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1378\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1393\\.secondleveldomain\\.com", - "children" : null - }, 
{ - "name" : "cluster1395\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1396\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1373\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1388\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1379\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1370\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1368\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1371\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1377\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1369\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.62\\.64", - "children" : [ { - "name" : "cluster1643\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1660\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1652\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1672\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1654\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1648\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1657\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1655\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1641\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1669\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1662\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1647\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1649\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1666\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1678\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1650\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1679\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.127\\.128", - "children" : [ { - "name" : "cluster1482\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1517\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1491\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1498\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1490\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1504\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1515\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1480\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1518\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1493\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1503\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1514\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.129\\.0", - "children" : [ { - "name" : "cluster50520\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50539\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50533\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50530\\.secondleveldomain\\.com", - 
"children" : null - }, { - "name" : "cluster50526\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50543\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.62\\.192", - "children" : [ { - "name" : "cluster50407\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50409\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50423\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50427\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50429\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50416\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50420\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50418\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50406\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50411\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50425\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.134\\.0", - "children" : [ { - "name" : "cluster50275\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50254\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50272\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50274\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50245\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50276\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50243\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50252\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50263\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50279\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50273\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50261\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50260\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.128\\.64", - "children" : [ { - "name" : "cluster1613\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1639\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1615\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1628\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1635\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1611\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1607\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1629\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1623\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1633\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1610\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1632\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1614\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1636\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1600\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1626\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : 
"cluster1602\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1627\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.132\\.192", - "children" : [ { - "name" : "cluster50047\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50055\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50051\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50059\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50050\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50076\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50077\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50046\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50053\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50057\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50072\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50044\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50043\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50058\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.117\\.64", - "children" : [ { - "name" : "cluster1193\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1175\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1185\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1171\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1174\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1167\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1180\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.63\\.64", - "children" : [ { - "name" : "cluster1755\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1757\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1725\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1727\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1736\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1735\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1722\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1752\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1759\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1758\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1732\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1743\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1731\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1733\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1751\\.secondleveldomain\\.com", - "children" : null - } ] - } ] -} diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/job-tracker-logs-trace-output.gz b/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/job-tracker-logs-trace-output.gz deleted file mode 100644 index d8431facdaa..00000000000 Binary files 
a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/job-tracker-logs-trace-output.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/sample-conf.file.new.xml b/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/sample-conf.file.new.xml deleted file mode 100644 index 47e90009c17..00000000000 --- a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/sample-conf.file.new.xml +++ /dev/null @@ -1,36 +0,0 @@ - - - - - - mapreduce.job.queuenameTheQueue - - - mapreduce.job.nameMyMRJob - - - maproduce.uninteresting.propertyabcdef - - mapreduce.map.java.opts-server -Xmx640m -Djava.net.preferIPv4Stack=true - mapreduce.reduce.java.opts-server -Xmx650m -Djava.net.preferIPv4Stack=true - diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/sample-conf.file.xml b/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/sample-conf.file.xml deleted file mode 100644 index 75d0f86cfa9..00000000000 --- a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/sample-conf.file.xml +++ /dev/null @@ -1,35 +0,0 @@ - - - - - - mapred.job.queue.nameTheQueue - - - mapred.job.nameMyMRJob - - - maproduce.uninteresting.propertyabcdef - - mapred.child.java.opts-server -Xmx640m -Djava.net.preferIPv4Stack=true - diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/sample-job-tracker-logs.gz b/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/sample-job-tracker-logs.gz deleted file mode 100644 index 2662ffb54f4..00000000000 Binary files a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/sample-job-tracker-logs.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/truncated-job-tracker-log b/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/truncated-job-tracker-log deleted file mode 100644 index 5e9e925a481..00000000000 --- a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/truncated-job-tracker-log +++ /dev/null @@ -1,110 +0,0 @@ - !!FILE=cluster-jt1.red.ygrid.megatron.com_1240335959557_job_200904211745_0002_hadoopqa_word+count!! -Meta VERSION="1" . -Job JOBID="job_200904211745_0002" JOBNAME="word count" USER="hadoopqa" SUBMIT_TIME="1240335962848" JOBCONF="hdfs://cluster-nn1\.secondleveldomain\.com/mapredsystem/hadoop/mapredsystem/job_200904211745_0002/job\.xml" . -Job JOBID="job_200904211745_0002" JOB_PRIORITY="NORMAL" . -Job JOBID="job_200904211745_0002" LAUNCH_TIME="1240335964437" TOTAL_MAPS="20" TOTAL_REDUCES="1" JOB_STATUS="PREP" . -Task TASKID="task_200904211745_0002_m_000021" TASK_TYPE="SETUP" START_TIME="1240336739508" SPLITS="" . -MapAttempt TASK_TYPE="SETUP" TASKID="task_200904211745_0002_m_000021" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000021_0" START_TIME="1240336739565" TRACKER_NAME="tracker_cluster1028\.secondleveldomain\.com:localhost/127\.0\.0\.1:52187" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="SETUP" TASKID="task_200904211745_0002_m_000021" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000021_0" TASK_STATUS="SUCCESS" FINISH_TIME="1240336742217" HOSTNAME="/192\.30\.63\.192/cluster1028\.secondleveldomain\.com" STATE_STRING="setup" COUNTERS="{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(SPILLED_RECORDS)(Spilled Records)(0)]}" . 
-Task TASKID="task_200904211745_0002_m_000021" TASK_TYPE="SETUP" TASK_STATUS="SUCCESS" FINISH_TIME="1240336752812" COUNTERS="{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(SPILLED_RECORDS)(Spilled Records)(0)]}" . -Job JOBID="job_200904211745_0002" JOB_STATUS="RUNNING" . -Task TASKID="task_200904211745_0002_m_000010" TASK_TYPE="MAP" START_TIME="1240336753705" SPLITS="/192\.30\.126\.128/cluster1369\.secondleveldomain\.com,/194\.6\.129\.128/cluster1854\.secondleveldomain\.com,/194\.6\.129\.128/cluster1872\.secondleveldomain\.com" . -Task TASKID="task_200904211745_0002_m_000017" TASK_TYPE="MAP" START_TIME="1240336753750" SPLITS="/194\.6\.129\.64/cluster1803\.secondleveldomain\.com,/194\.6\.130\.0/cluster1930\.secondleveldomain\.com,/194\.6\.130\.0/cluster1932\.secondleveldomain\.com" . -Task TASKID="task_200904211745_0002_m_000019" TASK_TYPE="MAP" START_TIME="1240336753796" SPLITS="/194\.6\.128\.192/cluster50481\.secondleveldomain\.com,/194\.6\.128\.192/cluster50505\.secondleveldomain\.com,/194\.6\.134\.128/cluster50359\.secondleveldomain\.com" . -Task TASKID="task_200904211745_0002_m_000005" TASK_TYPE="MAP" START_TIME="1240336753840" SPLITS="/192\.30\.117\.192/cluster1245\.secondleveldomain\.com,/192\.30\.117\.192/cluster1261\.secondleveldomain\.com,/194\.6\.132\.128/cluster50021\.secondleveldomain\.com" . -Task TASKID="task_200904211745_0002_m_000000" TASK_TYPE="MAP" START_TIME="1240336753888" SPLITS="/192\.30\.117\.128/cluster1236\.secondleveldomain\.com,/194\.6\.129\.192/cluster1889\.secondleveldomain\.com,/194\.6\.129\.192/cluster1911\.secondleveldomain\.com" . -Task TASKID="task_200904211745_0002_m_000009" TASK_TYPE="MAP" START_TIME="1240336753938" SPLITS="/192\.30\.117\.128/cluster1227\.secondleveldomain\.com,/192\.30\.117\.192/cluster1259\.secondleveldomain\.com,/192\.30\.117\.192/cluster1260\.secondleveldomain\.com" . -Task TASKID="task_200904211745_0002_m_000002" TASK_TYPE="MAP" START_TIME="1240336753987" SPLITS="/192\.30\.126\.64/cluster1357\.secondleveldomain\.com,/192\.30\.127\.64/cluster1450\.secondleveldomain\.com,/192\.30\.127\.64/cluster1457\.secondleveldomain\.com" . -Task TASKID="task_200904211745_0002_m_000004" TASK_TYPE="MAP" START_TIME="1240336754030" SPLITS="/192\.30\.126\.0/cluster1294\.secondleveldomain\.com,/192\.30\.126\.0/cluster1288\.secondleveldomain\.com,/194\.6\.129\.128/cluster1876\.secondleveldomain\.com" . -Task TASKID="task_200904211745_0002_m_000007" TASK_TYPE="MAP" START_TIME="1240336754077" SPLITS="/192\.30\.127\.64/cluster1466\.secondleveldomain\.com,/194\.6\.133\.192/cluster50218\.secondleveldomain\.com,/194\.6\.133\.192/cluster50232\.secondleveldomain\.com" . -Task TASKID="task_200904211745_0002_m_000013" TASK_TYPE="MAP" START_TIME="1240336754124" SPLITS="/194\.6\.134\.64/cluster50286\.secondleveldomain\.com,/194\.6\.135\.64/cluster3071\.secondleveldomain\.com,/194\.6\.135\.64/cluster3049\.secondleveldomain\.com" . -Task TASKID="task_200904211745_0002_m_000012" TASK_TYPE="MAP" START_TIME="1240336754176" SPLITS="/194\.6\.132\.192/cluster50057\.secondleveldomain\.com,/194\.6\.135\.128/cluster3112\.secondleveldomain\.com,/194\.6\.135\.128/cluster3082\.secondleveldomain\.com" . -Task TASKID="task_200904211745_0002_m_000001" TASK_TYPE="MAP" START_TIME="1240336754223" SPLITS="/192\.30\.116\.128/cluster1080\.secondleveldomain\.com,/192\.30\.116\.128/cluster1097\.secondleveldomain\.com,/194\.6\.129\.0/cluster50543\.secondleveldomain\.com" . 
-Task TASKID="task_200904211745_0002_m_000015" TASK_TYPE="MAP" START_TIME="1240336754270" SPLITS="/192\.30\.126\.192/cluster1412\.secondleveldomain\.com,/192\.30\.62\.192/cluster50427\.secondleveldomain\.com,/192\.30\.62\.192/cluster50411\.secondleveldomain\.com" . -Task TASKID="task_200904211745_0002_m_000016" TASK_TYPE="MAP" START_TIME="1240336754319" SPLITS="/192\.30\.126\.128/cluster1393\.secondleveldomain\.com,/194\.6\.133\.64/cluster50130\.secondleveldomain\.com,/194\.6\.133\.64/cluster50141\.secondleveldomain\.com" . -Task TASKID="task_200904211745_0002_m_000018" TASK_TYPE="MAP" START_TIME="1240336754366" SPLITS="/192\.30\.117\.128/cluster1223\.secondleveldomain\.com,/192\.30\.117\.128/cluster1200\.secondleveldomain\.com,/194\.6\.133\.64/cluster50152\.secondleveldomain\.com" . -Task TASKID="task_200904211745_0002_m_000003" TASK_TYPE="MAP" START_TIME="1240336754409" SPLITS="/192\.30\.63\.64/cluster1733\.secondleveldomain\.com,/194\.6\.128\.64/cluster1607\.secondleveldomain\.com,/194\.6\.128\.64/cluster1639\.secondleveldomain\.com" . -Task TASKID="task_200904211745_0002_m_000006" TASK_TYPE="MAP" START_TIME="1240336754452" SPLITS="/192\.30\.116\.64/cluster1064\.secondleveldomain\.com,/194\.6\.128\.192/cluster50510\.secondleveldomain\.com,/194\.6\.128\.192/cluster50478\.secondleveldomain\.com" . -Task TASKID="task_200904211745_0002_m_000014" TASK_TYPE="MAP" START_TIME="1240336754500" SPLITS="/192\.30\.116\.64/cluster1059\.secondleveldomain\.com,/194\.6\.132\.192/cluster50053\.secondleveldomain\.com,/194\.6\.132\.192/cluster50050\.secondleveldomain\.com" . -Task TASKID="task_200904211745_0002_m_000008" TASK_TYPE="MAP" START_TIME="1240336754548" SPLITS="/192\.30\.116\.192/cluster1157\.secondleveldomain\.com,/192\.30\.62\.128/cluster1718\.secondleveldomain\.com,/192\.30\.62\.128/cluster1694\.secondleveldomain\.com" . -Task TASKID="task_200904211745_0002_m_000011" TASK_TYPE="MAP" START_TIME="1240336754596" SPLITS="/192\.30\.116\.192/cluster1143\.secondleveldomain\.com,/192\.30\.63\.192/cluster1004\.secondleveldomain\.com,/192\.30\.63\.192/cluster1020\.secondleveldomain\.com" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000015" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000015_0" START_TIME="1240336762622" TRACKER_NAME="tracker_cluster3104\.secondleveldomain\.com:localhost/127\.0\.0\.1:48449" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000015" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000015_0" TASK_STATUS="SUCCESS" FINISH_TIME="1240336765834" HOSTNAME="/194\.6\.135\.128/cluster3104\.secondleveldomain\.com" STATE_STRING="" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(53639)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . 
-Task TASKID="task_200904211745_0002_m_000015" TASK_TYPE="MAP" TASK_STATUS="SUCCESS" FINISH_TIME="1240336774468" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(53639)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000001" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000001_0" START_TIME="1240336754801" TRACKER_NAME="tracker_cluster1660\.secondleveldomain\.com:localhost/127\.0\.0\.1:40006" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000001" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000001_0" TASK_STATUS="SUCCESS" FINISH_TIME="1240336758231" HOSTNAME="/192\.30\.62\.64/cluster1660\.secondleveldomain\.com" STATE_STRING="" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(148286)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -Task TASKID="task_200904211745_0002_m_000001" TASK_TYPE="MAP" TASK_STATUS="SUCCESS" FINISH_TIME="1240336774476" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(148286)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000012" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000012_0" START_TIME="1240336768390" TRACKER_NAME="tracker_cluster3097\.secondleveldomain\.com:localhost/127\.0\.0\.1:32840" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000012" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000012_0" TASK_STATUS="SUCCESS" FINISH_TIME="1240336771627" HOSTNAME="/194\.6\.135\.128/cluster3097\.secondleveldomain\.com" STATE_STRING="" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(53639)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . 
-Task TASKID="task_200904211745_0002_m_000012" TASK_TYPE="MAP" TASK_STATUS="SUCCESS" FINISH_TIME="1240336774482" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(53639)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -Task TASKID="task_200904211745_0002_r_000000" TASK_TYPE="REDUCE" START_TIME="1240336774548" SPLITS="" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000011" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000011_0" START_TIME="1240336744261" TRACKER_NAME="tracker_cluster3098\.secondleveldomain\.com:localhost/127\.0\.0\.1:53110" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000011" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000011_0" TASK_STATUS="SUCCESS" FINISH_TIME="1240336747517" HOSTNAME="/194\.6\.135\.128/cluster3098\.secondleveldomain\.com" STATE_STRING="" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(53639)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -Task TASKID="task_200904211745_0002_m_000011" TASK_TYPE="MAP" TASK_STATUS="SUCCESS" FINISH_TIME="1240336776031" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(53639)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000003" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000003_0" START_TIME="1240336755433" TRACKER_NAME="tracker_cluster1551\.secondleveldomain\.com:localhost/127\.0\.0\.1:46404" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000003" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000003_0" TASK_STATUS="SUCCESS" FINISH_TIME="1240336758966" HOSTNAME="/192\.30\.127\.192/cluster1551\.secondleveldomain\.com" STATE_STRING="" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(148286)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . 
-Task TASKID="task_200904211745_0002_m_000003" TASK_TYPE="MAP" TASK_STATUS="SUCCESS" FINISH_TIME="1240336816560" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(148286)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000006" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000006_0" START_TIME="1240336747936" TRACKER_NAME="tracker_cluster1405\.secondleveldomain\.com:localhost/127\.0\.0\.1:35101" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000006" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000006_0" TASK_STATUS="SUCCESS" FINISH_TIME="1240336751464" HOSTNAME="/192\.30\.126\.192/cluster1405\.secondleveldomain\.com" STATE_STRING="" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(148286)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -Task TASKID="task_200904211745_0002_m_000006" TASK_TYPE="MAP" TASK_STATUS="SUCCESS" FINISH_TIME="1240336817330" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(148286)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000016" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000016_0" START_TIME="1240336759528" TRACKER_NAME="tracker_cluster1867\.secondleveldomain\.com:localhost/127\.0\.0\.1:43031" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000016" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000016_0" TASK_STATUS="SUCCESS" FINISH_TIME="1240336764456" HOSTNAME="/194\.6\.129\.128/cluster1867\.secondleveldomain\.com" STATE_STRING="" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(53639)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . 
-Task TASKID="task_200904211745_0002_m_000016" TASK_TYPE="MAP" TASK_STATUS="SUCCESS" FINISH_TIME="1240336817903" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(53639)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000019" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000019_0" START_TIME="1240336766565" TRACKER_NAME="tracker_cluster1771\.secondleveldomain\.com:localhost/127\.0\.0\.1:49430" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000019" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000019_0" TASK_STATUS="SUCCESS" FINISH_TIME="1240336770163" HOSTNAME="/192\.30\.63\.0/cluster1771\.secondleveldomain\.com" STATE_STRING="" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(53639)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -Task TASKID="task_200904211745_0002_m_000019" TASK_TYPE="MAP" TASK_STATUS="SUCCESS" FINISH_TIME="1240336818106" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(53639)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000010" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000010_0" START_TIME="1240336647215" TRACKER_NAME="tracker_cluster1396\.secondleveldomain\.com:localhost/127\.0\.0\.1:46109" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000010" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000010_0" TASK_STATUS="SUCCESS" FINISH_TIME="1240336651127" HOSTNAME="/192\.30\.126\.128/cluster1396\.secondleveldomain\.com" STATE_STRING="" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(53639)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -Task TASKID="task_200904211745_0002_m_000010" TASK_TY . 
-MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000017" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000017_0" START_TIME="1240336752204" TRACKER_NAME="tracker_cluster1553\.secondleveldomain\.com:localhost/127\.0\.0\.1:33829" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000017" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000017_0" TASK_STATUS="SUCCESS" FINISH_TIME="1240336755959" HOSTNAME="/192\.30\.127\.192/cluster1553\.secondleveldomain\.com" STATE_STRING="" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(53639)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -Task TASKID="task_200904211745_0002_m_000017" TASK_TYPE="MAP" TASK_STATUS="SUCCESS" FINISH_TIME="1240336818110" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(53639)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000000" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000000_0" START_TIME="1240336755247" TRACKER_NAME="tracker_cluster1218\.secondleveldomain\.com:localhost/127\.0\.0\.1:37882" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000000" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000000_0" TASK_STATUS="SUCCESS" FINISH_TIME="1240336763432" HOSTNAME="/192\.30\.117\.128/cluster1218\.secondleveldomain\.com" STATE_STRING="" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(148286)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -Task TASKID="task_200904211745_0002_m_000000" TASK_TYPE="MAP" TASK_STATUS="SUCCESS" FINISH_TIME="1240336818113" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(148286)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000005" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000005_0" START_TIME="1240336765887" TRACKER_NAME="tracker_cluster1779\.secondleveldomain\.com:localhost/127\.0\.0\.1:57465" HTTP_PORT="50060" . 
-MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000005" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000005_0" TASK_STATUS="SUCCESS" FINISH_TIME="1240336785565" HOSTNAME="/192\.30\.63\.0/cluster1779\.secondleveldomain\.com" STATE_STRING="" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(148286)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -Task TASKID="task_200904211745_0002_m_000005" TASK_TYPE="MAP" TASK_STATUS="SUCCESS" FINISH_TIME="1240336818114" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(148286)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000002" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000002_0" START_TIME="1240336754665" TRACKER_NAME="tracker_cluster1670\.secondleveldomain\.com:localhost/127\.0\.0\.1:47698" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000002" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000002_0" TASK_STATUS="FAILED" FINISH_TIME="1240336777673" HOSTNAME="cluster1670\.secondleveldomain\.com" ERROR="java\.io\.IOException: Task process exit with nonzero status of 15\. - at org\.apache\.hadoop\.mapred\.TaskRunner\.run(TaskRunner\.java:424) -,java\.io\.IOException: Task process exit with nonzero status of 15\. - at org\.apache\.hadoop\.mapred\.TaskRunner\.run(TaskRunner\.java:424) -" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000009" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000009_0" START_TIME="1240336758229" TRACKER_NAME="tracker_cluster1586\.secondleveldomain\.com:localhost/127\.0\.0\.1:38422" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000009" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000009_0" TASK_STATUS="SUCCESS" FINISH_TIME="1240336761612" HOSTNAME="/194\.6\.128\.0/cluster1586\.secondleveldomain\.com" STATE_STRING="" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(148286)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . 
-Task TASKID="task_200904211745_0002_m_000009" TASK_TYPE="MAP" TASK_STATUS="SUCCESS" FINISH_TIME="1240336818116" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(148286)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000004" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000004_0" START_TIME="1240336757380" TRACKER_NAME="tracker_cluster1869\.secondleveldomain\.com:localhost/127\.0\.0\.1:40050" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000004" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000004_0" TASK_STATUS="SUCCESS" FINISH_TIME="1240336763176" HOSTNAME="/194\.6\.129\.128/cluster1869\.secondleveldomain\.com" STATE_STRING="" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(148286)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -Task TASKID="task_200904211745_0002_m_000004" TASK_TYPE="MAP" TASK_STATUS="SUCCESS" FINISH_TIME="1240336818118" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(148286)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000007" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000007_0" START_TIME="1240336763994" TRACKER_NAME="tracker_cluster1770\.secondleveldomain\.com:localhost/127\.0\.0\.1:52486" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000007" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000007_0" TASK_STATUS="SUCCESS" FINISH_TIME="1240336767750" HOSTNAME="/192\.30\.63\.0/cluster1770\.secondleveldomain\.com" STATE_STRING="" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(148286)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . 
-Task TASKID="task_200904211745_0002_m_000007" TASK_TYPE="MAP" TASK_STATUS="SUCCESS" FINISH_TIME="1240336818119" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(148286)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000013" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000013_0" START_TIME="1240336758341" TRACKER_NAME="tracker_cluster1816\.secondleveldomain\.com:localhost/127\.0\.0\.1:41947" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000013" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000013_0" TASK_STATUS="SUCCESS" FINISH_TIME="1240336762025" HOSTNAME="/194\.6\.129\.64/cluster1816\.secondleveldomain\.com" STATE_STRING="" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(53639)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -Task TASKID="task_200904211745_0002_m_000013" TASK_TYPE="MAP" TASK_STATUS="SUCCESS" FINISH_TIME="1240336818120" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(53639)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000018" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000018_0" START_TIME="1240336759909" TRACKER_NAME="tracker_cluster1649\.secondleveldomain\.com:localhost/127\.0\.0\.1:36608" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000018" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000018_0" TASK_STATUS="SUCCESS" FINISH_TIME="1240336763727" HOSTNAME="/192\.30\.62\.64/cluster1649\.secondleveldomain\.com" STATE_STRING="" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(53639)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . 
-Task TASKID="task_200904211745_0002_m_000018" TASK_TYPE="MAP" TASK_STATUS="SUCCESS" FINISH_TIME="1240336818420" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(53639)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000014" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000014_0" START_TIME="1240336756161" TRACKER_NAME="tracker_cluster1928\.secondleveldomain\.com:localhost/127\.0\.0\.1:58972" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000014" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000014_0" TASK_STATUS="SUCCESS" FINISH_TIME="1240336760016" HOSTNAME="/194\.6\.130\.0/cluster1928\.secondleveldomain\.com" STATE_STRING="" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(53639)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -Task TASKID="task_200904211745_0002_m_000014" TASK_TYPE="MAP" TASK_STATUS="SUCCESS" FINISH_TIME="1240336824060" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(53639)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000008" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000008_0" START_TIME="1240336758220" TRACKER_NAME="tracker_cluster1846\.secondleveldomain\.com:localhost/127\.0\.0\.1:44127" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000008" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000008_0" TASK_STATUS="SUCCESS" FINISH_TIME="1240336762903" HOSTNAME="/194\.6\.129\.128/cluster1846\.secondleveldomain\.com" STATE_STRING="" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(148286)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . 
-Task TASKID="task_200904211745_0002_m_000008" TASK_TYPE="MAP" TASK_STATUS="SUCCESS" FINISH_TIME="1240336824556" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(148286)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000002" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000002_1" START_TIME="1240336821839" TRACKER_NAME="tracker_cluster1586\.secondleveldomain\.com:localhost/127\.0\.0\.1:38422" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="MAP" TASKID="task_200904211745_0002_m_000002" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000002_1" TASK_STATUS="SUCCESS" FINISH_TIME="1240336824652" HOSTNAME="/194\.6\.128\.0/cluster1586\.secondleveldomain\.com" STATE_STRING="" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(148286)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -Task TASKID="task_200904211745_0002_m_000002" TASK_TYPE="MAP" TASK_STATUS="SUCCESS" FINISH_TIME="1240336842768" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(HDFS_BYTES_READ)(HDFS_BYTES_READ)(148286)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(37170)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(COMBINE_OUTPUT_RECORDS)(Combine output records)(5315)][(MAP_INPUT_RECORDS)(Map input records)(3601)][(SPILLED_RECORDS)(Spilled Records)(5315)][(MAP_OUTPUT_BYTES)(Map output bytes)(247925)][(COMBINE_INPUT_RECORDS)(Combine input records)(26425)][(MAP_OUTPUT_RECORDS)(Map output records)(26425)]}" . -ReduceAttempt TASK_TYPE="REDUCE" TASKID="task_200904211745_0002_r_000000" TASK_ATTEMPT_ID="attempt_200904211745_0002_r_000000_0" START_TIME="1240336786769" TRACKER_NAME="tracker_cluster1771\.secondleveldomain\.com:localhost/127\.0\.0\.1:49430" HTTP_PORT="50060" . -ReduceAttempt TASK_TYPE="REDUCE" TASKID="task_200904211745_0002_r_000000" TASK_ATTEMPT_ID="attempt_200904211745_0002_r_000000_0" TASK_STATUS="SUCCESS" SHUFFLE_FINISHED="1240336859759" SORT_FINISHED="1240336860092" FINISH_TIME="1240336870553" HOSTNAME="/192\.30\.63\.0/cluster1771\.secondleveldomain\.com" STATE_STRING="reduce > reduce" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(FILE_BYTES_READ)(FILE_BYTES_READ)(71200)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(71200)][(HDFS_BYTES_WRITTEN)(HDFS_BYTES_WRITTEN)(56630)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(REDUCE_INPUT_GROUPS)(Reduce input groups)(0)][(COMBINE_OUTPUT_RECORDS)(Combine output records)(0)][(REDUCE_SHUFFLE_BYTES)(Reduce shuffle bytes)(705622)][(REDUCE_OUTPUT_RECORDS)(Reduce output records)(0)][(SPILLED_RECORDS)(Spilled Records)(106300)][(COMBINE_INPUT_RECORDS)(Combine input records)(0)][(REDUCE_INPUT_RECORDS)(Reduce input records)(106300)]}" . 
-Task TASKID="task_200904211745_0002_r_000000" TASK_TYPE="REDUCE" TASK_STATUS="SUCCESS" FINISH_TIME="1240336873648" COUNTERS="{(FileSystemCounters)(FileSystemCounters)[(FILE_BYTES_READ)(FILE_BYTES_READ)(71200)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(71200)][(HDFS_BYTES_WRITTEN)(HDFS_BYTES_WRITTEN)(56630)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(REDUCE_INPUT_GROUPS)(Reduce input groups)(0)][(COMBINE_OUTPUT_RECORDS)(Combine output records)(0)][(REDUCE_SHUFFLE_BYTES)(Reduce shuffle bytes)(705622)][(REDUCE_OUTPUT_RECORDS)(Reduce output records)(0)][(SPILLED_RECORDS)(Spilled Records)(106300)][(COMBINE_INPUT_RECORDS)(Combine input records)(0)][(REDUCE_INPUT_RECORDS)(Reduce input records)(106300)]}" . -Task TASKID="task_200904211745_0002_m_000020" TASK_TYPE="CLEANUP" START_TIME="1240336873651" SPLITS="" . -MapAttempt TASK_TYPE="CLEANUP" TASKID="task_200904211745_0002_m_000020" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000020_0" START_TIME="1240336885885" TRACKER_NAME="tracker_cluster1771\.secondleveldomain\.com:localhost/127\.0\.0\.1:49430" HTTP_PORT="50060" . -MapAttempt TASK_TYPE="CLEANUP" TASKID="task_200904211745_0002_m_000020" TASK_ATTEMPT_ID="attempt_200904211745_0002_m_000020_0" TASK_STATUS="SUCCESS" FINISH_TIME="1240336887642" HOSTNAME="/192\.30\.63\.0/cluster1771\.secondleveldomain\.com" STATE_STRING="cleanup" COUNTERS="{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(SPILLED_RECORDS)(Spilled Records)(0)]}" . -Task TASKID="task_200904211745_0002_m_000020" TASK_TYPE="CLEANUP" TASK_STATUS="SUCCESS" FINISH_TIME="1240336889658" COUNTERS="{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(SPILLED_RECORDS)(Spilled Records)(0)]}" . -Job JOBID="job_200904211745_0002" FINISH_TIME="1240336889659" JOB_STATUS="SUCCESS" FINISHED_MAPS="20" FINISHED_REDUCES="1" FAILED_MAPS="1" FAILED_REDUCES="0" COUNTERS="{(org\.apache\.hadoop\.mapred\.JobInProgress$Counter)(Job Counters )[(TOTAL_LAUNCHED_REDUCES)(Launched reduce tasks)(1)][(RACK_LOCAL_MAPS)(Rack-local map tasks)(4)][(TOTAL_LAUNCHED_MAPS)(Launched map tasks)(21)]}{(FileSystemCounters)(FileSystemCounters)[(FILE_BYTES_READ)(FILE_BYTES_READ)(71200)][(HDFS_BYTES_READ)(HDFS_BYTES_READ)(2019250)][(FILE_BYTES_WRITTEN)(FILE_BYTES_WRITTEN)(814600)][(HDFS_BYTES_WRITTEN)(HDFS_BYTES_WRITTEN)(56630)]}{(org\.apache\.hadoop\.mapred\.Task$Counter)(Map-Reduce Framework)[(REDUCE_INPUT_GROUPS)(Reduce input groups)(0)][(COMBINE_OUTPUT_RECORDS)(Combine output records)(106300)][(MAP_INPUT_RECORDS)(Map input records)(72020)][(REDUCE_SHUFFLE_BYTES)(Reduce shuffle bytes)(705622)][(REDUCE_OUTPUT_RECORDS)(Reduce output records)(0)][(SPILLED_RECORDS)(Spilled Records)(212600)][(MAP_OUTPUT_BYTES)(Map output bytes)(4958500)][(MAP_OUTPUT_RECORDS)(Map output records)(528500)][(COMBINE_INPUT_RECORDS)(Combine input records)(528500)][(REDUCE_INPUT_RECORDS)(Reduce input records)(106300)]}" . - - !!FILE=cluster-jt1.red.ygrid.megatron.com_1240335959557_job_200904211745_0002_conf.xml!! 
- -mapred.child.java.opts-server -Xmx640m -Djava.net.preferIPv4Stack=true - diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/truncated-topology-output b/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/truncated-topology-output deleted file mode 100644 index 22b3ea5f22a..00000000000 --- a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/truncated-topology-output +++ /dev/null @@ -1,343 +0,0 @@ -{ - "name" : "", - "children" : [ { - "name" : "194\\.6\\.133\\.192", - "children" : [ { - "name" : "cluster50218\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50232\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.117\\.128", - "children" : [ { - "name" : "cluster1218\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1236\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1200\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1227\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1223\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.129\\.128", - "children" : [ { - "name" : "cluster1872\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1876\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1854\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1869\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1846\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1867\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.134\\.64", - "children" : [ { - "name" : "cluster50286\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.116\\.192", - "children" : [ { - "name" : "cluster1143\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1157\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.128\\.0", - "children" : [ { - "name" : "cluster1586\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.127\\.192", - "children" : [ { - "name" : "cluster1553\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1551\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.133\\.64", - "children" : [ { - "name" : "cluster50130\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50141\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50152\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.117\\.192", - "children" : [ { - "name" : "cluster1259\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1245\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1260\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1261\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.116\\.64", - "children" : [ { - "name" : "cluster1064\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1059\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.126\\.0", - "children" : [ { - "name" : "cluster1288\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1294\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : 
"192\\.30\\.116\\.128", - "children" : [ { - "name" : "cluster1080\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1097\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.135\\.64", - "children" : [ { - "name" : "cluster3071\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3049\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.135\\.128", - "children" : [ { - "name" : "cluster3097\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3098\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3082\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3112\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3104\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.128\\.192", - "children" : [ { - "name" : "cluster50510\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50478\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50505\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50481\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.130\\.0", - "children" : [ { - "name" : "cluster1930\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1928\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1932\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.63\\.0", - "children" : [ { - "name" : "cluster1770\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1779\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1771\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.62\\.128", - "children" : [ { - "name" : "cluster1694\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1718\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.126\\.192", - "children" : [ { - "name" : "cluster1412\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1405\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.126\\.64", - "children" : [ { - "name" : "cluster1357\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.129\\.192", - "children" : [ { - "name" : "cluster1911\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1889\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.63\\.192", - "children" : [ { - "name" : "cluster1004\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1020\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1028\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.132\\.128", - "children" : [ { - "name" : "cluster50021\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.127\\.64", - "children" : [ { - "name" : "cluster1457\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1450\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1466\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.129\\.64", - "children" : [ { - "name" : "cluster1816\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1803\\.secondleveldomain\\.com", - 
"children" : null - } ] - }, { - "name" : "194\\.6\\.134\\.128", - "children" : [ { - "name" : "cluster50359\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.126\\.128", - "children" : [ { - "name" : "cluster1393\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1396\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1369\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.62\\.64", - "children" : [ { - "name" : "cluster1660\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1649\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.129\\.0", - "children" : [ { - "name" : "cluster50543\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.128\\.64", - "children" : [ { - "name" : "cluster1639\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1607\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.62\\.192", - "children" : [ { - "name" : "cluster50427\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50411\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.132\\.192", - "children" : [ { - "name" : "cluster50053\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50057\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50050\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.63\\.64", - "children" : [ { - "name" : "cluster1733\\.secondleveldomain\\.com", - "children" : null - } ] - } ] -} diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/truncated-trace-output b/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/truncated-trace-output deleted file mode 100644 index 5dafe18a795..00000000000 --- a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/truncated-trace-output +++ /dev/null @@ -1,1410 +0,0 @@ -{ - "priority" : "NORMAL", - "user" : "hadoopqa", - "jobName" : null, - "jobID" : "job_200904211745_0002", - "jobProperties" : { - "mapred.child.java.opts" : "-server -Xmx640m -Djava.net.preferIPv4Stack=true" - }, - "mapTasks" : [ { - "startTime" : 1240336753705, - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.126\\.128", "cluster1396\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.126\\.128/cluster1396\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336647215, - "finishTime" : 1240336651127, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000010_0", - "hdfsBytesRead" : 53639, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.128", "cluster1369\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.128", "cluster1854\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.128", "cluster1872\\.secondleveldomain\\.com" ] - } ], - "finishTime" : -1, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : 
"task_200904211745_0002_m_000010", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : null, - "taskType" : "MAP" - }, { - "startTime" : 1240336753750, - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.127\\.192", "cluster1553\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.127\\.192/cluster1553\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336752204, - "finishTime" : 1240336755959, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000017_0", - "hdfsBytesRead" : 53639, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.129\\.64", "cluster1803\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.0", "cluster1930\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.0", "cluster1932\\.secondleveldomain\\.com" ] - } ], - "finishTime" : 1240336818110, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000017", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "startTime" : 1240336753796, - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.0", "cluster1771\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.63\\.0/cluster1771\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336766565, - "finishTime" : 1240336770163, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000019_0", - "hdfsBytesRead" : 53639, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.128\\.192", "cluster50481\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.192", "cluster50505\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.128", "cluster50359\\.secondleveldomain\\.com" ] - } ], - "finishTime" : 1240336818106, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000019", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "startTime" : 1240336753840, - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.0", "cluster1779\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.63\\.0/cluster1779\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336765887, - "finishTime" : 1240336785565, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000005_0", - "hdfsBytesRead" : 148286, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - 
"reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.192", "cluster1245\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.117\\.192", "cluster1261\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.132\\.128", "cluster50021\\.secondleveldomain\\.com" ] - } ], - "finishTime" : 1240336818114, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000005", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "startTime" : 1240336753888, - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.117\\.128", "cluster1218\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.117\\.128/cluster1218\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336755247, - "finishTime" : 1240336763432, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000000_0", - "hdfsBytesRead" : 148286, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.128", "cluster1236\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.192", "cluster1889\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.192", "cluster1911\\.secondleveldomain\\.com" ] - } ], - "finishTime" : 1240336818113, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000000", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "startTime" : 1240336753938, - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.128\\.0", "cluster1586\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.128\\.0/cluster1586\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336758229, - "finishTime" : 1240336761612, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000009_0", - "hdfsBytesRead" : 148286, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.128", "cluster1227\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.117\\.192", "cluster1259\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.117\\.192", "cluster1260\\.secondleveldomain\\.com" ] - } ], - "finishTime" : 1240336818116, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000009", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "startTime" : 1240336753987, - "attempts" : [ { 
- "location" : null, - "hostName" : "cluster1670\\.secondleveldomain\\.com", - "result" : "FAILED", - "startTime" : 1240336754665, - "finishTime" : 1240336777673, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000002_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - }, { - "location" : { - "layers" : [ "194\\.6\\.128\\.0", "cluster1586\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.128\\.0/cluster1586\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336821839, - "finishTime" : 1240336824652, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000002_1", - "hdfsBytesRead" : 148286, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.64", "cluster1357\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.64", "cluster1450\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.64", "cluster1457\\.secondleveldomain\\.com" ] - } ], - "finishTime" : 1240336842768, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000002", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "startTime" : 1240336754030, - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.129\\.128", "cluster1869\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.129\\.128/cluster1869\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336757380, - "finishTime" : 1240336763176, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000004_0", - "hdfsBytesRead" : 148286, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.0", "cluster1294\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.126\\.0", "cluster1288\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.128", "cluster1876\\.secondleveldomain\\.com" ] - } ], - "finishTime" : 1240336818118, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000004", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "startTime" : 1240336754077, - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.0", "cluster1770\\.secondleveldomain\\.com" ] - }, - "hostName" : 
"/192\\.30\\.63\\.0/cluster1770\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336763994, - "finishTime" : 1240336767750, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000007_0", - "hdfsBytesRead" : 148286, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.127\\.64", "cluster1466\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.192", "cluster50218\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.192", "cluster50232\\.secondleveldomain\\.com" ] - } ], - "finishTime" : 1240336818119, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000007", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "startTime" : 1240336754124, - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.129\\.64", "cluster1816\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.129\\.64/cluster1816\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336758341, - "finishTime" : 1240336762025, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000013_0", - "hdfsBytesRead" : 53639, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.134\\.64", "cluster50286\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.64", "cluster3071\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.64", "cluster3049\\.secondleveldomain\\.com" ] - } ], - "finishTime" : 1240336818120, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000013", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "startTime" : 1240336754176, - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.135\\.128", "cluster3097\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.135\\.128/cluster3097\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336768390, - "finishTime" : 1240336771627, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000012_0", - "hdfsBytesRead" : 53639, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.132\\.192", "cluster50057\\.secondleveldomain\\.com" ] - }, { - "layers" : [ 
"194\\.6\\.135\\.128", "cluster3112\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.128", "cluster3082\\.secondleveldomain\\.com" ] - } ], - "finishTime" : 1240336774482, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000012", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "startTime" : 1240336754223, - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.62\\.64", "cluster1660\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.62\\.64/cluster1660\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336754801, - "finishTime" : 1240336758231, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000001_0", - "hdfsBytesRead" : 148286, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.128", "cluster1080\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.116\\.128", "cluster1097\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.0", "cluster50543\\.secondleveldomain\\.com" ] - } ], - "finishTime" : 1240336774476, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000001", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "startTime" : 1240336754270, - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.135\\.128", "cluster3104\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.135\\.128/cluster3104\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336762622, - "finishTime" : 1240336765834, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000015_0", - "hdfsBytesRead" : 53639, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.192", "cluster1412\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.192", "cluster50427\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.192", "cluster50411\\.secondleveldomain\\.com" ] - } ], - "finishTime" : 1240336774468, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000015", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "startTime" : 1240336754319, - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.129\\.128", "cluster1867\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.129\\.128/cluster1867\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336759528, - "finishTime" : 1240336764456, - "shuffleFinished" : -1, - 
"sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000016_0", - "hdfsBytesRead" : 53639, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.128", "cluster1393\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.64", "cluster50130\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.64", "cluster50141\\.secondleveldomain\\.com" ] - } ], - "finishTime" : 1240336817903, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000016", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "startTime" : 1240336754366, - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.62\\.64", "cluster1649\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.62\\.64/cluster1649\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336759909, - "finishTime" : 1240336763727, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000018_0", - "hdfsBytesRead" : 53639, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.128", "cluster1223\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.117\\.128", "cluster1200\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.64", "cluster50152\\.secondleveldomain\\.com" ] - } ], - "finishTime" : 1240336818420, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000018", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "startTime" : 1240336754409, - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.127\\.192", "cluster1551\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.127\\.192/cluster1551\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336755433, - "finishTime" : 1240336758966, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000003_0", - "hdfsBytesRead" : 148286, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.63\\.64", "cluster1733\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.64", "cluster1607\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.64", "cluster1639\\.secondleveldomain\\.com" ] - } ], - "finishTime" : 
1240336816560, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000003", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "startTime" : 1240336754452, - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.126\\.192", "cluster1405\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.126\\.192/cluster1405\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336747936, - "finishTime" : 1240336751464, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000006_0", - "hdfsBytesRead" : 148286, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.64", "cluster1064\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.192", "cluster50510\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.192", "cluster50478\\.secondleveldomain\\.com" ] - } ], - "finishTime" : 1240336817330, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000006", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "startTime" : 1240336754500, - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.130\\.0", "cluster1928\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.130\\.0/cluster1928\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336756161, - "finishTime" : 1240336760016, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000014_0", - "hdfsBytesRead" : 53639, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.64", "cluster1059\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.132\\.192", "cluster50053\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.132\\.192", "cluster50050\\.secondleveldomain\\.com" ] - } ], - "finishTime" : 1240336824060, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000014", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "startTime" : 1240336754548, - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.129\\.128", "cluster1846\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.129\\.128/cluster1846\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336758220, - "finishTime" : 1240336762903, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000008_0", - "hdfsBytesRead" : 148286, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - 
"mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.192", "cluster1157\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.128", "cluster1718\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.128", "cluster1694\\.secondleveldomain\\.com" ] - } ], - "finishTime" : 1240336824556, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000008", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "startTime" : 1240336754596, - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.135\\.128", "cluster3098\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.135\\.128/cluster3098\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336744261, - "finishTime" : 1240336747517, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000011_0", - "hdfsBytesRead" : 53639, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.192", "cluster1143\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.192", "cluster1004\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.192", "cluster1020\\.secondleveldomain\\.com" ] - } ], - "finishTime" : 1240336776031, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000011", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - } ], - "reduceTasks" : [ { - "startTime" : 1240336774548, - "attempts" : [ { - "location" : null, - "hostName" : "/192\\.30\\.63\\.0/cluster1771\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336786769, - "finishTime" : 1240336870553, - "shuffleFinished" : 1240336859759, - "sortFinished" : 1240336860092, - "attemptID" : "attempt_200904211745_0002_r_000000_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 56630, - "fileBytesRead" : 71200, - "fileBytesWritten" : 71200, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 0, - "reduceInputRecords" : 106300, - "reduceShuffleBytes" : 705622, - "reduceOutputRecords" : 0, - "spilledRecords" : 106300, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "finishTime" : 1240336873648, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_r_000000", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - } ], - "otherTasks" : [ { - "startTime" : 1240336739508, - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.192", "cluster1028\\.secondleveldomain\\.com" ] - }, - "hostName" : 
"/192\\.30\\.63\\.192/cluster1028\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336739565, - "finishTime" : 1240336742217, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000021_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 0, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "finishTime" : 1240336752812, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000021", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "SETUP" - }, { - "startTime" : 1240336873651, - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.0", "cluster1771\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.63\\.0/cluster1771\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336885885, - "finishTime" : 1240336887642, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000020_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 0, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "finishTime" : 1240336889658, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000020", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "CLEANUP" - } ], - "finishTime" : 1240336889659, - "computonsPerMapInputByte" : -1, - "computonsPerMapOutputByte" : -1, - "computonsPerReduceInputByte" : -1, - "computonsPerReduceOutputByte" : -1, - "submitTime" : 1240335962848, - "launchTime" : 1240335964437, - "heapMegabytes" : 640, - "totalMaps" : 20, - "totalReduces" : 1, - "outcome" : "SUCCESS", - "jobtype" : "JAVA", - "directDependantJobs" : [ ], - "successfulMapAttemptCDFs" : [ { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 8185, - "minimum" : 3237, - "rankings" : [ { - "relativeRanking" : 0.05, - "datum" : 3237 - }, { - "relativeRanking" : 0.1, - "datum" : 3237 - }, { - "relativeRanking" : 0.15, - "datum" : 3237 - }, { - "relativeRanking" : 0.2, - "datum" : 3237 - }, { - "relativeRanking" : 0.25, - "datum" : 3237 - }, { - "relativeRanking" : 0.3, - "datum" : 3237 - }, { - "relativeRanking" : 0.35, - "datum" : 3237 - }, { - "relativeRanking" : 0.4, - "datum" : 3237 - }, { - "relativeRanking" : 0.45, - "datum" : 3237 - }, { - "relativeRanking" : 0.5, - "datum" : 3912 - }, { - "relativeRanking" : 0.55, - "datum" : 3912 - }, { - "relativeRanking" : 0.6, - "datum" : 3912 - }, { - "relativeRanking" : 0.65, - "datum" : 3912 - }, { - "relativeRanking" : 0.7, - "datum" : 3912 - }, { - "relativeRanking" : 0.75, - "datum" : 5796 - }, { - "relativeRanking" : 0.8, - "datum" : 5796 - }, { - "relativeRanking" : 0.85, - "datum" : 5796 - }, { - "relativeRanking" : 0.9, - "datum" 
: 5796 - }, { - "relativeRanking" : 0.95, - "datum" : 5796 - } ], - "numberValues" : 4 - }, { - "maximum" : 19678, - "minimum" : 2813, - "rankings" : [ { - "relativeRanking" : 0.05, - "datum" : 2813 - }, { - "relativeRanking" : 0.1, - "datum" : 2813 - }, { - "relativeRanking" : 0.15, - "datum" : 3212 - }, { - "relativeRanking" : 0.2, - "datum" : 3256 - }, { - "relativeRanking" : 0.25, - "datum" : 3383 - }, { - "relativeRanking" : 0.3, - "datum" : 3383 - }, { - "relativeRanking" : 0.35, - "datum" : 3430 - }, { - "relativeRanking" : 0.4, - "datum" : 3528 - }, { - "relativeRanking" : 0.45, - "datum" : 3533 - }, { - "relativeRanking" : 0.5, - "datum" : 3598 - }, { - "relativeRanking" : 0.55, - "datum" : 3598 - }, { - "relativeRanking" : 0.6, - "datum" : 3684 - }, { - "relativeRanking" : 0.65, - "datum" : 3755 - }, { - "relativeRanking" : 0.7, - "datum" : 3756 - }, { - "relativeRanking" : 0.75, - "datum" : 3818 - }, { - "relativeRanking" : 0.8, - "datum" : 3818 - }, { - "relativeRanking" : 0.85, - "datum" : 3855 - }, { - "relativeRanking" : 0.9, - "datum" : 4683 - }, { - "relativeRanking" : 0.95, - "datum" : 4928 - } ], - "numberValues" : 16 - }, { - "maximum" : 2652, - "minimum" : 1757, - "rankings" : [ { - "relativeRanking" : 0.05, - "datum" : 1757 - }, { - "relativeRanking" : 0.1, - "datum" : 1757 - }, { - "relativeRanking" : 0.15, - "datum" : 1757 - }, { - "relativeRanking" : 0.2, - "datum" : 1757 - }, { - "relativeRanking" : 0.25, - "datum" : 1757 - }, { - "relativeRanking" : 0.3, - "datum" : 1757 - }, { - "relativeRanking" : 0.35, - "datum" : 1757 - }, { - "relativeRanking" : 0.4, - "datum" : 1757 - }, { - "relativeRanking" : 0.45, - "datum" : 1757 - }, { - "relativeRanking" : 0.5, - "datum" : 1757 - }, { - "relativeRanking" : 0.55, - "datum" : 1757 - }, { - "relativeRanking" : 0.6, - "datum" : 1757 - }, { - "relativeRanking" : 0.65, - "datum" : 1757 - }, { - "relativeRanking" : 0.7, - "datum" : 1757 - }, { - "relativeRanking" : 0.75, - "datum" : 1757 - }, { - "relativeRanking" : 0.8, - "datum" : 1757 - }, { - "relativeRanking" : 0.85, - "datum" : 1757 - }, { - "relativeRanking" : 0.9, - "datum" : 1757 - }, { - "relativeRanking" : 0.95, - "datum" : 1757 - } ], - "numberValues" : 2 - } ], - "failedMapAttemptCDFs" : [ { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 23008, - "minimum" : 23008, - "rankings" : [ { - "relativeRanking" : 0.05, - "datum" : 23008 - }, { - "relativeRanking" : 0.1, - "datum" : 23008 - }, { - "relativeRanking" : 0.15, - "datum" : 23008 - }, { - "relativeRanking" : 0.2, - "datum" : 23008 - }, { - "relativeRanking" : 0.25, - "datum" : 23008 - }, { - "relativeRanking" : 0.3, - "datum" : 23008 - }, { - "relativeRanking" : 0.35, - "datum" : 23008 - }, { - "relativeRanking" : 0.4, - "datum" : 23008 - }, { - "relativeRanking" : 0.45, - "datum" : 23008 - }, { - "relativeRanking" : 0.5, - "datum" : 23008 - }, { - "relativeRanking" : 0.55, - "datum" : 23008 - }, { - "relativeRanking" : 0.6, - "datum" : 23008 - }, { - "relativeRanking" : 0.65, - "datum" : 23008 - }, { - "relativeRanking" : 0.7, - "datum" : 23008 - }, { - "relativeRanking" : 0.75, - "datum" : 23008 - }, { - "relativeRanking" : 0.8, - "datum" : 23008 - }, { - "relativeRanking" : 0.85, - "datum" 
: 23008 - }, { - "relativeRanking" : 0.9, - "datum" : 23008 - }, { - "relativeRanking" : 0.95, - "datum" : 23008 - } ], - "numberValues" : 1 - } ], - "successfulReduceAttemptCDF" : { - "maximum" : 83784, - "minimum" : 83784, - "rankings" : [ { - "relativeRanking" : 0.05, - "datum" : 83784 - }, { - "relativeRanking" : 0.1, - "datum" : 83784 - }, { - "relativeRanking" : 0.15, - "datum" : 83784 - }, { - "relativeRanking" : 0.2, - "datum" : 83784 - }, { - "relativeRanking" : 0.25, - "datum" : 83784 - }, { - "relativeRanking" : 0.3, - "datum" : 83784 - }, { - "relativeRanking" : 0.35, - "datum" : 83784 - }, { - "relativeRanking" : 0.4, - "datum" : 83784 - }, { - "relativeRanking" : 0.45, - "datum" : 83784 - }, { - "relativeRanking" : 0.5, - "datum" : 83784 - }, { - "relativeRanking" : 0.55, - "datum" : 83784 - }, { - "relativeRanking" : 0.6, - "datum" : 83784 - }, { - "relativeRanking" : 0.65, - "datum" : 83784 - }, { - "relativeRanking" : 0.7, - "datum" : 83784 - }, { - "relativeRanking" : 0.75, - "datum" : 83784 - }, { - "relativeRanking" : 0.8, - "datum" : 83784 - }, { - "relativeRanking" : 0.85, - "datum" : 83784 - }, { - "relativeRanking" : 0.9, - "datum" : 83784 - }, { - "relativeRanking" : 0.95, - "datum" : 83784 - } ], - "numberValues" : 1 - }, - "failedReduceAttemptCDF" : { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, - "mapperTriesToSucceed" : [ 0.9565217391304348, 0.043478260869565216 ], - "failedMapperFraction" : 0.0, - "relativeTime" : 0, - "queue" : null, - "clusterMapMB" : -1, - "clusterReduceMB" : -1, - "jobMapMB" : -1, - "jobReduceMB" : -1 -} diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/v20-resource-usage-log.gz b/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/v20-resource-usage-log.gz deleted file mode 100644 index 6d0dbeb0bf9..00000000000 Binary files a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/v20-resource-usage-log.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/v20-single-input-log.gz b/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/v20-single-input-log.gz deleted file mode 100644 index 96c66aaaefd..00000000000 Binary files a/hadoop-mapreduce-project/src/test/tools/data/rumen/small-trace-test/v20-single-input-log.gz and /dev/null differ diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/zombie/input-topology.json b/hadoop-mapreduce-project/src/test/tools/data/rumen/zombie/input-topology.json deleted file mode 100644 index af9c537db7d..00000000000 --- a/hadoop-mapreduce-project/src/test/tools/data/rumen/zombie/input-topology.json +++ /dev/null @@ -1,1693 +0,0 @@ -{ - "name" : "", - "children" : [ { - "name" : "194\\.6\\.133\\.192", - "children" : [ { - "name" : "cluster50213\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50226\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50228\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50217\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50214\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50231\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50232\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50204\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50206\\.secondleveldomain\\.com", - 
"children" : null - }, { - "name" : "cluster50203\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50205\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50210\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50208\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50218\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50225\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.117\\.128", - "children" : [ { - "name" : "cluster1209\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1205\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1235\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1239\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1200\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1227\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1212\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1223\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1217\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1207\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1228\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1218\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1221\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1215\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1226\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1236\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.134\\.64", - "children" : [ { - "name" : "cluster50317\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50292\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50291\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50294\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50285\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50300\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50281\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50311\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50297\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50319\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50286\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50307\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50296\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50315\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50316\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.129\\.128", - "children" : [ { - "name" : "cluster1859\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1877\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1871\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1876\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1854\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : 
"cluster1841\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1858\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1843\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1857\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1842\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1872\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1869\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1853\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1846\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1867\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.130\\.64", - "children" : [ { - "name" : "cluster1976\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1969\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1961\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1963\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1968\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1979\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1967\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1989\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1970\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1999\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.116\\.192", - "children" : [ { - "name" : "cluster1150\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1127\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1139\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1154\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1138\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1137\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1130\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1151\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1131\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1141\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1124\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1158\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1140\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1144\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1136\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1157\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1143\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.128\\.0", - "children" : [ { - "name" : "cluster1592\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1567\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1594\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1586\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1561\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1585\\.secondleveldomain\\.com", - "children" : null 
- }, { - "name" : "cluster1562\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1581\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1566\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1598\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1568\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1560\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1574\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1573\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1583\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1579\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.134\\.192", - "children" : [ { - "name" : "cluster50364\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50372\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50365\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50377\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50368\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50396\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50375\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50389\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50382\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.127\\.192", - "children" : [ { - "name" : "cluster1533\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1531\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1557\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1555\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1534\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1553\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1550\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1540\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1538\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1520\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1559\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1535\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1525\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1529\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1551\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.133\\.64", - "children" : [ { - "name" : "cluster50124\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50159\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50144\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50145\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50133\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50120\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50130\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50142\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : 
"cluster50147\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50156\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50125\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50141\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50152\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.117\\.192", - "children" : [ { - "name" : "cluster1250\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1276\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1248\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1246\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1251\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1259\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1261\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1260\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1243\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1256\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1272\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1274\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1245\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1249\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.116\\.64", - "children" : [ { - "name" : "cluster1041\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1075\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1042\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1078\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1072\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1053\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1056\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1064\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1055\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1070\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1061\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1059\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1040\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.133\\.128", - "children" : [ { - "name" : "cluster50171\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50195\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50161\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50191\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50174\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50185\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50177\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50166\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50170\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50179\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.126\\.0", - "children" : 
[ { - "name" : "cluster1283\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1299\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1281\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1288\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1302\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1294\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1289\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1315\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1305\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1316\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.116\\.128", - "children" : [ { - "name" : "cluster1107\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1118\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1080\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1093\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1102\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1104\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1097\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1087\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1095\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1110\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.135\\.64", - "children" : [ { - "name" : "cluster3071\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3079\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3068\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3057\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3058\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3070\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3054\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3077\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3049\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3063\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3075\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3065\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3076\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3061\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3073\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3055\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.128\\.128", - "children" : [ { - "name" : "cluster50468\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50476\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50440\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50473\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50477\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50460\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : 
"cluster50475\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50459\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50447\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50464\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50441\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50444\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.135\\.128", - "children" : [ { - "name" : "cluster3097\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3089\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3111\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3093\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3099\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3106\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3108\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3112\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3085\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3094\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3103\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3098\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3082\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3104\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster3114\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.128\\.192", - "children" : [ { - "name" : "cluster50485\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50493\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50510\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50494\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50484\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50481\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50490\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50501\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50478\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50491\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50505\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50488\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50509\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50513\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.133\\.0", - "children" : [ { - "name" : "cluster50085\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50117\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50113\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50101\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50108\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50090\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.63\\.0", - "children" : [ { - "name" : "cluster1789\\.secondleveldomain\\.com", - "children" : 
null - }, { - "name" : "cluster1777\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1785\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1770\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1793\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1779\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1788\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1776\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1773\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1798\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1762\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1772\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1778\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1782\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1774\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1781\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1760\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1796\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1775\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1768\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1786\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1771\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.130\\.0", - "children" : [ { - "name" : "cluster1959\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1957\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1931\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1920\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1938\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1925\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1932\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1927\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1933\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1930\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1928\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1924\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1953\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1936\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.62\\.128", - "children" : [ { - "name" : "cluster1717\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1708\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1707\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1690\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1714\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1683\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1703\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1702\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : 
"cluster1694\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1700\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1711\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1713\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1718\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.126\\.192", - "children" : [ { - "name" : "cluster1418\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1429\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1420\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1412\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1400\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1415\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1437\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1405\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1427\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.126\\.64", - "children" : [ { - "name" : "cluster1334\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1332\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1346\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1350\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1328\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1333\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1321\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1358\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1357\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1356\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.129\\.192", - "children" : [ { - "name" : "cluster1914\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1883\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1896\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1911\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1913\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1915\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1903\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1906\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1900\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1891\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1889\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1907\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.63\\.192", - "children" : [ { - "name" : "cluster1006\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1035\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1018\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1026\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1020\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1021\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : 
"cluster1027\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1031\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1036\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1032\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1029\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1004\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1011\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1008\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1025\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1002\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1030\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1019\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1017\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1028\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.127\\.64", - "children" : [ { - "name" : "cluster1445\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1470\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1449\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1462\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1450\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1454\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1466\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1465\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1474\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1444\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1448\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1463\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1457\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1447\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1455\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1442\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1479\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1467\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1446\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.132\\.128", - "children" : [ { - "name" : "cluster50034\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50011\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50023\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50025\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50021\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.129\\.64", - "children" : [ { - "name" : "cluster1800\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1809\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1816\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1819\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1813\\.secondleveldomain\\.com", - "children" : 
null - }, { - "name" : "cluster1806\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1803\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1835\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1822\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1807\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1823\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1832\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.134\\.128", - "children" : [ { - "name" : "cluster50359\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50326\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50348\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50325\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50342\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50352\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.126\\.128", - "children" : [ { - "name" : "cluster1383\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1378\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1393\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1395\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1396\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1373\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1388\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1379\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1370\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1368\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1371\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1377\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1369\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.62\\.64", - "children" : [ { - "name" : "cluster1643\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1660\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1652\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1672\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1654\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1648\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1657\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1655\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1641\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1669\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1662\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1649\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1666\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1678\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1650\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1679\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.127\\.128", 
- "children" : [ { - "name" : "cluster1482\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1517\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1491\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1498\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1490\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1504\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1515\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1480\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1518\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1493\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1503\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.129\\.0", - "children" : [ { - "name" : "cluster50520\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50539\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50530\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50526\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50543\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.62\\.192", - "children" : [ { - "name" : "cluster50407\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50409\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50423\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50427\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50429\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50416\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50420\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50418\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50411\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50425\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.134\\.0", - "children" : [ { - "name" : "cluster50275\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50254\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50272\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50274\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50245\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50276\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50243\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50252\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50263\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50279\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50273\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50261\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50260\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.128\\.64", - "children" : [ { - "name" : "cluster1639\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1615\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : 
"cluster1628\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1635\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1611\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1607\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1629\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1623\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1633\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1610\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1632\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1614\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1636\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1600\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1626\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1602\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "194\\.6\\.132\\.192", - "children" : [ { - "name" : "cluster50047\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50055\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50051\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50059\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50050\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50076\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50077\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50046\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50053\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50057\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50072\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50044\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50043\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster50058\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.117\\.64", - "children" : [ { - "name" : "cluster1193\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1175\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1185\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1171\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1174\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1167\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1180\\.secondleveldomain\\.com", - "children" : null - } ] - }, { - "name" : "192\\.30\\.63\\.64", - "children" : [ { - "name" : "cluster1755\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1757\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1725\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1727\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1736\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1722\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1752\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1759\\.secondleveldomain\\.com", - 
"children" : null - }, { - "name" : "cluster1758\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1732\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1743\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1731\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1733\\.secondleveldomain\\.com", - "children" : null - }, { - "name" : "cluster1751\\.secondleveldomain\\.com", - "children" : null - } ] - } ] -} diff --git a/hadoop-mapreduce-project/src/test/tools/data/rumen/zombie/input-trace.json b/hadoop-mapreduce-project/src/test/tools/data/rumen/zombie/input-trace.json deleted file mode 100644 index 4d820b480b7..00000000000 --- a/hadoop-mapreduce-project/src/test/tools/data/rumen/zombie/input-trace.json +++ /dev/null @@ -1,11364 +0,0 @@ -{ - "priority" : "NORMAL", - "jobID" : "job_200904211745_0001", - "mapTasks" : [ ], - "reduceTasks" : [ ], - "otherTasks" : [ { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.192", "cluster1020\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.63\\.192/cluster1020\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336740671, - "finishTime" : 1240336743094, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0001_m_000001_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 0, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336739206, - "finishTime" : 1240336760537, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0001_m_000001", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "CLEANUP" - } ], - "finishTime" : 1240336760540, - "user" : "geek1", - "jobName" : null, - "computonsPerMapInputByte" : -1, - "computonsPerMapOutputByte" : -1, - "computonsPerReduceInputByte" : -1, - "computonsPerReduceOutputByte" : -1, - "submitTime" : 1240335960685, - "launchTime" : 1240335961050, - "heapMegabytes" : 1024, - "totalMaps" : 1, - "totalReduces" : 1, - "outcome" : "KILLED", - "jobtype" : "JAVA", - "directDependantJobs" : [ ], - "successfulMapAttemptCDFs" : [ { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 2423, - "minimum" : 2423, - "rankings" : [ { - "relativeRanking" : 0.05, - "datum" : 2423 - }, { - "relativeRanking" : 0.1, - "datum" : 2423 - }, { - "relativeRanking" : 0.15, - "datum" : 2423 - }, { - "relativeRanking" : 0.2, - "datum" : 2423 - }, { - "relativeRanking" : 0.25, - "datum" : 2423 - }, { - "relativeRanking" : 0.3, - "datum" : 2423 - }, { - "relativeRanking" : 0.35, - "datum" : 2423 - }, { - "relativeRanking" : 0.4, - "datum" : 2423 - }, { - "relativeRanking" : 0.45, - "datum" : 2423 - }, { - "relativeRanking" : 0.5, - "datum" : 2423 - }, { - "relativeRanking" : 0.55, - "datum" : 2423 - }, { - "relativeRanking" : 0.6, - 
"datum" : 2423 - }, { - "relativeRanking" : 0.65, - "datum" : 2423 - }, { - "relativeRanking" : 0.7, - "datum" : 2423 - }, { - "relativeRanking" : 0.75, - "datum" : 2423 - }, { - "relativeRanking" : 0.8, - "datum" : 2423 - }, { - "relativeRanking" : 0.85, - "datum" : 2423 - }, { - "relativeRanking" : 0.9, - "datum" : 2423 - }, { - "relativeRanking" : 0.95, - "datum" : 2423 - } ], - "numberValues" : 1 - } ], - "failedMapAttemptCDFs" : [ { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - } ], - "successfulReduceAttemptCDF" : { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, - "failedReduceAttemptCDF" : { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, - "mapperTriesToSucceed" : [ 1.0 ], - "failedMapperFraction" : 0.0, - "relativeTime" : 0, - "queue" : null, - "clusterMapMB" : -1, - "clusterReduceMB" : -1, - "jobMapMB" : -1, - "jobReduceMB" : -1 -} - { - "priority" : "NORMAL", - "jobID" : "job_200904211745_0002", - "mapTasks" : [ { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.126\\.128", "cluster1396\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.126\\.128/cluster1396\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336647215, - "finishTime" : 1240336651127, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000010_0", - "hdfsBytesRead" : 53639, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.128", "cluster1369\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.128", "cluster1854\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.128", "cluster1872\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336753705, - "finishTime" : 1240336818108, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000010", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.127\\.192", "cluster1553\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.127\\.192/cluster1553\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336752204, - "finishTime" : 1240336755959, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000017_0", - "hdfsBytesRead" : 53639, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - 
"reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.129\\.64", "cluster1803\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.0", "cluster1930\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.0", "cluster1932\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336753750, - "finishTime" : 1240336818110, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000017", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.0", "cluster1771\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.63\\.0/cluster1771\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336766565, - "finishTime" : 1240336770163, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000019_0", - "hdfsBytesRead" : 53639, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.128\\.192", "cluster50481\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.192", "cluster50505\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.128", "cluster50359\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336753796, - "finishTime" : 1240336818106, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000019", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.0", "cluster1779\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.63\\.0/cluster1779\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336765887, - "finishTime" : 1240336785565, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000005_0", - "hdfsBytesRead" : 148286, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.192", "cluster1245\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.117\\.192", "cluster1261\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.132\\.128", "cluster50021\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336753840, - "finishTime" : 1240336818114, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000005", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ 
"192\\.30\\.117\\.128", "cluster1218\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.117\\.128/cluster1218\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336755247, - "finishTime" : 1240336763432, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000000_0", - "hdfsBytesRead" : 148286, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.128", "cluster1236\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.192", "cluster1889\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.192", "cluster1911\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336753888, - "finishTime" : 1240336818113, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000000", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.128\\.0", "cluster1586\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.128\\.0/cluster1586\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336758229, - "finishTime" : 1240336761612, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000009_0", - "hdfsBytesRead" : 148286, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.128", "cluster1227\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.117\\.192", "cluster1259\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.117\\.192", "cluster1260\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336753938, - "finishTime" : 1240336818116, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000009", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "cluster1670\\.secondleveldomain\\.com", - "result" : "FAILED", - "startTime" : 1240336754665, - "finishTime" : 1240336777673, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000002_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - }, { - "location" : { - "layers" : [ "194\\.6\\.128\\.0", "cluster1586\\.secondleveldomain\\.com" ] - }, - "hostName" : 
"/194\\.6\\.128\\.0/cluster1586\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336821839, - "finishTime" : 1240336824652, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000002_1", - "hdfsBytesRead" : 148286, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.64", "cluster1357\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.64", "cluster1450\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.64", "cluster1457\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336753987, - "finishTime" : 1240336842768, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000002", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.129\\.128", "cluster1869\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.129\\.128/cluster1869\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336757380, - "finishTime" : 1240336763176, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000004_0", - "hdfsBytesRead" : 148286, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.0", "cluster1294\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.126\\.0", "cluster1288\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.128", "cluster1876\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336754030, - "finishTime" : 1240336818118, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000004", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.0", "cluster1770\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.63\\.0/cluster1770\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336763994, - "finishTime" : 1240336767750, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000007_0", - "hdfsBytesRead" : 148286, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.127\\.64", "cluster1466\\.secondleveldomain\\.com" ] - }, { - "layers" : [ 
"194\\.6\\.133\\.192", "cluster50218\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.192", "cluster50232\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336754077, - "finishTime" : 1240336818119, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000007", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.129\\.64", "cluster1816\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.129\\.64/cluster1816\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336758341, - "finishTime" : 1240336762025, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000013_0", - "hdfsBytesRead" : 53639, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.134\\.64", "cluster50286\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.64", "cluster3071\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.64", "cluster3049\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336754124, - "finishTime" : 1240336818120, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000013", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.135\\.128", "cluster3097\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.135\\.128/cluster3097\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336768390, - "finishTime" : 1240336771627, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000012_0", - "hdfsBytesRead" : 53639, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.132\\.192", "cluster50057\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.128", "cluster3112\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.128", "cluster3082\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336754176, - "finishTime" : 1240336774482, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000012", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.62\\.64", "cluster1660\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.62\\.64/cluster1660\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336754801, - "finishTime" : 1240336758231, - "shuffleFinished" : -1, - "sortFinished" : 
-1, - "attemptID" : "attempt_200904211745_0002_m_000001_0", - "hdfsBytesRead" : 148286, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.128", "cluster1080\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.116\\.128", "cluster1097\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.0", "cluster50543\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336754223, - "finishTime" : 1240336774476, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000001", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.135\\.128", "cluster3104\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.135\\.128/cluster3104\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336762622, - "finishTime" : 1240336765834, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000015_0", - "hdfsBytesRead" : 53639, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.192", "cluster1412\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.192", "cluster50427\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.192", "cluster50411\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336754270, - "finishTime" : 1240336774468, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000015", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.129\\.128", "cluster1867\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.129\\.128/cluster1867\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336759528, - "finishTime" : 1240336764456, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000016_0", - "hdfsBytesRead" : 53639, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.128", "cluster1393\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.64", "cluster50130\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.64", "cluster50141\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336754319, 
- "finishTime" : 1240336817903, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000016", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.62\\.64", "cluster1649\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.62\\.64/cluster1649\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336759909, - "finishTime" : 1240336763727, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000018_0", - "hdfsBytesRead" : 53639, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.128", "cluster1223\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.117\\.128", "cluster1200\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.64", "cluster50152\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336754366, - "finishTime" : 1240336818420, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000018", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.127\\.192", "cluster1551\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.127\\.192/cluster1551\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336755433, - "finishTime" : 1240336758966, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000003_0", - "hdfsBytesRead" : 148286, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.63\\.64", "cluster1733\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.64", "cluster1607\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.64", "cluster1639\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336754409, - "finishTime" : 1240336816560, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000003", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.126\\.192", "cluster1405\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.126\\.192/cluster1405\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336747936, - "finishTime" : 1240336751464, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000006_0", - "hdfsBytesRead" : 148286, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - 
"mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.64", "cluster1064\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.192", "cluster50510\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.192", "cluster50478\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336754452, - "finishTime" : 1240336817330, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000006", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.130\\.0", "cluster1928\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.130\\.0/cluster1928\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336756161, - "finishTime" : 1240336760016, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000014_0", - "hdfsBytesRead" : 53639, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.64", "cluster1059\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.132\\.192", "cluster50053\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.132\\.192", "cluster50050\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336754500, - "finishTime" : 1240336824060, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000014", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.129\\.128", "cluster1846\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.129\\.128/cluster1846\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336758220, - "finishTime" : 1240336762903, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000008_0", - "hdfsBytesRead" : 148286, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.192", "cluster1157\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.128", "cluster1718\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.128", "cluster1694\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336754548, - "finishTime" : 1240336824556, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000008", - 
"numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.135\\.128", "cluster3098\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.135\\.128/cluster3098\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336744261, - "finishTime" : 1240336747517, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000011_0", - "hdfsBytesRead" : 53639, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 37170, - "mapInputRecords" : 3601, - "mapOutputBytes" : 247925, - "mapOutputRecords" : 26425, - "combineInputRecords" : 26425, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5315, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.192", "cluster1143\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.192", "cluster1004\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.192", "cluster1020\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336754596, - "finishTime" : 1240336776031, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000011", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - } ], - "reduceTasks" : [ { - "attempts" : [ { - "location" : null, - "hostName" : "/192\\.30\\.63\\.0/cluster1771\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336786769, - "finishTime" : 1240336870553, - "shuffleFinished" : 1240336859759, - "sortFinished" : 1240336860092, - "attemptID" : "attempt_200904211745_0002_r_000000_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 56630, - "fileBytesRead" : 71200, - "fileBytesWritten" : 71200, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 0, - "reduceInputRecords" : 106300, - "reduceShuffleBytes" : 705622, - "reduceOutputRecords" : 0, - "spilledRecords" : 106300, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336774548, - "finishTime" : 1240336873648, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_r_000000", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - } ], - "otherTasks" : [ { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.192", "cluster1028\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.63\\.192/cluster1028\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336739565, - "finishTime" : 1240336742217, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000021_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 0, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336739508, - "finishTime" : 1240336752812, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : 
"task_200904211745_0002_m_000021", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "SETUP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.0", "cluster1771\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.63\\.0/cluster1771\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336885885, - "finishTime" : 1240336887642, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0002_m_000020_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 0, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336873651, - "finishTime" : 1240336889658, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0002_m_000020", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "CLEANUP" - } ], - "finishTime" : 1240336889659, - "user" : "hadoopqa", - "jobName" : null, - "computonsPerMapInputByte" : -1, - "computonsPerMapOutputByte" : -1, - "computonsPerReduceInputByte" : -1, - "computonsPerReduceOutputByte" : -1, - "submitTime" : 1240335962848, - "launchTime" : 1240335964437, - "heapMegabytes" : 640, - "totalMaps" : 20, - "totalReduces" : 1, - "outcome" : "SUCCESS", - "jobtype" : "JAVA", - "directDependantJobs" : [ ], - "successfulMapAttemptCDFs" : [ { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 8185, - "minimum" : 3237, - "rankings" : [ { - "relativeRanking" : 0.05, - "datum" : 3237 - }, { - "relativeRanking" : 0.1, - "datum" : 3237 - }, { - "relativeRanking" : 0.15, - "datum" : 3237 - }, { - "relativeRanking" : 0.2, - "datum" : 3237 - }, { - "relativeRanking" : 0.25, - "datum" : 3237 - }, { - "relativeRanking" : 0.3, - "datum" : 3237 - }, { - "relativeRanking" : 0.35, - "datum" : 3237 - }, { - "relativeRanking" : 0.4, - "datum" : 3237 - }, { - "relativeRanking" : 0.45, - "datum" : 3237 - }, { - "relativeRanking" : 0.5, - "datum" : 3912 - }, { - "relativeRanking" : 0.55, - "datum" : 3912 - }, { - "relativeRanking" : 0.6, - "datum" : 3912 - }, { - "relativeRanking" : 0.65, - "datum" : 3912 - }, { - "relativeRanking" : 0.7, - "datum" : 3912 - }, { - "relativeRanking" : 0.75, - "datum" : 5796 - }, { - "relativeRanking" : 0.8, - "datum" : 5796 - }, { - "relativeRanking" : 0.85, - "datum" : 5796 - }, { - "relativeRanking" : 0.9, - "datum" : 5796 - }, { - "relativeRanking" : 0.95, - "datum" : 5796 - } ], - "numberValues" : 4 - }, { - "maximum" : 19678, - "minimum" : 2813, - "rankings" : [ { - "relativeRanking" : 0.05, - "datum" : 2813 - }, { - "relativeRanking" : 0.1, - "datum" : 2813 - }, { - "relativeRanking" : 0.15, - "datum" : 3212 - }, { - "relativeRanking" : 0.2, - "datum" : 3256 - }, { - "relativeRanking" : 0.25, - "datum" : 3383 - }, { - "relativeRanking" : 0.3, - "datum" : 3383 - }, { - "relativeRanking" : 0.35, - "datum" : 3430 - }, { - "relativeRanking" : 0.4, - "datum" : 3528 - }, { - "relativeRanking" : 0.45, - "datum" : 3533 - }, { - "relativeRanking" : 0.5, - "datum" : 3598 - }, { - "relativeRanking" : 0.55, - "datum" : 3598 - }, { - "relativeRanking" : 0.6, - 
"datum" : 3684 - }, { - "relativeRanking" : 0.65, - "datum" : 3755 - }, { - "relativeRanking" : 0.7, - "datum" : 3756 - }, { - "relativeRanking" : 0.75, - "datum" : 3818 - }, { - "relativeRanking" : 0.8, - "datum" : 3818 - }, { - "relativeRanking" : 0.85, - "datum" : 3855 - }, { - "relativeRanking" : 0.9, - "datum" : 4683 - }, { - "relativeRanking" : 0.95, - "datum" : 4928 - } ], - "numberValues" : 16 - }, { - "maximum" : 2652, - "minimum" : 1757, - "rankings" : [ { - "relativeRanking" : 0.05, - "datum" : 1757 - }, { - "relativeRanking" : 0.1, - "datum" : 1757 - }, { - "relativeRanking" : 0.15, - "datum" : 1757 - }, { - "relativeRanking" : 0.2, - "datum" : 1757 - }, { - "relativeRanking" : 0.25, - "datum" : 1757 - }, { - "relativeRanking" : 0.3, - "datum" : 1757 - }, { - "relativeRanking" : 0.35, - "datum" : 1757 - }, { - "relativeRanking" : 0.4, - "datum" : 1757 - }, { - "relativeRanking" : 0.45, - "datum" : 1757 - }, { - "relativeRanking" : 0.5, - "datum" : 1757 - }, { - "relativeRanking" : 0.55, - "datum" : 1757 - }, { - "relativeRanking" : 0.6, - "datum" : 1757 - }, { - "relativeRanking" : 0.65, - "datum" : 1757 - }, { - "relativeRanking" : 0.7, - "datum" : 1757 - }, { - "relativeRanking" : 0.75, - "datum" : 1757 - }, { - "relativeRanking" : 0.8, - "datum" : 1757 - }, { - "relativeRanking" : 0.85, - "datum" : 1757 - }, { - "relativeRanking" : 0.9, - "datum" : 1757 - }, { - "relativeRanking" : 0.95, - "datum" : 1757 - } ], - "numberValues" : 2 - } ], - "failedMapAttemptCDFs" : [ { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 23008, - "minimum" : 23008, - "rankings" : [ { - "relativeRanking" : 0.05, - "datum" : 23008 - }, { - "relativeRanking" : 0.1, - "datum" : 23008 - }, { - "relativeRanking" : 0.15, - "datum" : 23008 - }, { - "relativeRanking" : 0.2, - "datum" : 23008 - }, { - "relativeRanking" : 0.25, - "datum" : 23008 - }, { - "relativeRanking" : 0.3, - "datum" : 23008 - }, { - "relativeRanking" : 0.35, - "datum" : 23008 - }, { - "relativeRanking" : 0.4, - "datum" : 23008 - }, { - "relativeRanking" : 0.45, - "datum" : 23008 - }, { - "relativeRanking" : 0.5, - "datum" : 23008 - }, { - "relativeRanking" : 0.55, - "datum" : 23008 - }, { - "relativeRanking" : 0.6, - "datum" : 23008 - }, { - "relativeRanking" : 0.65, - "datum" : 23008 - }, { - "relativeRanking" : 0.7, - "datum" : 23008 - }, { - "relativeRanking" : 0.75, - "datum" : 23008 - }, { - "relativeRanking" : 0.8, - "datum" : 23008 - }, { - "relativeRanking" : 0.85, - "datum" : 23008 - }, { - "relativeRanking" : 0.9, - "datum" : 23008 - }, { - "relativeRanking" : 0.95, - "datum" : 23008 - } ], - "numberValues" : 1 - } ], - "successfulReduceAttemptCDF" : { - "maximum" : 83784, - "minimum" : 83784, - "rankings" : [ { - "relativeRanking" : 0.05, - "datum" : 83784 - }, { - "relativeRanking" : 0.1, - "datum" : 83784 - }, { - "relativeRanking" : 0.15, - "datum" : 83784 - }, { - "relativeRanking" : 0.2, - "datum" : 83784 - }, { - "relativeRanking" : 0.25, - "datum" : 83784 - }, { - "relativeRanking" : 0.3, - "datum" : 83784 - }, { - "relativeRanking" : 0.35, - "datum" : 83784 - }, { - "relativeRanking" : 0.4, - "datum" : 83784 - }, { - "relativeRanking" : 0.45, - "datum" : 83784 - }, { - "relativeRanking" : 0.5, - 
"datum" : 83784 - }, { - "relativeRanking" : 0.55, - "datum" : 83784 - }, { - "relativeRanking" : 0.6, - "datum" : 83784 - }, { - "relativeRanking" : 0.65, - "datum" : 83784 - }, { - "relativeRanking" : 0.7, - "datum" : 83784 - }, { - "relativeRanking" : 0.75, - "datum" : 83784 - }, { - "relativeRanking" : 0.8, - "datum" : 83784 - }, { - "relativeRanking" : 0.85, - "datum" : 83784 - }, { - "relativeRanking" : 0.9, - "datum" : 83784 - }, { - "relativeRanking" : 0.95, - "datum" : 83784 - } ], - "numberValues" : 1 - }, - "failedReduceAttemptCDF" : { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, - "mapperTriesToSucceed" : [ 0.9565217391304348, 0.043478260869565216 ], - "failedMapperFraction" : 0.0, - "relativeTime" : 0, - "queue" : null, - "clusterMapMB" : -1, - "clusterReduceMB" : -1, - "jobMapMB" : -1, - "jobReduceMB" : -1 -} - { - "priority" : "NORMAL", - "jobID" : "job_200904211745_0003", - "mapTasks" : [ { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.128\\.192", "cluster50494\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.128\\.192/cluster50494\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336870159, - "finishTime" : 1240336876906, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0003_m_000000_0", - "hdfsBytesRead" : 40, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 120, - "mapInputRecords" : 6, - "mapOutputBytes" : 64, - "mapOutputRecords" : 6, - "combineInputRecords" : 6, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 6, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.129\\.64", "cluster1823\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.64", "cluster1800\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.64", "cluster1979\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336862787, - "finishTime" : 1240336878867, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0003_m_000000", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.134\\.192", "cluster50396\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.134\\.192/cluster50396\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336863528, - "finishTime" : 1240336878419, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0003_m_000002_0", - "hdfsBytesRead" : 29, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 100, - "mapInputRecords" : 4, - "mapOutputBytes" : 45, - "mapOutputRecords" : 4, - "combineInputRecords" : 4, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 4, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.127\\.64", "cluster1457\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.64", "cluster1463\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.64", "cluster50144\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336862891, - "finishTime" : 1240336878973, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - 
"outputRecords" : -1, - "taskID" : "task_200904211745_0003_m_000002", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.62\\.192", "cluster50416\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.62\\.192/cluster50416\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336863725, - "finishTime" : 1240336868610, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0003_m_000001_0", - "hdfsBytesRead" : 36, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 109, - "mapInputRecords" : 5, - "mapOutputBytes" : 56, - "mapOutputRecords" : 5, - "combineInputRecords" : 5, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.64", "cluster1346\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.126\\.64", "cluster1333\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.128", "cluster50476\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336862992, - "finishTime" : 1240336879103, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0003_m_000001", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - } ], - "reduceTasks" : [ { - "attempts" : [ { - "location" : null, - "hostName" : "/194\\.6\\.133\\.192/cluster50225\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336879990, - "finishTime" : 1240336912326, - "shuffleFinished" : 1240336906008, - "sortFinished" : 1240336906089, - "attemptID" : "attempt_200904211745_0003_r_000000_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 61, - "fileBytesRead" : 112, - "fileBytesWritten" : 112, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 0, - "reduceInputRecords" : 15, - "reduceShuffleBytes" : 233, - "reduceOutputRecords" : 0, - "spilledRecords" : 15, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336879303, - "finishTime" : 1240336928223, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0003_r_000000", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - } ], - "otherTasks" : [ { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.134\\.0", "cluster50261\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.134\\.0/cluster50261\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336846902, - "finishTime" : 1240336858206, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0003_m_000004_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 0, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336846190, - "finishTime" : 1240336862349, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - 
"outputRecords" : -1, - "taskID" : "task_200904211745_0003_m_000004", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "SETUP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.133\\.192", "cluster50225\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.133\\.192/cluster50225\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336928353, - "finishTime" : 1240336930147, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0003_m_000003_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 0, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336928224, - "finishTime" : 1240336944237, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0003_m_000003", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "CLEANUP" - } ], - "finishTime" : 1240336944238, - "user" : "geek2", - "jobName" : null, - "computonsPerMapInputByte" : -1, - "computonsPerMapOutputByte" : -1, - "computonsPerReduceInputByte" : -1, - "computonsPerReduceOutputByte" : -1, - "submitTime" : 1240336843916, - "launchTime" : 1240336844448, - "heapMegabytes" : 640, - "totalMaps" : 3, - "totalReduces" : 1, - "outcome" : "SUCCESS", - "jobtype" : "JAVA", - "directDependantJobs" : [ ], - "successfulMapAttemptCDFs" : [ { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 14891, - "minimum" : 4885, - "rankings" : [ { - "relativeRanking" : 0.05, - "datum" : 4885 - }, { - "relativeRanking" : 0.1, - "datum" : 4885 - }, { - "relativeRanking" : 0.15, - "datum" : 4885 - }, { - "relativeRanking" : 0.2, - "datum" : 4885 - }, { - "relativeRanking" : 0.25, - "datum" : 4885 - }, { - "relativeRanking" : 0.3, - "datum" : 4885 - }, { - "relativeRanking" : 0.35, - "datum" : 4885 - }, { - "relativeRanking" : 0.4, - "datum" : 4885 - }, { - "relativeRanking" : 0.45, - "datum" : 4885 - }, { - "relativeRanking" : 0.5, - "datum" : 4885 - }, { - "relativeRanking" : 0.55, - "datum" : 4885 - }, { - "relativeRanking" : 0.6, - "datum" : 4885 - }, { - "relativeRanking" : 0.65, - "datum" : 4885 - }, { - "relativeRanking" : 0.7, - "datum" : 6747 - }, { - "relativeRanking" : 0.75, - "datum" : 6747 - }, { - "relativeRanking" : 0.8, - "datum" : 6747 - }, { - "relativeRanking" : 0.85, - "datum" : 6747 - }, { - "relativeRanking" : 0.9, - "datum" : 6747 - }, { - "relativeRanking" : 0.95, - "datum" : 6747 - } ], - "numberValues" : 3 - }, { - "maximum" : 11304, - "minimum" : 1794, - "rankings" : [ { - "relativeRanking" : 0.05, - "datum" : 1794 - }, { - "relativeRanking" : 0.1, - "datum" : 1794 - }, { - "relativeRanking" : 0.15, - "datum" : 1794 - }, { - "relativeRanking" : 0.2, - "datum" : 1794 - }, { - "relativeRanking" : 0.25, - "datum" : 1794 - }, { - "relativeRanking" : 0.3, - "datum" : 1794 - }, { - "relativeRanking" : 0.35, - "datum" : 1794 - }, { - "relativeRanking" : 0.4, - "datum" : 1794 - }, { - "relativeRanking" : 0.45, - 
"datum" : 1794 - }, { - "relativeRanking" : 0.5, - "datum" : 1794 - }, { - "relativeRanking" : 0.55, - "datum" : 1794 - }, { - "relativeRanking" : 0.6, - "datum" : 1794 - }, { - "relativeRanking" : 0.65, - "datum" : 1794 - }, { - "relativeRanking" : 0.7, - "datum" : 1794 - }, { - "relativeRanking" : 0.75, - "datum" : 1794 - }, { - "relativeRanking" : 0.8, - "datum" : 1794 - }, { - "relativeRanking" : 0.85, - "datum" : 1794 - }, { - "relativeRanking" : 0.9, - "datum" : 1794 - }, { - "relativeRanking" : 0.95, - "datum" : 1794 - } ], - "numberValues" : 2 - } ], - "failedMapAttemptCDFs" : [ { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - } ], - "successfulReduceAttemptCDF" : { - "maximum" : 32336, - "minimum" : 32336, - "rankings" : [ { - "relativeRanking" : 0.05, - "datum" : 32336 - }, { - "relativeRanking" : 0.1, - "datum" : 32336 - }, { - "relativeRanking" : 0.15, - "datum" : 32336 - }, { - "relativeRanking" : 0.2, - "datum" : 32336 - }, { - "relativeRanking" : 0.25, - "datum" : 32336 - }, { - "relativeRanking" : 0.3, - "datum" : 32336 - }, { - "relativeRanking" : 0.35, - "datum" : 32336 - }, { - "relativeRanking" : 0.4, - "datum" : 32336 - }, { - "relativeRanking" : 0.45, - "datum" : 32336 - }, { - "relativeRanking" : 0.5, - "datum" : 32336 - }, { - "relativeRanking" : 0.55, - "datum" : 32336 - }, { - "relativeRanking" : 0.6, - "datum" : 32336 - }, { - "relativeRanking" : 0.65, - "datum" : 32336 - }, { - "relativeRanking" : 0.7, - "datum" : 32336 - }, { - "relativeRanking" : 0.75, - "datum" : 32336 - }, { - "relativeRanking" : 0.8, - "datum" : 32336 - }, { - "relativeRanking" : 0.85, - "datum" : 32336 - }, { - "relativeRanking" : 0.9, - "datum" : 32336 - }, { - "relativeRanking" : 0.95, - "datum" : 32336 - } ], - "numberValues" : 1 - }, - "failedReduceAttemptCDF" : { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, - "mapperTriesToSucceed" : [ 1.0 ], - "failedMapperFraction" : 0.0, - "relativeTime" : 0, - "queue" : null, - "clusterMapMB" : -1, - "clusterReduceMB" : -1, - "jobMapMB" : -1, - "jobReduceMB" : -1 -} - { - "priority" : "NORMAL", - "jobID" : "job_200904211745_0004", - "mapTasks" : [ { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.62\\.128", "cluster1702\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.62\\.128/cluster1702\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873318, - "finishTime" : 1240336916053, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000024_0", - "hdfsBytesRead" : 64208964, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2476698, - "mapInputRecords" : 4024803, - "mapOutputBytes" : 4143839, - "mapOutputRecords" : 64187, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 64187, - "mapInputBytes" : 249623354 - }, { - "location" : null, - "hostName" : "cluster1687\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 
1240336915251, - "finishTime" : 1240336948507, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000024_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.62\\.128", "cluster1683\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.192", "cluster1025\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.192", "cluster1030\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872259, - "finishTime" : 1240336920373, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000024", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.117\\.64", "cluster1185\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.117\\.64/cluster1185\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336875106, - "finishTime" : 1240336929081, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000007_0", - "hdfsBytesRead" : 67416667, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2601457, - "mapInputRecords" : 4254279, - "mapOutputBytes" : 4350539, - "mapOutputRecords" : 67759, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 67759, - "mapInputBytes" : 263667529 - }, { - "location" : null, - "hostName" : "cluster1195\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336918908, - "finishTime" : 1240336988786, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000007_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.64", "cluster1174\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.117\\.64", "cluster1171\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.64", "cluster1600\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872260, - "finishTime" : 1240336936418, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000007", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.117\\.128", "cluster1209\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.117\\.128/cluster1209\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874012, - "finishTime" : 1240336912483, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : 
"attempt_200904211745_0004_m_000037_0", - "hdfsBytesRead" : 62083938, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2407514, - "mapInputRecords" : 3862317, - "mapOutputBytes" : 4057593, - "mapOutputRecords" : 62680, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 62680, - "mapInputBytes" : 241895332 - }, { - "location" : null, - "hostName" : "cluster1227\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336915459, - "finishTime" : 1240336949436, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000037_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.128", "cluster1107\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.117\\.128", "cluster1215\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.117\\.128", "cluster1226\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872265, - "finishTime" : 1240336920359, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000037", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.116\\.128", "cluster1110\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.116\\.128/cluster1110\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336871992, - "finishTime" : 1240336912072, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000015_0", - "hdfsBytesRead" : 65491754, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2477099, - "mapInputRecords" : 4163656, - "mapOutputBytes" : 4140319, - "mapOutputRecords" : 64248, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 64248, - "mapInputBytes" : 256585350 - }, { - "location" : null, - "hostName" : "cluster1825\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336921125, - "finishTime" : 1240336954463, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000015_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.128", "cluster1104\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.64", "cluster1465\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.64", "cluster1462\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872268, - "finishTime" : 1240336920434, - "inputBytes" : -1, - "inputRecords" : -1, - 
"outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000015", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.64", "cluster1755\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.63\\.64/cluster1755\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336876311, - "finishTime" : 1240336942586, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000041_0", - "hdfsBytesRead" : 33589630, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 35182817, - "mapInputRecords" : 1398039, - "mapOutputBytes" : 74392390, - "mapOutputRecords" : 1217208, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1217208, - "mapInputBytes" : 103351186 - }, { - "location" : null, - "hostName" : "cluster1441\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336916237, - "finishTime" : 1240336981644, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000041_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.64", "cluster1346\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.126\\.64", "cluster1332\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.64", "cluster1759\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872270, - "finishTime" : 1240336952476, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000041", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.127\\.192", "cluster1534\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.127\\.192/cluster1534\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873710, - "finishTime" : 1240336886646, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000115_0", - "hdfsBytesRead" : 7802358, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 61768, - "mapInputRecords" : 338528, - "mapOutputBytes" : 117149, - "mapOutputRecords" : 1831, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1831, - "mapInputBytes" : 25534769 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.64", "cluster1070\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.116\\.64", "cluster1041\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.192", "cluster1534\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872271, - "finishTime" : 1240336888399, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000115", - "numberMaps" : -1, - 
"numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.62\\.64", "cluster1657\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.62\\.64/cluster1657\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336872974, - "finishTime" : 1240336912238, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000022_0", - "hdfsBytesRead" : 64534282, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2454984, - "mapInputRecords" : 4070141, - "mapOutputBytes" : 4111780, - "mapOutputRecords" : 64137, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 64137, - "mapInputBytes" : 257286422 - }, { - "location" : null, - "hostName" : "cluster1973\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336917218, - "finishTime" : 1240336950542, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000022_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.62\\.64", "cluster1657\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.0", "cluster1782\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.0", "cluster1762\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872273, - "finishTime" : 1240336920438, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000022", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.129\\.64", "cluster1835\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.129\\.64/cluster1835\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874481, - "finishTime" : 1240336949882, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000045_0", - "hdfsBytesRead" : 30958862, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2445, - "mapInputRecords" : 2212648, - "mapOutputBytes" : 508, - "mapOutputRecords" : 9, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 9, - "mapInputBytes" : 134455678 - }, { - "location" : null, - "hostName" : "cluster1804\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336917613, - "finishTime" : 1240336951416, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000045_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - 
"preferredLocations" : [ { - "layers" : [ "194\\.6\\.129\\.64", "cluster1819\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.64", "cluster1809\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.64", "cluster50316\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872275, - "finishTime" : 1240336952483, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000045", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.134\\.0", "cluster50273\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.134\\.0/cluster50273\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336872929, - "finishTime" : 1240336917687, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000000_0", - "hdfsBytesRead" : 70551688, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2705106, - "mapInputRecords" : 4456455, - "mapOutputBytes" : 4576941, - "mapOutputRecords" : 71582, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 71582, - "mapInputBytes" : 275178411 - }, { - "location" : null, - "hostName" : "cluster50279\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336915037, - "finishTime" : 1240336948454, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000000_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.133\\.128", "cluster50191\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.0", "cluster50252\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.0", "cluster50263\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872276, - "finishTime" : 1240336920429, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000000", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "cluster1484\\.secondleveldomain\\.com", - "result" : "FAILED", - "startTime" : 1240336873052, - "finishTime" : 1240336891468, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000026_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - }, { - "location" : { - "layers" : [ "194\\.6\\.133\\.64", "cluster50159\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.133\\.64/cluster50159\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336905498, - "finishTime" : 
1240336942729, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000026_1", - "hdfsBytesRead" : 63978817, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2435223, - "mapInputRecords" : 4013967, - "mapOutputBytes" : 4114611, - "mapOutputRecords" : 63929, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 63929, - "mapInputBytes" : 248841916 - }, { - "location" : null, - "hostName" : "cluster50025\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336916380, - "finishTime" : 1240336958665, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000026_2", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.127\\.128", "cluster1504\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.192", "cluster1035\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.192", "cluster1008\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872278, - "finishTime" : 1240336952813, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000026", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.133\\.64", "cluster50125\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.133\\.64/cluster50125\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336872976, - "finishTime" : 1240336915820, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000012_0", - "hdfsBytesRead" : 65683422, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2514927, - "mapInputRecords" : 4135021, - "mapOutputBytes" : 4224631, - "mapOutputRecords" : 66548, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 66548, - "mapInputBytes" : 255713201 - }, { - "location" : null, - "hostName" : "cluster1283\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336914458, - "finishTime" : 1240336947756, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000012_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.0", "cluster1299\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.126\\.0", "cluster1315\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.64", "cluster50147\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872299, - 
"finishTime" : 1240336920463, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000012", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.134\\.64", "cluster50285\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.134\\.64/cluster50285\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336872964, - "finishTime" : 1240336939111, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000006_0", - "hdfsBytesRead" : 67732698, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2596196, - "mapInputRecords" : 4299703, - "mapOutputBytes" : 4415200, - "mapOutputRecords" : 70202, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 70202, - "mapInputBytes" : 264286110 - }, { - "location" : null, - "hostName" : "cluster50282\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336915054, - "finishTime" : 1240336955710, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000006_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.63\\.0", "cluster1781\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.64", "cluster50307\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.64", "cluster50311\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872300, - "finishTime" : 1240336952474, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000006", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.133\\.192", "cluster50204\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.133\\.192/cluster50204\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336872970, - "finishTime" : 1240336919341, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000002_0", - "hdfsBytesRead" : 68694241, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2641160, - "mapInputRecords" : 4318855, - "mapOutputBytes" : 4523968, - "mapOutputRecords" : 71535, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 71535, - "mapInputBytes" : 268342920 - }, { - "location" : null, - "hostName" : "cluster1894\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336915343, - "finishTime" : 1240336956992, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000002_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - 
"mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.129\\.192", "cluster1896\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.192", "cluster50213\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.192", "cluster50228\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872301, - "finishTime" : 1240336920455, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000002", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.128\\.0", "cluster1568\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.128\\.0/cluster1568\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336918628, - "finishTime" : 1240336945487, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000060_1", - "hdfsBytesRead" : 30143641, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2581, - "mapInputRecords" : 2183668, - "mapOutputBytes" : 619, - "mapOutputRecords" : 9, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 9, - "mapInputBytes" : 128698969 - }, { - "location" : null, - "hostName" : "cluster1560\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336873218, - "finishTime" : 1240336938166, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000060_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.128\\.0", "cluster1581\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.0", "cluster1560\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.0", "cluster50276\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872374, - "finishTime" : 1240336946441, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000060", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.117\\.128", "cluster1207\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.117\\.128/cluster1207\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336867477, - "finishTime" : 1240336891579, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000068_0", - "hdfsBytesRead" : 29914824, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2149, - "mapInputRecords" : 2146323, - "mapOutputBytes" : 176, - "mapOutputRecords" : 3, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - 
"reduceOutputRecords" : -1, - "spilledRecords" : 3, - "mapInputBytes" : 127372829 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.128", "cluster1239\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.117\\.128", "cluster1217\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.64", "cluster1479\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872374, - "finishTime" : 1240336904545, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000068", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.129\\.192", "cluster1911\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.129\\.192/cluster1911\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336875928, - "finishTime" : 1240336914911, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000008_0", - "hdfsBytesRead" : 67080786, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2606104, - "mapInputRecords" : 4182439, - "mapOutputBytes" : 4393005, - "mapOutputRecords" : 68146, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 68146, - "mapInputBytes" : 260552004 - }, { - "location" : null, - "hostName" : "cluster50001\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336914959, - "finishTime" : 1240336959329, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000008_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.129\\.192", "cluster1914\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.192", "cluster1911\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.132\\.128", "cluster50011\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872375, - "finishTime" : 1240336920488, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000008", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.129\\.128", "cluster1858\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.129\\.128/cluster1858\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874685, - "finishTime" : 1240336917858, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000010_0", - "hdfsBytesRead" : 66510009, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2504720, - "mapInputRecords" : 4229507, - "mapOutputBytes" : 4193214, - "mapOutputRecords" : 65150, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 65150, - "mapInputBytes" : 261348815 - }, { - "location" : null, 
- "hostName" : "cluster50030\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336914970, - "finishTime" : 1240336948391, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000010_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.64", "cluster1180\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.117\\.64", "cluster1171\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.128", "cluster1857\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872376, - "finishTime" : 1240336920531, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000010", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.192", "cluster1021\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.63\\.192/cluster1021\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336872982, - "finishTime" : 1240336912193, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000019_0", - "hdfsBytesRead" : 64941817, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2419356, - "mapInputRecords" : 4085619, - "mapOutputBytes" : 4074644, - "mapOutputRecords" : 63304, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 63304, - "mapInputBytes" : 254958753 - }, { - "location" : null, - "hostName" : "cluster1576\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336919809, - "finishTime" : 1240336953416, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000019_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.63\\.192", "cluster1006\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.192", "cluster1011\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.0", "cluster1573\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872376, - "finishTime" : 1240336920515, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000019", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.62\\.128", "cluster1700\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.62\\.128/cluster1700\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336869803, - "finishTime" : 1240336893577, - 
"shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000047_0", - "hdfsBytesRead" : 30559058, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2349, - "mapInputRecords" : 2184357, - "mapOutputBytes" : 444, - "mapOutputRecords" : 8, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 8, - "mapInputBytes" : 129670187 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.62\\.128", "cluster1717\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.128", "cluster1713\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.192", "cluster50389\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872377, - "finishTime" : 1240336904552, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000047", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.127\\.64", "cluster1457\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.127\\.64/cluster1457\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336871305, - "finishTime" : 1240336910899, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000017_0", - "hdfsBytesRead" : 65262059, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2517094, - "mapInputRecords" : 4042279, - "mapOutputBytes" : 4239279, - "mapOutputRecords" : 66022, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 66022, - "mapInputBytes" : 252581214 - }, { - "location" : null, - "hostName" : "cluster50539\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336915026, - "finishTime" : 1240336948436, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000017_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.127\\.64", "cluster1454\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.64", "cluster1466\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.0", "cluster50539\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872377, - "finishTime" : 1240336920534, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000017", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.128\\.0", "cluster1598\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.128\\.0/cluster1598\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336862833, - "finishTime" : 1240336886002, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000081_0", - "hdfsBytesRead" : 
29655169, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2492, - "mapInputRecords" : 2160857, - "mapOutputBytes" : 519, - "mapOutputRecords" : 6, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 6, - "mapInputBytes" : 128065621 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.128\\.0", "cluster1566\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.0", "cluster1598\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.64", "cluster1626\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872378, - "finishTime" : 1240336904475, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000081", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.128\\.0", "cluster1592\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.128\\.0/cluster1592\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336876693, - "finishTime" : 1240336916107, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000021_0", - "hdfsBytesRead" : 64599955, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2454477, - "mapInputRecords" : 4026318, - "mapOutputBytes" : 4137511, - "mapOutputRecords" : 63818, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 63818, - "mapInputBytes" : 251469382 - }, { - "location" : null, - "hostName" : "cluster1564\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336917825, - "finishTime" : 1240336951212, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000021_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.192", "cluster1127\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.116\\.192", "cluster1131\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.0", "cluster1562\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872379, - "finishTime" : 1240336920487, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000021", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.135\\.128", "cluster3106\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.135\\.128/cluster3106\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336879219, - "finishTime" : 1240336913954, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000003_0", - "hdfsBytesRead" : 68427487, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2623342, - "mapInputRecords" : 4252689, - 
"mapOutputBytes" : 4454505, - "mapOutputRecords" : 70042, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 70042, - "mapInputBytes" : 264536055 - }, { - "location" : null, - "hostName" : "cluster1829\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336917410, - "finishTime" : 1240336950698, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000003_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.62\\.64", "cluster1672\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.128", "cluster3089\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.128", "cluster3114\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872379, - "finishTime" : 1240336920465, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000003", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.126\\.64", "cluster1334\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.126\\.64/cluster1334\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874371, - "finishTime" : 1240336916964, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000013_0", - "hdfsBytesRead" : 65647050, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2545518, - "mapInputRecords" : 4090204, - "mapOutputBytes" : 4275432, - "mapOutputRecords" : 66126, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 66126, - "mapInputBytes" : 255910823 - }, { - "location" : null, - "hostName" : "cluster1410\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336914216, - "finishTime" : 1240336963861, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000013_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.192", "cluster1415\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.126\\.192", "cluster1412\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.126\\.64", "cluster1332\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872380, - "finishTime" : 1240336920509, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000013", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { 
- "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.127\\.64", "cluster1442\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.127\\.64/cluster1442\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336871928, - "finishTime" : 1240336911159, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000033_0", - "hdfsBytesRead" : 62843496, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2438028, - "mapInputRecords" : 3993110, - "mapOutputBytes" : 4086319, - "mapOutputRecords" : 63785, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 63785, - "mapInputBytes" : 246369440 - }, { - "location" : null, - "hostName" : "cluster1941\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336909373, - "finishTime" : 1240336966403, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000033_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.64", "cluster1356\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.126\\.64", "cluster1350\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.64", "cluster1455\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872380, - "finishTime" : 1240336920485, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000033", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "cluster1751\\.secondleveldomain\\.com", - "result" : "FAILED", - "startTime" : 1240336873015, - "finishTime" : 1240336891599, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000046_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - }, { - "location" : { - "layers" : [ "194\\.6\\.134\\.192", "cluster50368\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.134\\.192/cluster50368\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336905058, - "finishTime" : 1240336926560, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000046_1", - "hdfsBytesRead" : 30599610, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2489, - "mapInputRecords" : 2280773, - "mapOutputBytes" : 658, - "mapOutputRecords" : 12, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 12, - "mapInputBytes" : 133157243 - }, { - "location" : null, - "hostName" : 
"cluster1247\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336916005, - "finishTime" : 1240336942980, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000046_2", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.63\\.64", "cluster1757\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.64", "cluster1751\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.64", "cluster50292\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872381, - "finishTime" : 1240336936977, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000046", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.0", "cluster1777\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.63\\.0/cluster1777\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336878797, - "finishTime" : 1240336901414, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000059_0", - "hdfsBytesRead" : 30176905, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2160, - "mapInputRecords" : 2227855, - "mapOutputBytes" : 210, - "mapOutputRecords" : 4, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 4, - "mapInputBytes" : 130221301 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.62\\.192", "cluster50425\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.192", "cluster50407\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.0", "cluster1774\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872381, - "finishTime" : 1240336904493, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000059", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.192", "cluster1027\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.63\\.192/cluster1027\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873401, - "finishTime" : 1240336909982, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000038_0", - "hdfsBytesRead" : 61899686, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2392428, - "mapInputRecords" : 3861737, - "mapOutputBytes" : 3999150, - "mapOutputRecords" : 61718, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 61718, - "mapInputBytes" : 240872637 - }, { - "location" : null, - "hostName" : "cluster3117\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336917616, - "finishTime" : 1240336952203, - 
"shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000038_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.63\\.192", "cluster1002\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.192", "cluster1027\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.128", "cluster3093\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872382, - "finishTime" : 1240336920531, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000038", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.116\\.128", "cluster1093\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.116\\.128/cluster1093\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336877844, - "finishTime" : 1240336902594, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000066_0", - "hdfsBytesRead" : 29972278, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2311, - "mapInputRecords" : 2151755, - "mapOutputBytes" : 476, - "mapOutputRecords" : 8, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 8, - "mapInputBytes" : 127537268 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.128", "cluster1118\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.192", "cluster1026\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.192", "cluster1031\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872383, - "finishTime" : 1240336904501, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000066", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.128\\.64", "cluster1632\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.128\\.64/cluster1632\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874600, - "finishTime" : 1240336886276, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000098_0", - "hdfsBytesRead" : 8241095, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 53850, - "mapInputRecords" : 359279, - "mapOutputBytes" : 97992, - "mapOutputRecords" : 1544, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1544, - "mapInputBytes" : 26848771 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.63\\.64", "cluster1758\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.64", "cluster1614\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.64", "cluster1632\\.secondleveldomain\\.com" ] - } ], - "startTime" 
: 1240336872383, - "finishTime" : 1240336888503, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000098", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.0", "cluster1785\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.63\\.0/cluster1785\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336879910, - "finishTime" : 1240336910063, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000062_0", - "hdfsBytesRead" : 30075619, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2239, - "mapInputRecords" : 2131858, - "mapOutputBytes" : 300, - "mapOutputRecords" : 6, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 6, - "mapInputBytes" : 126857339 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.64", "cluster1357\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.0", "cluster1776\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.0", "cluster1773\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872384, - "finishTime" : 1240336904537, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000062", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.128\\.64", "cluster1626\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.128\\.64/cluster1626\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336876699, - "finishTime" : 1240336914881, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000025_0", - "hdfsBytesRead" : 64200239, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2529950, - "mapInputRecords" : 3996858, - "mapOutputBytes" : 4239266, - "mapOutputRecords" : 65959, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 65959, - "mapInputBytes" : 248767046 - }, { - "location" : null, - "hostName" : "cluster1601\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336918879, - "finishTime" : 1240336952244, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000025_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.128\\.64", "cluster1633\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.64", "cluster1611\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.0", "cluster50275\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872384, - "finishTime" : 1240336920528, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - 
"taskID" : "task_200904211745_0004_m_000025", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.128\\.64", "cluster1628\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.128\\.64/cluster1628\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336881912, - "finishTime" : 1240336906234, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000079_0", - "hdfsBytesRead" : 29704868, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2212, - "mapInputRecords" : 2170913, - "mapOutputBytes" : 222, - "mapOutputRecords" : 4, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 4, - "mapInputBytes" : 127927659 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.127\\.128", "cluster1490\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.128", "cluster1482\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.64", "cluster1636\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872385, - "finishTime" : 1240336904599, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000079", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.127\\.64", "cluster1470\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.127\\.64/cluster1470\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336871928, - "finishTime" : 1240336893040, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000088_0", - "hdfsBytesRead" : 8927130, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 63070, - "mapInputRecords" : 416911, - "mapOutputBytes" : 119856, - "mapOutputRecords" : 2051, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 2051, - "mapInputBytes" : 29933360 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.127\\.64", "cluster1474\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.64", "cluster1479\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.0", "cluster1927\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872385, - "finishTime" : 1240336904691, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000088", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.62\\.64", "cluster1662\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.62\\.64/cluster1662\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336880282, - "finishTime" : 1240336919548, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000011_0", - "hdfsBytesRead" : 66183568, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2597810, - "mapInputRecords" : 4149962, - "mapOutputBytes" : 4367215, - "mapOutputRecords" : 67906, - "combineInputRecords" : 0, - 
"reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 67906, - "mapInputBytes" : 256962052 - }, { - "location" : null, - "hostName" : "cluster50234\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336915110, - "finishTime" : 1240336948409, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000011_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.62\\.64", "cluster1652\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.64", "cluster3065\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.64", "cluster3076\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872386, - "finishTime" : 1240336920520, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000011", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.128\\.64", "cluster1623\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.128\\.64/cluster1623\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336879066, - "finishTime" : 1240336901295, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000087_0", - "hdfsBytesRead" : 29290661, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2299, - "mapInputRecords" : 2093812, - "mapOutputBytes" : 307, - "mapOutputRecords" : 5, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5, - "mapInputBytes" : 124249977 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.63\\.192", "cluster1008\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.192", "cluster1020\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.64", "cluster1602\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872387, - "finishTime" : 1240336904528, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000087", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.127\\.192", "cluster1535\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.127\\.192/cluster1535\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874347, - "finishTime" : 1240336896706, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000057_0", - "hdfsBytesRead" : 30235345, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2182, - "mapInputRecords" : 2261814, - "mapOutputBytes" : 234, - "mapOutputRecords" : 2, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 2, - 
"mapInputBytes" : 131804880 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.127\\.192", "cluster1559\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.0", "cluster1594\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.0", "cluster1579\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872387, - "finishTime" : 1240336904515, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000057", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.128\\.0", "cluster1574\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.128\\.0/cluster1574\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336876540, - "finishTime" : 1240336915909, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000028_0", - "hdfsBytesRead" : 63494931, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2479449, - "mapInputRecords" : 3946537, - "mapOutputBytes" : 4204287, - "mapOutputRecords" : 65600, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 65600, - "mapInputBytes" : 246575765 - }, { - "location" : null, - "hostName" : "cluster50205\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336915586, - "finishTime" : 1240336949032, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000028_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.128\\.0", "cluster1561\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.192", "cluster50206\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.192", "cluster50217\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872388, - "finishTime" : 1240336920508, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000028", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.126\\.0", "cluster1302\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.126\\.0/cluster1302\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336875716, - "finishTime" : 1240336915224, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000014_0", - "hdfsBytesRead" : 65542507, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2519207, - "mapInputRecords" : 4123667, - "mapOutputBytes" : 4223635, - "mapOutputRecords" : 65390, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 65390, - "mapInputBytes" : 256189404 - }, { - "location" : null, - "hostName" : "cluster50154\\.secondleveldomain\\.com", - 
"result" : "KILLED", - "startTime" : 1240336915022, - "finishTime" : 1240336954536, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000014_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.0", "cluster1299\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.126\\.0", "cluster1289\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.64", "cluster50142\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872388, - "finishTime" : 1240336920547, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000014", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.126\\.128", "cluster1377\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.126\\.128/cluster1377\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336870160, - "finishTime" : 1240336910333, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000018_0", - "hdfsBytesRead" : 65179399, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2488791, - "mapInputRecords" : 4131630, - "mapOutputBytes" : 4155682, - "mapOutputRecords" : 64683, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 64683, - "mapInputBytes" : 255327265 - }, { - "location" : null, - "hostName" : "cluster1364\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336911917, - "finishTime" : 1240336945214, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000018_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.128", "cluster1383\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.126\\.128", "cluster1395\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.128", "cluster50441\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872389, - "finishTime" : 1240336920490, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000018", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.126\\.64", "cluster1357\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.126\\.64/cluster1357\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336866494, - "finishTime" : 1240336887708, - "shuffleFinished" : -1, - "sortFinished" : -1, - 
"attemptID" : "attempt_200904211745_0004_m_000064_0", - "hdfsBytesRead" : 29993789, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2363, - "mapInputRecords" : 2168531, - "mapOutputBytes" : 454, - "mapOutputRecords" : 8, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 8, - "mapInputBytes" : 128381652 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.64", "cluster1321\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.128", "cluster50475\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.128", "cluster50473\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872389, - "finishTime" : 1240336904527, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000064", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.126\\.128", "cluster1368\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.126\\.128/cluster1368\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336871607, - "finishTime" : 1240336898707, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000063_0", - "hdfsBytesRead" : 29998454, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2352, - "mapInputRecords" : 2182549, - "mapOutputBytes" : 428, - "mapOutputRecords" : 8, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 8, - "mapInputBytes" : 128497476 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.128", "cluster1371\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.64", "cluster1822\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.64", "cluster1832\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872390, - "finishTime" : 1240336904536, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000063", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.117\\.192", "cluster1251\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.117\\.192/cluster1251\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336872977, - "finishTime" : 1240336910764, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000023_0", - "hdfsBytesRead" : 64457911, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2454975, - "mapInputRecords" : 4073107, - "mapOutputBytes" : 4111625, - "mapOutputRecords" : 63981, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 63981, - "mapInputBytes" : 252476783 - }, { - "location" : null, - "hostName" : "cluster1255\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336916099, - "finishTime" : 1240336969463, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000023_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, 
- "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.192", "cluster1246\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.117\\.192", "cluster1274\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.192", "cluster50423\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872390, - "finishTime" : 1240336920495, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000023", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.126\\.0", "cluster1281\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.126\\.0/cluster1281\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873567, - "finishTime" : 1240336910205, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000074_0", - "hdfsBytesRead" : 29798610, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 3066, - "mapInputRecords" : 2137289, - "mapOutputBytes" : 1604, - "mapOutputRecords" : 38, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 38, - "mapInputBytes" : 126797079 - }, { - "location" : null, - "hostName" : "cluster1830\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336920116, - "finishTime" : 1240336954303, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000074_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.0", "cluster1316\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.192", "cluster50493\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.192", "cluster50488\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872391, - "finishTime" : 1240336920505, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000074", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.126\\.192", "cluster1420\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.126\\.192/cluster1420\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336870902, - "finishTime" : 1240336911334, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000001_0", - "hdfsBytesRead" : 69761283, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2709488, - "mapInputRecords" : 4346205, - "mapOutputBytes" : 4543664, - "mapOutputRecords" : 70626, - 
"combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 70626, - "mapInputBytes" : 270462297 - }, { - "location" : null, - "hostName" : "cluster1850\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336917236, - "finishTime" : 1240336950666, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000001_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.192", "cluster1405\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.126\\.192", "cluster1427\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.192", "cluster50365\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872391, - "finishTime" : 1240336920486, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000001", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.127\\.192", "cluster1540\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.127\\.192/cluster1540\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336871750, - "finishTime" : 1240336899790, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000061_0", - "hdfsBytesRead" : 30111997, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2348, - "mapInputRecords" : 2160034, - "mapOutputBytes" : 354, - "mapOutputRecords" : 6, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 6, - "mapInputBytes" : 128130156 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.192", "cluster1138\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.116\\.192", "cluster1137\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.192", "cluster1520\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872392, - "finishTime" : 1240336904500, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000061", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.127\\.128", "cluster1493\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.127\\.128/cluster1493\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874481, - "finishTime" : 1240336923543, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000049_0", - "hdfsBytesRead" : 30486394, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2076, - "mapInputRecords" : 2239224, - "mapOutputBytes" : 102, - "mapOutputRecords" : 2, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - 
"reduceOutputRecords" : -1, - "spilledRecords" : 2, - "mapInputBytes" : 131723681 - }, { - "location" : null, - "hostName" : "cluster1519\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336915105, - "finishTime" : 1240336939262, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000049_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.127\\.128", "cluster1518\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.192", "cluster50501\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.192", "cluster50484\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872393, - "finishTime" : 1240336936512, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000049", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.129\\.128", "cluster1843\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.129\\.128/cluster1843\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873996, - "finishTime" : 1240336930684, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000029_0", - "hdfsBytesRead" : 63373946, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2388744, - "mapInputRecords" : 4033224, - "mapOutputBytes" : 3991838, - "mapOutputRecords" : 62073, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 62073, - "mapInputBytes" : 248134254 - }, { - "location" : null, - "hostName" : "cluster1872\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336916333, - "finishTime" : 1240336965543, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000029_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.63\\.192", "cluster1035\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.192", "cluster1018\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.128", "cluster1853\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872393, - "finishTime" : 1240336936568, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000029", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.127\\.192", "cluster1555\\.secondleveldomain\\.com" ] - }, - "hostName" : 
"/192\\.30\\.127\\.192/cluster1555\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336875187, - "finishTime" : 1240336898191, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000075_0", - "hdfsBytesRead" : 29746839, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2502, - "mapInputRecords" : 2130992, - "mapOutputBytes" : 642, - "mapOutputRecords" : 13, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 13, - "mapInputBytes" : 126084263 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.127\\.192", "cluster1557\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.192", "cluster50214\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.192", "cluster50210\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872394, - "finishTime" : 1240336904493, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000075", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.117\\.128", "cluster1228\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.117\\.128/cluster1228\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873902, - "finishTime" : 1240336914414, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000085_0", - "hdfsBytesRead" : 29405954, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2264, - "mapInputRecords" : 2125410, - "mapOutputBytes" : 307, - "mapOutputRecords" : 6, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 6, - "mapInputBytes" : 125216183 - }, { - "location" : null, - "hostName" : "cluster1239\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336914422, - "finishTime" : 1240336947813, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000085_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.128", "cluster1212\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.117\\.128", "cluster1205\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.64", "cluster1743\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872394, - "finishTime" : 1240336920525, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000085", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.117\\.64", "cluster1180\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.117\\.64/cluster1180\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873955, - "finishTime" : 
1240336917137, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000009_0", - "hdfsBytesRead" : 66872279, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2517067, - "mapInputRecords" : 4251406, - "mapOutputBytes" : 4217383, - "mapOutputRecords" : 65906, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 65906, - "mapInputBytes" : 261611387 - }, { - "location" : null, - "hostName" : "cluster50545\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336916823, - "finishTime" : 1240336950112, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000009_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.64", "cluster1180\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.117\\.64", "cluster1193\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.64", "cluster50281\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872395, - "finishTime" : 1240336920563, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000009", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.62\\.128", "cluster1690\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.62\\.128/cluster1690\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874755, - "finishTime" : 1240336907982, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000078_0", - "hdfsBytesRead" : 29710276, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2244, - "mapInputRecords" : 2103351, - "mapOutputBytes" : 429, - "mapOutputRecords" : 8, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 8, - "mapInputBytes" : 125317251 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.62\\.128", "cluster1703\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.128", "cluster1694\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.0", "cluster1768\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872395, - "finishTime" : 1240336920560, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000078", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.128\\.64", "cluster1639\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.128\\.64/cluster1639\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336879086, - "finishTime" : 1240336891448, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000091_0", - 
"hdfsBytesRead" : 8632081, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 61030, - "mapInputRecords" : 415389, - "mapOutputBytes" : 125127, - "mapOutputRecords" : 2025, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 2025, - "mapInputBytes" : 30313509 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.128\\.64", "cluster1635\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.64", "cluster50317\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.64", "cluster50281\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872396, - "finishTime" : 1240336888496, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000091", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.117\\.192", "cluster1250\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.117\\.192/cluster1250\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874153, - "finishTime" : 1240336895738, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000083_0", - "hdfsBytesRead" : 29568513, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2278, - "mapInputRecords" : 2122899, - "mapOutputBytes" : 302, - "mapOutputRecords" : 5, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5, - "mapInputBytes" : 125860315 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.192", "cluster1250\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.0", "cluster1931\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.0", "cluster1957\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872396, - "finishTime" : 1240336904518, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000083", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.64", "cluster1727\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.63\\.64/cluster1727\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336879765, - "finishTime" : 1240336932976, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000043_0", - "hdfsBytesRead" : 32717815, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 34330747, - "mapInputRecords" : 1341156, - "mapOutputBytes" : 71461506, - "mapOutputRecords" : 1155088, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1155088, - "mapInputBytes" : 99883113 - }, { - "location" : null, - "hostName" : "cluster50135\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336915113, - "finishTime" : 1240336964459, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000043_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - 
"mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.63\\.64", "cluster1752\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.64", "cluster1725\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.64", "cluster50156\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872397, - "finishTime" : 1240336936550, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000043", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.126\\.64", "cluster1356\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.126\\.64/cluster1356\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336872994, - "finishTime" : 1240336894558, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000069_0", - "hdfsBytesRead" : 29896312, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2490, - "mapInputRecords" : 2161066, - "mapOutputBytes" : 626, - "mapOutputRecords" : 9, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 9, - "mapInputBytes" : 127337881 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.64", "cluster1358\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.126\\.64", "cluster1346\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.128", "cluster1518\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872397, - "finishTime" : 1240336904506, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000069", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.126\\.192", "cluster1418\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.126\\.192/cluster1418\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336872526, - "finishTime" : 1240336920970, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000042_0", - "hdfsBytesRead" : 32920579, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 34480517, - "mapInputRecords" : 1354003, - "mapOutputBytes" : 72052350, - "mapOutputRecords" : 1167455, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1167455, - "mapInputBytes" : 100609874 - }, { - "location" : null, - "hostName" : "cluster50192\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336914080, - "finishTime" : 1240336963396, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000042_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - 
"reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.192", "cluster1400\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.128", "cluster50195\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.128", "cluster50179\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872397, - "finishTime" : 1240336936525, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000042", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.129\\.64", "cluster1807\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.129\\.64/cluster1807\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874347, - "finishTime" : 1240336897429, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000052_0", - "hdfsBytesRead" : 30352683, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2466, - "mapInputRecords" : 2223842, - "mapOutputBytes" : 728, - "mapOutputRecords" : 12, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 12, - "mapInputBytes" : 130818373 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.129\\.64", "cluster1832\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.192", "cluster50226\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.192", "cluster50205\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872398, - "finishTime" : 1240336904498, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000052", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.117\\.192", "cluster1249\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.117\\.192/cluster1249\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873038, - "finishTime" : 1240336937714, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000031_0", - "hdfsBytesRead" : 63310753, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2411979, - "mapInputRecords" : 3945460, - "mapOutputBytes" : 4026410, - "mapOutputRecords" : 62373, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 62373, - "mapInputBytes" : 245235770 - }, { - "location" : null, - "hostName" : "cluster50242\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336916045, - "finishTime" : 1240336953642, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000031_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - 
"spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.192", "cluster1248\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.0", "cluster50274\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.0", "cluster50260\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872398, - "finishTime" : 1240336952565, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000031", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.128\\.0", "cluster1561\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.128\\.0/cluster1561\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336876300, - "finishTime" : 1240336927625, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000032_0", - "hdfsBytesRead" : 63245290, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2440937, - "mapInputRecords" : 3984125, - "mapOutputBytes" : 4070942, - "mapOutputRecords" : 62997, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 62997, - "mapInputBytes" : 247088467 - }, { - "location" : null, - "hostName" : "cluster1585\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336915947, - "finishTime" : 1240336965244, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000032_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.128\\.0", "cluster1585\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.64", "cluster3061\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.64", "cluster3079\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872399, - "finishTime" : 1240336936618, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000032", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.126\\.128", "cluster1395\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.126\\.128/cluster1395\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336872064, - "finishTime" : 1240336894757, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000077_0", - "hdfsBytesRead" : 29714767, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2099, - "mapInputRecords" : 2121327, - "mapOutputBytes" : 152, - "mapOutputRecords" : 3, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 3, - "mapInputBytes" : 126146844 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.128", 
"cluster1379\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.126\\.128", "cluster1373\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.128", "cluster3085\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872400, - "finishTime" : 1240336904494, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000077", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.129\\.128", "cluster1877\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.129\\.128/cluster1877\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336872985, - "finishTime" : 1240336894181, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000086_0", - "hdfsBytesRead" : 29331367, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2457, - "mapInputRecords" : 2098982, - "mapOutputBytes" : 513, - "mapOutputRecords" : 9, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 9, - "mapInputBytes" : 124536660 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.129\\.128", "cluster1859\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.128", "cluster1871\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.132\\.192", "cluster50072\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872400, - "finishTime" : 1240336904496, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000086", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.126\\.192", "cluster1437\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.126\\.192/cluster1437\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873019, - "finishTime" : 1240336921372, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000080_0", - "hdfsBytesRead" : 29702894, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2351, - "mapInputRecords" : 2127301, - "mapOutputBytes" : 379, - "mapOutputRecords" : 5, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 5, - "mapInputBytes" : 126316979 - }, { - "location" : null, - "hostName" : "cluster50007\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336924581, - "finishTime" : 1240336946151, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000080_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.127\\.192", "cluster1533\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.192", "cluster1529\\.secondleveldomain\\.com" ] - }, { 
- "layers" : [ "192\\.30\\.63\\.192", "cluster1036\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872401, - "finishTime" : 1240336936524, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000080", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.62\\.128", "cluster1711\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.62\\.128/cluster1711\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336876373, - "finishTime" : 1240336889533, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000099_0", - "hdfsBytesRead" : 8195626, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 53268, - "mapInputRecords" : 364433, - "mapOutputBytes" : 92731, - "mapOutputRecords" : 1473, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1473, - "mapInputBytes" : 26915776 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.62\\.128", "cluster1708\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.128", "cluster1714\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.0", "cluster50085\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872401, - "finishTime" : 1240336888509, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000099", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.0", "cluster1775\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.63\\.0/cluster1775\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874334, - "finishTime" : 1240336895935, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000065_0", - "hdfsBytesRead" : 29991531, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2256, - "mapInputRecords" : 2146107, - "mapOutputBytes" : 291, - "mapOutputRecords" : 4, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 4, - "mapInputBytes" : 127398601 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.64", "cluster1175\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.0", "cluster1762\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.0", "cluster1786\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872405, - "finishTime" : 1240336904543, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000065", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.116\\.64", "cluster1072\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.116\\.64/cluster1072\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336876623, - "finishTime" : 1240336920733, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000016_0", - 
"hdfsBytesRead" : 65311537, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2517895, - "mapInputRecords" : 4137686, - "mapOutputBytes" : 4245719, - "mapOutputRecords" : 65718, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 65718, - "mapInputBytes" : 257371314 - }, { - "location" : null, - "hostName" : "cluster1524\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336913870, - "finishTime" : 1240336947267, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000016_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.64", "cluster1053\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.116\\.64", "cluster1055\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.128", "cluster50459\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872405, - "finishTime" : 1240336920528, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000016", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.129\\.64", "cluster1806\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.129\\.64/cluster1806\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336875605, - "finishTime" : 1240336887030, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000100_0", - "hdfsBytesRead" : 8167681, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 54317, - "mapInputRecords" : 363347, - "mapOutputBytes" : 93315, - "mapOutputRecords" : 1533, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1533, - "mapInputBytes" : 26787874 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.62\\.64", "cluster1669\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.64", "cluster1643\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.64", "cluster1816\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872406, - "finishTime" : 1240336888526, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000100", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.130\\.0", "cluster1920\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.130\\.0/cluster1920\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336875470, - "finishTime" : 1240336912179, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000040_0", - "hdfsBytesRead" : 60683975, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2370660, - "mapInputRecords" 
: 3782452, - "mapOutputBytes" : 3971981, - "mapOutputRecords" : 61525, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 61525, - "mapInputBytes" : 235446580 - }, { - "location" : null, - "hostName" : "cluster1953\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336916862, - "finishTime" : 1240336950159, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000040_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.64", "cluster1167\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.0", "cluster1924\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.0", "cluster1925\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872407, - "finishTime" : 1240336920542, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000040", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.127\\.64", "cluster1448\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.127\\.64/cluster1448\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336872975, - "finishTime" : 1240336883733, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000097_0", - "hdfsBytesRead" : 8245699, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 57116, - "mapInputRecords" : 361754, - "mapOutputBytes" : 109299, - "mapOutputRecords" : 1733, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1733, - "mapInputBytes" : 27052310 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.127\\.128", "cluster1515\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.64", "cluster1449\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.64", "cluster1446\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872407, - "finishTime" : 1240336888495, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000097", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.128\\.64", "cluster1610\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.128\\.64/cluster1610\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874243, - "finishTime" : 1240336886322, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000114_0", - "hdfsBytesRead" : 7815881, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 56450, - "mapInputRecords" : 338147, - "mapOutputBytes" : 101843, - "mapOutputRecords" : 1713, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - 
"reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1713, - "mapInputBytes" : 25352645 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.128\\.64", "cluster1629\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.64", "cluster1615\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.64", "cluster1989\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872408, - "finishTime" : 1240336888543, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000114", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.135\\.64", "cluster3068\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.135\\.64/cluster3068\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336871157, - "finishTime" : 1240336903920, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000030_0", - "hdfsBytesRead" : 63316983, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2499856, - "mapInputRecords" : 3998244, - "mapOutputBytes" : 4194242, - "mapOutputRecords" : 65558, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 65558, - "mapInputBytes" : 247082392 - }, { - "location" : null, - "hostName" : "cluster50114\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336915039, - "finishTime" : 1240336948328, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000030_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.133\\.0", "cluster50101\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.64", "cluster3054\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.64", "cluster3070\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872408, - "finishTime" : 1240336920494, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000030", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.135\\.64", "cluster3073\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.135\\.64/cluster3073\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873209, - "finishTime" : 1240336892920, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000048_0", - "hdfsBytesRead" : 30548276, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2470, - "mapInputRecords" : 2272297, - "mapOutputBytes" : 588, - "mapOutputRecords" : 8, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 8, - "mapInputBytes" : 
133071879 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.133\\.192", "cluster50232\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.64", "cluster3077\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.64", "cluster3055\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872409, - "finishTime" : 1240336904497, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000048", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.0", "cluster1788\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.63\\.0/cluster1788\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336887602, - "finishTime" : 1240336909157, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000072_0", - "hdfsBytesRead" : 29828627, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2687, - "mapInputRecords" : 2132740, - "mapOutputBytes" : 935, - "mapOutputRecords" : 16, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 16, - "mapInputBytes" : 126719922 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.62\\.64", "cluster1650\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.64", "cluster1666\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.0", "cluster1760\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872409, - "finishTime" : 1240336904505, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000072", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.127\\.192", "cluster1538\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.127\\.192/cluster1538\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874182, - "finishTime" : 1240336886198, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000105_0", - "hdfsBytesRead" : 7999674, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 56463, - "mapInputRecords" : 351714, - "mapOutputBytes" : 102216, - "mapOutputRecords" : 1667, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1667, - "mapInputBytes" : 26137712 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.127\\.192", "cluster1550\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.192", "cluster1557\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.192", "cluster50490\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872410, - "finishTime" : 1240336888521, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000105", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.116\\.128", "cluster1095\\.secondleveldomain\\.com" ] - }, - "hostName" : 
"/192\\.30\\.116\\.128/cluster1095\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873957, - "finishTime" : 1240336885026, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000095_0", - "hdfsBytesRead" : 8324595, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 60582, - "mapInputRecords" : 370323, - "mapOutputBytes" : 112110, - "mapOutputRecords" : 1745, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1745, - "mapInputBytes" : 27631638 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.128", "cluster1097\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.132\\.192", "cluster50058\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.132\\.192", "cluster50047\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872410, - "finishTime" : 1240336888498, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000095", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.135\\.128", "cluster3085\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.135\\.128/cluster3085\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336883981, - "finishTime" : 1240336903185, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000058_0", - "hdfsBytesRead" : 30204544, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2544, - "mapInputRecords" : 2174809, - "mapOutputBytes" : 619, - "mapOutputRecords" : 12, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 12, - "mapInputBytes" : 128829948 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.63\\.192", "cluster1029\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.128", "cluster3111\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.128", "cluster3085\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872411, - "finishTime" : 1240336904451, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000058", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.64", "cluster1736\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.63\\.64/cluster1736\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874390, - "finishTime" : 1240336885111, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000129_0", - "hdfsBytesRead" : 7596302, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 56324, - "mapInputRecords" : 329237, - "mapOutputBytes" : 101487, - "mapOutputRecords" : 1619, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1619, - "mapInputBytes" : 24704627 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.63\\.0", "cluster1796\\.secondleveldomain\\.com" ] - }, { - 
"layers" : [ "192\\.30\\.63\\.64", "cluster1736\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.64", "cluster1757\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872411, - "finishTime" : 1240336888504, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000129", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.135\\.64", "cluster3075\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.135\\.64/cluster3075\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336871519, - "finishTime" : 1240336890113, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000084_0", - "hdfsBytesRead" : 29527352, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2526, - "mapInputRecords" : 2107299, - "mapOutputBytes" : 542, - "mapOutputRecords" : 9, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 9, - "mapInputBytes" : 125356348 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.134\\.192", "cluster50375\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.192", "cluster50382\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.64", "cluster3076\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872412, - "finishTime" : 1240336904452, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000084", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.117\\.192", "cluster1276\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.117\\.192/cluster1276\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873160, - "finishTime" : 1240336910195, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000034_0", - "hdfsBytesRead" : 62826152, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2442116, - "mapInputRecords" : 3912487, - "mapOutputBytes" : 4109087, - "mapOutputRecords" : 63793, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 63793, - "mapInputBytes" : 243815199 - }, { - "location" : null, - "hostName" : "cluster50061\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336948611, - "finishTime" : 1240336986156, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000034_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.192", "cluster1276\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.117\\.192", "cluster1243\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.132\\.192", 
"cluster50046\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872412, - "finishTime" : 1240336920545, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000034", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.64", "cluster1722\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.63\\.64/cluster1722\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336886217, - "finishTime" : 1240336899148, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000092_0", - "hdfsBytesRead" : 8571804, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 65830, - "mapInputRecords" : 382509, - "mapOutputBytes" : 153711, - "mapOutputRecords" : 2418, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 2418, - "mapInputBytes" : 28713382 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.128", "cluster1221\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.64", "cluster1731\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.64", "cluster1732\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872413, - "finishTime" : 1240336888549, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000092", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.135\\.128", "cluster3103\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.135\\.128/cluster3103\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336871792, - "finishTime" : 1240336904149, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000039_0", - "hdfsBytesRead" : 61493951, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2383004, - "mapInputRecords" : 3826526, - "mapOutputBytes" : 4014747, - "mapOutputRecords" : 62222, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 62222, - "mapInputBytes" : 238695445 - }, { - "location" : null, - "hostName" : "cluster50119\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336914788, - "finishTime" : 1240336948132, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000039_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.134\\.128", "cluster50348\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.128", "cluster50352\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.128", "cluster3094\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872415, - "finishTime" : 1240336920500, - 
"inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000039", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.135\\.128", "cluster3099\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.135\\.128/cluster3099\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336879295, - "finishTime" : 1240336889446, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000094_0", - "hdfsBytesRead" : 8363130, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 61542, - "mapInputRecords" : 362353, - "mapOutputBytes" : 116919, - "mapOutputRecords" : 1812, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1812, - "mapInputBytes" : 27305556 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.0", "cluster1316\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.126\\.0", "cluster1283\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.128", "cluster3097\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872416, - "finishTime" : 1240336888457, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000094", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.133\\.128", "cluster50185\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.133\\.128/cluster50185\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874482, - "finishTime" : 1240336901837, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000112_0", - "hdfsBytesRead" : 7897873, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 59138, - "mapInputRecords" : 348461, - "mapOutputBytes" : 109578, - "mapOutputRecords" : 1815, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1815, - "mapInputBytes" : 25836442 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.133\\.128", "cluster50174\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.128", "cluster50185\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.64", "cluster50120\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872418, - "finishTime" : 1240336904531, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000112", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.134\\.64", "cluster50294\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.134\\.64/cluster50294\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873068, - "finishTime" : 1240336912576, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000020_0", - "hdfsBytesRead" : 64761324, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2512516, - "mapInputRecords" : 4036072, - 
"mapOutputBytes" : 4220545, - "mapOutputRecords" : 65595, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 65595, - "mapInputBytes" : 251142910 - }, { - "location" : null, - "hostName" : "cluster50045\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336915002, - "finishTime" : 1240336950197, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000020_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.132\\.192", "cluster50055\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.64", "cluster50316\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.64", "cluster50315\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872419, - "finishTime" : 1240336920577, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000020", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.127\\.64", "cluster1447\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.127\\.64/cluster1447\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336871738, - "finishTime" : 1240336885628, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000120_0", - "hdfsBytesRead" : 7756197, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 57141, - "mapInputRecords" : 332797, - "mapOutputBytes" : 105980, - "mapOutputRecords" : 1680, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1680, - "mapInputBytes" : 25063052 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.127\\.128", "cluster1491\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.128", "cluster1480\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.64", "cluster1444\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872419, - "finishTime" : 1240336888501, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000120", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.134\\.64", "cluster50300\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.134\\.64/cluster50300\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873077, - "finishTime" : 1240336890562, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000117_0", - "hdfsBytesRead" : 7779599, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 54489, - "mapInputRecords" : 342102, - "mapOutputBytes" : 99769, - "mapOutputRecords" : 1593, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - 
"reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1593, - "mapInputBytes" : 25758088 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.192", "cluster1259\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.117\\.192", "cluster1256\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.192", "cluster1525\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872420, - "finishTime" : 1240336904516, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000117", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.130\\.64", "cluster1969\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.130\\.64/cluster1969\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874912, - "finishTime" : 1240336912529, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000027_0", - "hdfsBytesRead" : 63744100, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2482609, - "mapInputRecords" : 3916909, - "mapOutputBytes" : 4146823, - "mapOutputRecords" : 64050, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 64050, - "mapInputBytes" : 245872068 - }, { - "location" : null, - "hostName" : "cluster1990\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336919940, - "finishTime" : 1240336953269, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000027_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.192", "cluster1154\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.64", "cluster1999\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.64", "cluster1967\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872420, - "finishTime" : 1240336920525, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000027", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.127\\.128", "cluster1498\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.127\\.128/cluster1498\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336876312, - "finishTime" : 1240336913198, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000056_0", - "hdfsBytesRead" : 30250367, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2417, - "mapInputRecords" : 2228688, - "mapOutputBytes" : 448, - "mapOutputRecords" : 9, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 9, - 
"mapInputBytes" : 130399482 - }, { - "location" : null, - "hostName" : "cluster50040\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336915085, - "finishTime" : 1240336948419, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000056_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.127\\.128", "cluster1517\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.132\\.192", "cluster50053\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.132\\.192", "cluster50057\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872421, - "finishTime" : 1240336921375, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000056", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.135\\.128", "cluster3108\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.135\\.128/cluster3108\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336744382, - "finishTime" : 1240336754327, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000118_0", - "hdfsBytesRead" : 7761361, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 54570, - "mapInputRecords" : 333787, - "mapOutputBytes" : 97693, - "mapOutputRecords" : 1550, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1550, - "mapInputBytes" : 25134207 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.127\\.192", "cluster1550\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.192", "cluster1533\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.0", "cluster1928\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872421, - "finishTime" : 1240336888458, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000118", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.134\\.64", "cluster50296\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.134\\.64/cluster50296\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873049, - "finishTime" : 1240336907869, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000053_0", - "hdfsBytesRead" : 30302479, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2191, - "mapInputRecords" : 2169494, - "mapOutputBytes" : 201, - "mapOutputRecords" : 4, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 4, - "mapInputBytes" : 129763826 - }, { - "location" : null, - "hostName" : "cluster1937\\.secondleveldomain\\.com", - "result" : 
"KILLED", - "startTime" : 1240336915752, - "finishTime" : 1240336948962, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000053_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.64", "cluster1075\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.116\\.64", "cluster1042\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.0", "cluster1938\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872422, - "finishTime" : 1240336920575, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000053", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.0", "cluster1768\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.63\\.0/cluster1768\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336886240, - "finishTime" : 1240336908239, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000082_0", - "hdfsBytesRead" : 29653562, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2207, - "mapInputRecords" : 2109744, - "mapOutputBytes" : 228, - "mapOutputRecords" : 4, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 4, - "mapInputBytes" : 125417788 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.63\\.0", "cluster1772\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.132\\.128", "cluster50025\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.132\\.128", "cluster50023\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872423, - "finishTime" : 1240336904530, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000082", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.133\\.0", "cluster50108\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.133\\.0/cluster50108\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336915063, - "finishTime" : 1240336930514, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000110_1", - "hdfsBytesRead" : 7921493, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 54235, - "mapInputRecords" : 349247, - "mapOutputBytes" : 110600, - "mapOutputRecords" : 1804, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1804, - "mapInputBytes" : 26033361 - }, { - "location" : null, - "hostName" : "cluster50097\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336873099, - "finishTime" : 1240336927622, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : 
"attempt_200904211745_0004_m_000110_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.62\\.64", "cluster1652\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.0", "cluster50108\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.0", "cluster50113\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872423, - "finishTime" : 1240336930526, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000110", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.134\\.192", "cluster50382\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.134\\.192/cluster50382\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873058, - "finishTime" : 1240336913430, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000005_0", - "hdfsBytesRead" : 68085382, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2636901, - "mapInputRecords" : 4335874, - "mapOutputBytes" : 4439823, - "mapOutputRecords" : 69636, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 69636, - "mapInputBytes" : 266679989 - }, { - "location" : null, - "hostName" : "cluster50186\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336915168, - "finishTime" : 1240336948458, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000005_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.133\\.128", "cluster50166\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.192", "cluster50372\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.192", "cluster50377\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872424, - "finishTime" : 1240336920570, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000005", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.134\\.192", "cluster50377\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.134\\.192/cluster50377\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873119, - "finishTime" : 1240336889361, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000089_0", - "hdfsBytesRead" : 8799897, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" 
: 55966, - "mapInputRecords" : 414955, - "mapOutputBytes" : 103189, - "mapOutputRecords" : 1701, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1701, - "mapInputBytes" : 29805669 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.133\\.192", "cluster50232\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.192", "cluster50365\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.192", "cluster50364\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872425, - "finishTime" : 1240336904514, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000089", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.134\\.128", "cluster50348\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.134\\.128/cluster50348\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873048, - "finishTime" : 1240336886665, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000107_0", - "hdfsBytesRead" : 7976981, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 54786, - "mapInputRecords" : 382253, - "mapOutputBytes" : 99015, - "mapOutputRecords" : 1613, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1613, - "mapInputBytes" : 27687269 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.132\\.192", "cluster50043\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.128", "cluster50326\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.128", "cluster50325\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872425, - "finishTime" : 1240336888490, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000107", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.128\\.128", "cluster50477\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.128\\.128/cluster50477\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336877770, - "finishTime" : 1240336917699, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000004_0", - "hdfsBytesRead" : 68248954, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2553348, - "mapInputRecords" : 4274813, - "mapOutputBytes" : 4305896, - "mapOutputRecords" : 66582, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 66582, - "mapInputBytes" : 265135117 - }, { - "location" : null, - "hostName" : "cluster1164\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336916017, - "finishTime" : 1240336949262, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000004_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - 
"combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.128\\.128", "cluster50447\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.128", "cluster50464\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.192", "cluster50491\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872426, - "finishTime" : 1240336920537, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000004", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.116\\.64", "cluster1078\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.116\\.64/cluster1078\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873042, - "finishTime" : 1240336905504, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000073_0", - "hdfsBytesRead" : 29810727, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2406, - "mapInputRecords" : 2120499, - "mapOutputBytes" : 442, - "mapOutputRecords" : 8, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 8, - "mapInputBytes" : 126971404 - }, { - "location" : null, - "hostName" : "cluster1301\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336915497, - "finishTime" : 1240336948840, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000073_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.64", "cluster1061\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.116\\.64", "cluster1040\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.0", "cluster50245\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872427, - "finishTime" : 1240336920518, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000073", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.129\\.0", "cluster50520\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.129\\.0/cluster50520\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874158, - "finishTime" : 1240336896172, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000054_0", - "hdfsBytesRead" : 30291933, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2467, - "mapInputRecords" : 2197164, - "mapOutputBytes" : 537, - "mapOutputRecords" : 9, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - 
"spilledRecords" : 9, - "mapInputBytes" : 130029656 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.129\\.0", "cluster50530\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.0", "cluster50526\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.64", "cluster1970\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872427, - "finishTime" : 1240336904495, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000054", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.134\\.64", "cluster50319\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.134\\.64/cluster50319\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873105, - "finishTime" : 1240336899922, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000071_0", - "hdfsBytesRead" : 29867527, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2529, - "mapInputRecords" : 2168718, - "mapOutputBytes" : 639, - "mapOutputRecords" : 9, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 9, - "mapInputBytes" : 128006719 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.63\\.192", "cluster1032\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.192", "cluster1026\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.0", "cluster1936\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872428, - "finishTime" : 1240336904508, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000071", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.128\\.0", "cluster1583\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.128\\.0/cluster1583\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336878767, - "finishTime" : 1240336889771, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000116_0", - "hdfsBytesRead" : 7788625, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 55129, - "mapInputRecords" : 334841, - "mapOutputBytes" : 105424, - "mapOutputRecords" : 1675, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1675, - "mapInputBytes" : 25214728 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.128\\.0", "cluster1567\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.0", "cluster1594\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.192", "cluster1915\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872429, - "finishTime" : 1240336888563, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000116", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.129\\.192", "cluster1883\\.secondleveldomain\\.com" ] - }, - "hostName" : 
"/194\\.6\\.129\\.192/cluster1883\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336875263, - "finishTime" : 1240336916121, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000035_0", - "hdfsBytesRead" : 62277748, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2394254, - "mapInputRecords" : 3851918, - "mapOutputBytes" : 4034362, - "mapOutputRecords" : 62292, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 62292, - "mapInputBytes" : 241639956 - }, { - "location" : null, - "hostName" : "cluster1891\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336914750, - "finishTime" : 1240336948061, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000035_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.63\\.192", "cluster1017\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.192", "cluster1019\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.192", "cluster1900\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872430, - "finishTime" : 1240336920630, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000035", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.134\\.64", "cluster50297\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.134\\.64/cluster50297\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873087, - "finishTime" : 1240336887207, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000103_0", - "hdfsBytesRead" : 8054880, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 57203, - "mapInputRecords" : 352581, - "mapOutputBytes" : 105232, - "mapOutputRecords" : 1677, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1677, - "mapInputBytes" : 26276597 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.128\\.192", "cluster50490\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.0", "cluster1959\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.0", "cluster1933\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872430, - "finishTime" : 1240336888512, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000103", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.133\\.192", "cluster50203\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.133\\.192/cluster50203\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873054, - 
"finishTime" : 1240336940061, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000036_0", - "hdfsBytesRead" : 62256077, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2425449, - "mapInputRecords" : 3896871, - "mapOutputBytes" : 4079715, - "mapOutputRecords" : 63812, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 63812, - "mapInputBytes" : 242202529 - }, { - "location" : null, - "hostName" : "cluster50170\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336915065, - "finishTime" : 1240336955862, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000036_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.133\\.128", "cluster50170\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.192", "cluster50208\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.192", "cluster50210\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872431, - "finishTime" : 1240336952548, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000036", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.134\\.64", "cluster50291\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.134\\.64/cluster50291\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873125, - "finishTime" : 1240336888240, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000125_0", - "hdfsBytesRead" : 7644012, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 56569, - "mapInputRecords" : 330028, - "mapOutputBytes" : 107007, - "mapOutputRecords" : 1684, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1684, - "mapInputBytes" : 24814596 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.192", "cluster1144\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.116\\.192", "cluster1141\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.0", "cluster1953\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872431, - "finishTime" : 1240336888528, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000125", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.130\\.0", "cluster1936\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.130\\.0/cluster1936\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873757, - "finishTime" : 1240336886908, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : 
"attempt_200904211745_0004_m_000101_0", - "hdfsBytesRead" : 8100684, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 64050, - "mapInputRecords" : 353384, - "mapOutputBytes" : 148877, - "mapOutputRecords" : 2652, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 2652, - "mapInputBytes" : 26767647 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.127\\.128", "cluster1503\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.64", "cluster1961\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.64", "cluster1963\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872433, - "finishTime" : 1240336888492, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000101", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.63\\.0", "cluster1778\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.63\\.0/cluster1778\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873042, - "finishTime" : 1240336884233, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000119_0", - "hdfsBytesRead" : 7760781, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 54166, - "mapInputRecords" : 342134, - "mapOutputBytes" : 100849, - "mapOutputRecords" : 1633, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1633, - "mapInputBytes" : 25522379 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.128", "cluster1107\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.0", "cluster1798\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.0", "cluster1789\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872435, - "finishTime" : 1240336888500, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000119", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.134\\.128", "cluster50342\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.134\\.128/cluster50342\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873123, - "finishTime" : 1240336886459, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000130_0", - "hdfsBytesRead" : 7567997, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 58226, - "mapInputRecords" : 330387, - "mapOutputBytes" : 106187, - "mapOutputRecords" : 1681, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1681, - "mapInputBytes" : 24831142 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.192", "cluster1272\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.128", "cluster1683\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.128", "cluster1707\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872440, - "finishTime" : 
1240336888519, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000130", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.129\\.64", "cluster1813\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.129\\.64/cluster1813\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336875527, - "finishTime" : 1240336911619, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000050_0", - "hdfsBytesRead" : 30483937, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2986, - "mapInputRecords" : 2181690, - "mapOutputBytes" : 2645, - "mapOutputRecords" : 59, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 59, - "mapInputBytes" : 129508563 - }, { - "location" : null, - "hostName" : "cluster50099\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336915651, - "finishTime" : 1240336949203, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000050_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.192", "cluster1151\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.64", "cluster1679\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.64", "cluster1678\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872447, - "finishTime" : 1240336920621, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000050", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.129\\.192", "cluster1906\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.129\\.192/cluster1906\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336877854, - "finishTime" : 1240336915858, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000090_0", - "hdfsBytesRead" : 8744991, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 59096, - "mapInputRecords" : 400419, - "mapOutputBytes" : 114548, - "mapOutputRecords" : 1879, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1879, - "mapInputBytes" : 28968801 - }, { - "location" : null, - "hostName" : "cluster50041\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336915097, - "finishTime" : 1240336966677, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000090_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" 
: -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.129\\.192", "cluster1891\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.192", "cluster1913\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.132\\.192", "cluster50051\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872493, - "finishTime" : 1240336920834, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000090", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.133\\.64", "cluster50124\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.133\\.64/cluster50124\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336915274, - "finishTime" : 1240336938869, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000067_1", - "hdfsBytesRead" : 29917139, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2179, - "mapInputRecords" : 2151889, - "mapOutputBytes" : 189, - "mapOutputRecords" : 4, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 4, - "mapInputBytes" : 127323156 - }, { - "location" : null, - "hostName" : "cluster50475\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336877497, - "finishTime" : 1240336951548, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000067_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.128\\.128", "cluster50444\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.64", "cluster50124\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.64", "cluster50156\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872506, - "finishTime" : 1240336946437, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000067", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.129\\.192", "cluster1903\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.129\\.192/cluster1903\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336877965, - "finishTime" : 1240336889156, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000109_0", - "hdfsBytesRead" : 7954082, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 59357, - "mapInputRecords" : 344387, - "mapOutputBytes" : 107739, - "mapOutputRecords" : 1775, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" 
: 1775, - "mapInputBytes" : 25743781 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.129\\.128", "cluster1841\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.128", "cluster1842\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.129\\.192", "cluster1907\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872544, - "finishTime" : 1240336888614, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000109", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.133\\.192", "cluster50231\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.133\\.192/cluster50231\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874559, - "finishTime" : 1240336902529, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000051_0", - "hdfsBytesRead" : 30393039, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2309, - "mapInputRecords" : 2170141, - "mapOutputBytes" : 418, - "mapOutputRecords" : 9, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 9, - "mapInputBytes" : 128768431 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.192", "cluster1130\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.116\\.192", "cluster1158\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.192", "cluster50205\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872547, - "finishTime" : 1240336904655, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000051", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.133\\.64", "cluster50159\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.133\\.64/cluster50159\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873969, - "finishTime" : 1240336897513, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000070_0", - "hdfsBytesRead" : 29891233, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2372, - "mapInputRecords" : 2181020, - "mapOutputBytes" : 492, - "mapOutputRecords" : 9, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 9, - "mapInputBytes" : 128287582 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.128\\.192", "cluster50484\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.192", "cluster50509\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.64", "cluster50133\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872651, - "finishTime" : 1240336904748, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000070", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.116\\.128", "cluster1102\\.secondleveldomain\\.com" ] - }, - "hostName" : 
"/192\\.30\\.116\\.128/cluster1102\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336876290, - "finishTime" : 1240336900685, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000126_0", - "hdfsBytesRead" : 7643780, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 50624, - "mapInputRecords" : 332992, - "mapOutputBytes" : 89335, - "mapOutputRecords" : 1413, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1413, - "mapInputBytes" : 24892336 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.128", "cluster1087\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.192", "cluster50420\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.192", "cluster50429\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872665, - "finishTime" : 1240336905377, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000126", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.130\\.64", "cluster1976\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.130\\.64/cluster1976\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336916201, - "finishTime" : 1240336948646, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000055_1", - "hdfsBytesRead" : 30263792, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2383, - "mapInputRecords" : 2169198, - "mapOutputBytes" : 547, - "mapOutputRecords" : 7, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 7, - "mapInputBytes" : 128672563 - }, { - "location" : null, - "hostName" : "cluster1735\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336884134, - "finishTime" : 1240336962153, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000055_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.62\\.64", "cluster1669\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.132\\.192", "cluster50076\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.132\\.192", "cluster50077\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872669, - "finishTime" : 1240336962561, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000055", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.134\\.0", "cluster50252\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.134\\.0/cluster50252\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873488, - "finishTime" : 
1240336913959, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000108_0", - "hdfsBytesRead" : 7959315, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 55584, - "mapInputRecords" : 358754, - "mapOutputBytes" : 100438, - "mapOutputRecords" : 1658, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1658, - "mapInputBytes" : 26607263 - }, { - "location" : null, - "hostName" : "cluster1738\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336925060, - "finishTime" : 1240336958446, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000108_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.128\\.128", "cluster50460\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.128", "cluster50440\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.0", "cluster50272\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872723, - "finishTime" : 1240336920897, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000108", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.132\\.128", "cluster50034\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.132\\.128/cluster50034\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336915103, - "finishTime" : 1240336927032, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000102_1", - "hdfsBytesRead" : 8093114, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 57157, - "mapInputRecords" : 352241, - "mapOutputBytes" : 105550, - "mapOutputRecords" : 1675, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1675, - "mapInputBytes" : 26466064 - }, { - "location" : null, - "hostName" : "cluster50303\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336873423, - "finishTime" : 1240336926231, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000102_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.62\\.64", "cluster1655\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.64", "cluster1648\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.132\\.192", "cluster50059\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872762, - "finishTime" : 
1240336930552, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000102", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.134\\.192", "cluster50368\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.134\\.192/cluster50368\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873439, - "finishTime" : 1240336902317, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000106_0", - "hdfsBytesRead" : 7984742, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 61170, - "mapInputRecords" : 347188, - "mapOutputBytes" : 117181, - "mapOutputRecords" : 1839, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1839, - "mapInputBytes" : 26617003 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.62\\.64", "cluster1678\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.64", "cluster1654\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.128\\.192", "cluster50485\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872831, - "finishTime" : 1240336904966, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000106", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.133\\.64", "cluster50145\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.133\\.64/cluster50145\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874931, - "finishTime" : 1240336890399, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000121_0", - "hdfsBytesRead" : 7753496, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 54465, - "mapInputRecords" : 335167, - "mapOutputBytes" : 104220, - "mapOutputRecords" : 1650, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1650, - "mapInputBytes" : 25170333 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.128", "cluster1388\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.126\\.128", "cluster1383\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.64", "cluster50133\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872869, - "finishTime" : 1240336904994, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000121", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.128\\.192", "cluster50513\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.128\\.192/cluster50513\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874944, - "finishTime" : 1240336885951, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000123_0", - "hdfsBytesRead" : 7711498, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 57707, - "mapInputRecords" 
: 336739, - "mapOutputBytes" : 111288, - "mapOutputRecords" : 1762, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1762, - "mapInputBytes" : 25234362 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.127\\.64", "cluster1474\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.64", "cluster1445\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.64", "cluster1654\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872873, - "finishTime" : 1240336888962, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000123", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "cluster50458\\.secondleveldomain\\.com", - "result" : "FAILED", - "startTime" : 1240336873610, - "finishTime" : 1240336892202, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000113_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - }, { - "location" : { - "layers" : [ "192\\.30\\.116\\.192", "cluster1150\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.116\\.192/cluster1150\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336905590, - "finishTime" : 1240336916127, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000113_1", - "hdfsBytesRead" : 7838525, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 54305, - "mapInputRecords" : 342277, - "mapOutputBytes" : 100228, - "mapOutputRecords" : 1564, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1564, - "mapInputBytes" : 25563399 - }, { - "location" : null, - "hostName" : "cluster3063\\.secondleveldomain\\.com", - "result" : "FAILED", - "startTime" : 1240336937806, - "finishTime" : 1240336971856, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000113_2", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - }, { - "location" : null, - "hostName" : "cluster3063\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336937806, - "finishTime" : 1240336971856, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000113_3", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : 
-1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.128\\.128", "cluster50468\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.64", "cluster3058\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.64", "cluster3063\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336872938, - "finishTime" : 1240336921233, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000113", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.135\\.64", "cluster3077\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.135\\.64/cluster3077\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336883475, - "finishTime" : 1240336893425, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000128_0", - "hdfsBytesRead" : 7614230, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 57139, - "mapInputRecords" : 327184, - "mapOutputBytes" : 98814, - "mapOutputRecords" : 1560, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1560, - "mapInputBytes" : 24696916 - } ], - "preferredLocations" : [ { - "layers" : [ "194\\.6\\.134\\.0", "cluster50243\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.134\\.0", "cluster50279\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.135\\.64", "cluster3057\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336873047, - "finishTime" : 1240336889130, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000128", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.126\\.192", "cluster1429\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.126\\.192/cluster1429\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336869816, - "finishTime" : 1240336880564, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000124_0", - "hdfsBytesRead" : 7699474, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 57466, - "mapInputRecords" : 335634, - "mapOutputBytes" : 114714, - "mapOutputRecords" : 1809, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1809, - "mapInputBytes" : 25212522 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.192", "cluster1139\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.64", "cluster1467\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.127\\.64", "cluster1479\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336873048, - "finishTime" : 1240336889146, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000124", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.127\\.64", "cluster1466\\.secondleveldomain\\.com" ] - }, - 
"hostName" : "/192\\.30\\.127\\.64/cluster1466\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336869967, - "finishTime" : 1240336905157, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000093_0", - "hdfsBytesRead" : 8500796, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 61110, - "mapInputRecords" : 381639, - "mapOutputBytes" : 111387, - "mapOutputRecords" : 1871, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1871, - "mapInputBytes" : 28132206 - }, { - "location" : null, - "hostName" : "cluster50174\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336914921, - "finishTime" : 1240336926260, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000093_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.192", "cluster1157\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.128", "cluster50174\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.128", "cluster50177\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336873049, - "finishTime" : 1240336921197, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000093", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.116\\.192", "cluster1150\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.116\\.192/cluster1150\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336873948, - "finishTime" : 1240336901907, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000044_0", - "hdfsBytesRead" : 31080028, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2324, - "mapInputRecords" : 2317829, - "mapOutputBytes" : 406, - "mapOutputRecords" : 8, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 8, - "mapInputBytes" : 136117012 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.192", "cluster1140\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.116\\.192", "cluster1124\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.132\\.192", "cluster50058\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336873050, - "finishTime" : 1240336905227, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000044", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.127\\.192", "cluster1531\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.127\\.192/cluster1531\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 
1240336874990, - "finishTime" : 1240336911553, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000104_0", - "hdfsBytesRead" : 8040935, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 55143, - "mapInputRecords" : 350537, - "mapOutputBytes" : 100554, - "mapOutputRecords" : 1616, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1616, - "mapInputBytes" : 26367904 - }, { - "location" : null, - "hostName" : "cluster3092\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336961643, - "finishTime" : 1240336971382, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000104_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.117\\.128", "cluster1235\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.128", "cluster50171\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.128", "cluster50161\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336873051, - "finishTime" : 1240336921172, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000104", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.126\\.0", "cluster1305\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.126\\.0/cluster1305\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874698, - "finishTime" : 1240336924089, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000076_0", - "hdfsBytesRead" : 29718365, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 2434, - "mapInputRecords" : 2122176, - "mapOutputBytes" : 514, - "mapOutputRecords" : 8, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 8, - "mapInputBytes" : 125964532 - }, { - "location" : null, - "hostName" : "cluster1964\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336915897, - "finishTime" : 1240336940807, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000076_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.64", "cluster1053\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.116\\.64", "cluster1056\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.130\\.64", "cluster1968\\.secondleveldomain\\.com" ] - } ], - "startTime" : 
1240336873052, - "finishTime" : 1240336937226, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000076", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.127\\.64", "cluster1454\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.127\\.64/cluster1454\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874635, - "finishTime" : 1240336886965, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000096_0", - "hdfsBytesRead" : 8294959, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 57076, - "mapInputRecords" : 373384, - "mapOutputBytes" : 115997, - "mapOutputRecords" : 1854, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1854, - "mapInputBytes" : 27404774 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.63\\.192", "cluster1028\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.192", "cluster1030\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.132\\.192", "cluster50044\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336873053, - "finishTime" : 1240336889184, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000096", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.134\\.0", "cluster50254\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.134\\.0/cluster50254\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874658, - "finishTime" : 1240336888484, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000127_0", - "hdfsBytesRead" : 7627825, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 60903, - "mapInputRecords" : 330989, - "mapOutputBytes" : 120185, - "mapOutputRecords" : 1985, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1985, - "mapInputBytes" : 24806195 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.116\\.192", "cluster1157\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.116\\.192", "cluster1136\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.62\\.192", "cluster50409\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336873054, - "finishTime" : 1240336889174, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000127", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.126\\.64", "cluster1328\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.126\\.64/cluster1328\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336874488, - "finishTime" : 1240336913539, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000122_0", - "hdfsBytesRead" : 7746591, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" 
: 55261, - "mapInputRecords" : 337548, - "mapOutputBytes" : 110325, - "mapOutputRecords" : 1750, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1750, - "mapInputBytes" : 25159067 - }, { - "location" : null, - "hostName" : "cluster1060\\.secondleveldomain\\.com", - "result" : "KILLED", - "startTime" : 1240336915247, - "finishTime" : 1240336927337, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000122_1", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : -1, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.126\\.128", "cluster1370\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.126\\.128", "cluster1378\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "192\\.30\\.63\\.0", "cluster1793\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336873055, - "finishTime" : 1240336921185, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000122", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - }, { - "attempts" : [ { - "location" : { - "layers" : [ "192\\.30\\.62\\.64", "cluster1641\\.secondleveldomain\\.com" ] - }, - "hostName" : "/192\\.30\\.62\\.64/cluster1641\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336879658, - "finishTime" : 1240336890529, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000111_0", - "hdfsBytesRead" : 7914429, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : 58343, - "mapInputRecords" : 345958, - "mapOutputBytes" : 107374, - "mapOutputRecords" : 1748, - "combineInputRecords" : 0, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 1748, - "mapInputBytes" : 25806139 - } ], - "preferredLocations" : [ { - "layers" : [ "192\\.30\\.62\\.192", "cluster50418\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.0", "cluster50090\\.secondleveldomain\\.com" ] - }, { - "layers" : [ "194\\.6\\.133\\.0", "cluster50117\\.secondleveldomain\\.com" ] - } ], - "startTime" : 1240336873056, - "finishTime" : 1240336889145, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000111", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "MAP" - } ], - "reduceTasks" : [ { - "attempts" : [ { - "location" : null, - "hostName" : "/194\\.6\\.134\\.64/cluster50303\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336888934, - "finishTime" : 1240336985264, - "shuffleFinished" : 1240336974089, - "sortFinished" : 1240336974613, - "attemptID" : "attempt_200904211745_0004_r_000000_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1514383, - "fileBytesRead" : 2632927, - "fileBytesWritten" : 2632927, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 135445, - 
"reduceInputRecords" : 139391, - "reduceShuffleBytes" : 4405338, - "reduceOutputRecords" : 139391, - "spilledRecords" : 139391, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336888851, - "finishTime" : 1240337000993, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000000", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/194\\.6\\.134\\.0/cluster50252\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336888952, - "finishTime" : 1240336985501, - "shuffleFinished" : 1240336971396, - "sortFinished" : 1240336971892, - "attemptID" : "attempt_200904211745_0004_r_000001_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1499786, - "fileBytesRead" : 2600635, - "fileBytesWritten" : 2600635, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 127806, - "reduceInputRecords" : 130935, - "reduceShuffleBytes" : 4415977, - "reduceOutputRecords" : 130935, - "spilledRecords" : 130935, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336888869, - "finishTime" : 1240337000960, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000001", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/194\\.6\\.134\\.192/cluster50368\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336889019, - "finishTime" : 1240336985309, - "shuffleFinished" : 1240336975164, - "sortFinished" : 1240336975677, - "attemptID" : "attempt_200904211745_0004_r_000002_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1530537, - "fileBytesRead" : 2654430, - "fileBytesWritten" : 2654430, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 133326, - "reduceInputRecords" : 136708, - "reduceShuffleBytes" : 4499527, - "reduceOutputRecords" : 136708, - "spilledRecords" : 136708, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336888941, - "finishTime" : 1240337001041, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000002", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/194\\.6\\.128\\.192/cluster50513\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336890410, - "finishTime" : 1240336987320, - "shuffleFinished" : 1240336974158, - "sortFinished" : 1240336974678, - "attemptID" : "attempt_200904211745_0004_r_000003_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1499750, - "fileBytesRead" : 2604086, - "fileBytesWritten" : 2604086, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 129054, - "reduceInputRecords" : 132376, - "reduceShuffleBytes" : 4422289, - "reduceOutputRecords" : 132376, - "spilledRecords" : 132376, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336888963, - "finishTime" : 1240337001095, - "inputBytes" : -1, - "inputRecords" : -1, - 
"outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000003", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/194\\.6\\.133\\.64/cluster50145\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336890439, - "finishTime" : 1240336986603, - "shuffleFinished" : 1240336973537, - "sortFinished" : 1240336974021, - "attemptID" : "attempt_200904211745_0004_r_000004_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1463089, - "fileBytesRead" : 2534927, - "fileBytesWritten" : 2534927, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 125284, - "reduceInputRecords" : 128270, - "reduceShuffleBytes" : 4292451, - "reduceOutputRecords" : 128270, - "spilledRecords" : 128270, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336888984, - "finishTime" : 1240337001109, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000004", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/192\\.30\\.63\\.64/cluster1735\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336898197, - "finishTime" : 1240336994614, - "shuffleFinished" : 1240336982632, - "sortFinished" : 1240336983604, - "attemptID" : "attempt_200904211745_0004_r_000005_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1524925, - "fileBytesRead" : 2651643, - "fileBytesWritten" : 2651643, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 133361, - "reduceInputRecords" : 136557, - "reduceShuffleBytes" : 4483041, - "reduceOutputRecords" : 136557, - "spilledRecords" : 136557, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889084, - "finishTime" : 1240337001321, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000005", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/194\\.6\\.133\\.64/cluster50132\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336889699, - "finishTime" : 1240336985779, - "shuffleFinished" : 1240336974653, - "sortFinished" : 1240336975212, - "attemptID" : "attempt_200904211745_0004_r_000006_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1490233, - "fileBytesRead" : 2582328, - "fileBytesWritten" : 2582328, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 127906, - "reduceInputRecords" : 131571, - "reduceShuffleBytes" : 4383017, - "reduceOutputRecords" : 131571, - "spilledRecords" : 131571, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889089, - "finishTime" : 1240337001250, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000006", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : 
"/194\\.6\\.133\\.192/cluster50235\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336889794, - "finishTime" : 1240336986114, - "shuffleFinished" : 1240336973868, - "sortFinished" : 1240336974400, - "attemptID" : "attempt_200904211745_0004_r_000007_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1505343, - "fileBytesRead" : 2610391, - "fileBytesWritten" : 2610391, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 129022, - "reduceInputRecords" : 132145, - "reduceShuffleBytes" : 4444172, - "reduceOutputRecords" : 132145, - "spilledRecords" : 132145, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889090, - "finishTime" : 1240337001307, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000007", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/194\\.6\\.128\\.192/cluster50491\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336892799, - "finishTime" : 1240336989074, - "shuffleFinished" : 1240336977913, - "sortFinished" : 1240336978491, - "attemptID" : "attempt_200904211745_0004_r_000008_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1504684, - "fileBytesRead" : 2608073, - "fileBytesWritten" : 2608073, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 127971, - "reduceInputRecords" : 131200, - "reduceShuffleBytes" : 4441998, - "reduceOutputRecords" : 131200, - "spilledRecords" : 131200, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889091, - "finishTime" : 1240337001271, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000008", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/192\\.30\\.62\\.64/cluster1679\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336889759, - "finishTime" : 1240336986187, - "shuffleFinished" : 1240336974015, - "sortFinished" : 1240336974545, - "attemptID" : "attempt_200904211745_0004_r_000009_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1496159, - "fileBytesRead" : 2593399, - "fileBytesWritten" : 2593399, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 128913, - "reduceInputRecords" : 131980, - "reduceShuffleBytes" : 4397570, - "reduceOutputRecords" : 131980, - "spilledRecords" : 131980, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889094, - "finishTime" : 1240337001265, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000009", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/194\\.6\\.132\\.128/cluster50024\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336889731, - "finishTime" : 1240336985667, - "shuffleFinished" : 1240336975405, - "sortFinished" : 1240336975928, - "attemptID" : "attempt_200904211745_0004_r_000010_0", - "hdfsBytesRead" : -1, - 
"hdfsBytesWritten" : 1516034, - "fileBytesRead" : 2633863, - "fileBytesWritten" : 2633863, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 130278, - "reduceInputRecords" : 133696, - "reduceShuffleBytes" : 4454003, - "reduceOutputRecords" : 133696, - "spilledRecords" : 133696, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889095, - "finishTime" : 1240337001270, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000010", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/194\\.6\\.133\\.192/cluster50223\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336889705, - "finishTime" : 1240336985456, - "shuffleFinished" : 1240336972242, - "sortFinished" : 1240336972740, - "attemptID" : "attempt_200904211745_0004_r_000011_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1493749, - "fileBytesRead" : 2585694, - "fileBytesWritten" : 2585694, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 128462, - "reduceInputRecords" : 131413, - "reduceShuffleBytes" : 4380350, - "reduceOutputRecords" : 131413, - "spilledRecords" : 131413, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889096, - "finishTime" : 1240337001272, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000011", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/192\\.30\\.126\\.0/cluster1314\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336890592, - "finishTime" : 1240336986175, - "shuffleFinished" : 1240336973921, - "sortFinished" : 1240336974467, - "attemptID" : "attempt_200904211745_0004_r_000012_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1542372, - "fileBytesRead" : 2681618, - "fileBytesWritten" : 2681618, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 136804, - "reduceInputRecords" : 139932, - "reduceShuffleBytes" : 4537451, - "reduceOutputRecords" : 139932, - "spilledRecords" : 139932, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889100, - "finishTime" : 1240337001225, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000012", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/192\\.30\\.127\\.64/cluster1440\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336890397, - "finishTime" : 1240336986407, - "shuffleFinished" : 1240336974415, - "sortFinished" : 1240336974913, - "attemptID" : "attempt_200904211745_0004_r_000013_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1518799, - "fileBytesRead" : 2631693, - "fileBytesWritten" : 2631693, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 129644, - "reduceInputRecords" : 132582, - "reduceShuffleBytes" : 4464832, - 
"reduceOutputRecords" : 132582, - "spilledRecords" : 132582, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889101, - "finishTime" : 1240337001247, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000013", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/192\\.30\\.62\\.64/cluster1647\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336889674, - "finishTime" : 1240336985701, - "shuffleFinished" : 1240336974501, - "sortFinished" : 1240336975018, - "attemptID" : "attempt_200904211745_0004_r_000014_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1529493, - "fileBytesRead" : 2660190, - "fileBytesWritten" : 2660190, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 130930, - "reduceInputRecords" : 134318, - "reduceShuffleBytes" : 4508491, - "reduceOutputRecords" : 134318, - "spilledRecords" : 134318, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889106, - "finishTime" : 1240337001250, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000014", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/192\\.30\\.126\\.0/cluster1295\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336890630, - "finishTime" : 1240336986643, - "shuffleFinished" : 1240336974515, - "sortFinished" : 1240336975047, - "attemptID" : "attempt_200904211745_0004_r_000015_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1488785, - "fileBytesRead" : 2587120, - "fileBytesWritten" : 2587120, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 129374, - "reduceInputRecords" : 132757, - "reduceShuffleBytes" : 4397939, - "reduceOutputRecords" : 132757, - "spilledRecords" : 132757, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889107, - "finishTime" : 1240337001221, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000015", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/194\\.6\\.135\\.64/cluster3077\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336899098, - "finishTime" : 1240336995768, - "shuffleFinished" : 1240336982067, - "sortFinished" : 1240336982475, - "attemptID" : "attempt_200904211745_0004_r_000016_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1482497, - "fileBytesRead" : 2568924, - "fileBytesWritten" : 2568924, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 125907, - "reduceInputRecords" : 128797, - "reduceShuffleBytes" : 4350055, - "reduceOutputRecords" : 128797, - "spilledRecords" : 128797, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889131, - "finishTime" : 1240337001197, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : 
"task_200904211745_0004_r_000016", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/192\\.30\\.62\\.64/cluster1641\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336895225, - "finishTime" : 1240336991501, - "shuffleFinished" : 1240336978367, - "sortFinished" : 1240336978877, - "attemptID" : "attempt_200904211745_0004_r_000017_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1501337, - "fileBytesRead" : 2604597, - "fileBytesWritten" : 2604597, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 131288, - "reduceInputRecords" : 134365, - "reduceShuffleBytes" : 4400916, - "reduceOutputRecords" : 134365, - "spilledRecords" : 134365, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889145, - "finishTime" : 1240337001222, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000017", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/192\\.30\\.126\\.192/cluster1429\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336885349, - "finishTime" : 1240336981576, - "shuffleFinished" : 1240336968527, - "sortFinished" : 1240336969054, - "attemptID" : "attempt_200904211745_0004_r_000018_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1512739, - "fileBytesRead" : 2623583, - "fileBytesWritten" : 2623583, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 131750, - "reduceInputRecords" : 134780, - "reduceShuffleBytes" : 4448997, - "reduceOutputRecords" : 134780, - "spilledRecords" : 134780, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889147, - "finishTime" : 1240337001223, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000018", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/192\\.30\\.127\\.192/cluster1531\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336890514, - "finishTime" : 1240336987216, - "shuffleFinished" : 1240336974189, - "sortFinished" : 1240336974682, - "attemptID" : "attempt_200904211745_0004_r_000019_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1497900, - "fileBytesRead" : 2595945, - "fileBytesWritten" : 2595945, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 127094, - "reduceInputRecords" : 130020, - "reduceShuffleBytes" : 4421936, - "reduceOutputRecords" : 130020, - "spilledRecords" : 130020, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889152, - "finishTime" : 1240337001238, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000019", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/192\\.30\\.126\\.64/cluster1328\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 
1240336890066, - "finishTime" : 1240336986478, - "shuffleFinished" : 1240336976422, - "sortFinished" : 1240336976934, - "attemptID" : "attempt_200904211745_0004_r_000020_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1527353, - "fileBytesRead" : 2649256, - "fileBytesWritten" : 2649256, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 133593, - "reduceInputRecords" : 136882, - "reduceShuffleBytes" : 4474494, - "reduceOutputRecords" : 136882, - "spilledRecords" : 136882, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889157, - "finishTime" : 1240337001251, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000020", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/194\\.6\\.134\\.0/cluster50254\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336890210, - "finishTime" : 1240336986556, - "shuffleFinished" : 1240336973579, - "sortFinished" : 1240336974104, - "attemptID" : "attempt_200904211745_0004_r_000021_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1513275, - "fileBytesRead" : 2630609, - "fileBytesWritten" : 2630609, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 131516, - "reduceInputRecords" : 135124, - "reduceShuffleBytes" : 4466234, - "reduceOutputRecords" : 135124, - "spilledRecords" : 135124, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889174, - "finishTime" : 1240337001247, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000021", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/192\\.30\\.127\\.64/cluster1466\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336885559, - "finishTime" : 1240336982206, - "shuffleFinished" : 1240336972182, - "sortFinished" : 1240336972682, - "attemptID" : "attempt_200904211745_0004_r_000022_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1502055, - "fileBytesRead" : 2608484, - "fileBytesWritten" : 2608484, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 129784, - "reduceInputRecords" : 132724, - "reduceShuffleBytes" : 4414804, - "reduceOutputRecords" : 132724, - "spilledRecords" : 132724, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889179, - "finishTime" : 1240337001263, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000022", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/192\\.30\\.127\\.64/cluster1454\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336890222, - "finishTime" : 1240336986523, - "shuffleFinished" : 1240336973420, - "sortFinished" : 1240336973931, - "attemptID" : "attempt_200904211745_0004_r_000023_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1468760, - "fileBytesRead" : 2543252, - "fileBytesWritten" : 2543252, - 
"mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 126859, - "reduceInputRecords" : 129871, - "reduceShuffleBytes" : 4296232, - "reduceOutputRecords" : 129871, - "spilledRecords" : 129871, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889185, - "finishTime" : 1240337001262, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000023", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/192\\.30\\.126\\.0/cluster1305\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336890266, - "finishTime" : 1240336986394, - "shuffleFinished" : 1240336975908, - "sortFinished" : 1240336976433, - "attemptID" : "attempt_200904211745_0004_r_000024_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1527960, - "fileBytesRead" : 2649290, - "fileBytesWritten" : 2649290, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 133015, - "reduceInputRecords" : 136281, - "reduceShuffleBytes" : 4485423, - "reduceOutputRecords" : 136281, - "spilledRecords" : 136281, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889196, - "finishTime" : 1240337001291, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000024", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ ], - "preferredLocations" : [ ], - "startTime" : 1240336889205, - "finishTime" : -1, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000025", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : null, - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/192\\.30\\.116\\.192/cluster1150\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336889529, - "finishTime" : 1240336986187, - "shuffleFinished" : 1240336973131, - "sortFinished" : 1240336973659, - "attemptID" : "attempt_200904211745_0004_r_000026_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1482330, - "fileBytesRead" : 2569201, - "fileBytesWritten" : 2569201, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 125873, - "reduceInputRecords" : 129080, - "reduceShuffleBytes" : 4363921, - "reduceOutputRecords" : 129080, - "spilledRecords" : 129080, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889208, - "finishTime" : 1240337001309, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000026", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/194\\.6\\.128\\.192/cluster50511\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336890681, - "finishTime" : 1240336986867, - "shuffleFinished" : 1240336975793, - "sortFinished" : 1240336976337, - "attemptID" : "attempt_200904211745_0004_r_000027_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1443497, - "fileBytesRead" : 2503238, - 
"fileBytesWritten" : 2503238, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 122459, - "reduceInputRecords" : 125626, - "reduceShuffleBytes" : 4238108, - "reduceOutputRecords" : 125626, - "spilledRecords" : 125626, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889209, - "finishTime" : 1240337001366, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000027", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "REDUCE" - }, { - "attempts" : [ { - "location" : null, - "hostName" : "/194\\.6\\.133\\.128/cluster50189\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336889839, - "finishTime" : 1240336986369, - "shuffleFinished" : 1240336976232, - "sortFinished" : 1240336976742, - "attemptID" : "attempt_200904211745_0004_r_000028_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : 1495322, - "fileBytesRead" : 2590588, - "fileBytesWritten" : 2590588, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : 0, - "reduceInputGroups" : 126998, - "reduceInputRecords" : 130037, - "reduceShuffleBytes" : 4405490, - "reduceOutputRecords" : 130037, - "spilledRecords" : 130037, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336889210, - "finishTime" : -1, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000028", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : null, - "taskType" : "REDUCE" - }, { - "attempts" : [ ], - "preferredLocations" : [ ], - "startTime" : 1240336889318, - "finishTime" : -1, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000029", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : null, - "taskType" : "REDUCE" - }, { - "attempts" : [ ], - "preferredLocations" : [ ], - "startTime" : 1240336889364, - "finishTime" : -1, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000030", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : null, - "taskType" : "REDUCE" - }, { - "attempts" : [ ], - "preferredLocations" : [ ], - "startTime" : 1240336889419, - "finishTime" : -1, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000031", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : null, - "taskType" : "REDUCE" - }, { - "attempts" : [ ], - "preferredLocations" : [ ], - "startTime" : 1240336889425, - "finishTime" : -1, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000032", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : null, - "taskType" : "REDUCE" - }, { - "attempts" : [ ], - "preferredLocations" : [ ], - "startTime" : 1240336889426, - "finishTime" : -1, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000033", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : null, - "taskType" : "REDUCE" - }, { - "attempts" : [ ], - "preferredLocations" : [ ], - "startTime" : 1240336889427, - "finishTime" : -1, - "inputBytes" : -1, - 
"inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000034", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : null, - "taskType" : "REDUCE" - }, { - "attempts" : [ ], - "preferredLocations" : [ ], - "startTime" : 1240336889429, - "finishTime" : -1, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000035", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : null, - "taskType" : "REDUCE" - }, { - "attempts" : [ ], - "preferredLocations" : [ ], - "startTime" : 1240336889429, - "finishTime" : -1, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000036", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : null, - "taskType" : "REDUCE" - }, { - "attempts" : [ ], - "preferredLocations" : [ ], - "startTime" : 1240336889430, - "finishTime" : -1, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000037", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : null, - "taskType" : "REDUCE" - }, { - "attempts" : [ ], - "preferredLocations" : [ ], - "startTime" : 1240336889430, - "finishTime" : -1, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000038", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : null, - "taskType" : "REDUCE" - }, { - "attempts" : [ ], - "preferredLocations" : [ ], - "startTime" : 1240336889431, - "finishTime" : -1, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000039", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : null, - "taskType" : "REDUCE" - }, { - "attempts" : [ ], - "preferredLocations" : [ ], - "startTime" : 1240336889431, - "finishTime" : -1, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000040", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : null, - "taskType" : "REDUCE" - }, { - "attempts" : [ ], - "preferredLocations" : [ ], - "startTime" : 1240336889432, - "finishTime" : -1, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000041", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : null, - "taskType" : "REDUCE" - }, { - "attempts" : [ ], - "preferredLocations" : [ ], - "startTime" : 1240336889432, - "finishTime" : -1, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000042", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : null, - "taskType" : "REDUCE" - }, { - "attempts" : [ ], - "preferredLocations" : [ ], - "startTime" : 1240336889433, - "finishTime" : -1, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000043", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : null, - "taskType" : "REDUCE" - }, { - "attempts" : [ ], - "preferredLocations" : [ ], - "startTime" : 1240336889433, - "finishTime" : -1, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000044", - "numberMaps" : -1, - "numberReduces" : -1, - 
"taskStatus" : null, - "taskType" : "REDUCE" - }, { - "attempts" : [ ], - "preferredLocations" : [ ], - "startTime" : 1240336889434, - "finishTime" : -1, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000045", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : null, - "taskType" : "REDUCE" - }, { - "attempts" : [ ], - "preferredLocations" : [ ], - "startTime" : 1240336889434, - "finishTime" : -1, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_r_000046", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : null, - "taskType" : "REDUCE" - } ], - "otherTasks" : [ { - "attempts" : [ { - "location" : { - "layers" : [ "194\\.6\\.134\\.64", "cluster50316\\.secondleveldomain\\.com" ] - }, - "hostName" : "/194\\.6\\.134\\.64/cluster50316\\.secondleveldomain\\.com", - "result" : "SUCCESS", - "startTime" : 1240336856225, - "finishTime" : 1240336858461, - "shuffleFinished" : -1, - "sortFinished" : -1, - "attemptID" : "attempt_200904211745_0004_m_000132_0", - "hdfsBytesRead" : -1, - "hdfsBytesWritten" : -1, - "fileBytesRead" : -1, - "fileBytesWritten" : -1, - "mapInputRecords" : -1, - "mapOutputBytes" : -1, - "mapOutputRecords" : -1, - "combineInputRecords" : -1, - "reduceInputGroups" : -1, - "reduceInputRecords" : -1, - "reduceShuffleBytes" : -1, - "reduceOutputRecords" : -1, - "spilledRecords" : 0, - "mapInputBytes" : -1 - } ], - "preferredLocations" : [ ], - "startTime" : 1240336855651, - "finishTime" : 1240336871747, - "inputBytes" : -1, - "inputRecords" : -1, - "outputBytes" : -1, - "outputRecords" : -1, - "taskID" : "task_200904211745_0004_m_000132", - "numberMaps" : -1, - "numberReduces" : -1, - "taskStatus" : "SUCCESS", - "taskType" : "SETUP" - } ], - "finishTime" : -1, - "user" : "geek3", - "jobName" : null, - "computonsPerMapInputByte" : -1, - "computonsPerMapOutputByte" : -1, - "computonsPerReduceInputByte" : -1, - "computonsPerReduceOutputByte" : -1, - "submitTime" : 1240336853354, - "launchTime" : 1240336854289, - "heapMegabytes" : 1234, - "totalMaps" : 131, - "totalReduces" : 47, - "outcome" : "SUCCESS", - "jobtype" : "JAVA", - "directDependantJobs" : [ ], - "successfulMapAttemptCDFs" : [ { - "maximum" : 43182, - "minimum" : 10721, - "rankings" : [ { - "relativeRanking" : 0.05, - "datum" : 10721 - }, { - "relativeRanking" : 0.1, - "datum" : 10721 - }, { - "relativeRanking" : 0.15, - "datum" : 11676 - }, { - "relativeRanking" : 0.2, - "datum" : 11676 - }, { - "relativeRanking" : 0.25, - "datum" : 12936 - }, { - "relativeRanking" : 0.3, - "datum" : 15451 - }, { - "relativeRanking" : 0.35, - "datum" : 15451 - }, { - "relativeRanking" : 0.4, - "datum" : 19204 - }, { - "relativeRanking" : 0.45, - "datum" : 21585 - }, { - "relativeRanking" : 0.5, - "datum" : 23169 - }, { - "relativeRanking" : 0.55, - "datum" : 23169 - }, { - "relativeRanking" : 0.6, - "datum" : 23595 - }, { - "relativeRanking" : 0.65, - "datum" : 27355 - }, { - "relativeRanking" : 0.7, - "datum" : 27355 - }, { - "relativeRanking" : 0.75, - "datum" : 36581 - }, { - "relativeRanking" : 0.8, - "datum" : 37035 - }, { - "relativeRanking" : 0.85, - "datum" : 37035 - }, { - "relativeRanking" : 0.9, - "datum" : 38983 - }, { - "relativeRanking" : 0.95, - "datum" : 39264 - } ], - "numberValues" : 14 - }, { - "maximum" : 75401, - "minimum" : 9950, - "rankings" : [ { - "relativeRanking" : 0.05, - "datum" : 11004 - }, { - "relativeRanking" : 0.1, - "datum" : 
12016 - }, { - "relativeRanking" : 0.15, - "datum" : 13160 - }, { - "relativeRanking" : 0.2, - "datum" : 18594 - }, { - "relativeRanking" : 0.25, - "datum" : 21555 - }, { - "relativeRanking" : 0.3, - "datum" : 22014 - }, { - "relativeRanking" : 0.35, - "datum" : 23004 - }, { - "relativeRanking" : 0.4, - "datum" : 24102 - }, { - "relativeRanking" : 0.45, - "datum" : 27100 - }, { - "relativeRanking" : 0.5, - "datum" : 32357 - }, { - "relativeRanking" : 0.55, - "datum" : 34735 - }, { - "relativeRanking" : 0.6, - "datum" : 37787 - }, { - "relativeRanking" : 0.65, - "datum" : 39211 - }, { - "relativeRanking" : 0.7, - "datum" : 39508 - }, { - "relativeRanking" : 0.75, - "datum" : 40173 - }, { - "relativeRanking" : 0.8, - "datum" : 40512 - }, { - "relativeRanking" : 0.85, - "datum" : 43173 - }, { - "relativeRanking" : 0.9, - "datum" : 48444 - }, { - "relativeRanking" : 0.95, - "datum" : 56688 - } ], - "numberValues" : 92 - }, { - "maximum" : 49391, - "minimum" : 9945, - "rankings" : [ { - "relativeRanking" : 0.05, - "datum" : 9945 - }, { - "relativeRanking" : 0.1, - "datum" : 10537 - }, { - "relativeRanking" : 0.15, - "datum" : 10748 - }, { - "relativeRanking" : 0.2, - "datum" : 11007 - }, { - "relativeRanking" : 0.25, - "datum" : 11929 - }, { - "relativeRanking" : 0.3, - "datum" : 12330 - }, { - "relativeRanking" : 0.35, - "datum" : 13151 - }, { - "relativeRanking" : 0.4, - "datum" : 13826 - }, { - "relativeRanking" : 0.45, - "datum" : 14120 - }, { - "relativeRanking" : 0.5, - "datum" : 15115 - }, { - "relativeRanking" : 0.55, - "datum" : 17485 - }, { - "relativeRanking" : 0.6, - "datum" : 26817 - }, { - "relativeRanking" : 0.65, - "datum" : 28878 - }, { - "relativeRanking" : 0.7, - "datum" : 32445 - }, { - "relativeRanking" : 0.75, - "datum" : 34820 - }, { - "relativeRanking" : 0.8, - "datum" : 36092 - }, { - "relativeRanking" : 0.85, - "datum" : 36563 - }, { - "relativeRanking" : 0.9, - "datum" : 37231 - }, { - "relativeRanking" : 0.95, - "datum" : 39051 - } ], - "numberValues" : 25 - }, { - "maximum" : 2236, - "minimum" : 2236, - "rankings" : [ { - "relativeRanking" : 0.05, - "datum" : 2236 - }, { - "relativeRanking" : 0.1, - "datum" : 2236 - }, { - "relativeRanking" : 0.15, - "datum" : 2236 - }, { - "relativeRanking" : 0.2, - "datum" : 2236 - }, { - "relativeRanking" : 0.25, - "datum" : 2236 - }, { - "relativeRanking" : 0.3, - "datum" : 2236 - }, { - "relativeRanking" : 0.35, - "datum" : 2236 - }, { - "relativeRanking" : 0.4, - "datum" : 2236 - }, { - "relativeRanking" : 0.45, - "datum" : 2236 - }, { - "relativeRanking" : 0.5, - "datum" : 2236 - }, { - "relativeRanking" : 0.55, - "datum" : 2236 - }, { - "relativeRanking" : 0.6, - "datum" : 2236 - }, { - "relativeRanking" : 0.65, - "datum" : 2236 - }, { - "relativeRanking" : 0.7, - "datum" : 2236 - }, { - "relativeRanking" : 0.75, - "datum" : 2236 - }, { - "relativeRanking" : 0.8, - "datum" : 2236 - }, { - "relativeRanking" : 0.85, - "datum" : 2236 - }, { - "relativeRanking" : 0.9, - "datum" : 2236 - }, { - "relativeRanking" : 0.95, - "datum" : 2236 - } ], - "numberValues" : 1 - } ], - "failedMapAttemptCDFs" : [ { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, { - "maximum" : 18592, - "minimum" : 18416, - "rankings" : [ { - 
"relativeRanking" : 0.05, - "datum" : 18416 - }, { - "relativeRanking" : 0.1, - "datum" : 18416 - }, { - "relativeRanking" : 0.15, - "datum" : 18416 - }, { - "relativeRanking" : 0.2, - "datum" : 18416 - }, { - "relativeRanking" : 0.25, - "datum" : 18416 - }, { - "relativeRanking" : 0.3, - "datum" : 18416 - }, { - "relativeRanking" : 0.35, - "datum" : 18416 - }, { - "relativeRanking" : 0.4, - "datum" : 18416 - }, { - "relativeRanking" : 0.45, - "datum" : 18416 - }, { - "relativeRanking" : 0.5, - "datum" : 18416 - }, { - "relativeRanking" : 0.55, - "datum" : 18416 - }, { - "relativeRanking" : 0.6, - "datum" : 18416 - }, { - "relativeRanking" : 0.65, - "datum" : 18416 - }, { - "relativeRanking" : 0.7, - "datum" : 18584 - }, { - "relativeRanking" : 0.75, - "datum" : 18584 - }, { - "relativeRanking" : 0.8, - "datum" : 18584 - }, { - "relativeRanking" : 0.85, - "datum" : 18584 - }, { - "relativeRanking" : 0.9, - "datum" : 18584 - }, { - "relativeRanking" : 0.95, - "datum" : 18584 - } ], - "numberValues" : 3 - } ], - "successfulReduceAttemptCDF" : { - "maximum" : 96910, - "minimum" : 95583, - "rankings" : [ { - "relativeRanking" : 0.05, - "datum" : 95583 - }, { - "relativeRanking" : 0.1, - "datum" : 95751 - }, { - "relativeRanking" : 0.15, - "datum" : 96010 - }, { - "relativeRanking" : 0.2, - "datum" : 96013 - }, { - "relativeRanking" : 0.25, - "datum" : 96080 - }, { - "relativeRanking" : 0.3, - "datum" : 96128 - }, { - "relativeRanking" : 0.35, - "datum" : 96164 - }, { - "relativeRanking" : 0.4, - "datum" : 96227 - }, { - "relativeRanking" : 0.45, - "datum" : 96275 - }, { - "relativeRanking" : 0.5, - "datum" : 96290 - }, { - "relativeRanking" : 0.55, - "datum" : 96301 - }, { - "relativeRanking" : 0.6, - "datum" : 96320 - }, { - "relativeRanking" : 0.65, - "datum" : 96346 - }, { - "relativeRanking" : 0.7, - "datum" : 96412 - }, { - "relativeRanking" : 0.75, - "datum" : 96428 - }, { - "relativeRanking" : 0.8, - "datum" : 96530 - }, { - "relativeRanking" : 0.85, - "datum" : 96549 - }, { - "relativeRanking" : 0.9, - "datum" : 96658 - }, { - "relativeRanking" : 0.95, - "datum" : 96670 - } ], - "numberValues" : 28 - }, - "failedReduceAttemptCDF" : { - "maximum" : 9223372036854775807, - "minimum" : -9223372036854775808, - "rankings" : [ ], - "numberValues" : 0 - }, - "mapperTriesToSucceed" : [ 0.6567164179104478, 0.3283582089552239, 0.014925373134328358 ], - "failedMapperFraction" : 0.0, - "relativeTime" : 0, - "queue" : null, - "clusterMapMB" : -1, - "clusterReduceMB" : -1, - "jobMapMB" : -1, - "jobReduceMB" : -1 -} diff --git a/hadoop-mapreduce-project/src/test/unit/org/apache/hadoop/mapred/TestJobTrackerPlugins.java b/hadoop-mapreduce-project/src/test/unit/org/apache/hadoop/mapred/TestJobTrackerPlugins.java deleted file mode 100644 index 7a86a0c2357..00000000000 --- a/hadoop-mapreduce-project/src/test/unit/org/apache/hadoop/mapred/TestJobTrackerPlugins.java +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import java.io.IOException; - -import junit.framework.TestCase; - -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.util.ServicePlugin; -import org.junit.Test; - -public class TestJobTrackerPlugins extends TestCase { - - static class FakeServicePlugin implements ServicePlugin { - - private static FakeServicePlugin instance; - - public static FakeServicePlugin getInstance() { - return instance; - } - - private Object service; - private boolean stopped; - - public Object getService() { - return service; - } - - public boolean isStopped() { - return stopped; - } - - public FakeServicePlugin() { - // store static reference to instance so we can retrieve it in the test - instance = this; - } - - @Override - public void start(Object service) { - this.service = service; - } - - @Override - public void stop() { - stopped = true; - } - - @Override - public void close() throws IOException { - } - } - - @Test - public void test() throws Exception { - JobConf conf = new JobConf(); - conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:0"); - conf.set(JTConfig.JT_HTTP_ADDRESS, "0.0.0.0:0"); - conf.setClass(JTConfig.JT_PLUGINS, FakeServicePlugin.class, - ServicePlugin.class); - - assertNull("Plugin not created", FakeServicePlugin.getInstance()); - - JobTracker jobTracker = JobTracker.startTracker(conf); - assertNotNull("Plugin created", FakeServicePlugin.getInstance()); - assertSame("Service is jobTracker", - FakeServicePlugin.getInstance().getService(), jobTracker); - assertFalse("Plugin not stopped", - FakeServicePlugin.getInstance().isStopped()); - - jobTracker.close(); - assertTrue("Plugin stopped", FakeServicePlugin.getInstance().isStopped()); - } - -} diff --git a/hadoop-mapreduce-project/src/test/unit/org/apache/hadoop/mapred/TestLostTaskTracker.java b/hadoop-mapreduce-project/src/test/unit/org/apache/hadoop/mapred/TestLostTaskTracker.java deleted file mode 100644 index 99035b808ce..00000000000 --- a/hadoop-mapreduce-project/src/test/unit/org/apache/hadoop/mapred/TestLostTaskTracker.java +++ /dev/null @@ -1,113 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.mapred; - -import static org.mockito.Mockito.*; - -import java.io.IOException; - -import junit.framework.TestCase; - -import org.apache.hadoop.mapred.UtilsForTests.FakeClock; -import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig; -import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker; -import org.hamcrest.Matcher; -import org.mockito.ArgumentCaptor; -import org.mockito.ArgumentMatcher; - -/** - * Tests that trackers that don't heartbeat within a given time are considered - * lost. Note that this test is not a direct replacement for - * {@link TestLostTracker} since it doesn't test that a task - * running on a lost tracker is retried on another tracker. - */ -@SuppressWarnings("deprecation") -public class TestLostTaskTracker extends TestCase { - - private JobTracker jobTracker; - - private FakeClock clock; - - @Override - protected void setUp() throws Exception { - JobConf conf = new JobConf(); - conf.set(JTConfig.JT_IPC_ADDRESS, "localhost:0"); - conf.set(JTConfig.JT_HTTP_ADDRESS, "0.0.0.0:0"); - conf.setLong(JTConfig.JT_TRACKER_EXPIRY_INTERVAL, 1000); - clock = new FakeClock(); - // We use a "partial mock" of JobTracker which lets us see when certain - // methods are called. If we were writing JobTracker from scratch then - // we would make it call another object which we would mock out instead - // (and use a real JobTracker) so we could perform assertions on the mock. - // See http://mockito.googlecode.com/svn/branches/1.8.0/javadoc/org/mockito/Mockito.html#16 - jobTracker = spy(new JobTracker(conf, clock)); - } - - public void testLostTaskTrackerCalledAfterExpiryTime() throws IOException { - - String tracker1 = "tracker_tracker1:1000"; - String tracker2 = "tracker_tracker2:1000"; - - establishFirstContact(tracker1); - - // Wait long enough for tracker1 to be considered lost - // We could have used a Mockito stub here, except we don't know how many - // times JobTracker calls getTime() on the clock, so a static mock - // is appropriate. - clock.advance(8 * 1000); - - establishFirstContact(tracker2); - - jobTracker.checkExpiredTrackers(); - - // Now we check that JobTracker's lostTaskTracker() was called for tracker1 - // but not for tracker2. - - // We use an ArgumentCaptor to capture the task tracker object - // in the lostTaskTracker() call, so we can perform an assertion on its - // name. (We could also have used a custom matcher, see below.) - // See http://mockito.googlecode.com/svn/branches/1.8.0/javadoc/org/mockito/Mockito.html#15 - ArgumentCaptor argument = - ArgumentCaptor.forClass(TaskTracker.class); - - verify(jobTracker).lostTaskTracker(argument.capture()); - assertEquals(tracker1, argument.getValue().getTrackerName()); - - // Check tracker2 was not lost by using the never() construct - // We use a custom Hamcrest matcher to check that it was indeed tracker2 - // that didn't match (since tracker1 did match). 
- // See http://mockito.googlecode.com/svn/branches/1.8.0/javadoc/org/mockito/Mockito.html#3 - verify(jobTracker, never()).lostTaskTracker( - argThat(taskTrackerWithName(tracker2))); - } - - private Matcher taskTrackerWithName(final String name) { - return new ArgumentMatcher() { - public boolean matches(Object taskTracker) { - return name.equals(((TaskTracker) taskTracker).getTrackerName()); - } - }; - } - - private void establishFirstContact(String tracker) throws IOException { - TaskTrackerStatus status = new TaskTrackerStatus(tracker, - JobInProgress.convertTrackerNameToHostName(tracker)); - jobTracker.heartbeat(status, false, true, false, (short) 0); - } - -} \ No newline at end of file diff --git a/hadoop-mapreduce-project/src/test/unit/org/apache/hadoop/mapred/TestTaskTrackerDirectories.java b/hadoop-mapreduce-project/src/test/unit/org/apache/hadoop/mapred/TestTaskTrackerDirectories.java deleted file mode 100644 index 7e7235c1a85..00000000000 --- a/hadoop-mapreduce-project/src/test/unit/org/apache/hadoop/mapred/TestTaskTrackerDirectories.java +++ /dev/null @@ -1,140 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.mapred; - -import static org.junit.Assert.*; - -import java.io.File; -import java.io.IOException; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.RawLocalFileSystem; -import org.apache.hadoop.mapreduce.MRConfig; -import org.junit.Test; -import org.junit.Before; - -/** - * Tests for the correct behavior of the TaskTracker starting up with - * respect to its local-disk directories. 
- */ -public class TestTaskTrackerDirectories { - private final String TEST_DIR = new File("build/test/testmapredlocaldir") - .getAbsolutePath(); - - @Before - public void deleteTestDir() throws IOException { - FileUtil.fullyDelete(new File(TEST_DIR)); - assertFalse("Could not delete " + TEST_DIR, - new File(TEST_DIR).exists()); - } - - @Test - public void testCreatesLocalDirs() throws Exception { - Configuration conf = new Configuration(); - String[] dirs = new String[] { - TEST_DIR + "/local1", - TEST_DIR + "/local2" - }; - - conf.setStrings(MRConfig.LOCAL_DIR, dirs); - setupTaskController(conf); - - for (String dir : dirs) { - checkDir(dir); - } - } - - @Test - public void testFixesLocalDirPermissions() throws Exception { - Configuration conf = new Configuration(); - String[] dirs = new String[] { - TEST_DIR + "/badperms" - }; - - new File(dirs[0]).mkdirs(); - FileUtil.chmod(dirs[0], "000"); - - conf.setStrings(MRConfig.LOCAL_DIR, dirs); - setupTaskController(conf); - - for (String dir : dirs) { - checkDir(dir); - } - } - - @Test - public void testCreatesLogDir() throws Exception { - File dir = TaskLog.getUserLogDir(); - FileUtil.fullyDelete(dir); - - setupTaskController(new Configuration()); - - checkDir(dir.getAbsolutePath()); - } - - /** - * If the log dir can't be created, the TT should fail to start since - * it will be unable to localize or run tasks. - */ - @Test - public void testCantCreateLogDir() throws Exception { - File dir = TaskLog.getUserLogDir(); - FileUtil.fullyDelete(dir); - assertTrue("Making file in place of log dir", - dir.createNewFile()); - - try { - setupTaskController(new Configuration()); - fail("Didn't throw!"); - } catch (IOException ioe) { - System.err.println("Got expected exception"); - ioe.printStackTrace(System.out); - } - } - - @Test - public void testFixesLogDirPermissions() throws Exception { - File dir = TaskLog.getUserLogDir(); - FileUtil.fullyDelete(dir); - dir.mkdirs(); - FileUtil.chmod(dir.getAbsolutePath(), "000"); - - setupTaskController(new Configuration()); - - checkDir(dir.getAbsolutePath()); - } - - private void setupTaskController(Configuration conf) throws IOException { - TaskController tc = new DefaultTaskController(); - tc.setConf(conf); - tc.setup(); - } - - private void checkDir(String dir) throws IOException { - FileSystem fs = RawLocalFileSystem.get(new Configuration()); - File f = new File(dir); - assertTrue(dir + "should exist", f.exists()); - FileStatus stat = fs.getFileStatus(new Path(dir)); - assertEquals(dir + " has correct permissions", - 0755, stat.getPermission().toShort()); - } -} diff --git a/hadoop-mapreduce-project/src/tools/org/apache/hadoop/fs/package-info.java b/hadoop-mapreduce-project/src/tools/org/apache/hadoop/fs/package-info.java deleted file mode 100644 index 67ca9584e8d..00000000000 --- a/hadoop-mapreduce-project/src/tools/org/apache/hadoop/fs/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Hadoop filesystem classes for MapReduce. - */ -package org.apache.hadoop.fs; diff --git a/hadoop-mapreduce-project/src/webapps/job/analysejobhistory.jsp b/hadoop-mapreduce-project/src/webapps/job/analysejobhistory.jsp deleted file mode 100644 index 33a5828e9a3..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/analysejobhistory.jsp +++ /dev/null @@ -1,254 +0,0 @@ -<% -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -%> -<%@ page - contentType="text/html; charset=UTF-8" - import="javax.servlet.http.*" - import="java.io.*" - import="java.util.*" - import="org.apache.hadoop.http.HtmlQuoting" - import="org.apache.hadoop.mapred.*" - import="org.apache.hadoop.fs.*" - import="org.apache.hadoop.util.*" - import="java.text.SimpleDateFormat" - import="org.apache.hadoop.mapreduce.jobhistory.*" -%> - -<%! private static SimpleDateFormat dateFormat - = new SimpleDateFormat("d/MM HH:mm:ss") ; -%> -<%! private static final long serialVersionUID = 1L; -%> - - -<% - String logFile = request.getParameter("logFile"); - String numTasks = request.getParameter("numTasks"); - int showTasks = 10 ; - if (numTasks != null) { - showTasks = Integer.parseInt(numTasks); - } - FileSystem fs = (FileSystem) application.getAttribute("fileSys"); - JobTracker jobTracker = (JobTracker) application.getAttribute("job.tracker"); - JobHistoryParser.JobInfo job = JSPUtil.checkAccessAndGetJobInfo(request, - response, jobTracker, fs, new Path(logFile)); - if (job == null) { - return; - } -%> -

Hadoop Job <%=job.getJobId() %>

-User : <%=HtmlQuoting.quoteHtmlChars(job.getUsername()) %>
-JobName : <%=HtmlQuoting.quoteHtmlChars(job.getJobname()) %>
-JobConf : <%=job.getJobConfPath() %>
-Submitted At : <%=StringUtils.getFormattedTimeWithDiff(dateFormat, job.getSubmitTime(), 0 ) %>
-Launched At : <%=StringUtils.getFormattedTimeWithDiff(dateFormat, job.getLaunchTime(), job.getSubmitTime()) %>
-Finished At : <%=StringUtils.getFormattedTimeWithDiff(dateFormat, job.getFinishTime(), job.getLaunchTime()) %>
-Status : <%= ((job.getJobStatus() == null)?"Incomplete" :job.getJobStatus()) %>
-
-
-<% - if (!JobStatus.getJobRunState(JobStatus.SUCCEEDED).equals(job.getJobStatus())) { - out.print("

No analysis available as the job did not finish

"); - return; - } - - HistoryViewer.AnalyzedJob avg = new HistoryViewer.AnalyzedJob(job); - JobHistoryParser.TaskAttemptInfo [] mapTasks = avg.getMapTasks(); - JobHistoryParser.TaskAttemptInfo [] reduceTasks = avg.getReduceTasks(); - - Comparator cMap = - new Comparator() { - public int compare(JobHistoryParser.TaskAttemptInfo t1, - JobHistoryParser.TaskAttemptInfo t2) { - long l1 = t1.getFinishTime() - t1.getStartTime(); - long l2 = t2.getFinishTime() - t2.getStartTime(); - return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1)); - } - }; - - Comparator cShuffle = - new Comparator() { - public int compare(JobHistoryParser.TaskAttemptInfo t1, - JobHistoryParser.TaskAttemptInfo t2) { - long l1 = t1.getShuffleFinishTime() - t1.getStartTime(); - long l2 = t2.getShuffleFinishTime() - t2.getStartTime(); - return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1)); - } - }; - - Comparator cFinishShuffle = - new Comparator() { - public int compare(JobHistoryParser.TaskAttemptInfo t1, - JobHistoryParser.TaskAttemptInfo t2) { - long l1 = t1.getShuffleFinishTime(); - long l2 = t2.getShuffleFinishTime(); - return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1)); - } - }; - - Comparator cFinishMapRed = - new Comparator() { - public int compare(JobHistoryParser.TaskAttemptInfo t1, - JobHistoryParser.TaskAttemptInfo t2) { - long l1 = t1.getFinishTime(); - long l2 = t2.getFinishTime(); - return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1)); - } - }; - - Comparator cReduce = - new Comparator() { - public int compare(JobHistoryParser.TaskAttemptInfo t1, - JobHistoryParser.TaskAttemptInfo t2) { - long l1 = t1.getFinishTime() - - t1.getShuffleFinishTime(); - long l2 = t2.getFinishTime() - - t2.getShuffleFinishTime(); - return (l2 < l1 ? -1 : (l2 == l1 ? 0 : 1)); - } - }; - - if (mapTasks == null || mapTasks.length <= 0) return; - Arrays.sort(mapTasks, cMap); - JobHistoryParser.TaskAttemptInfo minMap = mapTasks[mapTasks.length-1] ; -%> - -

Time taken by best performing Map task - -<%=minMap.getAttemptId().getTaskID() %> : <%=StringUtils.formatTimeDiff(minMap.getFinishTime(), minMap.getStartTime() ) %>

-

Average time taken by Map tasks: -<%=StringUtils.formatTimeDiff(avg.getAvgMapTime(), 0) %>

-

Worst performing map tasks

- - -<% - for (int i=0;i - - - - -<% - } -%> -
Task Id | Time taken
- <%=mapTasks[i].getAttemptId().getTaskID() %><%=StringUtils.formatTimeDiff(mapTasks[i].getFinishTime(), mapTasks[i].getStartTime()) %>
-<% - Arrays.sort(mapTasks, cFinishMapRed); - JobHistoryParser.TaskAttemptInfo lastMap = mapTasks[0] ; -%> - -

The last Map task -<%=lastMap.getAttemptId().getTaskID() %> -finished at (relative to the Job launch time): -<%=StringUtils.getFormattedTimeWithDiff(dateFormat, - lastMap.getFinishTime(), - job.getLaunchTime()) %>

-
- -<% - if (reduceTasks.length <= 0) return; - Arrays.sort(reduceTasks, cShuffle); - JobHistoryParser.TaskAttemptInfo minShuffle = reduceTasks[reduceTasks.length-1] ; -%> -

Time taken by best performing shuffle -<%=minShuffle.getAttemptId().getTaskID()%> : -<%=StringUtils.formatTimeDiff(minShuffle.getShuffleFinishTime(), - minShuffle.getStartTime() ) %>

-

Average time taken by Shuffle: -<%=StringUtils.formatTimeDiff(avg.getAvgShuffleTime(), 0) %>

-

Worst performing Shuffle(s)

- - -<% - for (int i=0;i - - - - -<% - } -%> -
Task Id | Time taken
-<%=reduceTasks[i].getAttemptId().getTaskID() %><%= - StringUtils.formatTimeDiff( - reduceTasks[i].getShuffleFinishTime(), - reduceTasks[i].getStartTime()) %> -
-<% - Arrays.sort(reduceTasks, cFinishShuffle); - JobHistoryParser.TaskAttemptInfo lastShuffle = reduceTasks[0] ; -%> - -

The last Shuffle -<%=lastShuffle.getAttemptId().getTaskID()%> - finished at (relative to the Job launch time): -<%=StringUtils.getFormattedTimeWithDiff(dateFormat, - lastShuffle.getShuffleFinishTime(), - job.getLaunchTime() ) %>

- -<% - Arrays.sort(reduceTasks, cReduce); - JobHistoryParser.TaskAttemptInfo minReduce = reduceTasks[reduceTasks.length-1] ; -%> -
-

Time taken by best performing Reduce task : - -<%=minReduce.getAttemptId().getTaskID() %> : -<%=StringUtils.formatTimeDiff(minReduce.getFinishTime(), - minReduce.getShuffleFinishTime() ) %>

- -

Average time taken by Reduce tasks: -<%=StringUtils.formatTimeDiff(avg.getAvgReduceTime(), 0) %>

-

Worst performing reduce tasks

- - -<% - for (int i=0;i - - - - -<% - } -%> -
Task Id | Time taken
- <%=reduceTasks[i].getAttemptId().getTaskID() %><%=StringUtils.formatTimeDiff( - reduceTasks[i].getFinishTime(), - reduceTasks[i].getShuffleFinishTime()) %>
-<% - Arrays.sort(reduceTasks, cFinishMapRed); - JobHistoryParser.TaskAttemptInfo lastReduce = reduceTasks[0] ; -%> - -

The last Reduce task -<%=lastReduce.getAttemptId().getTaskID()%> - finished at (relative to the Job launch time): -<%=StringUtils.getFormattedTimeWithDiff(dateFormat, - lastReduce.getFinishTime(), - job.getLaunchTime() ) %>

-
- diff --git a/hadoop-mapreduce-project/src/webapps/job/index.html b/hadoop-mapreduce-project/src/webapps/job/index.html deleted file mode 100644 index a20a7e4f696..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/index.html +++ /dev/null @@ -1,35 +0,0 @@ - - - - -Hadoop Administration - - - - -

Hadoop Administration

- - - - - - diff --git a/hadoop-mapreduce-project/src/webapps/job/job_authorization_error.jsp b/hadoop-mapreduce-project/src/webapps/job/job_authorization_error.jsp deleted file mode 100644 index 9e163103114..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/job_authorization_error.jsp +++ /dev/null @@ -1,53 +0,0 @@ -<% -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -%> -<%@ page - contentType="text/html; charset=UTF-8" - import="javax.servlet.*" - import="javax.servlet.http.*" - import="java.io.*" - import="java.net.URL" - import="org.apache.hadoop.util.*" -%> -<%! private static final long serialVersionUID = 1L; -%> - - - - -Error: User cannot access this Job - - -

Error: User cannot do this operation on this Job


- -<% - String errorMsg = (String) request.getAttribute("error.msg"); -%> - - -<% - out.println(errorMsg); -%> - - -
- -<% -out.println(ServletUtil.htmlFooter()); -%> diff --git a/hadoop-mapreduce-project/src/webapps/job/jobblacklistedtrackers.jsp b/hadoop-mapreduce-project/src/webapps/job/jobblacklistedtrackers.jsp deleted file mode 100644 index 6b47ded6073..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/jobblacklistedtrackers.jsp +++ /dev/null @@ -1,92 +0,0 @@ -<% -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -%> -<%@ page - contentType="text/html; charset=UTF-8" - import="javax.servlet.*" - import="javax.servlet.http.*" - import="java.io.*" - import="java.util.*" - import="org.apache.hadoop.http.HtmlQuoting" - import="org.apache.hadoop.mapred.*" - import="org.apache.hadoop.mapred.JSPUtil.JobWithViewAccessCheck" - import="org.apache.hadoop.util.*" -%> -<%! private static final long serialVersionUID = 1L; -%> - -<% - JobTracker tracker = (JobTracker) application.getAttribute( - "job.tracker"); - String trackerName = - StringUtils.simpleHostname(tracker.getJobTrackerMachine()); -%> -<%! - private void printBlackListedTrackers(JspWriter out, - JobInProgress job) throws IOException { - Map trackerErrors = job.getTaskTrackerErrors(); - out.print(""); - out.print("\n"); - int maxErrorsPerTracker = job.getJobConf().getMaxTaskFailuresPerTracker(); - for (Map.Entry e : trackerErrors.entrySet()) { - if (e.getValue().intValue() >= maxErrorsPerTracker) { - out.print("\n"); - } - } - out.print("
TaskTracker | No. of Failures
" + HtmlQuoting.quoteHtmlChars(e.getKey()) + - "" + e.getValue() + "
\n"); - } -%> - -<% - String jobId = request.getParameter("jobid"); - if (jobId == null) { - out.println("

Missing 'jobid' for fetching black-listed tasktrackers!

"); - return; - } - - JobWithViewAccessCheck myJob = JSPUtil.checkAccessAndGetJob(tracker, - JobID.forName(jobId), request, response); - if (!myJob.isViewJobAllowed()) { - return; // user is not authorized to view this job - } - - JobInProgress job = myJob.getJob(); - if (job == null) { - out.print("Job " + jobId + " not found.
\n"); - return; - } -%> - - - -Hadoop <%=jobId%>'s black-listed tasktrackers - -

Hadoop <%=jobId%> - -Black-listed task-trackers

- -<% - printBlackListedTrackers(out, job); -%> - -
-Go back to <%=jobId%>
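For reference, the selection rule implemented by printBlackListedTrackers above is a simple threshold test: a tracker is listed once its failure count for this job reaches the per-tracker limit from the job's configuration. A self-contained sketch of that filter follows; the plain Map stands in for JobInProgress.getTaskTrackerErrors() and the limit for JobConf.getMaxTaskFailuresPerTracker().

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    public class BlacklistFilter {

      // Keeps the trackers whose failure count has reached the per-tracker
      // failure limit configured for the job.
      static List<String> blacklistedTrackers(Map<String, Integer> trackerErrors,
                                              int maxErrorsPerTracker) {
        List<String> listed = new ArrayList<String>();
        for (Map.Entry<String, Integer> e : trackerErrors.entrySet()) {
          if (e.getValue().intValue() >= maxErrorsPerTracker) {
            listed.add(e.getKey());
          }
        }
        return listed;
      }
    }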
-<% -out.println(ServletUtil.htmlFooter()); -%> diff --git a/hadoop-mapreduce-project/src/webapps/job/jobconf.jsp b/hadoop-mapreduce-project/src/webapps/job/jobconf.jsp deleted file mode 100644 index 8c157006f4d..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/jobconf.jsp +++ /dev/null @@ -1,95 +0,0 @@ -<% -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -%> -<%@ page - contentType="text/html; charset=UTF-8" - import="javax.servlet.*" - import="javax.servlet.http.*" - import="java.io.*" - import="java.net.URL" - import="org.apache.hadoop.mapred.*" - import="org.apache.hadoop.mapred.JSPUtil.JobWithViewAccessCheck" - import="org.apache.hadoop.util.*" -%> -<%! private static final long serialVersionUID = 1L; -%> - - -<% - final JobTracker tracker = (JobTracker) application.getAttribute("job.tracker"); - final String jobId = request.getParameter("jobid"); - if (jobId == null) { - out.println("

Missing 'jobid' for fetching job configuration!

"); - return; - } -%> - - - -Job Configuration: JobId - <%= jobId %> - - -

Job Configuration: JobId - <%= jobId %>


- -<% - - final JobID jobIdObj = JobID.forName(jobId); - JobWithViewAccessCheck myJob = JSPUtil.checkAccessAndGetJob(tracker, - jobIdObj, request, response); - if (!myJob.isViewJobAllowed()) { - return; // user is not authorized to view this job - } - - JobInProgress job = myJob.getJob(); - // redirect to history page if it cannot be found in memory - if (job == null) { - String historyFile = tracker.getJobHistory().getHistoryFilePath(jobIdObj); - if (historyFile == null) { - out.println("

Job " + jobId + " not known!

"); - return; - } - String historyUrl = "/jobconf_history.jsp?logFile=" + historyFile; - response.sendRedirect(response.encodeRedirectURL(historyUrl)); - return; - } - - String jobFilePath = tracker.getLocalJobFilePath(JobID.forName(jobId)); - FileInputStream jobFile = null; - try { - jobFile = new FileInputStream(jobFilePath); - JobConf jobConf = new JobConf(jobFilePath); - XMLUtils.transform( - jobConf.getConfResourceAsInputStream("webapps/static/jobconf.xsl"), - jobFile, out); - } catch (Exception e) { - out.println("Failed to retreive job configuration for job '" + jobId + "!"); - out.println(e); - } finally { - if (jobFile != null) { - try { - jobFile.close(); - } catch (IOException e) {} - } - } -%> - -
-<% -out.println(ServletUtil.htmlFooter()); -%> diff --git a/hadoop-mapreduce-project/src/webapps/job/jobconf_history.jsp b/hadoop-mapreduce-project/src/webapps/job/jobconf_history.jsp deleted file mode 100644 index a7879961941..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/jobconf_history.jsp +++ /dev/null @@ -1,93 +0,0 @@ -<% -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -%> -<%@ page - contentType="text/html; charset=UTF-8" - import="javax.servlet.*" - import="javax.servlet.http.*" - import="java.io.*" - import="org.apache.hadoop.mapred.*" - import="org.apache.hadoop.fs.*" - import="org.apache.hadoop.util.*" - import="org.apache.hadoop.mapreduce.jobhistory.*" - import="org.apache.hadoop.mapreduce.JobACL" - import="org.apache.hadoop.security.UserGroupInformation" - import="org.apache.hadoop.security.authorize.AccessControlList" - import="org.apache.hadoop.security.AccessControlException" -%> - -<%! private static final long serialVersionUID = 1L; -%> - -<% - JobTracker tracker = (JobTracker) application.getAttribute("job.tracker"); - - String logFileString = request.getParameter("logFile"); - if (logFileString == null) { - out.println("

Missing 'logFile' for fetching job configuration!

"); - return; - } - - Path logFile = new Path(logFileString); - String jobId = JobHistory.getJobIDFromHistoryFilePath(logFile).toString(); - -%> - - - - -Job Configuration: JobId - <%= jobId %> - - -

Job Configuration: JobId - <%= jobId %>


- -<% - Path jobFilePath = JSPUtil.getJobConfFilePath(logFile); - FileSystem fs = (FileSystem) application.getAttribute("fileSys"); - FSDataInputStream jobFile = null; - try { - jobFile = fs.open(jobFilePath); - JobConf jobConf = new JobConf(jobFilePath); - JobTracker jobTracker = (JobTracker) application.getAttribute("job.tracker"); - - JobHistoryParser.JobInfo job = JSPUtil.checkAccessAndGetJobInfo(request, - response, jobTracker, fs, logFile); - if (job == null) { - return; - } - - XMLUtils.transform( - jobConf.getConfResourceAsInputStream("webapps/static/jobconf.xsl"), - jobFile, out); - } catch (Exception e) { - out.println("Failed to retreive job configuration for job '" + jobId + "!"); - out.println(e); - } finally { - if (jobFile != null) { - try { - jobFile.close(); - } catch (IOException e) {} - } - } -%> - -
-<% -out.println(ServletUtil.htmlFooter()); -%> diff --git a/hadoop-mapreduce-project/src/webapps/job/jobdetails.jsp b/hadoop-mapreduce-project/src/webapps/job/jobdetails.jsp deleted file mode 100644 index f39e9e96978..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/jobdetails.jsp +++ /dev/null @@ -1,493 +0,0 @@ -<% -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -%> -<%@ page - contentType="text/html; charset=UTF-8" - import="javax.servlet.*" - import="javax.servlet.http.*" - import="java.io.*" - import="java.text.*" - import="java.util.*" - import="java.text.DecimalFormat" - import="org.apache.hadoop.http.HtmlQuoting" - import="org.apache.hadoop.mapred.*" - import="org.apache.hadoop.mapred.JSPUtil.JobWithViewAccessCheck" - import="org.apache.hadoop.mapreduce.TaskType" - import="org.apache.hadoop.util.*" - import="org.apache.hadoop.fs.Path" - import="org.apache.hadoop.mapreduce.jobhistory.JobHistory" - import="org.apache.hadoop.mapreduce.JobACL" - import="org.apache.hadoop.security.UserGroupInformation" - import="java.security.PrivilegedExceptionAction" - import="org.apache.hadoop.security.AccessControlException" - import="org.apache.hadoop.security.authorize.AccessControlList" -%> - -<%! private static final long serialVersionUID = 1L; -%> - -<% - final JobTracker tracker = (JobTracker) application.getAttribute( - "job.tracker"); - String trackerName = - StringUtils.simpleHostname(tracker.getJobTrackerMachine()); -%> -<%! - private void printTaskSummary(JspWriter out, - String jobId, - String kind, - double completePercent, - TaskInProgress[] tasks - ) throws IOException { - int totalTasks = tasks.length; - int runningTasks = 0; - int finishedTasks = 0; - int killedTasks = 0; - int failedTaskAttempts = 0; - int killedTaskAttempts = 0; - for(int i=0; i < totalTasks; ++i) { - TaskInProgress task = tasks[i]; - if (task.isComplete()) { - finishedTasks += 1; - } else if (task.isRunning()) { - runningTasks += 1; - } else if (task.wasKilled()) { - killedTasks += 1; - } - failedTaskAttempts += task.numTaskFailures(); - killedTaskAttempts += task.numKilledTasks(); - } - int pendingTasks = totalTasks - runningTasks - killedTasks - finishedTasks; - out.print("" + kind + - "" + - StringUtils.formatPercent(completePercent, 2) + - ServletUtil.percentageGraph((int)(completePercent * 100), 80) + - "" + - totalTasks + - "" + - ((pendingTasks > 0) - ? "" + pendingTasks + "" - : "0") + - "" + - ((runningTasks > 0) - ? "" + runningTasks + "" - : "0") + - "" + - ((finishedTasks > 0) - ?"" + finishedTasks + "" - : "0") + - "" + - ((killedTasks > 0) - ?"" + killedTasks + "" - : "0") + - "" + - ((failedTaskAttempts > 0) ? - ("" + failedTaskAttempts + - "") : - "0" - ) + - " / " + - ((killedTaskAttempts > 0) ? 
- ("" + killedTaskAttempts + - "") : - "0" - ) + - "\n"); - } - - private void printJobLevelTaskSummary(JspWriter out, - String jobId, - String kind, - TaskInProgress[] tasks - ) throws IOException { - int totalTasks = tasks.length; - int runningTasks = 0; - int finishedTasks = 0; - int killedTasks = 0; - for(int i=0; i < totalTasks; ++i) { - TaskInProgress task = tasks[i]; - if (task.isComplete()) { - finishedTasks += 1; - } else if (task.isRunning()) { - runningTasks += 1; - } else if (task.isFailed()) { - killedTasks += 1; - } - } - int pendingTasks = totalTasks - runningTasks - killedTasks - finishedTasks; - out.print(((runningTasks > 0) - ? "" + " Running" + - "" - : ((pendingTasks > 0) ? " Pending" : - ((finishedTasks > 0) - ?"" + " Successful" - + "" - : ((killedTasks > 0) - ?"" + " Failed" - + "" : "None"))))); - } - - private void printConfirm(JspWriter out, String jobId) throws IOException{ - String url = "jobdetails.jsp?jobid=" + jobId; - out.print("" - + "

Are you sure you want to kill " + jobId - + " ?


" - + "
" - + "" - + "" - + "
" - + "
"); - } - -%> -<% - String jobId = request.getParameter("jobid"); - String refreshParam = request.getParameter("refresh"); - if (jobId == null) { - out.println("

Missing 'jobid'!

"); - return; - } - - int refresh = 60; // refresh every 60 seconds by default - if (refreshParam != null) { - try { - refresh = Integer.parseInt(refreshParam); - } - catch (NumberFormatException ignored) { - } - } - - final JobID jobIdObj = JobID.forName(jobId); - JobWithViewAccessCheck myJob = JSPUtil.checkAccessAndGetJob(tracker, jobIdObj, - request, response); - if (!myJob.isViewJobAllowed()) { - return; // user is not authorized to view this job - } - - JobInProgress job = myJob.getJob(); - - final String newPriority = request.getParameter("prio"); - String user = request.getRemoteUser(); - UserGroupInformation ugi = null; - if (user != null) { - ugi = UserGroupInformation.createRemoteUser(user); - } - String action = request.getParameter("action"); - if(JSPUtil.privateActionsAllowed(tracker.conf) && - "changeprio".equalsIgnoreCase(action) - && request.getMethod().equalsIgnoreCase("POST")) { - if (ugi != null) { - try { - ugi.doAs(new PrivilegedExceptionAction() { - public Void run() throws IOException{ - - // checks job modify permission - tracker.setJobPriority(jobIdObj, - JobPriority.valueOf(newPriority)); - return null; - } - }); - } catch(AccessControlException e) { - String errMsg = "User " + user + " failed to modify priority of " + - jobIdObj + "!

" + e.getMessage() + - "
Go back to Job
"; - JSPUtil.setErrorAndForward(errMsg, request, response); - return; - } - } - else {// no authorization needed - tracker.setJobPriority(jobIdObj, - JobPriority.valueOf(newPriority));; - } - } - - if(JSPUtil.privateActionsAllowed(tracker.conf)) { - action = request.getParameter("action"); - if(action!=null && action.equalsIgnoreCase("confirm")) { - printConfirm(out, jobId); - return; - } - else if(action != null && action.equalsIgnoreCase("kill") && - request.getMethod().equalsIgnoreCase("POST")) { - if (ugi != null) { - try { - ugi.doAs(new PrivilegedExceptionAction() { - public Void run() throws IOException{ - - // checks job modify permission - tracker.killJob(jobIdObj);// checks job modify permission - return null; - } - }); - } catch(AccessControlException e) { - String errMsg = "User " + user + " failed to kill " + jobIdObj + - "!

" + e.getMessage() + - "
Go back to Job
"; - JSPUtil.setErrorAndForward(errMsg, request, response); - return; - } - } - else {// no authorization needed - tracker.killJob(jobIdObj); - } - } - } - -%> - -<%@page import="org.apache.hadoop.mapred.TaskGraphServlet"%> - - - - <% - if (refresh != 0) { - %> - - <% - } - %> -Hadoop <%=jobId%> on <%=trackerName%> - - - -

Hadoop <%=jobId%> on <%=trackerName%>

- -<% - if (job == null) { - String historyFile = tracker.getJobHistory().getHistoryFilePath(jobIdObj); - if (historyFile == null) { - out.println("

Job " + jobId + " not known!

"); - return; - } - String historyUrl = "/jobdetailshistory.jsp?jobid=" + - jobId + "&logFile=" + historyFile; - response.sendRedirect(response.encodeRedirectURL(historyUrl)); - return; - } - JobProfile profile = job.getProfile(); - JobStatus status = job.getStatus(); - int runState = status.getRunState(); - int flakyTaskTrackers = job.getNoOfBlackListedTrackers(); - out.print("User: " + - HtmlQuoting.quoteHtmlChars(profile.getUser()) + "
\n"); - out.print("Job Name: " + - HtmlQuoting.quoteHtmlChars(profile.getJobName()) + "
\n"); - out.print("Job File: " + - profile.getJobFile() + "
\n"); - out.print("Submit Host: " + - HtmlQuoting.quoteHtmlChars(job.getJobSubmitHostName()) + "
\n"); - out.print("Submit Host Address: " + - HtmlQuoting.quoteHtmlChars(job.getJobSubmitHostAddress()) + "
\n"); - - Map jobAcls = status.getJobACLs(); - JSPUtil.printJobACLs(tracker, jobAcls, out); - - out.print("Job Setup:"); - printJobLevelTaskSummary(out, jobId, "setup", - job.getTasks(TaskType.JOB_SETUP)); - out.print("
\n"); - if (runState == JobStatus.RUNNING) { - out.print("Status: Running
\n"); - out.print("Started at: " + new Date(job.getStartTime()) + "
\n"); - out.print("Running for: " + StringUtils.formatTimeDiff( - System.currentTimeMillis(), job.getStartTime()) + "
\n"); - } else { - if (runState == JobStatus.SUCCEEDED) { - out.print("Status: Succeeded
\n"); - out.print("Started at: " + new Date(job.getStartTime()) + "
\n"); - out.print("Finished at: " + new Date(job.getFinishTime()) + - "
\n"); - out.print("Finished in: " + StringUtils.formatTimeDiff( - job.getFinishTime(), job.getStartTime()) + "
\n"); - } else if (runState == JobStatus.FAILED) { - out.print("Status: Failed
\n"); - out.print("Started at: " + new Date(job.getStartTime()) + "
\n"); - out.print("Failed at: " + new Date(job.getFinishTime()) + - "
\n"); - out.print("Failed in: " + StringUtils.formatTimeDiff( - job.getFinishTime(), job.getStartTime()) + "
\n"); - } else if (runState == JobStatus.KILLED) { - out.print("Status: Killed
\n"); - out.print("Started at: " + new Date(job.getStartTime()) + "
\n"); - out.print("Killed at: " + new Date(job.getFinishTime()) + - "
\n"); - out.print("Killed in: " + StringUtils.formatTimeDiff( - job.getFinishTime(), job.getStartTime()) + "
\n"); - } - } - out.print("Job Cleanup:"); - printJobLevelTaskSummary(out, jobId, "cleanup", - job.getTasks(TaskType.JOB_CLEANUP)); - out.print("
\n"); - if (flakyTaskTrackers > 0) { - out.print("Black-listed TaskTrackers: " + - "" + - flakyTaskTrackers + "
\n"); - } - if (job.getSchedulingInfo() != null) { - out.print("Job Scheduling information: " + - job.getSchedulingInfo().toString() +"\n"); - } - out.print("
\n"); - out.print(""); - out.print("" + - "" + - "" + - "\n"); - printTaskSummary(out, jobId, "map", status.mapProgress(), - job.getTasks(TaskType.MAP)); - printTaskSummary(out, jobId, "reduce", status.reduceProgress(), - job.getTasks(TaskType.REDUCE)); - out.print("
Kind | % Complete | Num Tasks | Pending | Running | Complete | Killed | Failed/Killed Task Attempts
\n"); - - %> -

- - - - - - - - - <% - Counters mapCounters = job.getMapCounters(); - Counters reduceCounters = job.getReduceCounters(); - Counters totalCounters = job.getCounters(); - - for (String groupName : totalCounters.getGroupNames()) { - Counters.Group totalGroup = totalCounters.getGroup(groupName); - Counters.Group mapGroup = mapCounters.getGroup(groupName); - Counters.Group reduceGroup = reduceCounters.getGroup(groupName); - - Format decimal = new DecimalFormat(); - - boolean isFirst = true; - for (Counters.Counter counter : totalGroup) { - String name = counter.getDisplayName(); - String mapValue = decimal.format(mapGroup.getCounter(name)); - String reduceValue = decimal.format(reduceGroup.getCounter(name)); - String totalValue = decimal.format(counter.getCounter()); - %> - - <% - if (isFirst) { - isFirst = false; - %> - - <% - } - %> - - - - - - <% - } - } - %> -

Counter | Map | Reduce | Total
- <%=HtmlQuoting.quoteHtmlChars(totalGroup.getDisplayName())%><%=HtmlQuoting.quoteHtmlChars(name)%><%=mapValue%><%=reduceValue%><%=totalValue%>
- -
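The counters table above lines up three readings of every counter name: the map-side value, the reduce-side value, and the job total, each run through a DecimalFormat. The sketch below reproduces that row-building step with plain maps in place of the Counters.Group objects, so it illustrates the layout rather than the Hadoop counters API.

    import java.text.DecimalFormat;
    import java.text.Format;
    import java.util.Map;

    public class CounterRows {

      // One "name: map / reduce / total" line per counter, in the order of
      // the totals map, using a default DecimalFormat as the page above does.
      static String formatRows(Map<String, Long> mapCounters,
                               Map<String, Long> reduceCounters,
                               Map<String, Long> totalCounters) {
        Format decimal = new DecimalFormat();
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<String, Long> e : totalCounters.entrySet()) {
          String name = e.getKey();
          Long mapValue =
              mapCounters.containsKey(name) ? mapCounters.get(name) : 0L;
          Long reduceValue =
              reduceCounters.containsKey(name) ? reduceCounters.get(name) : 0L;
          sb.append(name).append(": ")
            .append(decimal.format(mapValue)).append(" / ")
            .append(decimal.format(reduceValue)).append(" / ")
            .append(decimal.format(e.getValue())).append('\n');
        }
        return sb.toString();
      }
    }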


Map Completion Graph - -<% -if("off".equals(request.getParameter("map.graph"))) { - session.setAttribute("map.graph", "off"); -} else if("on".equals(request.getParameter("map.graph"))){ - session.setAttribute("map.graph", "on"); -} -if("off".equals(request.getParameter("reduce.graph"))) { - session.setAttribute("reduce.graph", "off"); -} else if("on".equals(request.getParameter("reduce.graph"))){ - session.setAttribute("reduce.graph", "on"); -} - -if("off".equals(session.getAttribute("map.graph"))) { %> - open -<%} else { %> - close -
-<%}%> - -<%if(job.getTasks(TaskType.REDUCE).length > 0) { %> -
Reduce Completion Graph - -<%if("off".equals(session.getAttribute("reduce.graph"))) { %> - open -<%} else { %> - close - -
-<%} }%> - -
-<% if(JSPUtil.privateActionsAllowed(tracker.conf)) { %> -
- Change priority from <%=job.getPriority()%> to: -
- - -
-<% } %> - - - -<% if(JSPUtil.privateActionsAllowed(tracker.conf) - && runState == JobStatus.RUNNING) { %> -
Kill this job -<% } %> - -
- -
-Go back to JobTracker
-<% -out.println(ServletUtil.htmlFooter()); -%> diff --git a/hadoop-mapreduce-project/src/webapps/job/jobdetailshistory.jsp b/hadoop-mapreduce-project/src/webapps/job/jobdetailshistory.jsp deleted file mode 100644 index 232e71b3218..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/jobdetailshistory.jsp +++ /dev/null @@ -1,291 +0,0 @@ -<% -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -%> -<%@ page - contentType="text/html; charset=UTF-8" - import="javax.servlet.http.*" - import="java.io.*" - import="java.util.*" - import="org.apache.hadoop.fs.*" - import="org.apache.hadoop.http.HtmlQuoting" - import="org.apache.hadoop.mapreduce.TaskAttemptID" - import="org.apache.hadoop.mapreduce.TaskID" - import="org.apache.hadoop.mapreduce.Counter" - import="org.apache.hadoop.mapreduce.Counters" - import="org.apache.hadoop.mapreduce.CounterGroup" - import="org.apache.hadoop.mapred.*" - import="org.apache.hadoop.util.*" - import="java.text.*" - import="org.apache.hadoop.mapreduce.jobhistory.*" - import="java.security.PrivilegedExceptionAction" - import="org.apache.hadoop.security.AccessControlException" - import="org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo" - import="org.apache.hadoop.mapreduce.JobACL" - import="org.apache.hadoop.security.authorize.AccessControlList" -%> -<%!private static final long serialVersionUID = 1L; -%> - -<%! static SimpleDateFormat dateFormat = new SimpleDateFormat("d-MMM-yyyy HH:mm:ss") ; %> -<% - String logFile = request.getParameter("logFile"); - String reasonforFailure = " "; - final Path jobFile = new Path(logFile); - String jobid = JobHistory.getJobIDFromHistoryFilePath(jobFile).toString(); - - final FileSystem fs = (FileSystem) application.getAttribute("fileSys"); - final JobTracker jobTracker = (JobTracker) application.getAttribute("job.tracker"); - JobInfo job = JSPUtil.checkAccessAndGetJobInfo(request, response, - jobTracker, fs, jobFile); - if (job == null) { - return; - } - if (job.getJobStatus().equals("FAILED")) - reasonforFailure = job.getErrorInfo(); -%> - - - - -Hadoop Job <%=jobid%> on History Viewer - - - - -

Hadoop Job <%=jobid %> on History Viewer

- -User: <%=HtmlQuoting.quoteHtmlChars(job.getUsername()) %>
-JobName: <%=HtmlQuoting.quoteHtmlChars(job.getJobname()) %>
-JobConf: - <%=job.getJobConfPath() %>
-<% - Map jobAcls = job.getJobACLs(); - JSPUtil.printJobACLs(jobTracker, jobAcls, out); -%> -Submitted At: <%=StringUtils.getFormattedTimeWithDiff(dateFormat, job.getSubmitTime(), 0 ) %>
-Launched At: <%=StringUtils.getFormattedTimeWithDiff(dateFormat, job.getLaunchTime(), job.getSubmitTime()) %>
-Finished At: <%=StringUtils.getFormattedTimeWithDiff(dateFormat, job.getFinishTime(), job.getLaunchTime()) %>
-Status: <%= ((job.getJobStatus()) == null ? "Incomplete" :job.getJobStatus()) %>
-ReasonForFailure: <%=reasonforFailure %>
-<% - HistoryViewer.SummarizedJob sj = new HistoryViewer.SummarizedJob(job); -%> -Analyse This Job -
-
-
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Kind | Total Tasks (successful+failed+killed) | Successful tasks | Failed tasks | Killed tasks | Start Time | Finish Time
Setup - <%=sj.getTotalSetups()%> - <%=sj.getNumFinishedSetups()%> - <%=sj.getNumFailedSetups()%> - <%=sj.getNumKilledSetups()%><%=StringUtils.getFormattedTimeWithDiff(dateFormat, sj.getSetupStarted(), 0) %><%=StringUtils.getFormattedTimeWithDiff(dateFormat, sj.getSetupFinished(), sj.getSetupStarted()) %>
Map - <%=sj.getTotalMaps()%> - <%=job.getFinishedMaps() %> - <%=sj.getNumFailedMaps()%> - <%=sj.getNumKilledMaps()%><%=StringUtils.getFormattedTimeWithDiff(dateFormat, sj.getMapStarted(), 0) %><%=StringUtils.getFormattedTimeWithDiff(dateFormat, sj.getMapFinished(), sj.getMapStarted()) %>
Reduce - <%=sj.getTotalReduces()%> - <%=job.getFinishedReduces()%> - <%=sj.getNumFailedReduces()%> - <%=sj.getNumKilledReduces()%><%=StringUtils.getFormattedTimeWithDiff(dateFormat, sj.getReduceStarted(), 0) %><%=StringUtils.getFormattedTimeWithDiff(dateFormat, sj.getReduceFinished(), sj.getReduceStarted()) %>
Cleanup - <%=sj.getTotalCleanups()%> - <%=sj.getNumFinishedCleanups()%> - <%=sj.getNumFailedCleanups()%> - <%=sj.getNumKilledCleanups()%><%=StringUtils.getFormattedTimeWithDiff(dateFormat, sj.getCleanupStarted(), 0) %><%=StringUtils.getFormattedTimeWithDiff(dateFormat, sj.getCleanupFinished(), sj.getCleanupStarted()) %>
- -
-
- - - - - - - - - - -<% - - Counters totalCounters = job.getTotalCounters(); - Counters mapCounters = job.getMapCounters(); - Counters reduceCounters = job.getReduceCounters(); - - if (totalCounters != null) { - for (String groupName : totalCounters.getGroupNames()) { - CounterGroup totalGroup = totalCounters.getGroup(groupName); - CounterGroup mapGroup = mapCounters.getGroup(groupName); - CounterGroup reduceGroup = reduceCounters.getGroup(groupName); - - Format decimal = new DecimalFormat(); - - boolean isFirst = true; - Iterator ctrItr = totalGroup.iterator(); - while(ctrItr.hasNext()) { - Counter counter = ctrItr.next(); - String name = counter.getName(); - String mapValue = - decimal.format(mapGroup.findCounter(name).getValue()); - String reduceValue = - decimal.format(reduceGroup.findCounter(name).getValue()); - String totalValue = - decimal.format(counter.getValue()); -%> - -<% - if (isFirst) { - isFirst = false; -%> - -<% - } -%> - - - - - -<% - } - } - } -%> -

Counter | Map | Reduce | Total
- <%=HtmlQuoting.quoteHtmlChars(totalGroup.getDisplayName())%><%=HtmlQuoting.quoteHtmlChars(counter.getDisplayName())%><%=mapValue%><%=reduceValue%><%=totalValue%>
-
- -
- <% - HistoryViewer.FilteredJob filter = new HistoryViewer.FilteredJob(job,TaskStatus.State.FAILED.toString()); - Map> badNodes = filter.getFilteredMap(); - if (badNodes.size() > 0) { - %> -

Failed tasks attempts by nodes

- - - <% - for (Map.Entry> entry : badNodes.entrySet()) { - String node = entry.getKey(); - Set failedTasks = entry.getValue(); -%> - - - - -<% - } - } - %> -
Hostname | Failed Tasks
<%=node %> -<% - boolean firstId = true; - for (TaskID tid : failedTasks) { - if (firstId) { - firstId = false; -%> - <%=tid %> -<% - } else { -%> - , <%=tid %> -<% - } - } -%> -
-
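The failed-tasks table above (and the killed-tasks table that follows) is driven by HistoryViewer.FilteredJob, which yields a map from hostname to the set of task IDs that had an attempt end in that state on that node. The per-node rendering reduces to a comma join, sketched here with plain strings standing in for TaskID:

    import java.util.Map;
    import java.util.Set;

    public class BadNodeReport {

      // Renders one line per host: "hostname: id1, id2, ...", matching the
      // comma-joined task lists in the tables above.
      static String render(Map<String, Set<String>> badNodes) {
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<String, Set<String>> entry : badNodes.entrySet()) {
          sb.append(entry.getKey()).append(": ");
          boolean first = true;
          for (String taskId : entry.getValue()) {
            if (!first) {
              sb.append(", ");
            }
            sb.append(taskId);
            first = false;
          }
          sb.append('\n');
        }
        return sb.toString();
      }
    }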
- - <% - filter = new HistoryViewer.FilteredJob(job, TaskStatus.State.KILLED.toString()); - badNodes = filter.getFilteredMap(); - if (badNodes.size() > 0) { - %> -

Killed tasks attempts by nodes

- - - <% - for (Map.Entry> entry : badNodes.entrySet()) { - String node = entry.getKey(); - Set killedTasks = entry.getValue(); -%> - - - - -<% - } - } -%> -
Hostname | Killed Tasks
<%=node %> -<% - boolean firstId = true; - for (TaskID tid : killedTasks) { - if (firstId) { - firstId = false; -%> - <%=tid %> -<% - } else { -%> - , <%=tid %> -<% - } - } -%> -
- - diff --git a/hadoop-mapreduce-project/src/webapps/job/jobfailures.jsp b/hadoop-mapreduce-project/src/webapps/job/jobfailures.jsp deleted file mode 100644 index 57530999947..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/jobfailures.jsp +++ /dev/null @@ -1,202 +0,0 @@ -<% -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -%> -<%@ page - contentType="text/html; charset=UTF-8" - import="javax.servlet.*" - import="javax.servlet.http.*" - import="java.io.*" - import="java.util.*" - import="org.apache.hadoop.http.HtmlQuoting" - import="org.apache.hadoop.mapred.*" - import="org.apache.hadoop.mapred.JSPUtil.JobWithViewAccessCheck" - import="org.apache.hadoop.mapreduce.TaskType" - import="org.apache.hadoop.util.*" -%> - -<%! private static final long serialVersionUID = 1L; -%> - -<% - JobTracker tracker = (JobTracker) application.getAttribute("job.tracker"); - String trackerName = - StringUtils.simpleHostname(tracker.getJobTrackerMachine()); -%> -<%! - private void printFailedAttempts(JspWriter out, - JobTracker tracker, - TaskInProgress tip, - TaskStatus.State failState) throws IOException { - TaskStatus[] statuses = tip.getTaskStatuses(); - TaskID tipId = tip.getTIPId(); - for(int i=0; i < statuses.length; ++i) { - TaskStatus.State taskState = statuses[i].getRunState(); - if ((failState == null && (taskState == TaskStatus.State.FAILED || - taskState == TaskStatus.State.KILLED)) || taskState == failState) { - String taskTrackerName = statuses[i].getTaskTracker(); - TaskTrackerStatus taskTracker = tracker.getTaskTrackerStatus(taskTrackerName); - out.print("" + statuses[i].getTaskID() + - "" + - tipId + ""); - if (taskTracker == null) { - out.print("" + taskTrackerName + ""); - } else { - out.print("" + taskTracker.getHost() + - ""); - } - out.print("" + taskState + ""); - out.print("
");
-        String[] failures = 
-                     tracker.getTaskDiagnostics(statuses[i].getTaskID());
-        if (failures == null) {
-          out.print(" ");
-        } else {
-          for(int j = 0 ; j < failures.length ; j++){
-            out.print(HtmlQuoting.quoteHtmlChars(failures[j]));
-            if (j < (failures.length - 1)) {
-              out.print("\n-------\n");
-            }
-          }
-        }
-        out.print("
"); - - out.print(""); - String taskLogUrl = null; - if (taskTracker != null) { - taskLogUrl = TaskLogServlet.getTaskLogUrl(taskTracker.getHost(), - String.valueOf(taskTracker.getHttpPort()), - statuses[i].getTaskID().toString()); - } - if (taskLogUrl != null) { - String tailFourKBUrl = taskLogUrl + "&start=-4097"; - String tailEightKBUrl = taskLogUrl + "&start=-8193"; - String entireLogUrl = taskLogUrl; - out.print("Last 4KB
"); - out.print("Last 8KB
"); - out.print("All
"); - } else { - out.print("n/a"); // task tracker was lost - } - out.print(""); - - out.print("\n"); - } - } - } - - private void printFailures(JspWriter out, - JobTracker tracker, - JobInProgress job, - String kind, - String cause) - throws IOException, InterruptedException, ServletException { - - boolean includeMap = false; - boolean includeReduce = false; - if (kind == null) { - includeMap = true; - includeReduce = true; - } else if ("map".equals(kind)) { - includeMap = true; - } else if ("reduce".equals(kind)) { - includeReduce = true; - } else if ("all".equals(kind)) { - includeMap = true; - includeReduce = true; - } else { - out.print("Kind " + kind + - " not supported.
\n"); - return; - } - - TaskStatus.State state = null; - try { - if (cause != null) { - state = TaskStatus.State.valueOf(cause.toUpperCase()); - if (state != TaskStatus.State.FAILED && state != TaskStatus.State.KILLED) { - out.print("Cause '" + cause + - "' is not an 'unsuccessful' state.
\n"); - return; - } - } - } catch (IllegalArgumentException e) { - out.print("Cause '" + cause + "' not supported.
\n"); - return; - } - - out.print(""); - out.print("" + - "\n"); - if (includeMap) { - TaskInProgress[] tips = job.getTasks(TaskType.MAP); - for(int i=0; i < tips.length; ++i) { - printFailedAttempts(out, tracker, tips[i], state); - } - } - if (includeReduce) { - TaskInProgress[] tips = job.getTasks(TaskType.REDUCE); - for(int i=0; i < tips.length; ++i) { - printFailedAttempts(out, tracker, tips[i], state); - } - } - out.print("
Attempt | Task | Machine | State | Error | Logs
\n"); - } -%> - -<% - String jobId = request.getParameter("jobid"); - if (jobId == null) { - out.println("

Missing 'jobid'!

"); - return; - } - JobID jobIdObj = JobID.forName(jobId); - - JobWithViewAccessCheck myJob = JSPUtil.checkAccessAndGetJob( - tracker, jobIdObj, request, response); - if (!myJob.isViewJobAllowed()) { - return; // user is not authorized to view this job - } - - JobInProgress job = myJob.getJob(); - if (job == null) { - out.print("Job " + jobId + " not found.
\n"); - return; - } - - String kind = request.getParameter("kind"); - String cause = request.getParameter("cause"); -%> - - - -Hadoop <%=jobId%> failures on <%=trackerName%> - -

Hadoop <%=jobId%> -failures on <%=trackerName%>

- -<% - printFailures(out, tracker, job, kind, cause); -%> - -
-Go back to JobTracker
-<% -out.println(ServletUtil.htmlFooter()); -%> diff --git a/hadoop-mapreduce-project/src/webapps/job/jobhistory.jsp b/hadoop-mapreduce-project/src/webapps/job/jobhistory.jsp deleted file mode 100644 index d9cc8e1bb53..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/jobhistory.jsp +++ /dev/null @@ -1,318 +0,0 @@ -<% -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -%> -<%@ page - contentType="text/html; charset=UTF-8" - import="java.io.*" - import="java.util.*" - import="java.net.URLEncoder" - import="org.apache.hadoop.mapred.*" - import="org.apache.hadoop.util.*" - import="org.apache.hadoop.fs.*" - import="javax.servlet.jsp.*" - import="java.text.SimpleDateFormat" - import="org.apache.hadoop.http.HtmlQuoting" - import="org.apache.hadoop.mapred.*" - import="org.apache.hadoop.mapreduce.jobhistory.*" -%> - -<%! private static final long serialVersionUID = 1L; -%> - -<% - JobTracker tracker = (JobTracker) application.getAttribute("job.tracker"); - String trackerName = - StringUtils.simpleHostname(tracker.getJobTrackerMachine()); -%> -<%! - private static SimpleDateFormat dateFormat = - new SimpleDateFormat("d/MM HH:mm:ss"); -%> - - - - - -<%= trackerName %> Hadoop Map/Reduce History Viewer - - - -

<%= trackerName %> Hadoop Map/Reduce - History Viewer

-
-<% - final String search = (request.getParameter("search") == null) - ? "" - : request.getParameter("search"); - - String parts[] = search.split(":"); - - final String user = (parts.length >= 1) - ? parts[0].toLowerCase() - : ""; - final String jobid = (parts.length >= 2) - ? parts[1].toLowerCase() - : ""; - final String rawUser = HtmlQuoting.unquoteHtmlChars(user); - final String rawJobid = HtmlQuoting.unquoteHtmlChars(jobid); - - PathFilter jobLogFileFilter = new PathFilter() { - private boolean matchUser(String fileName) { - // return true if - // - user is not specified - // - user matches - return "".equals(rawUser) || rawUser.equals(fileName.split("_")[3]); - } - - private boolean matchJobId(String fileName) { - // return true if - // - jobid is not specified - // - jobid matches - String[] jobDetails = fileName.split("_"); - String actualId = jobDetails[0] + "_" +jobDetails[1] + "_" + jobDetails[2] ; - return "".equals(rawJobid) || jobid.equalsIgnoreCase(actualId); - } - - public boolean accept(Path path) { - return (!(path.getName().endsWith(".xml") || - path.getName().endsWith(JobHistory.OLD_SUFFIX)) && - matchUser(path.getName()) && matchJobId(path.getName())); - } - }; - - FileSystem fs = (FileSystem) application.getAttribute("fileSys"); - String historyLogDir = (String) application.getAttribute("historyLogDir"); - if (fs == null) { - out.println("Null file system. May be namenode is in safemode!"); - return; - } - Path[] jobFiles = FileUtil.stat2Paths(fs.listStatus(new Path(historyLogDir), - jobLogFileFilter)); - out.println(""); - if (null == jobFiles || jobFiles.length == 0) { - out.println("No files found!"); - return ; - } - - // get the pageno - int pageno = request.getParameter("pageno") == null - ? 1 - : Integer.parseInt(request.getParameter("pageno")); - - // get the total number of files to display - int size = 100; - - // if show-all is requested or jobfiles < size(100) - if (pageno == -1 || size > jobFiles.length) { - size = jobFiles.length; - } - - if (pageno == -1) { // special case 'show all' - pageno = 1; - } - - int maxPageNo = (int)Math.ceil((float)jobFiles.length / size); - - // check and fix pageno - if (pageno < 1 || pageno > maxPageNo) { - out.println("Invalid page index"); - return ; - } - - int length = size ; // determine the length of job history files to be displayed - if (pageno == maxPageNo) { - // find the number of files to be shown on the last page - int startOnLast = ((pageno - 1) * size) + 1; - length = jobFiles.length - startOnLast + 1; - } - - // Display the search box - out.println("
Filter (username:jobid) "); // heading - out.println(""); // search box - out.println("
"); - out.println("Example: 'smith' will display jobs submitted by user 'smith'. "); - out.println("Job Ids need to be prefixed with a colon(:) For example, :job_200908311030_0001 will display the job with that id. "); // example - out.println("
"); - - //Show the status - int start = (pageno - 1) * size + 1; - - // DEBUG - out.println(""); - - out.println("Available Jobs in History "); - // display the number of jobs, start index, end index - out.println("( Displaying " + length + " jobs from " + start + " to " + (start + length - 1) + " out of " + jobFiles.length + " jobs"); - if (!"".equals(user)) { - out.println(" for user " + HtmlQuoting.quoteHtmlChars(user) + ""); // show the user if present - } - if (!"".equals(jobid)) { - out.println(" for jobid " + HtmlQuoting.quoteHtmlChars(jobid) + " in it."); // show the jobid keyword if present - } - out.print(")"); - - // show the 'show-all' link - out.println(" [show all]"); - - // show the 'first-page' link - if (pageno > 1) { - out.println(" [first page]"); - } else { - out.println("[first page]"); - } - - // show the 'last-page' link - if (pageno < maxPageNo) { - out.println(" [last page]"); - } else { - out.println("[last page]"); - } - - // sort the files on creation time. - Arrays.sort(jobFiles, new Comparator() { - public int compare(Path p1, Path p2) { - String dp1 = null; - String dp2 = null; - - dp1 = p1.getName(); - dp2 = p2.getName(); - - String[] split1 = dp1.split("_"); - String[] split2 = dp2.split("_"); - - // compare job tracker start time - int res = new Date(Long.parseLong(split1[1])).compareTo( - new Date(Long.parseLong(split2[1]))); - if (res == 0) { - Long l1 = Long.parseLong(split1[2]); - res = l1.compareTo(Long.parseLong(split2[2])); - } - return res; - } - }); - - out.println("

"); - - // print the navigation info (top) - printNavigation(pageno, size, maxPageNo, search, out); - - out.print(""); - out.print(""); - out.print( "") ; - out.print(""); - - Set displayedJobs = new HashSet(); - for (int i = start - 1; i < start + length - 1; ++i) { - Path jobFile = jobFiles[i]; - - String jobId = JobHistory.getJobIDFromHistoryFilePath(jobFile).toString(); - String userName = JobHistory.getUserFromHistoryFilePath(jobFile); - - // Check if the job is already displayed. There can be multiple job - // history files for jobs that have restarted - if (displayedJobs.contains(jobId)) { - continue; - } else { - displayedJobs.add(jobId); - } - -%> -
-<% - printJob(jobId, userName, new Path(jobFile.getParent(), jobFile), - out) ; -%> -
-<% - } // end while trackers - out.print("
Job Id | User
"); - - // show the navigation info (bottom) - printNavigation(pageno, size, maxPageNo, search, out); -%> -<%! - private void printJob(String jobId, - String user, Path logFile, JspWriter out) - throws IOException { - out.print(""); - out.print("" + "" + HtmlQuoting.quoteHtmlChars(jobId) + ""); - out.print("" + HtmlQuoting.quoteHtmlChars(user) + ""); - out.print(""); - } - - private void printNavigation(int pageno, int size, int max, String search, - JspWriter out) throws IOException { - int numIndexToShow = 5; // num indexes to show on either side - - //TODO check this on boundary cases - out.print("
<"); - - // show previous link - if (pageno > 1) { - out.println("Previous"); - } - - // display the numbered index 1 2 3 4 - int firstPage = pageno - numIndexToShow; - if (firstPage < 1) { - firstPage = 1; // boundary condition - } - - int lastPage = pageno + numIndexToShow; - if (lastPage > max) { - lastPage = max; // boundary condition - } - - // debug - out.println(""); - - for (int i = firstPage; i <= lastPage; ++i) { - if (i != pageno) {// needs hyperlink - out.println(" " + i + " "); - } else { // current page - out.println(i); - } - } - - // show the next link - if (pageno < max) { - out.println("Next"); - } - out.print(">
"); - } -%> - diff --git a/hadoop-mapreduce-project/src/webapps/job/jobqueue_details.jsp b/hadoop-mapreduce-project/src/webapps/job/jobqueue_details.jsp deleted file mode 100644 index d429e785200..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/jobqueue_details.jsp +++ /dev/null @@ -1,107 +0,0 @@ -<%/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */%> -<%@ page - contentType="text/html; charset=UTF-8" - import="javax.servlet.*" - import="javax.servlet.http.*" - import="java.util.Vector" - import="java.util.Collection" - import="org.apache.hadoop.http.HtmlQuoting" - import="org.apache.hadoop.mapred.*" - import="org.apache.hadoop.util.StringUtils" - import="org.apache.hadoop.util.ServletUtil" -%> -<%!private static final long serialVersionUID = 526456771152222127L;%> -<% - JobTracker tracker = (JobTracker) application.getAttribute("job.tracker"); - String trackerName = StringUtils.simpleHostname(tracker - .getJobTrackerMachine()); - String queueName = request.getParameter("queueName"); - TaskScheduler scheduler = tracker.getTaskScheduler(); - JobQueueInfo schedInfo = tracker.getQueueInfo(queueName); -%> - - - -Queue details for <%=queueName != null ? queueName : ""%> - - - - -<% - if (!JSPUtil.processButtons(request, response, tracker)) { - return;// user is not authorized - } -%> -<% - String schedulingInfoString = schedInfo.getSchedulingInfo(); -%> -

Hadoop Job Queue Scheduling Information on - <%=trackerName%> -

-
-Scheduling Information : -<%=HtmlQuoting.quoteHtmlChars(schedulingInfoString).replaceAll("\n", "
")%> -
-
-<% - if (schedInfo.getChildren() != null && schedInfo.getChildren().size() > 0) { -%> -Child Queues : -<% - for (JobQueueInfo childQueue : schedInfo.getChildren()) { - String[] childNameSplits = childQueue.getQueueName().split(":"); - String childName = childNameSplits[childNameSplits.length -1]; -%> - - <%=childName%>    -<% - } -%> -
-<% - } else { - Collection jobs = scheduler.getJobs(queueName); - if (jobs == null || jobs.isEmpty()) { -%> -
-

No Jobs found for the Queue :: <%=queueName != null ? queueName : ""%>

-
-
-<% - } else { -%> -
-

Job Summary for the Queue :: <%=queueName != null ? queueName : ""%>

-
-
-(In the order maintained by the scheduler) -
-
-
-<%=JSPUtil.generateJobTable("Job List", jobs, 30, 0, tracker.conf)%> -
-<% - } - } -%> - -<% - out.println(ServletUtil.htmlFooter()); -%> - diff --git a/hadoop-mapreduce-project/src/webapps/job/jobtable.jsp b/hadoop-mapreduce-project/src/webapps/job/jobtable.jsp deleted file mode 100644 index 7c7fe3b8501..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/jobtable.jsp +++ /dev/null @@ -1,73 +0,0 @@ -<% -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -%> -<%@ page -contentType="text/html; charset=UTF-8" -import="org.apache.hadoop.mapred.*" -import="javax.servlet.*" -import="javax.servlet.http.*" -import="java.io.*" -import="java.util.*" -import="org.apache.hadoop.util.ServletUtil" -%> -<%! -private static final long serialVersionUID = 1L; -%> -<% -JobTracker tracker = (JobTracker) application.getAttribute("job.tracker"); -QueueManager qmgr = tracker.getQueueManager(); -String queue = request.getParameter("queue_name"); -TaskScheduler scheduler = tracker.getTaskScheduler(); -JobQueueInfo queueInfo = tracker.getQueueInfo(queue); -%> -<% -if(queueInfo == null || (queueInfo.getChildren() != null && - queueInfo.getChildren().size() != 0) ){ -%> -<% -} else { -%> -<% -Collection jobs = scheduler.getJobs(queue); -String[] queueLabelSplits = queue.split(":"); -String queueLabel = - queueLabelSplits.length==0?queue:queueLabelSplits[queueLabelSplits.length-1]; - -if(jobs == null || jobs.isEmpty()) { -%> -
-

No Jobs found for the QueueName:: <%=queueLabel%> -

-
-<% -}else { -%> -
-

-

Job Summary for the Queue :: <%=queueLabel%>

-(In the order maintained by the scheduler) -


-<%= - JSPUtil.generateJobTable("Job List", jobs, 30, 5, tracker.conf) -%> -
-<% -} -%> -<%} %> diff --git a/hadoop-mapreduce-project/src/webapps/job/jobtasks.jsp b/hadoop-mapreduce-project/src/webapps/job/jobtasks.jsp deleted file mode 100644 index e9f574e0160..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/jobtasks.jsp +++ /dev/null @@ -1,173 +0,0 @@ -<% -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -%> -<%@ page - contentType="text/html; charset=UTF-8" - import="javax.servlet.*" - import="javax.servlet.http.*" - import="java.io.*" - import="java.util.*" - import="org.apache.hadoop.http.HtmlQuoting" - import="org.apache.hadoop.mapred.*" - import="org.apache.hadoop.mapred.JSPUtil.JobWithViewAccessCheck" - import="org.apache.hadoop.util.*" - import="java.lang.Integer" - import="java.text.SimpleDateFormat" -%> -<%! private static final long serialVersionUID = 1L; -%> -<%! static SimpleDateFormat dateFormat = new SimpleDateFormat("d-MMM-yyyy HH:mm:ss") ; %> -<% - JobTracker tracker = (JobTracker) application.getAttribute("job.tracker"); - String trackerName = - StringUtils.simpleHostname(tracker.getJobTrackerMachine()); - String jobid = request.getParameter("jobid"); - if (jobid == null) { - out.println("

Missing 'jobid'!

"); - return; - } - JobID jobidObj = JobID.forName(jobid); - - JobWithViewAccessCheck myJob = JSPUtil.checkAccessAndGetJob(tracker, jobidObj, - request, response); - if (!myJob.isViewJobAllowed()) { - return; // user is not authorized to view this job - } - - JobInProgress job = myJob.getJob(); - - String type = request.getParameter("type"); - String pagenum = request.getParameter("pagenum"); - String state = request.getParameter("state"); - state = (state!=null) ? state : "all"; - int pnum = Integer.parseInt(pagenum); - int next_page = pnum+1; - int numperpage = 2000; - - TaskReport[] reports = null; - int start_index = (pnum - 1) * numperpage; - int end_index = start_index + numperpage; - int report_len = 0; - if ("map".equals(type)) { - reports = (job != null) ? tracker.getMapTaskReports(jobidObj) : null; - } else if ("reduce".equals(type)) { - reports = (job != null) ? tracker.getReduceTaskReports(jobidObj) : null; - } else if ("cleanup".equals(type)) { - reports = (job != null) ? tracker.getCleanupTaskReports(jobidObj) : null; - } else if ("setup".equals(type)) { - reports = (job != null) ? tracker.getSetupTaskReports(jobidObj) : null; - } -%> - - - - - Hadoop <%=type%> task list for <%=jobid%> on <%=trackerName%> - - - -

Hadoop <%=type%> task list for -<%=jobid%> on -<%=trackerName%>

-<% - // redirect to history page if it cannot be found in memory - if (job == null) { - JobID jobIdObj = JobID.forName(jobid); - String historyFile = tracker.getJobHistory().getHistoryFilePath(jobIdObj); - if (historyFile == null) { - out.println("

Job " + jobid + " not known!

"); - return; - } - String historyUrl = "/jobtaskshistory.jsp?logFile=" + historyFile + - "&status=" + state + "&taskType=" + type; - response.sendRedirect(response.encodeRedirectURL(historyUrl)); - return; - } - // Filtering the reports if some filter is specified - if (!"all".equals(state)) { - List filteredReports = new ArrayList(); - for (int i = 0; i < reports.length; ++i) { - if (("completed".equals(state) && reports[i].getCurrentStatus() == TIPStatus.COMPLETE) - || ("running".equals(state) && reports[i].getCurrentStatus() == TIPStatus.RUNNING) - || ("killed".equals(state) && reports[i].getCurrentStatus() == TIPStatus.KILLED) - || ("pending".equals(state) && reports[i].getCurrentStatus() == TIPStatus.PENDING)) { - filteredReports.add(reports[i]); - } - } - // using filtered reports instead of all the reports - reports = filteredReports.toArray(new TaskReport[0]); - filteredReports = null; - } - report_len = reports.length; - - if (report_len <= start_index) { - out.print("No such tasks"); - } else { - out.print("
"); - out.print("

" + Character.toUpperCase(state.charAt(0)) - + state.substring(1).toLowerCase() + " Tasks

"); - out.print("
"); - out.print(""); - out.print("" + - ""); - if (end_index > report_len){ - end_index = report_len; - } - for (int i = start_index ; i < end_index; i++) { - TaskReport report = reports[i]; - out.print(""); - out.print(""); - out.print(""); - out.println(""); - out.println(""); - String[] diagnostics = report.getDiagnostics(); - out.print(""); - out.println(""); - } - out.print("
TaskCompleteStatusStart TimeFinish TimeErrorsCounters
" + report.getTaskID() + "" + StringUtils.formatPercent(report.getProgress(),2) + - ServletUtil.percentageGraph(report.getProgress() * 100f, 80) + "" + HtmlQuoting.quoteHtmlChars(report.getState()) + "
" + StringUtils.getFormattedTimeWithDiff(dateFormat, report.getStartTime(),0) + "
" + StringUtils.getFormattedTimeWithDiff(dateFormat, - report.getFinishTime(), report.getStartTime()) + "
");
-         for (int j = 0; j < diagnostics.length ; j++) {
-             out.println(HtmlQuoting.quoteHtmlChars(diagnostics[j]));
-         }
-         out.println("

" + - "" + report.getCounters().size() + - "
"); - out.print("
"); - } - if (end_index < report_len) { - out.print(""); - } - if (start_index != 0) { - out.print(""); - } -%> - -
-Go back to JobTracker
-<% -out.println(ServletUtil.htmlFooter()); -%> diff --git a/hadoop-mapreduce-project/src/webapps/job/jobtaskshistory.jsp b/hadoop-mapreduce-project/src/webapps/job/jobtaskshistory.jsp deleted file mode 100644 index 7a40fba7107..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/jobtaskshistory.jsp +++ /dev/null @@ -1,95 +0,0 @@ -<% -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -%> -<%@ page - contentType="text/html; charset=UTF-8" - import="javax.servlet.http.*" - import="java.io.*" - import="java.util.*" - import="org.apache.hadoop.http.HtmlQuoting" - import="org.apache.hadoop.mapreduce.TaskAttemptID" - import="org.apache.hadoop.mapreduce.TaskID" - import="org.apache.hadoop.mapred.*" - import="org.apache.hadoop.fs.*" - import="org.apache.hadoop.util.*" - import="java.text.SimpleDateFormat" - import="org.apache.hadoop.mapreduce.jobhistory.*" -%> - -<%! - private static SimpleDateFormat dateFormat = - new SimpleDateFormat("d/MM HH:mm:ss") ; -%> -<%! private static final long serialVersionUID = 1L; -%> - -<% - String logFile = request.getParameter("logFile"); - String taskStatus = request.getParameter("status"); - String taskType = request.getParameter("taskType"); - - FileSystem fs = (FileSystem) application.getAttribute("fileSys"); - JobTracker jobTracker = (JobTracker) application.getAttribute("job.tracker"); - JobHistoryParser.JobInfo job = JSPUtil.checkAccessAndGetJobInfo(request, - response, jobTracker, fs, new Path(logFile)); - if (job == null) { - return; - } - Map tasks = job.getAllTasks(); -%> - - - -

<%=taskStatus%> <%=taskType %> task list for <%=job.getJobId() %>

-
- - -<% - for (JobHistoryParser.TaskInfo task : tasks.values()) { - if (taskType.equalsIgnoreCase(task.getTaskType().toString())) { - Map taskAttempts = task.getAllTaskAttempts(); - for (JobHistoryParser.TaskAttemptInfo taskAttempt : taskAttempts.values()) { - if (taskStatus.equals(taskAttempt.getTaskStatus()) || - taskStatus.equalsIgnoreCase("all")){ - printTask(logFile, taskAttempt, out); - } - } - } - } -%> -
Task IdStart TimeFinish Time
Error
-<%! - private void printTask(String logFile, - JobHistoryParser.TaskAttemptInfo attempt, JspWriter out) throws IOException{ - out.print(""); - out.print("" + "" + - attempt.getAttemptId().getTaskID() + ""); - out.print("" + StringUtils.getFormattedTimeWithDiff(dateFormat, - attempt.getStartTime(), 0 ) + ""); - out.print("" + StringUtils.getFormattedTimeWithDiff(dateFormat, - attempt.getFinishTime(), - attempt.getStartTime() ) + ""); - out.print(""+ HtmlQuoting.quoteHtmlChars(attempt.getError()) +""); - out.print(""); - } -%> -
- - diff --git a/hadoop-mapreduce-project/src/webapps/job/jobtracker.jsp b/hadoop-mapreduce-project/src/webapps/job/jobtracker.jsp deleted file mode 100644 index 2b7651813fb..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/jobtracker.jsp +++ /dev/null @@ -1,185 +0,0 @@ -<% -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -%> -<%@ page - contentType="text/html; charset=UTF-8" - import="javax.servlet.*" - import="javax.servlet.http.*" - import="java.io.*" - import="java.lang.management.MemoryUsage" - import="java.lang.management.MemoryMXBean" - import="java.lang.management.ManagementFactory" - import="java.util.*" - import="java.text.DecimalFormat" - import="org.apache.hadoop.http.HtmlQuoting" - import="org.apache.hadoop.mapred.*" - import="org.apache.hadoop.mapreduce.*" - import="org.apache.hadoop.util.*" -%> -<%! private static final long serialVersionUID = 1L; -%> -<% - JobTracker tracker = (JobTracker) application.getAttribute("job.tracker"); - ClusterStatus status = tracker.getClusterStatus(); - MemoryMXBean mem = ManagementFactory.getMemoryMXBean(); - ClusterMetrics metrics = tracker.getClusterMetrics(); - String trackerName = - StringUtils.simpleHostname(tracker.getJobTrackerMachine()); - JobQueueInfo[] queues = tracker.getJobQueues(); - List runningJobs = tracker.getRunningJobs(); - List completedJobs = tracker.getCompletedJobs(); - List failedJobs = tracker.getFailedJobs(); -%> -<%! - private static DecimalFormat percentFormat = new DecimalFormat("##0.00"); - - public void generateSummaryTable(JspWriter out, ClusterMetrics metrics, - JobTracker tracker) throws IOException { - String tasksPerNode = metrics.getTaskTrackerCount() > 0 ? - percentFormat.format(((double)(metrics.getMapSlotCapacity() + - metrics.getReduceSlotCapacity())) / metrics.getTaskTrackerCount()): - "-"; - out.print("\n"+ - "" + - "" + - "" + - "" + - "" + - "" + - "" + - "" + - "" + - "\n"); - out.print("
QueuesRunning Map TasksRunning Reduce TasksTotal SubmissionsNodesOccupied Map SlotsOccupied Reduce SlotsReserved Map SlotsReserved Reduce SlotsMap Slot CapacityReduce Slot CapacityAvg. Slots/NodeBlacklisted NodesExcluded Nodes
" + - tracker.getRootQueues().length + "" + - metrics.getRunningMaps() + "" + - metrics.getRunningReduces() + "" + - metrics.getTotalJobSubmissions() + - "" + - metrics.getTaskTrackerCount() + "" + - metrics.getOccupiedMapSlots() + "" + - metrics.getOccupiedReduceSlots() + "" + - metrics.getReservedMapSlots() + "" + - metrics.getReservedReduceSlots() + "" + - + metrics.getMapSlotCapacity() + - "" + metrics.getReduceSlotCapacity() + - "" + tasksPerNode + - "" + - metrics.getBlackListedTaskTrackerCount() + "" + - "" + - metrics.getDecommissionedTaskTrackerCount() + "" + - "
\n"); - - out.print("
"); - if (tracker.recoveryManager.shouldRecover()) { - out.print(""); - if (tracker.hasRecovered()) { - out.print("The JobTracker got restarted and recovered back in " ); - out.print(StringUtils.formatTime(tracker.getRecoveryDuration())); - } else { - out.print("The JobTracker got restarted and is still recovering"); - } - out.print(""); - } - }%> - - - - - -<%= trackerName %> Hadoop Map/Reduce Administration - - - - - -<% if (!JSPUtil.processButtons(request, response, tracker)) { - return;// user is not authorized - } -%> - -

<%= trackerName %> Hadoop Map/Reduce Administration

- - - -State: <%= status.getJobTrackerStatus() %>
-Started: <%= new Date(tracker.getStartTime())%>
-Version: <%= VersionInfo.getVersion()%>, - <%= VersionInfo.getRevision()%>
-Compiled: <%= VersionInfo.getDate()%> by - <%= VersionInfo.getUser()%> from - <%= VersionInfo.getBranch()%>
-Identifier: <%= tracker.getTrackerIdentifier()%>
- -
-

Cluster Summary (Heap Size is - <% MemoryUsage heap = mem.getHeapMemoryUsage(); - out.print(StringUtils.byteDesc(heap.getUsed()) + "/"); - out.print(StringUtils.byteDesc(heap.getCommitted()) + "/"); - out.print(StringUtils.byteDesc(heap.getMax()) + ")"); - %> -<% - generateSummaryTable(out, metrics, tracker); -%> -
-Filter (Jobid, Priority, User, Name)
-Example: 'user:smith 3200' will filter by 'smith' only in the user field and '3200' in all fields -
- -

Running Jobs

-<%=JSPUtil.generateJobTable("Running", runningJobs, 30, 0, tracker.conf)%> -
- -<% -if (completedJobs.size() > 0) { - out.print("

Completed Jobs

"); - out.print(JSPUtil.generateJobTable("Completed", completedJobs, 0, - runningJobs.size(), tracker.conf)); - out.print("
"); -} -%> - -<% -if (failedJobs.size() > 0) { - out.print("

Failed Jobs

"); - out.print(JSPUtil.generateJobTable("Failed", failedJobs, 0, - (runningJobs.size()+completedJobs.size()), tracker.conf)); - out.print("
"); -} -%> - -

Retired Jobs

-<%=JSPUtil.generateRetiredJobTable(tracker, - (runningJobs.size()+completedJobs.size()+failedJobs.size()))%> -
- -

Local Logs

-Log directory, -Job Tracker History - -<% -out.println(ServletUtil.htmlFooter()); -%> diff --git a/hadoop-mapreduce-project/src/webapps/job/jobtracker.jspx b/hadoop-mapreduce-project/src/webapps/job/jobtracker.jspx deleted file mode 100644 index be7324de53a..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/jobtracker.jspx +++ /dev/null @@ -1,91 +0,0 @@ - - - - - - - - - - - - - private static final long serialVersionUID = 1L; - - - - response.setHeader("Pragma", "no-cache"); - response.setHeader("Cache-Control", "no-store"); - response.setDateHeader("Expires", -1); - - - JobTracker tracker = (JobTracker) application.getAttribute("job.tracker"); - String trackerName = StringUtils.simpleHostname(tracker.getJobTrackerMachine()); - JobTrackerJspHelper jspHelper = new JobTrackerJspHelper(); - - List<JobInProgress> runningJobs = tracker.getRunningJobs(); - List<JobInProgress> completedJobs = tracker.getCompletedJobs(); - List<JobInProgress> failedJobs = tracker.getFailedJobs(); - - - trackerName - - - tracker.getClusterStatus().getJobTrackerStatus() - new Date(tracker.getStartTime()) - VersionInfo.getVersion() - VersionInfo.getRevision() - VersionInfo.getDate() - VersionInfo.getUser() - tracker.getTrackerIdentifier() - - - - - jspHelper.generateSummaryTable(out, tracker); - - - - - - jspHelper.generateJobTable(out, "running", runningJobs); - - - - - - jspHelper.generateJobTable(out, "completed", completedJobs); - - - - - - jspHelper.generateJobTable(out, "failed", failedJobs); - - - - - - JSPUtil.generateRetiredJobXml(out, tracker, - runningJobs.size() + completedJobs.size() + failedJobs.size()); - - - diff --git a/hadoop-mapreduce-project/src/webapps/job/machines.jsp b/hadoop-mapreduce-project/src/webapps/job/machines.jsp deleted file mode 100644 index e58036a1ec2..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/machines.jsp +++ /dev/null @@ -1,186 +0,0 @@ -<% -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -%> -<%@ page - contentType="text/html; charset=UTF-8" - import="javax.servlet.*" - import="javax.servlet.http.*" - import="java.io.*" - import="java.util.*" - import="java.text.DecimalFormat" - import="org.apache.hadoop.mapred.*" - import="org.apache.hadoop.util.*" -%> -<%! private static final long serialVersionUID = 1L; -%> -<% - JobTracker tracker = (JobTracker) application.getAttribute("job.tracker"); - String trackerName = - StringUtils.simpleHostname(tracker.getJobTrackerMachine()); - String type = request.getParameter("type"); -%> -<%! - public void generateTaskTrackerTable(JspWriter out, - String type, - JobTracker tracker) throws IOException { - Collection c; - if (("blacklisted").equals(type)) { - out.println("

Blacklisted Task Trackers

"); - c = tracker.blacklistedTaskTrackers(); - } else if (("active").equals(type)) { - out.println("

Active Task Trackers

"); - c = tracker.activeTaskTrackers(); - } else { - out.println("

Task Trackers

"); - c = tracker.taskTrackers(); - } - int noCols = 9 + - (3 * tracker.getStatistics().collector.DEFAULT_COLLECT_WINDOWS.length); - if(type.equals("blacklisted")) { - noCols = noCols + 1; - } - if (c.size() == 0) { - out.print("There are currently no known " + type + " Task Trackers."); - } else { - out.print("
\n"); - out.print("\n"); - out.print("\n"); - out.print("" + - "" + - "" + - "" + - "" + - "" + - ""); - if(type.equals("blacklisted")) { - out.print(""); - } - for(StatisticsCollector.TimeWindow window : tracker.getStatistics(). - collector.DEFAULT_COLLECT_WINDOWS) { - out.println(""); - out.println(""); - out.println(""); - } - out.print("\n"); - - int maxFailures = 0; - String failureKing = null; - for (Iterator it = c.iterator(); it.hasNext(); ) { - TaskTrackerStatus tt = (TaskTrackerStatus) it.next(); - long sinceHeartbeat = System.currentTimeMillis() - tt.getLastSeen(); - boolean isHealthy = tt.getHealthStatus().isNodeHealthy(); - long sinceHealthCheck = tt.getHealthStatus().getLastReported(); - String healthString = ""; - if(sinceHealthCheck == 0) { - healthString = "N/A"; - } else { - healthString = (isHealthy?"Healthy":"Unhealthy"); - sinceHealthCheck = System.currentTimeMillis() - sinceHealthCheck; - sinceHealthCheck = sinceHealthCheck/1000; - } - if (sinceHeartbeat > 0) { - sinceHeartbeat = sinceHeartbeat / 1000; - } - int numCurTasks = 0; - for (Iterator it2 = tt.getTaskReports().iterator(); it2.hasNext(); ) { - it2.next(); - numCurTasks++; - } - int numFailures = tt.getFailures(); - if (numFailures > maxFailures) { - maxFailures = numFailures; - failureKing = tt.getTrackerName(); - } - out.print("\n"); - } - out.print("
Task Trackers
NameHost# running tasksMax Map TasksMax Reduce TasksFailuresNode Health StatusSeconds Since Node Last HealthyReason For blacklistingTotal Tasks "+window.name+"Succeeded Tasks "+window.name+"Failed Health Checks " - + window.name+"Seconds since heartbeat
"); - out.print(tt.getTrackerName() + ""); - out.print(tt.getHost() + "" + numCurTasks + - "" + tt.getMaxMapSlots() + - "" + tt.getMaxReduceSlots() + - "" + numFailures + - "" + healthString + - "" + sinceHealthCheck); - if(type.equals("blacklisted")) { - out.print("" + tracker.getFaultReport(tt.getHost())); - } - for(StatisticsCollector.TimeWindow window : tracker.getStatistics(). - collector.DEFAULT_COLLECT_WINDOWS) { - JobTrackerStatistics.TaskTrackerStat ttStat = tracker.getStatistics(). - getTaskTrackerStat(tt.getTrackerName()); - out.println("" + ttStat.totalTasksStat.getValues(). - get(window).getValue()); - out.println("" + ttStat.succeededTasksStat.getValues(). - get(window).getValue()); - out.println("" + ttStat.healthCheckFailedStat. - getValues().get(window).getValue()); - } - - out.print("" + sinceHeartbeat + "
\n"); - out.print("
\n"); - if (maxFailures > 0) { - out.print("Highest Failures: " + failureKing + " with " + maxFailures + - " failures
\n"); - } - } - } - - public void generateTableForExcludedNodes(JspWriter out, JobTracker tracker) - throws IOException { - // excluded nodes - out.println("

Excluded Nodes

"); - Collection d = tracker.getExcludedNodes(); - if (d.size() == 0) { - out.print("There are currently no excluded hosts."); - } else { - out.print("
\n"); - out.print("\n"); - out.print(""); - out.print("\n"); - for (Iterator it = d.iterator(); it.hasNext(); ) { - String dt = (String)it.next(); - out.print("\n"); - } - out.print("
Host Name
" + dt + "
\n"); - out.print("
\n"); - } - } -%> - - - - -<%=trackerName%> Hadoop Machine List - - -

<%=trackerName%> Hadoop Machine List

- -<% - if (("excluded").equals(type)) { - generateTableForExcludedNodes(out, tracker); - } else { - generateTaskTrackerTable(out, type, tracker); - } -%> - -<% -out.println(ServletUtil.htmlFooter()); -%> diff --git a/hadoop-mapreduce-project/src/webapps/job/queueinfo.jsp b/hadoop-mapreduce-project/src/webapps/job/queueinfo.jsp deleted file mode 100644 index ff4376d52fe..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/queueinfo.jsp +++ /dev/null @@ -1,145 +0,0 @@ -<% -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -%> -<%!private static final long serialVersionUID = 1L;%> -<%@ page -contentType="text/html; charset=UTF-8" -import="org.apache.hadoop.mapred.*" -import="org.apache.commons.lang.StringUtils" -import="javax.servlet.*" -import="javax.servlet.http.*" -import="java.io.*" -import="java.util.*" -%> - - - - -Job Queue Information page - -<% - JobTracker tracker = (JobTracker) application.getAttribute("job.tracker"); - QueueManager qmgr = tracker.getQueueManager(); - JobQueueInfo[] rootQueues = qmgr.getRootQueues(); -%> -<%! - public static String getTree(String parent, JobQueueInfo[] rootQueues) { - List rootQueueList = new ArrayList(); - for (JobQueueInfo queue : rootQueues) { - rootQueueList.add(queue); - } - return getTree(parent, rootQueueList); - } - - private static String getTree(String parent, List children) { - StringBuilder str = new StringBuilder(); - if (children == null) { - return ""; - } - for (JobQueueInfo queueInfo : children) { - String variableName = StringUtils.replaceChars(queueInfo.getQueueName(), - ":-*+#.^", "_______"); - String label = queueInfo.getQueueName().split(":")[queueInfo - .getQueueName().split(":").length - 1]; - str.append(String.format( - "var %sTreeNode = new YAHOO.widget.MenuNode(\"%s\", %s, false);\n", - variableName, label, parent)); - str.append(String.format("%sTreeNode.data=\"%s\";\n", variableName, - queueInfo.getSchedulingInfo().replaceAll("\n", "
"))); - str.append(String.format("%sTreeNode.name=\"%s\";\n", variableName, - queueInfo.getQueueName())); - str.append(getTree(variableName + "TreeNode", queueInfo.getChildren())); - } - return str.toString(); - } -%> - - - - - - - -
-
-
- - - - diff --git a/hadoop-mapreduce-project/src/webapps/job/queuetable.jsp b/hadoop-mapreduce-project/src/webapps/job/queuetable.jsp deleted file mode 100644 index 8924e7a8b05..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/queuetable.jsp +++ /dev/null @@ -1,78 +0,0 @@ -<% -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -%> -<%! -private static final long serialVersionUID = 1L; -%> -<%@ page - contentType="text/html; charset=UTF-8" - import="javax.servlet.*" - import="javax.servlet.http.*" - import="java.io.*" - import="java.util.*" - import="java.text.DecimalFormat" - import="org.apache.hadoop.http.HtmlQuoting" - import="org.apache.hadoop.mapred.*" - import="org.apache.hadoop.util.*" - import="org.apache.hadoop.util.ServletUtil" -%> - - - - -<% -JobTracker tracker = (JobTracker) application.getAttribute("job.tracker"); -JobQueueInfo[] queues = tracker.getRootJobQueues(); -%> -Queue Information - - -

Scheduling Information

- - - - - - - - -<% -for(JobQueueInfo queue: queues) { - String queueName = queue.getQueueName(); - String state = queue.getQueueState(); - String schedulingInformation = queue.getSchedulingInfo(); - if(schedulingInformation == null || schedulingInformation.trim().equals("")) { - schedulingInformation = "NA"; - } -%> - - - - -<% -} -%> - -
Queue Name Scheduling Information
<%=queueName%> - -<%=HtmlQuoting.quoteHtmlChars(schedulingInformation).replaceAll("\n","
")%> -
-

Job Tracker

- - diff --git a/hadoop-mapreduce-project/src/webapps/job/taskdetails.jsp b/hadoop-mapreduce-project/src/webapps/job/taskdetails.jsp deleted file mode 100644 index dc557e47a37..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/taskdetails.jsp +++ /dev/null @@ -1,374 +0,0 @@ -<% -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -%> -<%@ page - contentType="text/html; charset=UTF-8" - import="javax.servlet.*" - import="javax.servlet.http.*" - import="java.io.*" - import="java.lang.String" - import="java.util.*" - import="org.apache.hadoop.http.HtmlQuoting" - import="org.apache.hadoop.mapred.*" - import="org.apache.hadoop.mapred.JSPUtil.JobWithViewAccessCheck" - import="org.apache.hadoop.util.*" - import="java.text.SimpleDateFormat" - import="org.apache.hadoop.security.UserGroupInformation" - import="java.security.PrivilegedExceptionAction" - import="org.apache.hadoop.security.AccessControlException" -%> -<%!static SimpleDateFormat dateFormat = new SimpleDateFormat( - "d-MMM-yyyy HH:mm:ss"); - -%> -<%! private static final long serialVersionUID = 1L; -%> -<%!private void printConfirm(JspWriter out, - String attemptid, String action) throws IOException { - String url = "taskdetails.jsp?attemptid=" + attemptid; - out.print("" + "

Are you sure you want to kill/fail " - + attemptid + " ?


" - + "
" - + "" - + "" - + "
" - + "
"); - }%> -<% - final JobTracker tracker = (JobTracker) application.getAttribute("job.tracker"); - - String attemptid = request.getParameter("attemptid"); - final TaskAttemptID attemptidObj = TaskAttemptID.forName(attemptid); - - // Obtain tipid for attemptid, if attemptid is available. - TaskID tipidObj = - (attemptidObj == null) ? TaskID.forName(request.getParameter("tipid")) - : attemptidObj.getTaskID(); - if (tipidObj == null) { - out.print("tipid sent is not valid.
\n"); - return; - } - // Obtain jobid from tipid - final JobID jobidObj = tipidObj.getJobID(); - String jobid = jobidObj.toString(); - - JobWithViewAccessCheck myJob = JSPUtil.checkAccessAndGetJob(tracker, jobidObj, - request, response); - if (!myJob.isViewJobAllowed()) { - return; // user is not authorized to view this job - } - - JobInProgress job = myJob.getJob(); - // redirect to history page if it cannot be found in memory - if (job == null) { - String historyFile = tracker.getJobHistory().getHistoryFilePath(jobidObj); - if (historyFile == null) { - out.println("

Job " + jobid + " not known!

"); - return; - } - String historyUrl = "/taskdetailshistory.jsp?logFile=" + historyFile + - "&tipid=" + tipidObj.toString(); - response.sendRedirect(response.encodeRedirectURL(historyUrl)); - return; - } - boolean privateActions = JSPUtil.privateActionsAllowed(tracker.conf); - if (privateActions) { - String action = request.getParameter("action"); - if (action != null) { - String user = request.getRemoteUser(); - UserGroupInformation ugi = null; - if (user != null) { - ugi = UserGroupInformation.createRemoteUser(user); - } - if (action.equalsIgnoreCase("confirm")) { - String subAction = request.getParameter("subaction"); - if (subAction == null) - subAction = "fail-task"; - printConfirm(out, attemptid, subAction); - return; - } - else if (action.equalsIgnoreCase("kill-task") - && request.getMethod().equalsIgnoreCase("POST")) { - if (ugi != null) { - try { - ugi.doAs(new PrivilegedExceptionAction() { - public Void run() throws IOException{ - - tracker.killTask(attemptidObj, false);// checks job modify permission - return null; - } - }); - } catch(AccessControlException e) { - String errMsg = "User " + user + " failed to kill task " - + attemptidObj + "!

" + e.getMessage() + - "
Go back to Job
"; - JSPUtil.setErrorAndForward(errMsg, request, response); - return; - } - } else {// no authorization needed - tracker.killTask(attemptidObj, false); - } - - //redirect again so that refreshing the page will not attempt to rekill the task - response.sendRedirect("/taskdetails.jsp?subaction=kill-task" + - "&tipid=" + tipidObj.toString()); - } - else if (action.equalsIgnoreCase("fail-task") - && request.getMethod().equalsIgnoreCase("POST")) { - if (ugi != null) { - try { - ugi.doAs(new PrivilegedExceptionAction() { - public Void run() throws IOException{ - - tracker.killTask(attemptidObj, true);// checks job modify permission - return null; - } - }); - } catch(AccessControlException e) { - String errMsg = "User " + user + " failed to fail task " - + attemptidObj + "!

" + e.getMessage() + - "
Go back to Job
"; - JSPUtil.setErrorAndForward(errMsg, request, response); - return; - } - } else {// no authorization needed - tracker.killTask(attemptidObj, true); - } - - response.sendRedirect("/taskdetails.jsp?subaction=fail-task" + - "&tipid=" + tipidObj.toString()); - } - } - } - TaskInProgress tip = job.getTaskInProgress(tipidObj); - TaskStatus[] ts = null; - boolean isCleanupOrSetup = false; - if (tip != null) { - ts = tip.getTaskStatuses(); - isCleanupOrSetup = tip.isJobCleanupTask(); - if (!isCleanupOrSetup) { - isCleanupOrSetup = tip.isJobSetupTask(); - } - } -%> - - - - - - - Hadoop Task Details - - -

Job <%=jobid%>

- -
- -

All Task Attempts

-
-<% - if (ts == null || ts.length == 0) { -%> -

No Task Attempts found

-<% - } else { -%> - - - <% - if (ts[0].getIsMap()) { - %> - - <% - } - else if(!isCleanupOrSetup) { - %> - - <% - } - %> - - <% - for (int i = 0; i < ts.length; i++) { - TaskStatus status = ts[i]; - String taskTrackerName = status.getTaskTracker(); - TaskTrackerStatus taskTracker = tracker.getTaskTrackerStatus(taskTrackerName); - out.print(""); - String taskAttemptTracker = null; - String cleanupTrackerName = null; - TaskTrackerStatus cleanupTracker = null; - String cleanupAttemptTracker = null; - boolean hasCleanupAttempt = false; - if (tip != null && tip.isCleanupAttempt(status.getTaskID())) { - cleanupTrackerName = tip.machineWhereCleanupRan(status.getTaskID()); - cleanupTracker = tracker.getTaskTrackerStatus(cleanupTrackerName); - if (cleanupTracker != null) { - cleanupAttemptTracker = "http://" + cleanupTracker.getHost() + ":" - + cleanupTracker.getHttpPort(); - } - hasCleanupAttempt = true; - } - out.print(""); - out.print(""); - out.print(""); - out.print(""); - if (ts[i].getIsMap()) { - out.print(""); - } - else if (!isCleanupOrSetup) { - out.print(""); - out.println(""); - } - out.println(""); - - out.print(""); - out.print(""); - out.print(""); - } - %> -
Task AttemptsMachineStatusProgressStart TimeMap Phase FinishedShuffle FinishedSort FinishedFinish TimeErrorsTask LogsCountersActions
" + status.getTaskID() + ""); - if (hasCleanupAttempt) { - out.print("Task attempt: "); - } - if (taskTracker == null) { - out.print(taskTrackerName); - } else { - taskAttemptTracker = "http://" + taskTracker.getHost() + ":" - + taskTracker.getHttpPort(); - out.print("" - + tracker.getNode(taskTracker.getHost()) + ""); - } - if (hasCleanupAttempt) { - out.print("
Cleanup Attempt: "); - if (cleanupAttemptTracker == null ) { - out.print(cleanupTrackerName); - } else { - out.print("" - + tracker.getNode(cleanupTracker.getHost()) + ""); - } - } - out.print("
" + status.getRunState() + "" + StringUtils.formatPercent(status.getProgress(), 2) - + ServletUtil.percentageGraph(status.getProgress() * 100f, 80) + "" - + StringUtils.getFormattedTimeWithDiff(dateFormat, status - .getStartTime(), 0) + "" - + StringUtils.getFormattedTimeWithDiff(dateFormat, status - .getMapFinishTime(), status.getStartTime()) + "" - + StringUtils.getFormattedTimeWithDiff(dateFormat, status - .getShuffleFinishTime(), status.getStartTime()) + "" - + StringUtils.getFormattedTimeWithDiff(dateFormat, status - .getSortFinishTime(), status.getShuffleFinishTime()) - + "" - + StringUtils.getFormattedTimeWithDiff(dateFormat, status - .getFinishTime(), status.getStartTime()) + "
");
-        String [] failures = tracker.getTaskDiagnostics(status.getTaskID());
-        if (failures == null) {
-          out.print(" ");
-        } else {
-          for(int j = 0 ; j < failures.length ; j++){
-            out.print(HtmlQuoting.quoteHtmlChars(failures[j]));
-            if (j < (failures.length - 1)) {
-              out.print("\n-------\n");
-            }
-          }
-        }
-        out.print("
"); - String taskLogUrl = null; - if (taskTracker != null ) { - taskLogUrl = TaskLogServlet.getTaskLogUrl(taskTracker.getHost(), - String.valueOf(taskTracker.getHttpPort()), - status.getTaskID().toString()); - } - if (hasCleanupAttempt) { - out.print("Task attempt:
"); - } - if (taskLogUrl == null) { - out.print("n/a"); - } else { - String tailFourKBUrl = taskLogUrl + "&start=-4097"; - String tailEightKBUrl = taskLogUrl + "&start=-8193"; - String entireLogUrl = taskLogUrl + "&all=true"; - out.print("Last 4KB
"); - out.print("Last 8KB
"); - out.print("All
"); - } - if (hasCleanupAttempt) { - out.print("Cleanup attempt:
"); - taskLogUrl = null; - if (cleanupTracker != null ) { - taskLogUrl = TaskLogServlet.getTaskLogUrl(cleanupTracker.getHost(), - String.valueOf(cleanupTracker.getHttpPort()), - status.getTaskID().toString()); - } - if (taskLogUrl == null) { - out.print("n/a"); - } else { - String tailFourKBUrl = taskLogUrl + "&start=-4097&cleanup=true"; - String tailEightKBUrl = taskLogUrl + "&start=-8193&cleanup=true"; - String entireLogUrl = taskLogUrl + "&all=true&cleanup=true"; - out.print("Last 4KB
"); - out.print("Last 8KB
"); - out.print("All
"); - } - } - out.print("
" + "" - + ((status.getCounters() != null) ? status.getCounters().size() : 0) - + ""); - if (privateActions - && status.getRunState() == TaskStatus.State.RUNNING) { - out.print(" Kill "); - out.print("
Fail "); - } - else - out.print("
 
"); - out.println("
-
- -<% - if (ts[0].getIsMap() && !isCleanupOrSetup) { -%> -

Input Split Locations

- -<% - for (String split: StringUtils.split(tracker.getTip( - tipidObj).getSplitNodes())) { - out.println(""); - } -%> -
" + split + "
-<% - } - } -%> - -
-Go back to the job
-Go back to JobTracker
-<% -out.println(ServletUtil.htmlFooter()); -%> diff --git a/hadoop-mapreduce-project/src/webapps/job/taskdetailshistory.jsp b/hadoop-mapreduce-project/src/webapps/job/taskdetailshistory.jsp deleted file mode 100644 index bdacd8ad1f3..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/taskdetailshistory.jsp +++ /dev/null @@ -1,145 +0,0 @@ -<% -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -%> -<%@ page - contentType="text/html; charset=UTF-8" - import="javax.servlet.http.*" - import="java.io.*" - import="java.util.*" - import="org.apache.hadoop.http.HtmlQuoting" - import="org.apache.hadoop.mapred.*" - import="org.apache.hadoop.fs.*" - import="org.apache.hadoop.util.*" - import="java.text.SimpleDateFormat" - import="org.apache.hadoop.mapreduce.TaskType" - import="org.apache.hadoop.mapreduce.Counters" - import="org.apache.hadoop.mapreduce.TaskID" - import="org.apache.hadoop.mapreduce.TaskAttemptID" - import="org.apache.hadoop.mapreduce.jobhistory.*" -%> - -<%! private static SimpleDateFormat dateFormat = new SimpleDateFormat("d/MM HH:mm:ss") ; %> -<%! private static final long serialVersionUID = 1L; -%> - -<% - String logFile = request.getParameter("logFile"); - String tipid = request.getParameter("tipid"); - FileSystem fs = (FileSystem) application.getAttribute("fileSys"); - JobTracker jobTracker = (JobTracker) application.getAttribute("job.tracker"); - JobHistoryParser.JobInfo job = JSPUtil.checkAccessAndGetJobInfo(request, - response, jobTracker, fs, new Path(logFile)); - if (job == null) { - return; - } - JobHistoryParser.TaskInfo task = job.getAllTasks().get(TaskID.forName(tipid)); - TaskType type = task.getTaskType(); -%> - - - -

<%=tipid %> attempts for <%=job.getJobId() %>

-
- - -<% - if (TaskType.REDUCE.equals(type)) { -%> - -<% - } -%> - - - -<% - for (JobHistoryParser.TaskAttemptInfo attempt : task.getAllTaskAttempts().values()) { - printTaskAttempt(attempt, type, out, logFile); - } -%> -
Task IdStart TimeShuffle FinishedSort FinishedFinish TimeHostErrorTask LogsCounters
-
-<% - if (TaskType.MAP.equals(type)) { -%> -

Input Split Locations

- -<% - for (String split : StringUtils.split(task.getSplitLocations())) - { - out.println(""); - } -%> -
" + split + "
-<% - } -%> -<%! - private void printTaskAttempt(JobHistoryParser.TaskAttemptInfo taskAttempt, - TaskType type, JspWriter out, String logFile) - throws IOException { - out.print(""); - out.print("" + taskAttempt.getAttemptId() + ""); - out.print("" + StringUtils.getFormattedTimeWithDiff(dateFormat, - taskAttempt.getStartTime(), 0 ) + ""); - if (TaskType.REDUCE.equals(type)) { - out.print("" + - StringUtils.getFormattedTimeWithDiff(dateFormat, - taskAttempt.getShuffleFinishTime(), - taskAttempt.getStartTime()) + ""); - out.print("" + StringUtils.getFormattedTimeWithDiff(dateFormat, - taskAttempt.getSortFinishTime(), - taskAttempt.getShuffleFinishTime()) + ""); - } - out.print(""+ StringUtils.getFormattedTimeWithDiff(dateFormat, - taskAttempt.getFinishTime(), - taskAttempt.getStartTime()) + ""); - out.print("" + taskAttempt.getHostname() + ""); - out.print("" + HtmlQuoting.quoteHtmlChars(taskAttempt.getError()) + - ""); - - // Print task log urls - out.print(""); - String taskLogsUrl = HistoryViewer.getTaskLogsUrl(taskAttempt); - if (taskLogsUrl != null) { - String tailFourKBUrl = taskLogsUrl + "&start=-4097"; - String tailEightKBUrl = taskLogsUrl + "&start=-8193"; - String entireLogUrl = taskLogsUrl + "&all=true"; - out.print("Last 4KB
"); - out.print("Last 8KB
"); - out.print("All
"); - } else { - out.print("n/a"); - } - out.print(""); - Counters counters = taskAttempt.getCounters(); - if (counters != null) { - TaskAttemptID attemptId = taskAttempt.getAttemptId(); - out.print("" - + "" - + counters.countCounters() + ""); - } else { - out.print(""); - } - out.print(""); - } -%> - - diff --git a/hadoop-mapreduce-project/src/webapps/job/taskstats.jsp b/hadoop-mapreduce-project/src/webapps/job/taskstats.jsp deleted file mode 100644 index e5bf0b72db3..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/taskstats.jsp +++ /dev/null @@ -1,135 +0,0 @@ -<% -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -%> -<%@ page - contentType="text/html; charset=UTF-8" - import="javax.servlet.*" - import="javax.servlet.http.*" - import="java.io.*" - import="java.lang.String" - import="java.text.*" - import="java.util.*" - import="org.apache.hadoop.http.HtmlQuoting" - import="org.apache.hadoop.mapred.*" - import="org.apache.hadoop.mapred.JSPUtil.JobWithViewAccessCheck" - import="org.apache.hadoop.util.*" - import="java.text.SimpleDateFormat" -%> -<%! private static final long serialVersionUID = 1L; -%> -<% - JobTracker tracker = (JobTracker) application.getAttribute("job.tracker"); - String trackerName = - StringUtils.simpleHostname(tracker.getJobTrackerMachine()); - - String attemptid = request.getParameter("attemptid"); - TaskAttemptID attemptidObj = TaskAttemptID.forName(attemptid); - // Obtain tipid for attemptId, if attemptId is available. - TaskID tipidObj = - (attemptidObj == null) ? TaskID.forName(request.getParameter("tipid")) - : attemptidObj.getTaskID(); - // Obtain jobid from tipid - JobID jobidObj = tipidObj.getJobID(); - String jobid = jobidObj.toString(); - - JobWithViewAccessCheck myJob = JSPUtil.checkAccessAndGetJob(tracker, jobidObj, - request, response); - if (!myJob.isViewJobAllowed()) { - return; // user is not authorized to view this job - } - - JobInProgress job = myJob.getJob(); - // redirect to history page if it cannot be found in memory - if (job == null) { - JobID jobIdObj = JobID.forName(jobid); - String historyFile = tracker.getJobHistory().getHistoryFilePath(jobIdObj); - if (historyFile == null) { - out.println("

Job " + jobid + " not known!

"); - return; - } - String historyUrl = "/taskstatshistory.jsp?logFile=" + historyFile + - "&attemptid=" + attemptid; - response.sendRedirect(response.encodeRedirectURL(historyUrl)); - return; - } - - Format decimal = new DecimalFormat(); - Counters counters; - if (attemptid == null) { - counters = tracker.getTipCounters(tipidObj); - attemptid = tipidObj.toString(); // for page title etc - } - else { - TaskStatus taskStatus = tracker.getTaskStatus(attemptidObj); - counters = taskStatus.getCounters(); - } -%> - - - - - Counters for <%=attemptid%> - - -

Counters for <%=attemptid%>

- -
- -<% - if ( counters == null ) { -%> -

No counter information found for this task

-<% - } else { -%> - -<% - for (String groupName : counters.getGroupNames()) { - Counters.Group group = counters.getGroup(groupName); - String displayGroupName = group.getDisplayName(); -%> - - - -<% - for (Counters.Counter counter : group) { - String displayCounterName = counter.getDisplayName(); - long value = counter.getCounter(); -%> - - - - - -<% - } - } -%> -

- <%=HtmlQuoting.quoteHtmlChars(displayGroupName)%>
<%=HtmlQuoting.quoteHtmlChars(displayCounterName)%><%=decimal.format(value)%>
-<% - } -%> - -
-Go back to the job
-Go back to JobTracker
-<% -out.println(ServletUtil.htmlFooter()); -%> diff --git a/hadoop-mapreduce-project/src/webapps/job/taskstatshistory.jsp b/hadoop-mapreduce-project/src/webapps/job/taskstatshistory.jsp deleted file mode 100644 index d4cad15f5ee..00000000000 --- a/hadoop-mapreduce-project/src/webapps/job/taskstatshistory.jsp +++ /dev/null @@ -1,122 +0,0 @@ -<% -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -%> -<%@ page - contentType="text/html; charset=UTF-8" - import="javax.servlet.http.*" - import="java.io.*" - import="java.util.*" - import="org.apache.hadoop.http.HtmlQuoting" - import="org.apache.hadoop.mapred.*" - import="org.apache.hadoop.fs.*" - import="org.apache.hadoop.util.*" - import="java.text.*" - import="org.apache.hadoop.mapreduce.jobhistory.*" - import="org.apache.hadoop.mapreduce.TaskID" - import="org.apache.hadoop.mapreduce.TaskAttemptID" - import="org.apache.hadoop.mapreduce.Counter" - import="org.apache.hadoop.mapreduce.Counters" - import="org.apache.hadoop.mapreduce.CounterGroup" -%> -<%! private static SimpleDateFormat dateFormat = new SimpleDateFormat("d/MM HH:mm:ss") ; - private static final long serialVersionUID = 1L; -%> - -<% - String attemptid = request.getParameter("attemptid"); - if(attemptid == null) { - out.println("No attemptid found! Pass a 'attemptid' parameter in the request."); - return; - } - TaskID tipid = TaskAttemptID.forName(attemptid).getTaskID(); - String logFile = request.getParameter("logFile"); - - Format decimal = new DecimalFormat(); - - FileSystem fs = (FileSystem) application.getAttribute("fileSys"); - JobTracker jobTracker = (JobTracker) application.getAttribute("job.tracker"); - JobHistoryParser.JobInfo job = JSPUtil.checkAccessAndGetJobInfo(request, - response, jobTracker, fs, new Path(logFile)); - if (job == null) { - return; - } - - Map tasks = job.getAllTasks(); - JobHistoryParser.TaskInfo task = tasks.get(tipid); - - Map attempts = task.getAllTaskAttempts(); - JobHistoryParser.TaskAttemptInfo attempt = attempts.get(TaskAttemptID.forName(attemptid)); - - Counters counters = attempt.getCounters(); -%> - - - - - Counters for <%=attemptid%> - - -

Counters for <%=attemptid%>

- -
- -<% - if (counters == null) { -%> -

No counter information found for this attempt

-<% - } else { -%> - -<% - for (String groupName : counters.getGroupNames()) { - CounterGroup group = counters.getGroup(groupName); - String displayGroupName = group.getDisplayName(); -%> - - - -<% - Iterator ctrItr = group.iterator(); - while(ctrItr.hasNext()) { - Counter counter = ctrItr.next(); - String displayCounterName = counter.getDisplayName(); - long value = counter.getValue(); -%> - - - - - -<% - } - } -%> -

- <%=HtmlQuoting.quoteHtmlChars(displayGroupName)%>
<%=HtmlQuoting.quoteHtmlChars(displayCounterName)%><%=decimal.format(value)%>
-<% - } -%> - -
-Go back to the job
-Go back to JobTracker
-<% -out.println(ServletUtil.htmlFooter()); -%> diff --git a/hadoop-mapreduce-project/src/webapps/static/hadoop-logo.jpg b/hadoop-mapreduce-project/src/webapps/static/hadoop-logo.jpg deleted file mode 100644 index 809525d9f15..00000000000 Binary files a/hadoop-mapreduce-project/src/webapps/static/hadoop-logo.jpg and /dev/null differ diff --git a/hadoop-mapreduce-project/src/webapps/static/hadoop.css b/hadoop-mapreduce-project/src/webapps/static/hadoop.css deleted file mode 100644 index 0560cb3075e..00000000000 --- a/hadoop-mapreduce-project/src/webapps/static/hadoop.css +++ /dev/null @@ -1,134 +0,0 @@ -/* -* Licensed to the Apache Software Foundation (ASF) under one or more -* contributor license agreements. See the NOTICE file distributed with -* this work for additional information regarding copyright ownership. -* The ASF licenses this file to You under the Apache License, Version 2.0 -* (the "License"); you may not use this file except in compliance with -* the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -body { - background-color : #ffffff; - font-family : sans-serif; -} - -.small { - font-size : smaller; -} - -div#dfsnodetable tr#row1, div#dfstable td#col1 { - font-weight : bolder; -} - -div#dfstable td#col1 { - vertical-align : top; -} - -div#dfstable td#col3 { - text-align : right; -} - -div#dfsnodetable caption { - text-align : left; -} - -div#dfsnodetable a#title { - font-size : larger; - font-weight : bolder; -} - -div#dfsnodetable td, th { - border-bottom-style : none; - padding-bottom : 4px; - padding-top : 4px; -} - -div#dfsnodetable A:link, A:visited { - text-decoration : none; -} - -div#dfsnodetable th.header, th.headerASC, th.headerDSC { - padding-bottom : 8px; - padding-top : 8px; -} -div#dfsnodetable th.header:hover, th.headerASC:hover, th.headerDSC:hover, - td.name:hover { - text-decoration : underline; - cursor : pointer; -} - -div#dfsnodetable td.blocks, td.size, td.pcused, td.adminstate, td.lastcontact { - text-align : right; -} - -div#dfsnodetable .rowNormal .header { - background-color : #ffffff; -} -div#dfsnodetable .rowAlt, .headerASC, .headerDSC { - background-color : lightyellow; -} - -.warning { - font-weight : bolder; - color : red; -} - -div#dfstable table { - white-space : pre; -} - -div#dfsnodetable td, div#dfsnodetable th, div#dfstable td { - padding-left : 10px; - padding-right : 10px; -} - -td.perc_filled { - background-color:#AAAAFF; -} - -td.perc_nonfilled { - background-color:#FFFFFF; -} - -line.taskgraphline { - stroke-width:1;stroke-linecap:round; -} - -#quicklinks { - margin: 0; - padding: 2px 4px; - position: fixed; - top: 0; - right: 0; - text-align: right; - background-color: #eee; - font-weight: bold; -} - -#quicklinks ul { - margin: 0; - padding: 0; - list-style-type: none; - font-weight: normal; -} - -#quicklinks ul { - display: none; -} - -#quicklinks a { - font-size: smaller; - text-decoration: none; -} - -#quicklinks ul a { - text-decoration: underline; -} diff --git a/hadoop-mapreduce-project/src/webapps/static/jobconf.xsl b/hadoop-mapreduce-project/src/webapps/static/jobconf.xsl deleted file mode 100644 index 75363651e81..00000000000 --- 
a/hadoop-mapreduce-project/src/webapps/static/jobconf.xsl +++ /dev/null @@ -1,35 +0,0 @@ - - - - - - - - - - - - - - - - - -
namevalue
-
-
diff --git a/hadoop-mapreduce-project/src/webapps/static/jobtracker.js b/hadoop-mapreduce-project/src/webapps/static/jobtracker.js deleted file mode 100644 index 7da16c1fcc6..00000000000 --- a/hadoop-mapreduce-project/src/webapps/static/jobtracker.js +++ /dev/null @@ -1,151 +0,0 @@ -/* -* Licensed to the Apache Software Foundation (ASF) under one or more -* contributor license agreements. See the NOTICE file distributed with -* this work for additional information regarding copyright ownership. -* The ASF licenses this file to You under the Apache License, Version 2.0 -* (the "License"); you may not use this file except in compliance with -* the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -function checkButtonVerbage() -{ - var inputs = document.getElementsByName("jobCheckBox"); - var check = getCheckStatus(inputs); - - setCheckButtonVerbage(! check); -} - -function selectAll() -{ - var inputs = document.getElementsByName("jobCheckBox"); - var check = getCheckStatus(inputs); - - for (var i in inputs) { - if ('jobCheckBox' == inputs[i].name) { - if ( inputs[i].parentNode.parentNode.style.display != 'none') { - inputs[i].checked = ! check; - } - } - } - - setCheckButtonVerbage(check); -} - -function getCheckStatus(inputs) -{ - var check = true; - - for (var i in inputs) { - if ('jobCheckBox' == inputs[i].name) { - if ( inputs[i].parentNode.parentNode.style.display != 'none') { - check = (inputs[i].checked && check); - } - } - } - - return check; -} - - -function setCheckButtonVerbage(check) -{ - var op = document.getElementById("checkEm"); - op.value = check ? "Select All" : "Deselect All"; -} - -function applyfilter() -{ - var cols = ["job","priority","user","name"]; - var nodes = []; - var filters = []; - - for (var i = 0; i < cols.length; ++i) { - nodes[i] = document.getElementById(cols[i] + "_0" ); - } - - var filter = document.getElementById("filter"); - filters = filter.value.split(' '); - - var row = 0; - while ( nodes[0] != null ) { - //default display status - var display = true; - - // for each filter - for (var filter_idx = 0; filter_idx < filters.length; ++filter_idx) { - - // go check each column - if ((getDisplayStatus(nodes, filters[filter_idx], cols)) == 0) { - display = false; - break; - } - } - - // set the display status - nodes[0].parentNode.style.display = display ? '' : 'none'; - - // next row - ++row; - - // next set of controls - for (var i = 0; i < cols.length; ++i) { - nodes[i] = document.getElementById(cols[i] + "_" + row); - } - } // while -} - -function getDisplayStatus(nodes, filter, cols) -{ - var offset = filter.indexOf(':'); - - var search = offset != -1 ? 
filter.substring(offset + 1).toLowerCase() : filter.toLowerCase(); - - for (var col = 0; col < cols.length; ++col) { - // a column specific filter - if (offset != -1 ) { - var searchCol = filter.substring(0, offset).toLowerCase(); - - if (searchCol == cols[col]) { - // special case jobs to remove unnecessary stuff - return containsIgnoreCase(stripHtml(nodes[col].innerHTML), search); - } - } else if (containsIgnoreCase(stripHtml(nodes[col].innerHTML), filter)) { - return true; - } - } - - return false; -} - -function stripHtml(text) -{ - return text.replace(/<[^>]*>/g,'').replace(/&[^;]*;/g,''); -} - -function containsIgnoreCase(haystack, needle) -{ - return haystack.toLowerCase().indexOf(needle.toLowerCase()) != -1; -} - -function confirmAction() -{ - return confirm("Are you sure?"); -} - -function toggle(id) -{ - if ( document.getElementById(id).style.display != 'block') { - document.getElementById(id).style.display = 'block'; - } - else { - document.getElementById(id).style.display = 'none'; - } -} diff --git a/hadoop-mapreduce-project/src/webapps/task/index.html b/hadoop-mapreduce-project/src/webapps/task/index.html deleted file mode 100644 index a1b41522bcc..00000000000 --- a/hadoop-mapreduce-project/src/webapps/task/index.html +++ /dev/null @@ -1,17 +0,0 @@ - - diff --git a/hadoop-mapreduce-project/src/webapps/task/tasktracker.jsp b/hadoop-mapreduce-project/src/webapps/task/tasktracker.jsp deleted file mode 100644 index 5cc04f7fecd..00000000000 --- a/hadoop-mapreduce-project/src/webapps/task/tasktracker.jsp +++ /dev/null @@ -1,116 +0,0 @@ -<% -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -%> -<%@ page - contentType="text/html; charset=UTF-8" - import="javax.servlet.*" - import="javax.servlet.http.*" - import="java.io.*" - import="java.util.*" - import="java.text.DecimalFormat" - import="org.apache.hadoop.http.HtmlQuoting" - import="org.apache.hadoop.mapred.*" - import="org.apache.hadoop.util.*" -%> -<%! private static final long serialVersionUID = 1L; -%> -<% - TaskTracker tracker = (TaskTracker) application.getAttribute("task.tracker"); - String trackerName = tracker.getName(); -%> - - - - -<%= trackerName %> Task Tracker Status - - -

<%= trackerName %> Task Tracker Status

-
-Version: <%= VersionInfo.getVersion()%>, - <%= VersionInfo.getRevision()%>
-Compiled: <%= VersionInfo.getDate()%> by - <%= VersionInfo.getUser()%> from - <%= VersionInfo.getBranch()%>
- -

Running tasks

-
- - - - - <% - Iterator itr = tracker.getRunningTaskStatuses().iterator(); - while (itr.hasNext()) { - TaskStatus status = (TaskStatus) itr.next(); - out.print(""); - out.print("\n"); - } - %> -
Task AttemptsStatusProgressErrors
" + status.getTaskID()); - out.print("" + status.getRunState()); - out.print("" + - StringUtils.formatPercent(status.getProgress(), 2)); - out.print("
" +
-           HtmlQuoting.quoteHtmlChars(status.getDiagnosticInfo()) +
-           "
-
- -

Non-Running Tasks

- - - <% - for(TaskStatus status: tracker.getNonRunningTasks()) { - out.print(""); - out.print("\n"); - } - %> -
Task AttemptsStatus
" + status.getTaskID() + "" + status.getRunState() + "
- - -

Tasks from Running Jobs

-
- - - - - <% - itr = tracker.getTasksFromRunningJobs().iterator(); - while (itr.hasNext()) { - TaskStatus status = (TaskStatus) itr.next(); - out.print(""); - out.print("\n"); - } - %> -
Task AttemptsStatusProgressErrors
" + status.getTaskID()); - out.print("" + status.getRunState()); - out.print("" + - StringUtils.formatPercent(status.getProgress(), 2)); - out.print("
" +
-           HtmlQuoting.quoteHtmlChars(status.getDiagnosticInfo()) +
-           "
-
- - -

Local Logs

-Log directory - -<% -out.println(ServletUtil.htmlFooter()); -%> diff --git a/hadoop-project/src/site/apt/index.apt.vm b/hadoop-project/src/site/apt/index.apt.vm index 32e708ec542..aacae46da26 100644 --- a/hadoop-project/src/site/apt/index.apt.vm +++ b/hadoop-project/src/site/apt/index.apt.vm @@ -34,7 +34,7 @@ Apache Hadoop ${project.version} Namenodes. More details are available in the - {{{./hadoop-yarn/hadoop-yarn-site/Federation.html}HDFS Federation}} + {{{./hadoop-project-dist/hadoop-hdfs/Federation.html}HDFS Federation}} document. * {MapReduce NextGen aka YARN aka MRv2} @@ -65,9 +65,9 @@ Getting Started The Hadoop documentation includes the information you need to get started using Hadoop. Begin with the - {{{./hadoop-yarn/hadoop-yarn-site/SingleCluster.html}Single Node Setup}} which + {{{./hadoop-project-dist/hadoop-common/SingleCluster.html}Single Node Setup}} which shows you how to set up a single-node Hadoop installation. Then move on to the - {{{./hadoop-yarn/hadoop-yarn-site/ClusterSetup.html}Cluster Setup}} to learn how + {{{./hadoop-project-dist/hadoop-common/ClusterSetup.html}Cluster Setup}} to learn how to set up a multi-node Hadoop installation. diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml index ae2ab37cad4..da976f635ef 100644 --- a/hadoop-project/src/site/site.xml +++ b/hadoop-project/src/site/site.xml @@ -48,34 +48,36 @@ - - - + + + - - - - + + + + - - + + + + + - + - - + - + - + @@ -97,8 +99,8 @@ - + diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index a6102b216c4..03d293aa6ae 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -69,6 +69,8 @@ Release 2.0.3-alpha - Unreleased YARN-136. Make ClientToAMTokenSecretManager part of RMContext (Vinod Kumar Vavilapalli via sseth) + YARN-183. Clean up fair scheduler code. (Sandy Ryza via tomwhite) + OPTIMIZATIONS BUG FIXES @@ -96,6 +98,9 @@ Release 2.0.3-alpha - Unreleased YARN-181. Fixed eclipse settings broken by capacity-scheduler.xml move via YARN-140. (Siddharth Seth via vinodkv) + YARN-169. Update log4j.appender.EventCounter to use + org.apache.hadoop.log.metrics.EventCounter (Anthony Rojas via tomwhite) + Release 2.0.2-alpha - 2012-09-07 INCOMPATIBLE CHANGES @@ -166,6 +171,9 @@ Release 0.23.5 - UNRELEASED YARN-32. Fix TestApplicationTokens to not depend on test order and thus pass on JDK7. (vinodkv) + YARN-186. Coverage fixing LinuxContainerExecutor (Aleksey Gorshkov via + bobby) + OPTIMIZATIONS BUG FIXES @@ -202,6 +210,12 @@ Release 0.23.5 - UNRELEASED YARN-202. Log Aggregation generates a storm of fsync() for namenode (Kihwal Lee via bobby) + YARN-201. Fix CapacityScheduler to be less conservative for starved + off-switch requests. (jlowe via acmurthy) + + YARN-206. TestApplicationCleanup.testContainerCleanup occasionally fails. 
+ (jlowe via jeagles) + Release 0.23.4 - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties index ea485741951..7c859535c88 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties @@ -26,4 +26,4 @@ log4j.appender.CLA.layout.ConversionPattern=%d{ISO8601} %p [%t] %c: %m%n # Event Counter Appender # Sends counts of logging messages at different severity levels to Hadoop Metrics. # -log4j.appender.EventCounter=org.apache.hadoop.metrics.jvm.EventCounter +log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java index 9b98290d909..46ee4aa0c54 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutorWithMocks.java @@ -26,11 +26,13 @@ import java.io.File; import java.io.FileReader; import java.io.IOException; import java.io.LineNumberReader; +import java.net.InetSocketAddress; import java.util.Arrays; import java.util.HashMap; import java.util.LinkedList; import java.util.List; +import junit.framework.Assert; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -46,7 +48,6 @@ import org.junit.Test; public class TestLinuxContainerExecutorWithMocks { - @SuppressWarnings("unused") private static final Log LOG = LogFactory .getLog(TestLinuxContainerExecutorWithMocks.class); @@ -54,6 +55,7 @@ public class TestLinuxContainerExecutorWithMocks { private final File mockParamFile = new File("./params.txt"); private LocalDirsHandlerService dirsHandler; + private void deleteMockParamFile() { if(mockParamFile.exists()) { mockParamFile.delete(); @@ -126,8 +128,102 @@ public class TestLinuxContainerExecutorWithMocks { StringUtils.join(",", dirsHandler.getLocalDirs()), StringUtils.join(",", dirsHandler.getLogDirs())), readMockParams()); + } + + @Test + public void testStartLocalizer() throws IOException { + + + InetSocketAddress address = InetSocketAddress.createUnresolved("localhost", 8040); + Path nmPrivateCTokensPath= new Path("file:///bin/nmPrivateCTokensPath"); + + try { + mockExec.startLocalizer(nmPrivateCTokensPath, address, "test", "application_0", "12345", dirsHandler.getLocalDirs(), dirsHandler.getLogDirs()); + List result=readMockParams(); + Assert.assertEquals(result.size(), 16); + Assert.assertEquals(result.get(0), "test"); + Assert.assertEquals(result.get(1), "0" ); + Assert.assertEquals(result.get(2),"application_0" ); + Assert.assertEquals(result.get(3), 
"/bin/nmPrivateCTokensPath"); + Assert.assertEquals(result.get(7), "-classpath" ); + Assert.assertEquals(result.get(10),"org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer" ); + Assert.assertEquals(result.get(11), "test"); + Assert.assertEquals(result.get(12), "application_0"); + Assert.assertEquals(result.get(13),"12345" ); + Assert.assertEquals(result.get(14),"localhost" ); + Assert.assertEquals(result.get(15),"8040" ); + + } catch (InterruptedException e) { + LOG.error("Error:"+e.getMessage(),e); + Assert.fail(); + } + } + + + @Test + public void testContainerLaunchError() throws IOException { + + // reinitialize executer + File f = new File("./src/test/resources/mock-container-executer-with-error"); + if (!f.canExecute()) { + f.setExecutable(true); + } + String executorPath = f.getAbsolutePath(); + Configuration conf = new Configuration(); + conf.set(YarnConfiguration.NM_LINUX_CONTAINER_EXECUTOR_PATH, executorPath); + conf.set(YarnConfiguration.NM_LOCAL_DIRS, "file:///bin/echo"); + conf.set(YarnConfiguration.NM_LOG_DIRS, "file:///dev/null"); + + mockExec = new LinuxContainerExecutor(); + dirsHandler = new LocalDirsHandlerService(); + dirsHandler.init(conf); + mockExec.setConf(conf); + + String appSubmitter = "nobody"; + String cmd = String + .valueOf(LinuxContainerExecutor.Commands.LAUNCH_CONTAINER.getValue()); + String appId = "APP_ID"; + String containerId = "CONTAINER_ID"; + Container container = mock(Container.class); + ContainerId cId = mock(ContainerId.class); + ContainerLaunchContext context = mock(ContainerLaunchContext.class); + HashMap env = new HashMap(); + + when(container.getContainerID()).thenReturn(cId); + when(container.getLaunchContext()).thenReturn(context); + + when(cId.toString()).thenReturn(containerId); + + when(context.getEnvironment()).thenReturn(env); + + Path scriptPath = new Path("file:///bin/echo"); + Path tokensPath = new Path("file:///dev/null"); + Path workDir = new Path("/tmp"); + Path pidFile = new Path(workDir, "pid.txt"); + + mockExec.activateContainer(cId, pidFile); + int ret = mockExec.launchContainer(container, scriptPath, tokensPath, + appSubmitter, appId, workDir, dirsHandler.getLocalDirs(), + dirsHandler.getLogDirs()); + Assert.assertNotSame(0, ret); + assertEquals(Arrays.asList(appSubmitter, cmd, appId, containerId, + workDir.toString(), "/bin/echo", "/dev/null", pidFile.toString(), + StringUtils.join(",", dirsHandler.getLocalDirs()), + StringUtils.join(",", dirsHandler.getLogDirs())), readMockParams()); + + } + + @Test + public void testInit() throws Exception { + + mockExec.init(); + assertEquals(Arrays.asList("--checksetup"), readMockParams()); + + } + + @Test public void testContainerKill() throws IOException { String appSubmitter = "nobody"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/mock-container-executer-with-error b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/mock-container-executer-with-error new file mode 100755 index 00000000000..4f3432cbb80 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/mock-container-executer-with-error @@ -0,0 +1,7 @@ +#!/bin/sh +for PARAM in "$@" +do + echo $PARAM; +done > params.txt + +exec badcommand diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index e183a2d7020..28a81f7af26 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -833,8 +833,12 @@ public class LeafQueue implements CSQueue { // Note: Update headroom to account for current allocation too... allocateResource(clusterResource, application, assigned); - // Reset scheduling opportunities - application.resetSchedulingOpportunities(priority); + // Don't reset scheduling opportunities for non-local assignments + // otherwise the app will be delayed for each non-local assignment. + // This helps apps with many off-cluster requests schedule faster. + if (assignment.getType() != NodeType.OFF_SWITCH) { + application.resetSchedulingOpportunities(priority); + } // Done return assignment; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java index 3f97c96f457..acad730ee82 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java @@ -101,7 +101,7 @@ public class AppSchedulable extends Schedulable { @Override public Resource getResourceUsage() { - return this.app.getCurrentConsumption(); + return app.getCurrentConsumption(); } @@ -114,7 +114,7 @@ public class AppSchedulable extends Schedulable { * Get metrics reference from containing queue. 
*/ public QueueMetrics getMetrics() { - return this.queue.getQueueSchedulable().getMetrics(); + return queue.getQueueSchedulable().getMetrics(); } @Override @@ -190,9 +190,9 @@ public class AppSchedulable extends Schedulable { RMContainer rmContainer = application.reserve(node, priority, null, container); node.reserveResource(application, priority, rmContainer); - getMetrics().reserveResource(this.app.getUser(), + getMetrics().reserveResource(app.getUser(), container.getResource()); - scheduler.getRootQueueMetrics().reserveResource(this.app.getUser(), + scheduler.getRootQueueMetrics().reserveResource(app.getUser(), container.getResource()); } @@ -257,13 +257,13 @@ public class AppSchedulable extends Schedulable { // TODO this should subtract resource just assigned // TEMPROARY getMetrics().setAvailableResourcesToQueue( - this.scheduler.getClusterCapacity()); + scheduler.getClusterCapacity()); } // If we had previously made a reservation, delete it if (reserved) { - this.unreserve(application, priority, node); + unreserve(application, priority, node); } // Inform the node @@ -290,7 +290,7 @@ public class AppSchedulable extends Schedulable { // Make sure the application still needs requests at this priority if (app.getTotalRequiredResources(priority) == 0) { - this.unreserve(app, priority, node); + unreserve(app, priority, node); return Resources.none(); } } else { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java index 31508d3d162..79395b0d1d2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java @@ -61,7 +61,7 @@ public class FSQueue { queueSchedulable.addApp(appSchedulable); } - public void removeJob(FSSchedulerApp app) { + public void removeApp(FSSchedulerApp app) { applications.remove(app); queueSchedulable.removeApp(app); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueSchedulable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueSchedulable.java index 592b310e458..ccac112e6bb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueSchedulable.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueSchedulable.java @@ -80,7 +80,7 @@ public class FSQueueSchedulable extends Schedulable implements Queue { this.scheduler = scheduler; this.queue = queue; this.queueMgr = scheduler.getQueueManager(); - this.metrics = QueueMetrics.forQueue(this.getName(), null, true, scheduler.getConf()); + this.metrics = QueueMetrics.forQueue(getName(), null, true, scheduler.getConf()); 
this.lastTimeAtMinShare = scheduler.getClock().getTime(); this.lastTimeAtHalfFairShare = scheduler.getClock().getTime(); } @@ -113,7 +113,7 @@ public class FSQueueSchedulable extends Schedulable implements Queue { Resource toAdd = sched.getDemand(); if (LOG.isDebugEnabled()) { LOG.debug("Counting resource from " + sched.getName() + " " + toAdd - + "; Total resource consumption for " + this.getName() + " now " + + "; Total resource consumption for " + getName() + " now " + demand); } demand = Resources.add(demand, toAdd); @@ -123,7 +123,7 @@ public class FSQueueSchedulable extends Schedulable implements Queue { } } if (LOG.isDebugEnabled()) { - LOG.debug("The updated demand for " + this.getName() + " is " + demand + LOG.debug("The updated demand for " + getName() + " is " + demand + "; the max is " + maxRes); } } @@ -164,9 +164,9 @@ public class FSQueueSchedulable extends Schedulable implements Queue { @Override public Resource assignContainer(FSSchedulerNode node, boolean reserved) { - LOG.debug("Node offered to queue: " + this.getName() + " reserved: " + reserved); + LOG.debug("Node offered to queue: " + getName() + " reserved: " + reserved); // If this queue is over its limit, reject - if (Resources.greaterThan(this.getResourceUsage(), + if (Resources.greaterThan(getResourceUsage(), queueMgr.getMaxResources(queue.getName()))) { return Resources.none(); } @@ -258,7 +258,7 @@ public class FSQueueSchedulable extends Schedulable implements Queue { @Override public Map getQueueAcls() { - Map acls = this.queueMgr.getQueueAcls(this.getName()); + Map acls = queueMgr.getQueueAcls(getName()); return new HashMap(acls); } @@ -284,7 +284,7 @@ public class FSQueueSchedulable extends Schedulable implements Queue { recordFactory.newRecordInstance(QueueUserACLInfo.class); List operations = new ArrayList(); for (QueueACL operation : QueueACL.values()) { - Map acls = this.queueMgr.getQueueAcls(this.getName()); + Map acls = queueMgr.getQueueAcls(getName()); if (acls.get(operation).isUserAllowed(user)) { operations.add(operation); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerApp.java index e2a385f26c3..4e164e576f2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerApp.java @@ -112,12 +112,12 @@ public class FSSchedulerApp extends SchedulerApplication { } public ApplicationId getApplicationId() { - return this.appSchedulingInfo.getApplicationId(); + return appSchedulingInfo.getApplicationId(); } @Override public ApplicationAttemptId getApplicationAttemptId() { - return this.appSchedulingInfo.getApplicationAttemptId(); + return appSchedulingInfo.getApplicationAttemptId(); } public void setAppSchedulable(AppSchedulable appSchedulable) { @@ -129,7 +129,7 @@ public class FSSchedulerApp extends SchedulerApplication { } public String getUser() { - return this.appSchedulingInfo.getUser(); + return appSchedulingInfo.getUser(); } public synchronized void updateResourceRequests( @@ 
-138,19 +138,19 @@ public class FSSchedulerApp extends SchedulerApplication { } public Map getResourceRequests(Priority priority) { - return this.appSchedulingInfo.getResourceRequests(priority); + return appSchedulingInfo.getResourceRequests(priority); } public int getNewContainerId() { - return this.appSchedulingInfo.getNewContainerId(); + return appSchedulingInfo.getNewContainerId(); } public Collection getPriorities() { - return this.appSchedulingInfo.getPriorities(); + return appSchedulingInfo.getPriorities(); } public ResourceRequest getResourceRequest(Priority priority, String nodeAddress) { - return this.appSchedulingInfo.getResourceRequest(priority, nodeAddress); + return appSchedulingInfo.getResourceRequest(priority, nodeAddress); } public synchronized int getTotalRequiredResources(Priority priority) { @@ -158,7 +158,7 @@ public class FSSchedulerApp extends SchedulerApplication { } public Resource getResource(Priority priority) { - return this.appSchedulingInfo.getResource(priority); + return appSchedulingInfo.getResource(priority); } /** @@ -167,11 +167,11 @@ public class FSSchedulerApp extends SchedulerApplication { */ @Override public boolean isPending() { - return this.appSchedulingInfo.isPending(); + return appSchedulingInfo.isPending(); } public String getQueueName() { - return this.appSchedulingInfo.getQueueName(); + return appSchedulingInfo.getQueueName(); } /** @@ -185,7 +185,7 @@ public class FSSchedulerApp extends SchedulerApplication { public synchronized void stop(RMAppAttemptState rmAppAttemptFinalState) { // Cleanup all scheduling information - this.appSchedulingInfo.stop(rmAppAttemptFinalState); + appSchedulingInfo.stop(rmAppAttemptFinalState); } @SuppressWarnings("unchecked") @@ -196,7 +196,7 @@ public class FSSchedulerApp extends SchedulerApplication { getRMContainer(containerId); if (rmContainer == null) { // Some unknown container sneaked into the system. Kill it. - this.rmContext.getDispatcher().getEventHandler() + rmContext.getDispatcher().getEventHandler() .handle(new RMNodeCleanContainerEvent(nodeId, containerId)); return; } @@ -272,7 +272,7 @@ public class FSSchedulerApp extends SchedulerApplication { } synchronized public void addSchedulingOpportunity(Priority priority) { - this.schedulingOpportunities.setCount(priority, + schedulingOpportunities.setCount(priority, schedulingOpportunities.count(priority) + 1); } @@ -282,19 +282,19 @@ public class FSSchedulerApp extends SchedulerApplication { * successfully did so. */ synchronized public int getSchedulingOpportunities(Priority priority) { - return this.schedulingOpportunities.count(priority); + return schedulingOpportunities.count(priority); } synchronized void resetReReservations(Priority priority) { - this.reReservations.setCount(priority, 0); + reReservations.setCount(priority, 0); } synchronized void addReReservation(Priority priority) { - this.reReservations.add(priority); + reReservations.add(priority); } synchronized public int getReReservations(Priority priority) { - return this.reReservations.count(priority); + return reReservations.count(priority); } public synchronized int getNumReservedContainers(Priority priority) { @@ -458,8 +458,8 @@ public class FSSchedulerApp extends SchedulerApplication { * @param priority The priority of the container scheduled. 
*/ synchronized public void resetSchedulingOpportunities(Priority priority) { - this.lastScheduledContainer.put(priority, System.currentTimeMillis()); - this.schedulingOpportunities.setCount(priority, 0); + lastScheduledContainer.put(priority, System.currentTimeMillis()); + schedulingOpportunities.setCount(priority, 0); } /** @@ -494,14 +494,14 @@ public class FSSchedulerApp extends SchedulerApplication { rackLocalityThreshold; // Relax locality constraints once we've surpassed threshold. - if (this.getSchedulingOpportunities(priority) > (numNodes * threshold)) { + if (getSchedulingOpportunities(priority) > (numNodes * threshold)) { if (allowed.equals(NodeType.NODE_LOCAL)) { allowedLocalityLevel.put(priority, NodeType.RACK_LOCAL); - this.resetSchedulingOpportunities(priority); + resetSchedulingOpportunities(priority); } else if (allowed.equals(NodeType.RACK_LOCAL)) { allowedLocalityLevel.put(priority, NodeType.OFF_SWITCH); - this.resetSchedulingOpportunities(priority); + resetSchedulingOpportunities(priority); } } return allowedLocalityLevel.get(priority); @@ -512,7 +512,7 @@ public class FSSchedulerApp extends SchedulerApplication { Priority priority, ResourceRequest request, Container container) { // Update allowed locality level - NodeType allowed = this.allowedLocalityLevel.get(priority); + NodeType allowed = allowedLocalityLevel.get(priority); if (allowed != null) { if (allowed.equals(NodeType.OFF_SWITCH) && (type.equals(NodeType.NODE_LOCAL) || @@ -532,9 +532,9 @@ public class FSSchedulerApp extends SchedulerApplication { } // Create RMContainer - RMContainer rmContainer = new RMContainerImpl(container, this - .getApplicationAttemptId(), node.getNodeID(), this.rmContext - .getDispatcher().getEventHandler(), this.rmContext + RMContainer rmContainer = new RMContainerImpl(container, + getApplicationAttemptId(), node.getNodeID(), rmContext + .getDispatcher().getEventHandler(), rmContext .getContainerAllocationExpirer()); // Add it to allContainers list. 
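The FSSchedulerApp hunks above only drop redundant this. qualifiers, but the delay-scheduling logic they touch is easier to follow in one place. The sketch below is illustrative only; the class, field, and parameter names are invented and it is not the FSSchedulerApp API. An app counts the scheduling opportunities it passes up per priority and relaxes its allowed locality from NODE_LOCAL to RACK_LOCAL to OFF_SWITCH once the count exceeds numNodes times the configured threshold, resetting the counter at each relaxation, as the hunk above does via resetSchedulingOpportunities(priority).

import java.util.HashMap;
import java.util.Map;

// Illustrative sketch only; not the real FSSchedulerApp code.
public class DelaySchedulingSketch {
  enum NodeType { NODE_LOCAL, RACK_LOCAL, OFF_SWITCH }

  // Per-priority count of nodes offered but not used at the current locality level.
  private final Map<Integer, Integer> schedulingOpportunities = new HashMap<Integer, Integer>();
  // Per-priority locality level the app is currently allowed to accept.
  private final Map<Integer, NodeType> allowedLocalityLevel = new HashMap<Integer, NodeType>();

  /** Record that a node was offered at this priority but no suitably local container fit. */
  public void addSchedulingOpportunity(int priority) {
    Integer count = schedulingOpportunities.get(priority);
    schedulingOpportunities.put(priority, count == null ? 1 : count + 1);
  }

  /** Relax NODE_LOCAL -> RACK_LOCAL -> OFF_SWITCH once enough nodes have been passed up. */
  public NodeType getAllowedLocalityLevel(int priority, int numNodes,
      double nodeLocalityThreshold, double rackLocalityThreshold) {
    NodeType allowed = allowedLocalityLevel.get(priority);
    if (allowed == null) {
      allowed = NodeType.NODE_LOCAL;
      allowedLocalityLevel.put(priority, allowed);
    }
    if (allowed == NodeType.OFF_SWITCH) {
      return allowed;                                   // already fully relaxed
    }
    double threshold = (allowed == NodeType.NODE_LOCAL)
        ? nodeLocalityThreshold : rackLocalityThreshold;
    Integer count = schedulingOpportunities.get(priority);
    if (count != null && count > numNodes * threshold) {
      allowedLocalityLevel.put(priority, allowed == NodeType.NODE_LOCAL
          ? NodeType.RACK_LOCAL : NodeType.OFF_SWITCH);
      schedulingOpportunities.put(priority, 0);         // reset after each relaxation
    }
    return allowedLocalityLevel.get(priority);
  }
}

The real implementation keys these maps by Priority objects rather than ints; the logic shown in the surrounding context lines is otherwise the same.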
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java index b8cef425d0e..2833ca67b95 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java @@ -67,25 +67,25 @@ public class FSSchedulerNode extends SchedulerNode { } public RMNode getRMNode() { - return this.rmNode; + return rmNode; } public NodeId getNodeID() { - return this.rmNode.getNodeID(); + return rmNode.getNodeID(); } public String getHttpAddress() { - return this.rmNode.getHttpAddress(); + return rmNode.getHttpAddress(); } @Override public String getHostName() { - return this.rmNode.getHostName(); + return rmNode.getHostName(); } @Override public String getRackName() { - return this.rmNode.getRackName(); + return rmNode.getRackName(); } /** @@ -112,17 +112,18 @@ public class FSSchedulerNode extends SchedulerNode { @Override public synchronized Resource getAvailableResource() { - return this.availableResource; + return availableResource; } @Override public synchronized Resource getUsedResource() { - return this.usedResource; + return usedResource; } private synchronized boolean isValidContainer(Container c) { - if (launchedContainers.containsKey(c.getId())) + if (launchedContainers.containsKey(c.getId())) { return true; + } return false; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index 1d2412e5ffb..f011802f77e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -139,11 +139,11 @@ public class FairScheduler implements ResourceScheduler { public FairSchedulerConfiguration getConf() { - return this.conf; + return conf; } public QueueManager getQueueManager() { - return this.queueMgr; + return queueMgr; } public List getQueueSchedulables() { @@ -183,36 +183,34 @@ public class FairScheduler implements ResourceScheduler { * fair shares, deficits, minimum slot allocations, and amount of used and * required resources per job. 
*/ - protected void update() { - synchronized (this) { - queueMgr.reloadAllocsIfNecessary(); // Relaod alloc file - updateRunnability(); // Set job runnability based on user/queue limits - updatePreemptionVariables(); // Determine if any queues merit preemption + protected synchronized void update() { + queueMgr.reloadAllocsIfNecessary(); // Relaod alloc file + updateRunnability(); // Set job runnability based on user/queue limits + updatePreemptionVariables(); // Determine if any queues merit preemption - // Update demands of apps and queues - for (FSQueue queue: queueMgr.getQueues()) { - queue.getQueueSchedulable().updateDemand(); - } - - // Compute fair shares based on updated demands - List queueScheds = this.getQueueSchedulables(); - SchedulingAlgorithms.computeFairShares( - queueScheds, clusterCapacity); - - // Update queue metrics for this queue - for (FSQueueSchedulable sched : queueScheds) { - sched.getMetrics().setAvailableResourcesToQueue(sched.getFairShare()); - } - - // Use the computed shares to assign shares within each queue - for (FSQueue queue: queueMgr.getQueues()) { - queue.getQueueSchedulable().redistributeShare(); - } - - // Update recorded capacity of root queue (child queues are updated - // when fair share is calculated). - rootMetrics.setAvailableResourcesToQueue(clusterCapacity); + // Update demands of apps and queues + for (FSQueue queue: queueMgr.getQueues()) { + queue.getQueueSchedulable().updateDemand(); } + + // Compute fair shares based on updated demands + List queueScheds = getQueueSchedulables(); + SchedulingAlgorithms.computeFairShares( + queueScheds, clusterCapacity); + + // Update queue metrics for this queue + for (FSQueueSchedulable sched : queueScheds) { + sched.getMetrics().setAvailableResourcesToQueue(sched.getFairShare()); + } + + // Use the computed shares to assign shares within each queue + for (FSQueue queue: queueMgr.getQueues()) { + queue.getQueueSchedulable().redistributeShare(); + } + + // Update recorded capacity of root queue (child queues are updated + // when fair share is calculated). + rootMetrics.setAvailableResourcesToQueue(clusterCapacity); } /** @@ -257,17 +255,16 @@ public class FairScheduler implements ResourceScheduler { * have been below half their fair share for the fairSharePreemptionTimeout. * If such queues exist, compute how many tasks of each type need to be * preempted and then select the right ones using preemptTasks. - * - * This method computes and logs the number of tasks we want to preempt even - * if preemption is disabled, for debugging purposes. */ protected void preemptTasksIfNecessary() { - if (!preemptionEnabled) + if (!preemptionEnabled) { return; + } long curTime = clock.getTime(); - if (curTime - lastPreemptCheckTime < preemptionInterval) + if (curTime - lastPreemptCheckTime < preemptionInterval) { return; + } lastPreemptCheckTime = curTime; Resource resToPreempt = Resources.none(); @@ -288,8 +285,9 @@ public class FairScheduler implements ResourceScheduler { * lowest priority to preempt. */ protected void preemptResources(List scheds, Resource toPreempt) { - if (scheds.isEmpty() || Resources.equals(toPreempt, Resources.none())) + if (scheds.isEmpty() || Resources.equals(toPreempt, Resources.none())) { return; + } Map apps = new HashMap(); @@ -330,7 +328,7 @@ public class FairScheduler implements ResourceScheduler { // TODO: Not sure if this ever actually adds this to the list of cleanup // containers on the RMNode (see SchedulerNode.releaseContainer()). 
- this.completedContainer(container, status, RMContainerEventType.KILL); + completedContainer(container, status, RMContainerEventType.KILL); toPreempt = Resources.subtract(toPreempt, container.getContainer().getResource()); @@ -413,7 +411,7 @@ public class FairScheduler implements ResourceScheduler { } public RMContainerTokenSecretManager getContainerTokenSecretManager() { - return this.rmContext.getContainerTokenSecretManager(); + return rmContext.getContainerTokenSecretManager(); } public double getAppWeight(AppSchedulable app) { @@ -437,28 +435,28 @@ public class FairScheduler implements ResourceScheduler { @Override public Resource getMinimumResourceCapability() { - return this.minimumAllocation; + return minimumAllocation; } @Override public Resource getMaximumResourceCapability() { - return this.maximumAllocation; + return maximumAllocation; } public double getNodeLocalityThreshold() { - return this.nodeLocalityThreshold; + return nodeLocalityThreshold; } public double getRackLocalityThreshold() { - return this.rackLocalityThreshold; + return rackLocalityThreshold; } public Resource getClusterCapacity() { - return this.clusterCapacity; + return clusterCapacity; } public Clock getClock() { - return this.clock; + return clock; } protected void setClock(Clock clock) { @@ -478,11 +476,11 @@ public class FairScheduler implements ResourceScheduler { addApplication(ApplicationAttemptId applicationAttemptId, String queueName, String user) { - FSQueue queue = this.queueMgr.getQueue(queueName); + FSQueue queue = queueMgr.getQueue(queueName); FSSchedulerApp schedulerApp = new FSSchedulerApp(applicationAttemptId, user, - queue.getQueueSchedulable(), new ActiveUsersManager(this.getRootQueueMetrics()), + queue.getQueueSchedulable(), new ActiveUsersManager(getRootQueueMetrics()), rmContext, null); // Inforce ACLs @@ -553,8 +551,8 @@ public class FairScheduler implements ResourceScheduler { application.stop(rmAppAttemptFinalState); // Inform the queue - FSQueue queue = this.queueMgr.getQueue(application.getQueue().getQueueName()); - queue.removeJob(application); + FSQueue queue = queueMgr.getQueue(application.getQueue().getQueueName()); + queue.removeApp(application); // Remove from our data-structure applications.remove(applicationAttemptId); @@ -600,7 +598,7 @@ public class FairScheduler implements ResourceScheduler { } private synchronized void addNode(RMNode node) { - this.nodes.put(node.getNodeID(), new FSSchedulerNode(node)); + nodes.put(node.getNodeID(), new FSSchedulerNode(node)); Resources.addTo(clusterCapacity, node.getTotalCapability()); LOG.info("Added node " + node.getNodeAddress() + @@ -608,7 +606,7 @@ public class FairScheduler implements ResourceScheduler { } private synchronized void removeNode(RMNode rmNode) { - FSSchedulerNode node = this.nodes.get(rmNode.getNodeID()); + FSSchedulerNode node = nodes.get(rmNode.getNodeID()); Resources.subtractFrom(clusterCapacity, rmNode.getTotalCapability()); // Remove running containers @@ -631,7 +629,7 @@ public class FairScheduler implements ResourceScheduler { RMContainerEventType.KILL); } - this.nodes.remove(rmNode.getNodeID()); + nodes.remove(rmNode.getNodeID()); LOG.info("Removed node " + rmNode.getNodeAddress() + " cluster capacity: " + clusterCapacity); } @@ -669,10 +667,8 @@ public class FairScheduler implements ResourceScheduler { } synchronized (application) { - if (!ask.isEmpty()) { - - if(LOG.isDebugEnabled()) { + if (LOG.isDebugEnabled()) { LOG.debug("allocate: pre-update" + " applicationAttemptId=" + appAttemptId + " application=" 
+ application.getApplicationId()); @@ -686,7 +682,7 @@ public class FairScheduler implements ResourceScheduler { application.showRequests(); } - if(LOG.isDebugEnabled()) { + if (LOG.isDebugEnabled()) { LOG.debug("allocate:" + " applicationAttemptId=" + appAttemptId + " #ask=" + ask.size()); @@ -764,7 +760,7 @@ public class FairScheduler implements ResourceScheduler { int assignedContainers = 0; while (true) { // At most one task is scheduled each iteration of this loop - List scheds = this.getQueueSchedulables(); + List scheds = getQueueSchedulables(); Collections.sort(scheds, new SchedulingAlgorithms.FairShareComparator()); boolean assignedContainer = false; for (FSQueueSchedulable sched : scheds) { @@ -796,11 +792,11 @@ public class FairScheduler implements ResourceScheduler { @Override public SchedulerAppReport getSchedulerAppInfo( ApplicationAttemptId appAttemptId) { - if (!this.applications.containsKey(appAttemptId)) { + if (!applications.containsKey(appAttemptId)) { LOG.error("Request for appInfo of unknown attempt" + appAttemptId); return null; } - return new SchedulerAppReport(this.applications.get(appAttemptId)); + return new SchedulerAppReport(applications.get(appAttemptId)); } @Override @@ -812,37 +808,30 @@ public class FairScheduler implements ResourceScheduler { public void handle(SchedulerEvent event) { switch(event.getType()) { case NODE_ADDED: - { if (!(event instanceof NodeAddedSchedulerEvent)) { throw new RuntimeException("Unexpected event type: " + event); } NodeAddedSchedulerEvent nodeAddedEvent = (NodeAddedSchedulerEvent)event; addNode(nodeAddedEvent.getAddedRMNode()); - } - break; + break; case NODE_REMOVED: - { if (!(event instanceof NodeRemovedSchedulerEvent)) { throw new RuntimeException("Unexpected event type: " + event); } NodeRemovedSchedulerEvent nodeRemovedEvent = (NodeRemovedSchedulerEvent)event; removeNode(nodeRemovedEvent.getRemovedRMNode()); - } - break; + break; case NODE_UPDATE: - { if (!(event instanceof NodeUpdateSchedulerEvent)) { throw new RuntimeException("Unexpected event type: " + event); } NodeUpdateSchedulerEvent nodeUpdatedEvent = (NodeUpdateSchedulerEvent)event; - this.nodeUpdate(nodeUpdatedEvent.getRMNode(), + nodeUpdate(nodeUpdatedEvent.getRMNode(), nodeUpdatedEvent.getNewlyLaunchedContainers(), nodeUpdatedEvent.getCompletedContainers()); - } - break; + break; case APP_ADDED: - { if (!(event instanceof AppAddedSchedulerEvent)) { throw new RuntimeException("Unexpected event type: " + event); } @@ -857,20 +846,16 @@ public class FairScheduler implements ResourceScheduler { addApplication(appAddedEvent.getApplicationAttemptId(), queue, appAddedEvent.getUser()); - } - break; + break; case APP_REMOVED: - { if (!(event instanceof AppRemovedSchedulerEvent)) { throw new RuntimeException("Unexpected event type: " + event); } AppRemovedSchedulerEvent appRemovedEvent = (AppRemovedSchedulerEvent)event; - this.removeApplication(appRemovedEvent.getApplicationAttemptID(), + removeApplication(appRemovedEvent.getApplicationAttemptID(), appRemovedEvent.getFinalAttemptState()); - } - break; + break; case CONTAINER_EXPIRED: - { if (!(event instanceof ContainerExpiredSchedulerEvent)) { throw new RuntimeException("Unexpected event type: " + event); } @@ -882,8 +867,7 @@ public class FairScheduler implements ResourceScheduler { containerId, SchedulerUtils.EXPIRED_CONTAINER), RMContainerEventType.EXPIRE); - } - break; + break; default: LOG.error("Unknown event arrived at FairScheduler: " + event.toString()); } @@ -897,9 +881,9 @@ public class FairScheduler 
implements ResourceScheduler { @Override public synchronized void reinitialize(Configuration conf, RMContext rmContext) throws IOException { - if (!this.initialized) { + if (!initialized) { this.conf = new FairSchedulerConfiguration(conf); - this.rootMetrics = QueueMetrics.forQueue("root", null, true, conf); + rootMetrics = QueueMetrics.forQueue("root", null, true, conf); this.rmContext = rmContext; this.clock = new SystemClock(); this.eventLog = new FairSchedulerEventLog(); @@ -973,7 +957,7 @@ public class FairScheduler implements ResourceScheduler { @Override public int getNumClusterNodes() { - return this.nodes.size(); + return nodes.size(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/NewJobWeightBooster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/NewAppWeightBooster.java similarity index 97% rename from hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/NewJobWeightBooster.java rename to hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/NewAppWeightBooster.java index c643027904a..e77eed79568 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/NewJobWeightBooster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/NewAppWeightBooster.java @@ -31,7 +31,7 @@ import org.apache.hadoop.conf.Configured; */ @Private @Unstable -public class NewJobWeightBooster extends Configured implements WeightAdjuster { +public class NewAppWeightBooster extends Configured implements WeightAdjuster { private static final float DEFAULT_FACTOR = 3; private static final long DEFAULT_DURATION = 5 * 60 * 1000; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java index c765e7f7dab..0395eaad5c7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java @@ -202,7 +202,7 @@ public class QueueManager { * Get the queue for a given AppSchedulable. */ public FSQueue getQueueForApp(AppSchedulable app) { - return this.getQueue(app.getApp().getQueueName()); + return getQueue(app.getApp().getQueueName()); } /** @@ -388,7 +388,7 @@ public class QueueManager { // Commit the reload; also create any queue defined in the alloc file // if it does not already exist, so it can be displayed on the web UI. 
- synchronized(this) { + synchronized (this) { setMinResources(minQueueResources); setMaxResources(maxQueueResources); setQueueMaxApps(queueMaxApps); @@ -431,14 +431,14 @@ public class QueueManager { synchronized(minQueueResourcesMO) { if (minQueueResources.containsKey(queue)) { return minQueueResources.get(queue); - } else{ + } else { return Resources.createResource(0); } } } private void setMinResources(Map resources) { - synchronized(minQueueResourcesMO) { + synchronized (minQueueResourcesMO) { minQueueResources = resources; } } @@ -457,7 +457,7 @@ public class QueueManager { } private void setMaxResources(Map resources) { - synchronized(maxQueueResourcesMO) { + synchronized (maxQueueResourcesMO) { maxQueueResources = resources; } } @@ -472,8 +472,8 @@ public class QueueManager { /** * Remove an app */ - public synchronized void removeJob(FSSchedulerApp app) { - getQueue(app.getQueueName()).removeJob(app); + public synchronized void removeApp(FSSchedulerApp app) { + getQueue(app.getQueueName()).removeApp(app); } /** @@ -543,7 +543,7 @@ public class QueueManager { } private int getQueueMaxAppsDefault(){ - synchronized(queueMaxAppsDefaultMO) { + synchronized (queueMaxAppsDefaultMO) { return queueMaxAppsDefault; } } @@ -575,11 +575,12 @@ public class QueueManager { queueWeights = weights; } } + /** - * Get a queue's min share preemption timeout, in milliseconds. This is the - * time after which jobs in the queue may kill other queues' tasks if they - * are below their min share. - */ + * Get a queue's min share preemption timeout, in milliseconds. This is the + * time after which jobs in the queue may kill other queues' tasks if they + * are below their min share. + */ public long getMinSharePreemptionTimeout(String queueName) { synchronized (minSharePreemptionTimeoutsMO) { if (minSharePreemptionTimeouts.containsKey(queueName)) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/WeightAdjuster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/WeightAdjuster.java index 2fa71badbc7..1a9467fc003 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/WeightAdjuster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/WeightAdjuster.java @@ -24,7 +24,7 @@ import org.apache.hadoop.conf.Configurable; /** * A pluggable object for altering the weights of apps in the fair scheduler, - * which is used for example by {@link NewJobWeightBooster} to give higher + * which is used for example by {@link NewAppWeightBooster} to give higher * weight to new jobs so that short jobs finish faster. * * May implement {@link Configurable} to access configuration parameters. 
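The WeightAdjuster javadoc above describes a pluggable hook, and NewAppWeightBooster (renamed from NewJobWeightBooster in this patch) is its stock implementation. As a hedged illustration of the idea only, not the real plug-in interface, the sketch below multiplies an app's weight by a factor while the app is younger than a configured duration; the default factor of 3 and five-minute duration match the DEFAULT_FACTOR and DEFAULT_DURATION constants visible in the renamed class.

// Illustrative sketch of the new-app weight boost; not the real WeightAdjuster API.
public class NewAppBoostSketch {
  // Defaults mirror NewAppWeightBooster: boost factor 3 for the first 5 minutes.
  private static final double DEFAULT_FACTOR = 3;
  private static final long DEFAULT_DURATION_MS = 5 * 60 * 1000L;

  private final double factor;
  private final long durationMs;

  public NewAppBoostSketch() {
    this(DEFAULT_FACTOR, DEFAULT_DURATION_MS);
  }

  public NewAppBoostSketch(double factor, long durationMs) {
    this.factor = factor;
    this.durationMs = durationMs;
  }

  /** Boost the weight of apps that started less than durationMs ago. */
  public double adjustWeight(long appStartTimeMs, double currentWeight) {
    long age = System.currentTimeMillis() - appStartTimeMs;
    return age < durationMs ? currentWeight * factor : currentWeight;
  }
}

Boosting young apps raises their fair share temporarily, which is how short apps finish faster without permanently starving long-running ones.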
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java index 349de1e44c8..ca640b39e59 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java @@ -81,38 +81,38 @@ public class TestApplicationCleanup { new ArrayList()).getAllocatedContainers(); int contReceived = conts.size(); int waitCount = 0; - while (contReceived < request && waitCount++ < 20) { + while (contReceived < request && waitCount++ < 200) { + LOG.info("Got " + contReceived + " containers. Waiting to get " + + request); + Thread.sleep(100); conts = am.allocate(new ArrayList(), new ArrayList()).getAllocatedContainers(); contReceived += conts.size(); - LOG.info("Got " + contReceived + " containers. Waiting to get " - + request); - Thread.sleep(2000); } - Assert.assertEquals(request, conts.size()); + Assert.assertEquals(request, contReceived); am.unregisterAppAttempt(); HeartbeatResponse resp = nm1.nodeHeartbeat(attempt.getAppAttemptId(), 1, ContainerState.COMPLETE); am.waitForState(RMAppAttemptState.FINISHED); - int cleanedConts = 0; - int cleanedApps = 0; - List contsToClean = null; - List apps = null; - //currently only containers are cleaned via this //AM container is cleaned via container launcher + resp = nm1.nodeHeartbeat(true); + List contsToClean = resp.getContainersToCleanupList(); + List apps = resp.getApplicationsToCleanupList(); + int cleanedConts = contsToClean.size(); + int cleanedApps = apps.size(); waitCount = 0; - while ((cleanedConts < 2 || cleanedApps < 1) && waitCount++ < 20) { - contsToClean = resp.getContainersToCleanupList(); - apps = resp.getApplicationsToCleanupList(); + while ((cleanedConts < 2 || cleanedApps < 1) && waitCount++ < 200) { LOG.info("Waiting to get cleanup events.. cleanedConts: " + cleanedConts + " cleanedApps: " + cleanedApps); + Thread.sleep(100); + resp = nm1.nodeHeartbeat(true); + contsToClean = resp.getContainersToCleanupList(); + apps = resp.getApplicationsToCleanupList(); cleanedConts += contsToClean.size(); cleanedApps += apps.size(); - Thread.sleep(1000); - resp = nm1.nodeHeartbeat(true); } Assert.assertEquals(1, apps.size()); @@ -170,20 +170,20 @@ public class TestApplicationCleanup { new ArrayList()).getAllocatedContainers(); int contReceived = conts.size(); int waitCount = 0; - while (contReceived < request && waitCount++ < 20) { + while (contReceived < request && waitCount++ < 200) { + LOG.info("Got " + contReceived + " containers. Waiting to get " + + request); + Thread.sleep(100); conts = am.allocate(new ArrayList(), new ArrayList()).getAllocatedContainers(); dispatcher.await(); contReceived += conts.size(); - LOG.info("Got " + contReceived + " containers. Waiting to get " - + request); - Thread.sleep(2000); } - Assert.assertEquals(request, conts.size()); + Assert.assertEquals(request, contReceived); // Release a container. 
ArrayList release = new ArrayList(); - release.add(conts.get(1).getId()); + release.add(conts.get(0).getId()); am.allocate(new ArrayList(), release); dispatcher.await(); @@ -194,7 +194,7 @@ public class TestApplicationCleanup { new HashMap>(); ArrayList containerStatusList = new ArrayList(); - containerStatusList.add(BuilderUtils.newContainerStatus(conts.get(1) + containerStatusList.add(BuilderUtils.newContainerStatus(conts.get(0) .getId(), ContainerState.RUNNING, "nothing", 0)); containerStatuses.put(app.getApplicationId(), containerStatusList); @@ -203,13 +203,13 @@ public class TestApplicationCleanup { List contsToClean = resp.getContainersToCleanupList(); int cleanedConts = contsToClean.size(); waitCount = 0; - while (cleanedConts < 1 && waitCount++ < 20) { + while (cleanedConts < 1 && waitCount++ < 200) { + LOG.info("Waiting to get cleanup events.. cleanedConts: " + cleanedConts); + Thread.sleep(100); resp = nm1.nodeHeartbeat(true); dispatcher.await(); contsToClean = resp.getContainersToCleanupList(); - LOG.info("Waiting to get cleanup events.. cleanedConts: " + cleanedConts); cleanedConts += contsToClean.size(); - Thread.sleep(1000); } LOG.info("Got cleanup for " + contsToClean.get(0)); Assert.assertEquals(1, cleanedConts); @@ -220,7 +220,7 @@ public class TestApplicationCleanup { + "NM getting cleanup"); containerStatuses.clear(); containerStatusList.clear(); - containerStatusList.add(BuilderUtils.newContainerStatus(conts.get(1) + containerStatusList.add(BuilderUtils.newContainerStatus(conts.get(0) .getId(), ContainerState.RUNNING, "nothing", 0)); containerStatuses.put(app.getApplicationId(), containerStatusList); @@ -231,13 +231,13 @@ public class TestApplicationCleanup { // The cleanup list won't be instantaneous as it is given out by scheduler // and not RMNodeImpl. waitCount = 0; - while (cleanedConts < 1 && waitCount++ < 20) { + while (cleanedConts < 1 && waitCount++ < 200) { + LOG.info("Waiting to get cleanup events.. cleanedConts: " + cleanedConts); + Thread.sleep(100); resp = nm1.nodeHeartbeat(true); dispatcher.await(); contsToClean = resp.getContainersToCleanupList(); - LOG.info("Waiting to get cleanup events.. 
cleanedConts: " + cleanedConts); cleanedConts += contsToClean.size(); - Thread.sleep(1000); } LOG.info("Got cleanup for " + contsToClean.get(0)); Assert.assertEquals(1, cleanedConts); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index f9d6c60479f..868c2bbce87 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -1259,7 +1259,7 @@ public class TestLeafQueue { assignment = a.assignContainers(clusterResource, node_2); verify(app_0).allocate(eq(NodeType.OFF_SWITCH), eq(node_2), any(Priority.class), any(ResourceRequest.class), any(Container.class)); - assertEquals(0, app_0.getSchedulingOpportunities(priority)); // should reset + assertEquals(4, app_0.getSchedulingOpportunities(priority)); // should NOT reset assertEquals(2, app_0.getTotalRequiredResources(priority)); assertEquals(NodeType.OFF_SWITCH, assignment.getType()); @@ -1408,11 +1408,11 @@ public class TestLeafQueue { assertEquals(0, app_0.getSchedulingOpportunities(priority_2)); assertEquals(1, app_0.getTotalRequiredResources(priority_2)); - // Another off-switch, shouldn allocate OFF_SWITCH P1 + // Another off-switch, shouldn't allocate OFF_SWITCH P1 a.assignContainers(clusterResource, node_2); verify(app_0).allocate(eq(NodeType.OFF_SWITCH), eq(node_2), eq(priority_1), any(ResourceRequest.class), any(Container.class)); - assertEquals(0, app_0.getSchedulingOpportunities(priority_1)); + assertEquals(3, app_0.getSchedulingOpportunities(priority_1)); assertEquals(1, app_0.getTotalRequiredResources(priority_1)); verify(app_0, never()).allocate(any(NodeType.class), eq(node_2), eq(priority_2), any(ResourceRequest.class), any(Container.class)); @@ -1438,7 +1438,7 @@ public class TestLeafQueue { assertEquals(0, app_0.getTotalRequiredResources(priority_1)); verify(app_0).allocate(eq(NodeType.OFF_SWITCH), eq(node_1), eq(priority_2), any(ResourceRequest.class), any(Container.class)); - assertEquals(0, app_0.getSchedulingOpportunities(priority_2)); + assertEquals(1, app_0.getSchedulingOpportunities(priority_2)); assertEquals(0, app_0.getTotalRequiredResources(priority_2)); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/index.apt.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/index.apt.vm index badd9155509..7215172b7f1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/index.apt.vm +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/index.apt.vm @@ -49,7 +49,7 @@ MapReduce NextGen aka YARN aka MRv2 * {{{./WebApplicationProxy.html}Web Application Proxy}} - * {{{./CLIMiniCluster.html}CLI MiniCluster}} + * {{{../../hadoop-project-dist/hadoop-common/CLIMiniCluster.html}CLI MiniCluster}} - * {{{./EncryptedShuffle.html}Encrypted Shuffle}} + * {{{../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/EncryptedShuffle.html}Encrypted Shuffle}}