Merge r1406415 through r1407703 from trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1407706 13f79535-47bb-0310-9956-ffa450edef68
commit 40fe1ffbaa
@@ -272,10 +272,17 @@ Trunk (Unreleased)
 HADOOP-8918. test-patch.sh is parsing modified files wrong.
 (Raja Aluri via suresh)

+HADOOP-8589 ViewFs tests fail when tests and home dirs are nested.
+(sanjay Radia)
+
+HADOOP-8974. TestDFVariations fails on Windows. (Chris Nauroth via suresh)
+
 OPTIMIZATIONS

 HADOOP-7761. Improve the performance of raw comparisons. (todd)

+HADOOP-8589 ViewFs tests fail when tests and home dirs are nested (sanjay Radia)
+
 Release 2.0.3-alpha - Unreleased

 INCOMPATIBLE CHANGES
@@ -285,6 +292,8 @@ Release 2.0.3-alpha - Unreleased
 HADOOP-8597. Permit FsShell's text command to read Avro files.
 (Ivan Vladimirov Ivanov via cutting)

+HADOOP-9020. Add a SASL PLAIN server (daryn via bobby)
+
 IMPROVEMENTS

 HADOOP-8789. Tests setLevel(Level.OFF) should be Level.ERROR.
@@ -342,6 +351,16 @@ Release 2.0.3-alpha - Unreleased
 HADOOP-9010. Map UGI authenticationMethod to RPC authMethod (daryn via
 bobby)

+HADOOP-9013. UGI should not hardcode loginUser's authenticationType (daryn
+via bobby)
+
+HADOOP-9014. Standardize creation of SaslRpcClients (daryn via bobby)
+
+HADOOP-9015. Standardize creation of SaslRpcServers (daryn via bobby)
+
+HADOOP-8860. Split MapReduce and YARN sections in documentation navigation.
+(tomwhite via tucu)
+
 OPTIMIZATIONS

 HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang
@@ -400,6 +419,8 @@ Release 2.0.3-alpha - Unreleased

 HADOOP-9012. IPC Client sends wrong connection context (daryn via bobby)

+HADOOP-7115. Add a cache for getpwuid_r and getpwgid_r calls (tucu)
+
 Release 2.0.2-alpha - 2012-09-07

 INCOMPATIBLE CHANGES
@@ -99,6 +99,13 @@ log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
 log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
 log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n

+#
+# HDFS block state change log from block manager
+#
+# Uncomment the following to suppress normal block state change
+# messages from BlockManager in NameNode.
+#log4j.logger.BlockStateChange=WARN
+
 #
 #Security appender
 #
@@ -184,5 +184,11 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   */
  public static final String KERBEROS_TICKET_CACHE_PATH =
    "hadoop.security.kerberos.ticket.cache.path";
-}

+  public static final String HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_KEY =
+    "hadoop.security.uid.cache.secs";
+
+  public static final long HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_DEFAULT =
+    4*60*60; // 4 hours
+
+}
@@ -125,6 +125,11 @@ public FsStatus getFsStatus() throws IOException {
  public FsServerDefaults getServerDefaults() throws IOException {
    return fsImpl.getServerDefaults();
  }

+  @Override
+  public Path getHomeDirectory() {
+    return fsImpl.getHomeDirectory();
+  }
+
  @Override
  public int getUriDefaultPort() {
@@ -153,12 +153,6 @@ public Path getResolvedQualifiedPath(final Path f)
    return makeQualified(
        new Path(chRootPathPartString + f.toUri().toString()));
  }

-  @Override
-  public Path getHomeDirectory() {
-    return new Path("/user/"+System.getProperty("user.name")).makeQualified(
-        getUri(), null);
-  }
-
  @Override
  public Path getWorkingDirectory() {
@@ -256,8 +256,9 @@ public Path getHomeDirectory() {
      if (base == null) {
        base = "/user";
      }
-      homeDir =
-        this.makeQualified(new Path(base + "/" + ugi.getShortUserName()));
+      homeDir = (base.equals("/") ?
+        this.makeQualified(new Path(base + ugi.getShortUserName())):
+        this.makeQualified(new Path(base + "/" + ugi.getShortUserName())));
    }
    return homeDir;
  }
@@ -248,8 +248,9 @@ public Path getHomeDirectory() {
      if (base == null) {
        base = "/user";
      }
-      homeDir =
-        this.makeQualified(new Path(base + "/" + ugi.getShortUserName()));
+      homeDir = (base.equals("/") ?
+        this.makeQualified(new Path(base + ugi.getShortUserName())):
+        this.makeQualified(new Path(base + "/" + ugi.getShortUserName())));
    }
    return homeDir;
  }
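Illustration (not part of the commit): the two getHomeDirectory() hunks above apply the same fix in the ViewFileSystem and ViewFs implementations — when the configured home-directory base is the root path "/", naively appending "/" plus the user name would produce a doubled slash such as "//alice". A minimal, self-contained Java sketch of the same conditional, using plain strings instead of the actual Path/UGI classes:

public class HomeDirBaseExample {
  // Mirrors the ternary in the patch: only insert a separator when the
  // configured base is not already the root path. "alice" stands in for
  // ugi.getShortUserName(); the real code wraps the result in a Path and
  // qualifies it against the file system.
  static String homeDir(String base, String user) {
    return base.equals("/") ? base + user : base + "/" + user;
  }

  public static void main(String[] args) {
    System.out.println(homeDir("/", "alice"));     // "/alice", not "//alice"
    System.out.println(homeDir("/user", "alice")); // "/user/alice"
  }
}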
@@ -120,7 +120,7 @@ static FileInputStream forceSecureOpenForRead(File f, String expectedOwner,
    FileInputStream fis = new FileInputStream(f);
    boolean success = false;
    try {
-      Stat stat = NativeIO.fstat(fis.getFD());
+      Stat stat = NativeIO.getFstat(fis.getFD());
      checkStat(f, stat.getOwner(), stat.getGroup(), expectedOwner,
          expectedGroup);
      success = true;
@@ -19,8 +19,13 @@

import java.io.FileDescriptor;
import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.util.NativeCodeLoader;

import org.apache.commons.logging.Log;
@@ -30,6 +35,8 @@
 * These functions should generally be used alongside a fallback to another
 * more portable mechanism.
 */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
public class NativeIO {
  // Flags for open() call from bits/fcntl.h
  public static final int O_RDONLY = 00;
@@ -86,6 +93,8 @@ public class NativeIO {
      "hadoop.workaround.non.threadsafe.getpwuid";
  static final boolean WORKAROUND_NON_THREADSAFE_CALLS_DEFAULT = false;

+  private static long cacheTimeout = -1;
+
  static {
    if (NativeCodeLoader.isNativeCodeLoaded()) {
      try {
@@ -96,6 +105,14 @@ public class NativeIO {

        initNative();
        nativeLoaded = true;

+        cacheTimeout = conf.getLong(
+          CommonConfigurationKeys.HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_KEY,
+          CommonConfigurationKeys.HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_DEFAULT) *
+          1000;
+        LOG.debug("Initialized cache for IDs to User/Group mapping with a" +
+          " cache timeout of " + cacheTimeout/1000 + " seconds.");
+
      } catch (Throwable t) {
        // This can happen if the user has an older version of libhadoop.so
        // installed - in this case we can continue without native IO
@@ -115,7 +132,7 @@ public static boolean isAvailable() {
  /** Wrapper around open(2) */
  public static native FileDescriptor open(String path, int flags, int mode) throws IOException;
  /** Wrapper around fstat(2) */
-  public static native Stat fstat(FileDescriptor fd) throws IOException;
+  private static native Stat fstat(FileDescriptor fd) throws IOException;
  /** Wrapper around chmod(2) */
  public static native void chmod(String path, int mode) throws IOException;
@@ -176,6 +193,7 @@ public static void syncFileRangeIfPossible(
   * Result type of the fstat call
   */
  public static class Stat {
+    private int ownerId, groupId;
    private String owner, group;
    private int mode;
@@ -196,9 +214,9 @@ public static class Stat {
    public static final int S_IWUSR = 0000200;  /* write permission, owner */
    public static final int S_IXUSR = 0000100;  /* execute/search permission, owner */

-    Stat(String owner, String group, int mode) {
-      this.owner = owner;
-      this.group = group;
+    Stat(int ownerId, int groupId, int mode) {
+      this.ownerId = ownerId;
+      this.groupId = groupId;
      this.mode = mode;
    }
@@ -218,4 +236,61 @@ public int getMode() {
      return mode;
    }
  }

+  static native String getUserName(int uid) throws IOException;
+
+  static native String getGroupName(int uid) throws IOException;
+
+  private static class CachedName {
+    final long timestamp;
+    final String name;
+
+    public CachedName(String name, long timestamp) {
+      this.name = name;
+      this.timestamp = timestamp;
+    }
+  }
+
+  private static final Map<Integer, CachedName> USER_ID_NAME_CACHE =
+    new ConcurrentHashMap<Integer, CachedName>();
+
+  private static final Map<Integer, CachedName> GROUP_ID_NAME_CACHE =
+    new ConcurrentHashMap<Integer, CachedName>();
+
+  private enum IdCache { USER, GROUP }
+
+  private static String getName(IdCache domain, int id) throws IOException {
+    Map<Integer, CachedName> idNameCache = (domain == IdCache.USER)
+      ? USER_ID_NAME_CACHE : GROUP_ID_NAME_CACHE;
+    String name;
+    CachedName cachedName = idNameCache.get(id);
+    long now = System.currentTimeMillis();
+    if (cachedName != null && (cachedName.timestamp + cacheTimeout) > now) {
+      name = cachedName.name;
+    } else {
+      name = (domain == IdCache.USER) ? getUserName(id) : getGroupName(id);
+      if (LOG.isDebugEnabled()) {
+        String type = (domain == IdCache.USER) ? "UserName" : "GroupName";
+        LOG.debug("Got " + type + " " + name + " for ID " + id +
+          " from the native implementation");
+      }
+      cachedName = new CachedName(name, now);
+      idNameCache.put(id, cachedName);
+    }
+    return name;
+  }
+
+  /**
+   * Returns the file stat for a file descriptor.
+   *
+   * @param fd file descriptor.
+   * @return the file descriptor file stat.
+   * @throws IOException thrown if there was an IO error while obtaining the file stat.
+   */
+  public static Stat getFstat(FileDescriptor fd) throws IOException {
+    Stat stat = fstat(fd);
+    stat.owner = getName(IdCache.USER, stat.ownerId);
+    stat.group = getName(IdCache.GROUP, stat.groupId);
+    return stat;
+  }
}
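Illustration (not part of the commit): the NativeIO hunks above replace the String-based Stat constructor with numeric uid/gid fields and resolve names lazily through the new getUserName()/getGroupName() natives, caching each result for hadoop.security.uid.cache.secs. A small sketch of how a caller might exercise the new getFstat() path, mirroring the SecureIOUtils change earlier; the file path is illustrative only:

import java.io.FileInputStream;
import java.io.IOException;

import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIO.Stat;

public class GetFstatExample {
  public static void main(String[] args) throws IOException {
    if (!NativeIO.isAvailable()) {
      System.err.println("libhadoop is not loaded; native fstat is unavailable");
      return;
    }
    FileInputStream fis = new FileInputStream("/etc/hosts"); // illustrative path
    try {
      // The first call resolves uid/gid to names via the native
      // getUserName()/getGroupName(); later calls within
      // hadoop.security.uid.cache.secs are served from the in-memory cache.
      Stat stat = NativeIO.getFstat(fis.getFD());
      System.out.println(stat.getOwner() + ":" + stat.getGroup()
          + " mode=" + Integer.toOctalString(stat.getMode()));
    } finally {
      fis.close();
    }
  }
}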
@@ -57,6 +57,7 @@
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.LinkedBlockingQueue;

+import javax.security.auth.callback.CallbackHandler;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslException;
import javax.security.sasl.SaslServer;
@@ -87,6 +88,7 @@
import org.apache.hadoop.security.SaslRpcServer.SaslGssCallbackHandler;
import org.apache.hadoop.security.SaslRpcServer.SaslStatus;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.ProxyUsers;
@@ -1078,7 +1080,6 @@ public class Connection {

    IpcConnectionContextProto connectionContext;
    String protocolName;
-    boolean useSasl;
    SaslServer saslServer;
    private AuthMethod authMethod;
    private boolean saslContextEstablished;
@@ -1194,49 +1195,6 @@ private void saslReadAndProcess(byte[] saslToken) throws IOException,
      if (!saslContextEstablished) {
        byte[] replyToken = null;
        try {
-          if (saslServer == null) {
-            switch (authMethod) {
-            case DIGEST:
-              if (secretManager == null) {
-                throw new AccessControlException(
-                    "Server is not configured to do DIGEST authentication.");
-              }
-              secretManager.checkAvailableForRead();
-              saslServer = Sasl.createSaslServer(AuthMethod.DIGEST
-                  .getMechanismName(), null, SaslRpcServer.SASL_DEFAULT_REALM,
-                  SaslRpcServer.SASL_PROPS, new SaslDigestCallbackHandler(
-                      secretManager, this));
-              break;
-            default:
-              UserGroupInformation current = UserGroupInformation
-                  .getCurrentUser();
-              String fullName = current.getUserName();
-              if (LOG.isDebugEnabled())
-                LOG.debug("Kerberos principal name is " + fullName);
-              final String names[] = SaslRpcServer.splitKerberosName(fullName);
-              if (names.length != 3) {
-                throw new AccessControlException(
-                    "Kerberos principal name does NOT have the expected "
-                        + "hostname part: " + fullName);
-              }
-              current.doAs(new PrivilegedExceptionAction<Object>() {
-                @Override
-                public Object run() throws SaslException {
-                  saslServer = Sasl.createSaslServer(AuthMethod.KERBEROS
-                      .getMechanismName(), names[0], names[1],
-                      SaslRpcServer.SASL_PROPS, new SaslGssCallbackHandler());
-                  return null;
-                }
-              });
-            }
-            if (saslServer == null)
-              throw new AccessControlException(
-                  "Unable to find SASL server implementation for "
-                      + authMethod.getMechanismName());
-            if (LOG.isDebugEnabled())
-              LOG.debug("Created SASL server with mechanism = "
-                  + authMethod.getMechanismName());
-          }
          if (LOG.isDebugEnabled())
            LOG.debug("Have read input token of size " + saslToken.length
                + " for processing by saslServer.evaluateResponse()");
@@ -1375,38 +1333,27 @@ public int readAndProcess() throws IOException, InterruptedException {
        dataLengthBuffer.clear();
        if (authMethod == null) {
          throw new IOException("Unable to read authentication method");
        }
+        boolean useSaslServer = isSecurityEnabled;
        final boolean clientUsingSasl;
        switch (authMethod) {
          case SIMPLE: { // no sasl for simple
-            if (isSecurityEnabled) {
-              AccessControlException ae = new AccessControlException("Authorization ("
-                  + CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION
-                  + ") is enabled but authentication ("
-                  + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION
-                  + ") is configured as simple. Please configure another method "
-                  + "like kerberos or digest.");
-              setupResponse(authFailedResponse, authFailedCall, RpcStatusProto.FATAL,
-                  null, ae.getClass().getName(), ae.getMessage());
-              responder.doRespond(authFailedCall);
-              throw ae;
-            }
            clientUsingSasl = false;
-            useSasl = false;
            break;
          }
-          case DIGEST: {
+          case DIGEST: { // always allow tokens if there's a secret manager
+            useSaslServer |= (secretManager != null);
            clientUsingSasl = true;
-            useSasl = (secretManager != null);
            break;
          }
          default: {
            clientUsingSasl = true;
-            useSasl = isSecurityEnabled;
            break;
          }
        }
-        if (clientUsingSasl && !useSasl) {
+        if (useSaslServer) {
+          saslServer = createSaslServer(authMethod);
+        } else if (clientUsingSasl) { // security is off
          doSaslReply(SaslStatus.SUCCESS, new IntWritable(
              SaslRpcServer.SWITCH_TO_SIMPLE_AUTH), null, null);
          authMethod = AuthMethod.SIMPLE;
@@ -1448,7 +1395,7 @@ public int readAndProcess() throws IOException, InterruptedException {
          continue;
        }
        boolean isHeaderRead = connectionContextRead;
-        if (useSasl) {
+        if (saslServer != null) {
          saslReadAndProcess(data.array());
        } else {
          processOneRpc(data.array());
@@ -1462,6 +1409,84 @@ public int readAndProcess() throws IOException, InterruptedException {
      }
    }

+    private SaslServer createSaslServer(AuthMethod authMethod)
+        throws IOException {
+      try {
+        return createSaslServerInternal(authMethod);
+      } catch (IOException ioe) {
+        final String ioeClass = ioe.getClass().getName();
+        final String ioeMessage = ioe.getLocalizedMessage();
+        if (authMethod == AuthMethod.SIMPLE) {
+          setupResponse(authFailedResponse, authFailedCall,
+              RpcStatusProto.FATAL, null, ioeClass, ioeMessage);
+          responder.doRespond(authFailedCall);
+        } else {
+          doSaslReply(SaslStatus.ERROR, null, ioeClass, ioeMessage);
+        }
+        throw ioe;
+      }
+    }
+
+    private SaslServer createSaslServerInternal(AuthMethod authMethod)
+        throws IOException {
+      SaslServer saslServer = null;
+      String hostname = null;
+      String saslProtocol = null;
+      CallbackHandler saslCallback = null;
+
+      switch (authMethod) {
+        case SIMPLE: {
+          throw new AccessControlException("Authorization ("
+              + CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION
+              + ") is enabled but authentication ("
+              + CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION
+              + ") is configured as simple. Please configure another method "
+              + "like kerberos or digest.");
+        }
+        case DIGEST: {
+          if (secretManager == null) {
+            throw new AccessControlException(
+                "Server is not configured to do DIGEST authentication.");
+          }
+          secretManager.checkAvailableForRead();
+          hostname = SaslRpcServer.SASL_DEFAULT_REALM;
+          saslCallback = new SaslDigestCallbackHandler(secretManager, this);
+          break;
+        }
+        case KERBEROS: {
+          String fullName = UserGroupInformation.getCurrentUser().getUserName();
+          if (LOG.isDebugEnabled())
+            LOG.debug("Kerberos principal name is " + fullName);
+          KerberosName krbName = new KerberosName(fullName);
+          hostname = krbName.getHostName();
+          if (hostname == null) {
+            throw new AccessControlException(
+                "Kerberos principal name does NOT have the expected "
+                    + "hostname part: " + fullName);
+          }
+          saslProtocol = krbName.getServiceName();
+          saslCallback = new SaslGssCallbackHandler();
+          break;
+        }
+        default:
+          throw new AccessControlException(
+              "Server does not support SASL " + authMethod);
+      }
+
+      String mechanism = authMethod.getMechanismName();
+      saslServer = Sasl.createSaslServer(
+          mechanism, saslProtocol, hostname,
+          SaslRpcServer.SASL_PROPS, saslCallback);
+      if (saslServer == null) {
+        throw new AccessControlException(
+            "Unable to find SASL server implementation for " + mechanism);
+      }
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Created SASL server with mechanism = " + mechanism);
+      }
+      return saslServer;
+    }
+
    /**
     * Try to set up the response to indicate that the client version
     * is incompatible with the server. This can contain special-case
@@ -1523,7 +1548,7 @@ private void processConnectionContext(byte[] buf) throws IOException {
          .getProtocol() : null;

      UserGroupInformation protocolUser = ProtoUtil.getUgi(connectionContext);
-      if (!useSasl) {
+      if (saslServer == null) {
        user = protocolUser;
        if (user != null) {
          user.setAuthenticationMethod(AuthMethod.SIMPLE);
@@ -1999,7 +2024,7 @@ private void setupResponseOldVersionFatal(ByteArrayOutputStream response,

  private void wrapWithSasl(ByteArrayOutputStream response, Call call)
      throws IOException {
-    if (call.connection.useSasl) {
+    if (call.connection.saslServer != null) {
      byte[] token = response.toByteArray();
      // synchronization may be needed since there can be multiple Handler
      // threads using saslServer to wrap responses.
@@ -0,0 +1,159 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.security;
+
+import java.security.Provider;
+import java.util.Map;
+
+import javax.security.auth.callback.*;
+import javax.security.sasl.AuthorizeCallback;
+import javax.security.sasl.Sasl;
+import javax.security.sasl.SaslException;
+import javax.security.sasl.SaslServer;
+import javax.security.sasl.SaslServerFactory;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class SaslPlainServer implements SaslServer {
+  @SuppressWarnings("serial")
+  public static class SecurityProvider extends Provider {
+    public SecurityProvider() {
+      super("SaslPlainServer", 1.0, "SASL PLAIN Authentication Server");
+      put("SaslServerFactory.PLAIN",
+          SaslPlainServerFactory.class.getName());
+    }
+  }
+
+  public static class SaslPlainServerFactory implements SaslServerFactory {
+    @Override
+    public SaslServer createSaslServer(String mechanism, String protocol,
+        String serverName, Map<String,?> props, CallbackHandler cbh)
+        throws SaslException {
+      return "PLAIN".equals(mechanism) ? new SaslPlainServer(cbh) : null;
+    }
+    @Override
+    public String[] getMechanismNames(Map<String,?> props){
+      return (props == null) || "false".equals(props.get(Sasl.POLICY_NOPLAINTEXT))
+          ? new String[]{"PLAIN"}
+          : new String[0];
+    }
+  }
+
+  private CallbackHandler cbh;
+  private boolean completed;
+  private String authz;
+
+  SaslPlainServer(CallbackHandler callback) {
+    this.cbh = callback;
+  }
+
+  @Override
+  public String getMechanismName() {
+    return "PLAIN";
+  }
+
+  @Override
+  public byte[] evaluateResponse(byte[] response) throws SaslException {
+    if (completed) {
+      throw new IllegalStateException("PLAIN authentication has completed");
+    }
+    if (response == null) {
+      throw new IllegalArgumentException("Received null response");
+    }
+    try {
+      String payload;
+      try {
+        payload = new String(response, "UTF-8");
+      } catch (Exception e) {
+        throw new IllegalArgumentException("Received corrupt response", e);
+      }
+      // [ authz, authn, password ]
+      String[] parts = payload.split("\u0000", 3);
+      if (parts.length != 3) {
+        throw new IllegalArgumentException("Received corrupt response");
+      }
+      if (parts[0].isEmpty()) { // authz = authn
+        parts[0] = parts[1];
+      }
+
+      NameCallback nc = new NameCallback("SASL PLAIN");
+      nc.setName(parts[1]);
+      PasswordCallback pc = new PasswordCallback("SASL PLAIN", false);
+      pc.setPassword(parts[2].toCharArray());
+      AuthorizeCallback ac = new AuthorizeCallback(parts[1], parts[0]);
+      cbh.handle(new Callback[]{nc, pc, ac});
+      if (ac.isAuthorized()) {
+        authz = ac.getAuthorizedID();
+      }
+    } catch (Exception e) {
+      throw new SaslException("PLAIN auth failed: " + e.getMessage());
+    } finally {
+      completed = true;
+    }
+    return null;
+  }
+
+  private void throwIfNotComplete() {
+    if (!completed) {
+      throw new IllegalStateException("PLAIN authentication not completed");
+    }
+  }
+
+  @Override
+  public boolean isComplete() {
+    return completed;
+  }
+
+  @Override
+  public String getAuthorizationID() {
+    throwIfNotComplete();
+    return authz;
+  }
+
+  @Override
+  public Object getNegotiatedProperty(String propName) {
+    throwIfNotComplete();
+    return Sasl.QOP.equals(propName) ? "auth" : null;
+  }
+
+  @Override
+  public byte[] wrap(byte[] outgoing, int offset, int len)
+      throws SaslException {
+    throwIfNotComplete();
+    throw new IllegalStateException(
+        "PLAIN supports neither integrity nor privacy");
+  }
+
+  @Override
+  public byte[] unwrap(byte[] incoming, int offset, int len)
+      throws SaslException {
+    throwIfNotComplete();
+    throw new IllegalStateException(
+        "PLAIN supports neither integrity nor privacy");
+  }
+
+  @Override
+  public void dispose() throws SaslException {
+    cbh = null;
+    authz = null;
+  }
+}
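Illustration (not part of the commit): the new SaslPlainServer is registered as a JCA security provider by SaslRpcServer.init() (see the Security.addProvider hunk below). A self-contained sketch that drives the mechanism directly with a toy callback handler; the "rpc"/"example.com" protocol and host names and the "alice"/"secret" credentials are made up for the example:

import java.security.Security;

import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.PasswordCallback;
import javax.security.sasl.AuthorizeCallback;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslServer;

import org.apache.hadoop.security.SaslPlainServer;

public class SaslPlainServerExample {
  public static void main(String[] args) throws Exception {
    // Register the PLAIN server factory, as SaslRpcServer.init() now does.
    Security.addProvider(new SaslPlainServer.SecurityProvider());

    // Toy handler: accept any user whose password is "secret".
    CallbackHandler cbh = new CallbackHandler() {
      public void handle(Callback[] callbacks) {
        char[] password = null;
        for (Callback cb : callbacks) {
          if (cb instanceof PasswordCallback) {
            password = ((PasswordCallback) cb).getPassword();
          } else if (cb instanceof AuthorizeCallback) {
            AuthorizeCallback ac = (AuthorizeCallback) cb;
            ac.setAuthorized(password != null
                && "secret".equals(new String(password)));
          }
        }
      }
    };

    SaslServer server = Sasl.createSaslServer("PLAIN", "rpc", "example.com", null, cbh);
    // RFC 4616 initial response: authzid NUL authcid NUL password (authzid left empty).
    byte[] response = "\u0000alice\u0000secret".getBytes("UTF-8");
    server.evaluateResponse(response);
    System.out.println("completed=" + server.isComplete()
        + ", authorized as " + server.getAuthorizationID());
  }
}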
@@ -25,6 +25,7 @@
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import java.util.Map;

import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
@@ -45,6 +46,7 @@
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.apache.hadoop.security.SaslRpcServer.SaslStatus;
+import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
@@ -69,40 +71,48 @@ public class SaslRpcClient {
  public SaslRpcClient(AuthMethod method,
      Token<? extends TokenIdentifier> token, String serverPrincipal)
      throws IOException {
+    String saslUser = null;
+    String saslProtocol = null;
+    String saslServerName = null;
+    Map<String, String> saslProperties = SaslRpcServer.SASL_PROPS;
+    CallbackHandler saslCallback = null;
+
    switch (method) {
-    case DIGEST:
-      if (LOG.isDebugEnabled())
-        LOG.debug("Creating SASL " + AuthMethod.DIGEST.getMechanismName()
-            + " client to authenticate to service at " + token.getService());
-      saslClient = Sasl.createSaslClient(new String[] { AuthMethod.DIGEST
-          .getMechanismName() }, null, null, SaslRpcServer.SASL_DEFAULT_REALM,
-          SaslRpcServer.SASL_PROPS, new SaslClientCallbackHandler(token));
-      break;
-    case KERBEROS:
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Creating SASL " + AuthMethod.KERBEROS.getMechanismName()
-            + " client. Server's Kerberos principal name is "
-            + serverPrincipal);
-      }
-      if (serverPrincipal == null || serverPrincipal.length() == 0) {
-        throw new IOException(
-            "Failed to specify server's Kerberos principal name");
-      }
-      String names[] = SaslRpcServer.splitKerberosName(serverPrincipal);
-      if (names.length != 3) {
-        throw new IOException(
-            "Kerberos principal name does NOT have the expected hostname part: "
-                + serverPrincipal);
-      }
-      saslClient = Sasl.createSaslClient(new String[] { AuthMethod.KERBEROS
-          .getMechanismName() }, null, names[0], names[1],
-          SaslRpcServer.SASL_PROPS, null);
-      break;
-    default:
-      throw new IOException("Unknown authentication method " + method);
-    }
-    if (saslClient == null)
+    case DIGEST: {
+      saslServerName = SaslRpcServer.SASL_DEFAULT_REALM;
+      saslCallback = new SaslClientCallbackHandler(token);
+      break;
+    }
+    case KERBEROS: {
+      if (serverPrincipal == null || serverPrincipal.isEmpty()) {
+        throw new IOException(
+            "Failed to specify server's Kerberos principal name");
+      }
+      KerberosName name = new KerberosName(serverPrincipal);
+      saslProtocol = name.getServiceName();
+      saslServerName = name.getHostName();
+      if (saslServerName == null) {
+        throw new IOException(
+            "Kerberos principal name does NOT have the expected hostname part: "
+                + serverPrincipal);
+      }
+      break;
+    }
+    default:
+      throw new IOException("Unknown authentication method " + method);
+    }
+
+    String mechanism = method.getMechanismName();
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Creating SASL " + mechanism
+          + " client to authenticate to service at " + saslServerName);
+    }
+    saslClient = Sasl.createSaslClient(
+        new String[] { mechanism }, saslUser, saslProtocol, saslServerName,
+        saslProperties, saslCallback);
+    if (saslClient == null) {
      throw new IOException("Unable to find SASL client implementation");
+    }
  }

  private static void readStatus(DataInputStream inStream) throws IOException {
@@ -23,6 +23,7 @@
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.IOException;
+import java.security.Security;
import java.util.Map;
import java.util.TreeMap;
@@ -89,6 +90,7 @@ public static void init(Configuration conf) {

    SASL_PROPS.put(Sasl.QOP, saslQOP.getSaslQop());
    SASL_PROPS.put(Sasl.SERVER_AUTH, "true");
+    Security.addProvider(new SaslPlainServer.SecurityProvider());
  }

  static String encodeIdentifier(byte[] identifier) {
@@ -138,7 +140,8 @@ private SaslStatus(int state) {
  public static enum AuthMethod {
    SIMPLE((byte) 80, ""),
    KERBEROS((byte) 81, "GSSAPI"),
-    DIGEST((byte) 82, "DIGEST-MD5");
+    DIGEST((byte) 82, "DIGEST-MD5"),
+    PLAIN((byte) 83, "PLAIN");

    /** The code for this method. */
    public final byte code;
@@ -238,14 +238,17 @@ private static synchronized void initialize(Configuration conf, boolean skipRule
   */
  private static synchronized void initUGI(Configuration conf) {
    AuthenticationMethod auth = SecurityUtil.getAuthenticationMethod(conf);
-    if (auth == AuthenticationMethod.SIMPLE) {
-      useKerberos = false;
-    } else if (auth == AuthenticationMethod.KERBEROS) {
-      useKerberos = true;
-    } else {
-      throw new IllegalArgumentException("Invalid attribute value for " +
-                                         HADOOP_SECURITY_AUTHENTICATION +
-                                         " of " + auth);
+    switch (auth) {
+      case SIMPLE:
+        useKerberos = false;
+        break;
+      case KERBEROS:
+        useKerberos = true;
+        break;
+      default:
+        throw new IllegalArgumentException("Invalid attribute value for " +
+                                           HADOOP_SECURITY_AUTHENTICATION +
+                                           " of " + auth);
    }
    try {
      kerberosMinSecondsBeforeRelogin = 1000L * conf.getLong(
@@ -637,19 +640,20 @@ static UserGroupInformation getLoginUser() throws IOException {
      try {
        Subject subject = new Subject();
        LoginContext login;
+        AuthenticationMethod authenticationMethod;
        if (isSecurityEnabled()) {
+          authenticationMethod = AuthenticationMethod.KERBEROS;
          login = newLoginContext(HadoopConfiguration.USER_KERBEROS_CONFIG_NAME,
              subject, new HadoopConfiguration());
        } else {
+          authenticationMethod = AuthenticationMethod.SIMPLE;
          login = newLoginContext(HadoopConfiguration.SIMPLE_CONFIG_NAME,
              subject, new HadoopConfiguration());
        }
        login.login();
        loginUser = new UserGroupInformation(subject);
        loginUser.setLogin(login);
-        loginUser.setAuthenticationMethod(isSecurityEnabled() ?
-                                          AuthenticationMethod.KERBEROS :
-                                          AuthenticationMethod.SIMPLE);
+        loginUser.setAuthenticationMethod(authenticationMethod);
        loginUser = new UserGroupInformation(login.getSubject());
        String fileLocation = System.getenv(HADOOP_TOKEN_FILE_LOCATION);
        if (fileLocation != null) {
@@ -72,16 +72,27 @@ static int workaround_non_threadsafe_calls(JNIEnv *env, jclass clazz) {
static void stat_init(JNIEnv *env, jclass nativeio_class) {
  // Init Stat
  jclass clazz = (*env)->FindClass(env, "org/apache/hadoop/io/nativeio/NativeIO$Stat");
-  PASS_EXCEPTIONS(env);
+  if (!clazz) {
+    return; // exception has been raised
+  }
  stat_clazz = (*env)->NewGlobalRef(env, clazz);
+  if (!stat_clazz) {
+    return; // exception has been raised
+  }
  stat_ctor = (*env)->GetMethodID(env, stat_clazz, "<init>",
-    "(Ljava/lang/String;Ljava/lang/String;I)V");
+    "(III)V");
+  if (!stat_ctor) {
+    return; // exception has been raised
+  }
  jclass obj_class = (*env)->FindClass(env, "java/lang/Object");
-  assert(obj_class != NULL);
+  if (!obj_class) {
+    return; // exception has been raised
+  }
  jmethodID obj_ctor = (*env)->GetMethodID(env, obj_class,
                                           "<init>", "()V");
-  assert(obj_ctor != NULL);
+  if (!obj_ctor) {
+    return; // exception has been raised
+  }

  if (workaround_non_threadsafe_calls(env, nativeio_class)) {
    pw_lock_object = (*env)->NewObject(env, obj_class, obj_ctor);
@@ -158,8 +169,6 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_fstat(
  JNIEnv *env, jclass clazz, jobject fd_object)
{
  jobject ret = NULL;
-  char *pw_buf = NULL;
-  int pw_lock_locked = 0;

  int fd = fd_get(env, fd_object);
  PASS_EXCEPTIONS_GOTO(env, cleanup);
@@ -171,71 +180,14 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_fstat(
    goto cleanup;
  }

-  size_t pw_buflen = get_pw_buflen();
-  if ((pw_buf = malloc(pw_buflen)) == NULL) {
-    THROW(env, "java/lang/OutOfMemoryError", "Couldn't allocate memory for pw buffer");
-    goto cleanup;
-  }
-
-  if (pw_lock_object != NULL) {
-    if ((*env)->MonitorEnter(env, pw_lock_object) != JNI_OK) {
-      goto cleanup;
-    }
-    pw_lock_locked = 1;
-  }
-
-  // Grab username
-  struct passwd pwd, *pwdp;
-  while ((rc = getpwuid_r(s.st_uid, &pwd, pw_buf, pw_buflen, &pwdp)) != 0) {
-    if (rc != ERANGE) {
-      throw_ioe(env, rc);
-      goto cleanup;
-    }
-    free(pw_buf);
-    pw_buflen *= 2;
-    if ((pw_buf = malloc(pw_buflen)) == NULL) {
-      THROW(env, "java/lang/OutOfMemoryError", "Couldn't allocate memory for pw buffer");
-      goto cleanup;
-    }
-  }
-  assert(pwdp == &pwd);
-
-  jstring jstr_username = (*env)->NewStringUTF(env, pwd.pw_name);
-  if (jstr_username == NULL) goto cleanup;
-
-  // Grab group
-  struct group grp, *grpp;
-  while ((rc = getgrgid_r(s.st_gid, &grp, pw_buf, pw_buflen, &grpp)) != 0) {
-    if (rc != ERANGE) {
-      throw_ioe(env, rc);
-      goto cleanup;
-    }
-    free(pw_buf);
-    pw_buflen *= 2;
-    if ((pw_buf = malloc(pw_buflen)) == NULL) {
-      THROW(env, "java/lang/OutOfMemoryError", "Couldn't allocate memory for pw buffer");
-      goto cleanup;
-    }
-  }
-  assert(grpp == &grp);
-
-  jstring jstr_groupname = (*env)->NewStringUTF(env, grp.gr_name);
-  PASS_EXCEPTIONS_GOTO(env, cleanup);
-
  // Construct result
  ret = (*env)->NewObject(env, stat_clazz, stat_ctor,
-    jstr_username, jstr_groupname, s.st_mode);
+    (jint)s.st_uid, (jint)s.st_gid, (jint)s.st_mode);

cleanup:
-  if (pw_buf != NULL) free(pw_buf);
-  if (pw_lock_locked) {
-    (*env)->MonitorExit(env, pw_lock_object);
-  }
  return ret;
}


/**
 * public static native void posix_fadvise(
 *   FileDescriptor fd, long offset, long len, int flags);
@@ -385,6 +337,128 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_chmod(
  (*env)->ReleaseStringUTFChars(env, j_path, path);
}

+/*
+ * static native String getUserName(int uid);
+ */
+JNIEXPORT jstring JNICALL
+Java_org_apache_hadoop_io_nativeio_NativeIO_getUserName(JNIEnv *env,
+  jclass clazz, jint uid)
+{
+  int pw_lock_locked = 0;
+  if (pw_lock_object != NULL) {
+    if ((*env)->MonitorEnter(env, pw_lock_object) != JNI_OK) {
+      goto cleanup;
+    }
+    pw_lock_locked = 1;
+  }
+
+  char *pw_buf = NULL;
+  int rc;
+  size_t pw_buflen = get_pw_buflen();
+  if ((pw_buf = malloc(pw_buflen)) == NULL) {
+    THROW(env, "java/lang/OutOfMemoryError", "Couldn't allocate memory for pw buffer");
+    goto cleanup;
+  }
+
+  // Grab username
+  struct passwd pwd, *pwdp;
+  while ((rc = getpwuid_r((uid_t)uid, &pwd, pw_buf, pw_buflen, &pwdp)) != 0) {
+    if (rc != ERANGE) {
+      throw_ioe(env, rc);
+      goto cleanup;
+    }
+    free(pw_buf);
+    pw_buflen *= 2;
+    if ((pw_buf = malloc(pw_buflen)) == NULL) {
+      THROW(env, "java/lang/OutOfMemoryError", "Couldn't allocate memory for pw buffer");
+      goto cleanup;
+    }
+  }
+  if (pwdp == NULL) {
+    char msg[80];
+    snprintf(msg, sizeof(msg), "uid not found: %d", uid);
+    THROW(env, "java/io/IOException", msg);
+    goto cleanup;
+  }
+  if (pwdp != &pwd) {
+    char msg[80];
+    snprintf(msg, sizeof(msg), "pwd pointer inconsistent with reference. uid: %d", uid);
+    THROW(env, "java/lang/IllegalStateException", msg);
+    goto cleanup;
+  }
+
+  jstring jstr_username = (*env)->NewStringUTF(env, pwd.pw_name);
+
+cleanup:
+  if (pw_lock_locked) {
+    (*env)->MonitorExit(env, pw_lock_object);
+  }
+  if (pw_buf != NULL) free(pw_buf);
+  return jstr_username;
+}
+
+/*
+ * static native String getGroupName(int gid);
+ */
+JNIEXPORT jstring JNICALL
+Java_org_apache_hadoop_io_nativeio_NativeIO_getGroupName(JNIEnv *env,
+  jclass clazz, jint gid)
+{
+  int pw_lock_locked = 0;
+
+  if (pw_lock_object != NULL) {
+    if ((*env)->MonitorEnter(env, pw_lock_object) != JNI_OK) {
+      goto cleanup;
+    }
+    pw_lock_locked = 1;
+  }
+
+  char *pw_buf = NULL;
+  int rc;
+  size_t pw_buflen = get_pw_buflen();
+  if ((pw_buf = malloc(pw_buflen)) == NULL) {
+    THROW(env, "java/lang/OutOfMemoryError", "Couldn't allocate memory for pw buffer");
+    goto cleanup;
+  }
+
+  // Grab group
+  struct group grp, *grpp;
+  while ((rc = getgrgid_r((uid_t)gid, &grp, pw_buf, pw_buflen, &grpp)) != 0) {
+    if (rc != ERANGE) {
+      throw_ioe(env, rc);
+      goto cleanup;
+    }
+    free(pw_buf);
+    pw_buflen *= 2;
+    if ((pw_buf = malloc(pw_buflen)) == NULL) {
+      THROW(env, "java/lang/OutOfMemoryError", "Couldn't allocate memory for pw buffer");
+      goto cleanup;
+    }
+  }
+  if (grpp == NULL) {
+    char msg[80];
+    snprintf(msg, sizeof(msg), "gid not found: %d", gid);
+    THROW(env, "java/io/IOException", msg);
+    goto cleanup;
+  }
+  if (grpp != &grp) {
+    char msg[80];
+    snprintf(msg, sizeof(msg), "pwd pointer inconsistent with reference. gid: %d", gid);
+    THROW(env, "java/lang/IllegalStateException", msg);
+    goto cleanup;
+  }
+
+  jstring jstr_groupname = (*env)->NewStringUTF(env, grp.gr_name);
+  PASS_EXCEPTIONS_GOTO(env, cleanup);
+
+cleanup:
+  if (pw_lock_locked) {
+    (*env)->MonitorExit(env, pw_lock_object);
+  }
+  if (pw_buf != NULL) free(pw_buf);
+  return jstr_groupname;
+}
+
/*
 * Throw a java.IO.IOException, generating the message from errno.
@@ -214,6 +214,17 @@
  </description>
</property>

+<property>
+  <name>hadoop.security.uid.cache.secs</name>
+  <value>14400</value>
+  <description>
+    This is the config controlling the validity of the entries in the cache
+    containing the userId to userName and groupId to groupName used by
+    NativeIO getFstat().
+  </description>
+</property>
+
<property>
  <name>hadoop.rpc.protection</name>
  <value>authentication</value>
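Illustration (not part of the commit): the new hadoop.security.uid.cache.secs value is read once during NativeIO static initialization (see the earlier NativeIO hunk, which multiplies it by 1000 to obtain milliseconds). A hedged Java sketch of overriding and reading it through Configuration:

import org.apache.hadoop.conf.Configuration;

public class UidCacheConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical override for illustration; the shipped default above is
    // 14400 seconds (4 hours).
    conf.setLong("hadoop.security.uid.cache.secs", 600);
    long timeoutMs = conf.getLong("hadoop.security.uid.cache.secs",
        4 * 60 * 60) * 1000;
    System.out.println("uid/gid name cache timeout: " + timeoutMs + " ms");
  }
}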
@@ -61,19 +61,28 @@ public static byte[] getFileData(int numOfBlocks, long blockSize) {
    return data;
  }

+  /*
+   * get testRootPath qualified for fSys
+   */
  public static Path getTestRootPath(FileSystem fSys) {
    return fSys.makeQualified(new Path(TEST_ROOT_DIR));
  }

+  /*
+   * get testRootPath + pathString qualified for fSys
+   */
  public static Path getTestRootPath(FileSystem fSys, String pathString) {
    return fSys.makeQualified(new Path(TEST_ROOT_DIR, pathString));
  }

  // the getAbsolutexxx method is needed because the root test dir
-  // can be messed up by changing the working dir.
+  // can be messed up by changing the working dir since the TEST_ROOT_PATH
+  // is often relative to the working directory of process
+  // running the unit tests.
+
-  public static String getAbsoluteTestRootDir(FileSystem fSys)
+  static String getAbsoluteTestRootDir(FileSystem fSys)
      throws IOException {
    // NOTE: can't cache because of different filesystems!
    //if (absTestRootDir == null)
@@ -23,6 +23,8 @@
import java.io.IOException;
import java.util.EnumSet;

+import org.apache.hadoop.util.Shell;
+
public class TestDFVariations extends TestCase {

  public static class XXDF extends DF {
@@ -51,7 +53,9 @@ protected String[] getExecString() {
  public void testOSParsing() throws Exception {
    for (DF.OSType ost : EnumSet.allOf(DF.OSType.class)) {
      XXDF df = new XXDF(ost.getId());
-      assertEquals(ost.getId() + " mount", "/foo/bar", df.getMount());
+      assertEquals(ost.getId() + " mount",
+        Shell.WINDOWS ? df.getDirPath().substring(0, 2) : "/foo/bar",
+        df.getMount());
    }
  }
@@ -73,10 +73,10 @@ public void testBasicPaths() {
    URI uri = fSys.getUri();
    Assert.assertEquals(chrootedTo.toUri(), uri);
    Assert.assertEquals(fSys.makeQualified(
-        new Path("/user/" + System.getProperty("user.name"))),
+        new Path(System.getProperty("user.home"))),
        fSys.getWorkingDirectory());
    Assert.assertEquals(fSys.makeQualified(
-        new Path("/user/" + System.getProperty("user.name"))),
+        new Path(System.getProperty("user.home"))),
        fSys.getHomeDirectory());
    /*
     * ChRootedFs as its uri like file:///chrootRoot.
@@ -70,10 +70,10 @@ public void testBasicPaths() {
    URI uri = fc.getDefaultFileSystem().getUri();
    Assert.assertEquals(chrootedTo.toUri(), uri);
    Assert.assertEquals(fc.makeQualified(
-        new Path("/user/" + System.getProperty("user.name"))),
+        new Path(System.getProperty("user.home"))),
        fc.getWorkingDirectory());
    Assert.assertEquals(fc.makeQualified(
-        new Path("/user/" + System.getProperty("user.name"))),
+        new Path(System.getProperty("user.home"))),
        fc.getHomeDirectory());
    /*
     * ChRootedFs as its uri like file:///chrootRoot.
@ -39,44 +39,7 @@ public class TestFcMainOperationsLocalFs extends
   @Override
   @Before
   public void setUp() throws Exception {
-    /**
-     * create the test root on local_fs - the mount table will point here
-     */
-    fclocal = FileContext.getLocalFSFileContext();
-    targetOfTests = FileContextTestHelper.getTestRootPath(fclocal);
-    // In case previous test was killed before cleanup
-    fclocal.delete(targetOfTests, true);
-
-    fclocal.mkdir(targetOfTests, FileContext.DEFAULT_PERM, true);
-
-    // We create mount table so that the test root on the viewFs points to
-    // to the test root on the target.
-    // DOing this helps verify the FileStatus.path.
-    //
-    // The test root by default when running eclipse
-    // is a test dir below the working directory.
-    // (see FileContextTestHelper).
-    // Since viewFs has no built-in wd, its wd is /user/<username>.
-    // If this test launched via ant (build.xml) the test root is absolute path
-
-    String srcTestRoot;
-    if (FileContextTestHelper.TEST_ROOT_DIR.startsWith("/")) {
-      srcTestRoot = FileContextTestHelper.TEST_ROOT_DIR;
-    } else {
-      srcTestRoot = "/user/" + System.getProperty("user.name") + "/" +
-          FileContextTestHelper.TEST_ROOT_DIR;
-    }
-
-    Configuration conf = new Configuration();
-    ConfigUtil.addLink(conf, srcTestRoot,
-        targetOfTests.toUri());
-
-    fc = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
-    //System.out.println("SRCOfTests = "+ FileContextTestHelper.getTestRootPath(fc, "test"));
-    //System.out.println("TargetOfTests = "+ targetOfTests.toUri());
+    fc = ViewFsTestSetup.setupForViewFsLocalFs();
     super.setUp();
   }

@ -84,6 +47,6 @@ public void setUp() throws Exception {
   @After
   public void tearDown() throws Exception {
     super.tearDown();
-    fclocal.delete(targetOfTests, true);
+    ViewFsTestSetup.tearDownForViewFsLocalFs();
   }
 }
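The setUp/tearDown pair above now delegates all mount-table plumbing to ViewFsTestSetup, so other viewfs-over-localfs tests can follow the same pattern. A minimal sketch under that assumption (the test class name is hypothetical; only the two helper calls come from this patch, and the helper lives in the hadoop-common test sources):

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.viewfs.ViewFsTestSetup;
import org.junit.After;
import org.junit.Before;

public class MyViewFsLocalFsTest {      // hypothetical test class
  private FileContext fc;

  @Before
  public void setUp() throws Exception {
    // Builds the mount table (test dir, home dir, working dir) and
    // returns a FileContext bound to viewfs:///.
    fc = ViewFsTestSetup.setupForViewFsLocalFs();
  }

  @After
  public void tearDown() throws Exception {
    // Removes the test root created on the local file system.
    ViewFsTestSetup.tearDownForViewFsLocalFs();
  }
}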
@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.fs.viewfs;

+import java.net.URI;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FsConstants;

@ -32,14 +35,19 @@
  *
  * If tests launched via ant (build.xml) the test root is absolute path
  * If tests launched via eclipse, the test root is
- * is a test dir below the working directory. (see FileSystemTestHelper).
- * Since viewFs has no built-in wd, its wd is /user/<username>
- * (or /User/<username> on mac)
+ * is a test dir below the working directory. (see FileContextTestHelper)
  *
- * We set a viewFileSystems with mount point for
- * /<firstComponent>" pointing to the target fs's testdir
+ * We set a viewFileSystems with 3 mount points:
+ * 1) /<firstComponent>" of testdir pointing to same in target fs
+ * 2) /<firstComponent>" of home pointing to same in target fs
+ * 3) /<firstComponent>" of wd pointing to same in target fs
+ * (note in many cases the link may be the same - viewFileSytem handles this)
+ *
+ * We also set the view file system's wd to point to the wd.
  */
 public class ViewFileSystemTestSetup {

+  static public String ViewFSTestDir = "/testDir";
+
   /**
    *

@ -56,24 +64,26 @@ static public FileSystem setupForViewFileSystem(Configuration conf, FileSystem f
     fsTarget.delete(targetOfTests, true);
     fsTarget.mkdirs(targetOfTests);

-    // Setup a link from viewfs to targetfs for the first component of
-    // path of testdir.
+    // Set up viewfs link for test dir as described above
     String testDir = FileSystemTestHelper.getTestRootPath(fsTarget).toUri()
         .getPath();
-    int indexOf2ndSlash = testDir.indexOf('/', 1);
-    String testDirFirstComponent = testDir.substring(0, indexOf2ndSlash);
-    ConfigUtil.addLink(conf, testDirFirstComponent, fsTarget.makeQualified(
-        new Path(testDirFirstComponent)).toUri());
+    linkUpFirstComponents(conf, testDir, fsTarget, "test dir");
+
+    // Set up viewfs link for home dir as described above
+    setUpHomeDir(conf, fsTarget);

-    // viewFs://home => fsTarget://home
-    String homeDirRoot = fsTarget.getHomeDirectory()
-        .getParent().toUri().getPath();
-    ConfigUtil.addLink(conf, homeDirRoot,
-        fsTarget.makeQualified(new Path(homeDirRoot)).toUri());
-    ConfigUtil.setHomeDirConf(conf, homeDirRoot);
-    Log.info("Home dir base " + homeDirRoot);
+    // the test path may be relative to working dir - we need to make that work:
+    // Set up viewfs link for wd as described above
+    String wdDir = fsTarget.getWorkingDirectory().toUri().getPath();
+    linkUpFirstComponents(conf, wdDir, fsTarget, "working dir");

     FileSystem fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
+    fsView.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd.
+    Log.info("Working dir is: " + fsView.getWorkingDirectory());
     return fsView;
   }

@ -91,4 +101,33 @@ public static Configuration createConfig() {
     conf.set("fs.viewfs.impl", ViewFileSystem.class.getName());
     return conf;
   }
+
+  static void setUpHomeDir(Configuration conf, FileSystem fsTarget) {
+    String homeDir = fsTarget.getHomeDirectory().toUri().getPath();
+    int indexOf2ndSlash = homeDir.indexOf('/', 1);
+    if (indexOf2ndSlash >0) {
+      linkUpFirstComponents(conf, homeDir, fsTarget, "home dir");
+    } else { // home dir is at root. Just link the home dir itse
+      URI linkTarget = fsTarget.makeQualified(new Path(homeDir)).toUri();
+      ConfigUtil.addLink(conf, homeDir, linkTarget);
+      Log.info("Added link for home dir " + homeDir + "->" + linkTarget);
+    }
+    // Now set the root of the home dir for viewfs
+    String homeDirRoot = fsTarget.getHomeDirectory().getParent().toUri().getPath();
+    ConfigUtil.setHomeDirConf(conf, homeDirRoot);
+    Log.info("Home dir base for viewfs" + homeDirRoot);
+  }
+
+  /*
+   * Set up link in config for first component of path to the same
+   * in the target file system.
+   */
+  static void linkUpFirstComponents(Configuration conf, String path, FileSystem fsTarget, String info) {
+    int indexOf2ndSlash = path.indexOf('/', 1);
+    String firstComponent = path.substring(0, indexOf2ndSlash);
+    URI linkTarget = fsTarget.makeQualified(new Path(firstComponent)).toUri();
+    ConfigUtil.addLink(conf, firstComponent, linkTarget);
+    Log.info("Added link for " + info + " "
+        + firstComponent + "->" + linkTarget);
+  }
 }
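For context on what linkUpFirstComponents ultimately does: each ConfigUtil.addLink call records a viewfs mount-table link in the Configuration, and ViewFileSystem then resolves client paths through those links. A small illustrative sketch, assuming a locally writable target directory /tmp/viewfs-target (a hypothetical path that is not part of this patch):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ConfigUtil;
import org.apache.hadoop.fs.viewfs.ViewFileSystem;

public class ViewFsMountSketch {            // hypothetical class name
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.viewfs.impl", ViewFileSystem.class.getName());
    // Link the first path component "/data" in the view to a target URI.
    // The target path is only an example.
    ConfigUtil.addLink(conf, "/data", new URI("file:///tmp/viewfs-target"));
    // Paths under viewfs:///data/... now resolve against the link target.
    FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
    System.out.println(viewFs.makeQualified(new Path("/data/example")));
  }
}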
@ -17,12 +17,15 @@
  */
 package org.apache.hadoop.fs.viewfs;

+import java.net.URI;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileContextTestHelper;
 import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.viewfs.ConfigUtil;
+import org.mortbay.log.Log;

 /**

@ -31,13 +34,20 @@
  *
  * If tests launched via ant (build.xml) the test root is absolute path
  * If tests launched via eclipse, the test root is
- * is a test dir below the working directory. (see FileContextTestHelper).
- * Since viewFs has no built-in wd, its wd is /user/<username>.
+ * is a test dir below the working directory. (see FileContextTestHelper)
  *
- * We set up fc to be the viewFs with mount point for
- * /<firstComponent>" pointing to the local file system's testdir
+ * We set a viewfs with 3 mount points:
+ * 1) /<firstComponent>" of testdir pointing to same in target fs
+ * 2) /<firstComponent>" of home pointing to same in target fs
+ * 3) /<firstComponent>" of wd pointing to same in target fs
+ * (note in many cases the link may be the same - viewfs handles this)
+ *
+ * We also set the view file system's wd to point to the wd.
  */

 public class ViewFsTestSetup {

+  static public String ViewFSTestDir = "/testDir";
+
   /*

@ -47,30 +57,31 @@ static public FileContext setupForViewFsLocalFs() throws Exception {
     /**
      * create the test root on local_fs - the mount table will point here
      */
-    FileContext fclocal = FileContext.getLocalFSFileContext();
-    Path targetOfTests = FileContextTestHelper.getTestRootPath(fclocal);
+    FileContext fsTarget = FileContext.getLocalFSFileContext();
+    Path targetOfTests = FileContextTestHelper.getTestRootPath(fsTarget);
     // In case previous test was killed before cleanup
-    fclocal.delete(targetOfTests, true);
+    fsTarget.delete(targetOfTests, true);

-    fclocal.mkdir(targetOfTests, FileContext.DEFAULT_PERM, true);
+    fsTarget.mkdir(targetOfTests, FileContext.DEFAULT_PERM, true);

-    String srcTestFirstDir;
-    if (FileContextTestHelper.TEST_ROOT_DIR.startsWith("/")) {
-      int indexOf2ndSlash = FileContextTestHelper.TEST_ROOT_DIR.indexOf('/', 1);
-      srcTestFirstDir = FileContextTestHelper.TEST_ROOT_DIR.substring(0, indexOf2ndSlash);
-    } else {
-      srcTestFirstDir = "/user";
-    }
-    //System.out.println("srcTestFirstDir=" + srcTestFirstDir);
-
-    // Set up the defaultMT in the config with mount point links
-    // The test dir is root is below /user/<userid>
     Configuration conf = new Configuration();
-    ConfigUtil.addLink(conf, srcTestFirstDir,
-        targetOfTests.toUri());
+    // Set up viewfs link for test dir as described above
+    String testDir = FileContextTestHelper.getTestRootPath(fsTarget).toUri()
+        .getPath();
+    linkUpFirstComponents(conf, testDir, fsTarget, "test dir");
+
+    // Set up viewfs link for home dir as described above
+    setUpHomeDir(conf, fsTarget);
+
+    // the test path may be relative to working dir - we need to make that work:
+    // Set up viewfs link for wd as described above
+    String wdDir = fsTarget.getWorkingDirectory().toUri().getPath();
+    linkUpFirstComponents(conf, wdDir, fsTarget, "working dir");

     FileContext fc = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
+    fc.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd.
+    Log.info("Working dir is: " + fc.getWorkingDirectory());
     //System.out.println("SRCOfTests = "+ getTestRootPath(fc, "test"));
     //System.out.println("TargetOfTests = "+ targetOfTests.toUri());
     return fc;

@ -85,5 +96,36 @@ static public void tearDownForViewFsLocalFs() throws Exception {
     Path targetOfTests = FileContextTestHelper.getTestRootPath(fclocal);
     fclocal.delete(targetOfTests, true);
   }
+
+  static void setUpHomeDir(Configuration conf, FileContext fsTarget) {
+    String homeDir = fsTarget.getHomeDirectory().toUri().getPath();
+    int indexOf2ndSlash = homeDir.indexOf('/', 1);
+    if (indexOf2ndSlash >0) {
+      linkUpFirstComponents(conf, homeDir, fsTarget, "home dir");
+    } else { // home dir is at root. Just link the home dir itse
+      URI linkTarget = fsTarget.makeQualified(new Path(homeDir)).toUri();
+      ConfigUtil.addLink(conf, homeDir, linkTarget);
+      Log.info("Added link for home dir " + homeDir + "->" + linkTarget);
+    }
+    // Now set the root of the home dir for viewfs
+    String homeDirRoot = fsTarget.getHomeDirectory().getParent().toUri().getPath();
+    ConfigUtil.setHomeDirConf(conf, homeDirRoot);
+    Log.info("Home dir base for viewfs" + homeDirRoot);
+  }
+
+  /*
+   * Set up link in config for first component of path to the same
+   * in the target file system.
+   */
+  static void linkUpFirstComponents(Configuration conf, String path,
+      FileContext fsTarget, String info) {
+    int indexOf2ndSlash = path.indexOf('/', 1);
+    String firstComponent = path.substring(0, indexOf2ndSlash);
+    URI linkTarget = fsTarget.makeQualified(new Path(firstComponent)).toUri();
+    ConfigUtil.addLink(conf, firstComponent, linkTarget);
+    Log.info("Added link for " + info + " "
+        + firstComponent + "->" + linkTarget);
+  }
+
 }
@ -61,7 +61,7 @@ public void setupTestDir() {
   public void testFstat() throws Exception {
     FileOutputStream fos = new FileOutputStream(
         new File(TEST_DIR, "testfstat"));
-    NativeIO.Stat stat = NativeIO.fstat(fos.getFD());
+    NativeIO.Stat stat = NativeIO.getFstat(fos.getFD());
     fos.close();
     LOG.info("Stat: " + String.valueOf(stat));

@ -93,7 +93,7 @@ public void run() {
           long et = Time.now() + 5000;
           while (Time.now() < et) {
             try {
-              NativeIO.Stat stat = NativeIO.fstat(fos.getFD());
+              NativeIO.Stat stat = NativeIO.getFstat(fos.getFD());
               assertEquals(System.getProperty("user.name"), stat.getOwner());
               assertNotNull(stat.getGroup());
               assertTrue(!stat.getGroup().isEmpty());

@ -125,7 +125,7 @@ public void testFstatClosedFd() throws Exception {
         new File(TEST_DIR, "testfstat2"));
     fos.close();
     try {
-      NativeIO.Stat stat = NativeIO.fstat(fos.getFD());
+      NativeIO.Stat stat = NativeIO.getFstat(fos.getFD());
     } catch (NativeIOException nioe) {
       LOG.info("Got expected exception", nioe);
       assertEquals(Errno.EBADF, nioe.getErrno());

@ -283,4 +283,14 @@ private void assertPermissions(File f, int expected) throws IOException {
     assertEquals(expected, perms.toShort());
   }

+  @Test
+  public void testGetUserName() throws IOException {
+    assertFalse(NativeIO.getUserName(0).isEmpty());
+  }
+
+  @Test
+  public void testGetGroupName() throws IOException {
+    assertFalse(NativeIO.getGroupName(0).isEmpty());
+  }
+
 }
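The fstat-to-getFstat rename and the new getUserName/getGroupName tests line up with the cached getpwuid_r/getgrgid_r lookups noted in the change log: stat results now carry owner and group names resolved (and cached) from numeric ids. A usage sketch, assuming the libhadoop native library is loadable and using a hypothetical temp-file path:

import java.io.File;
import java.io.FileOutputStream;
import org.apache.hadoop.io.nativeio.NativeIO;

public class FstatSketch {                  // hypothetical class name
  public static void main(String[] args) throws Exception {
    // Assumes native code is available; getFstat is a no-go without it.
    File f = new File("/tmp/fstat-example"); // hypothetical path
    FileOutputStream fos = new FileOutputStream(f);
    try {
      NativeIO.Stat stat = NativeIO.getFstat(fos.getFD());
      // Owner/group come back as names, looked up (and cached) from uid/gid.
      System.out.println(stat.getOwner() + ":" + stat.getGroup());
    } finally {
      fos.close();
    }
  }
}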
@ -27,12 +27,13 @@
 import java.lang.annotation.Annotation;
 import java.net.InetSocketAddress;
 import java.security.PrivilegedExceptionAction;
+import java.security.Security;
 import java.util.Collection;
 import java.util.Set;
 import java.util.regex.Pattern;

-import javax.security.sasl.Sasl;
+import javax.security.auth.callback.*;
+import javax.security.sasl.*;
 import junit.framework.Assert;

 import org.apache.commons.logging.Log;

@ -43,14 +44,8 @@
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.KerberosInfo;
-import org.apache.hadoop.security.SaslInputStream;
-import org.apache.hadoop.security.SaslRpcClient;
-import org.apache.hadoop.security.SaslRpcServer;
-import org.apache.hadoop.security.SecurityInfo;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.security.TestUserGroupInformation;
-import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.*;
+import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.Token;

@ -58,8 +53,10 @@
 import org.apache.hadoop.security.token.TokenInfo;
 import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
+
 import org.apache.log4j.Level;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;

 /** Unit tests for using Sasl over RPC. */

@ -74,14 +71,22 @@ public class TestSaslRPC {
   static final String SERVER_KEYTAB_KEY = "test.ipc.server.keytab";
   static final String SERVER_PRINCIPAL_1 = "p1/foo@BAR";
   static final String SERVER_PRINCIPAL_2 = "p2/foo@BAR";

   private static Configuration conf;
+  static Boolean forceSecretManager = null;
+
+  @BeforeClass
+  public static void setupKerb() {
+    System.setProperty("java.security.krb5.kdc", "");
+    System.setProperty("java.security.krb5.realm", "NONE");
+    Security.addProvider(new SaslPlainServer.SecurityProvider());
+  }

   @Before
   public void setup() {
     conf = new Configuration();
     SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
     UserGroupInformation.setConfiguration(conf);
+    forceSecretManager = null;
   }

   static {

@ -265,16 +270,6 @@ public void testDigestRpcWithoutAnnotation() throws Exception {
     }
   }

-  @Test
-  public void testSecureToInsecureRpc() throws Exception {
-    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.SIMPLE, conf);
-    Server server = new RPC.Builder(conf).setProtocol(TestSaslProtocol.class)
-        .setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0)
-        .setNumHandlers(5).setVerbose(true).build();
-    TestTokenSecretManager sm = new TestTokenSecretManager();
-    doDigestRpc(server, sm);
-  }
-
   @Test
   public void testErrorMessage() throws Exception {
     BadTokenSecretManager sm = new BadTokenSecretManager();

@ -455,6 +450,120 @@ static void testKerberosRpc(String principal, String keytab) throws Exception {
     System.out.println("Test is successful.");
   }

+  @Test
+  public void testSaslPlainServer() throws IOException {
+    runNegotiation(
+        new TestPlainCallbacks.Client("user", "pass"),
+        new TestPlainCallbacks.Server("user", "pass"));
+  }
+
+  @Test
+  public void testSaslPlainServerBadPassword() throws IOException {
+    SaslException e = null;
+    try {
+      runNegotiation(
+          new TestPlainCallbacks.Client("user", "pass1"),
+          new TestPlainCallbacks.Server("user", "pass2"));
+    } catch (SaslException se) {
+      e = se;
+    }
+    assertNotNull(e);
+    assertEquals("PLAIN auth failed: wrong password", e.getMessage());
+  }
+
+  private void runNegotiation(CallbackHandler clientCbh,
+                              CallbackHandler serverCbh)
+                                  throws SaslException {
+    String mechanism = AuthMethod.PLAIN.getMechanismName();
+
+    SaslClient saslClient = Sasl.createSaslClient(
+        new String[]{ mechanism }, null, null, null, null, clientCbh);
+    assertNotNull(saslClient);
+
+    SaslServer saslServer = Sasl.createSaslServer(
+        mechanism, null, "localhost", null, serverCbh);
+    assertNotNull("failed to find PLAIN server", saslServer);
+
+    byte[] response = saslClient.evaluateChallenge(new byte[0]);
+    assertNotNull(response);
+    assertTrue(saslClient.isComplete());
+
+    response = saslServer.evaluateResponse(response);
+    assertNull(response);
+    assertTrue(saslServer.isComplete());
+    assertNotNull(saslServer.getAuthorizationID());
+  }
+
+  static class TestPlainCallbacks {
+    public static class Client implements CallbackHandler {
+      String user = null;
+      String password = null;
+
+      Client(String user, String password) {
+        this.user = user;
+        this.password = password;
+      }
+
+      @Override
+      public void handle(Callback[] callbacks)
+          throws UnsupportedCallbackException {
+        for (Callback callback : callbacks) {
+          if (callback instanceof NameCallback) {
+            ((NameCallback) callback).setName(user);
+          } else if (callback instanceof PasswordCallback) {
+            ((PasswordCallback) callback).setPassword(password.toCharArray());
+          } else {
+            throw new UnsupportedCallbackException(callback,
+                "Unrecognized SASL PLAIN Callback");
+          }
+        }
+      }
+    }
+
+    public static class Server implements CallbackHandler {
+      String user = null;
+      String password = null;
+
+      Server(String user, String password) {
+        this.user = user;
+        this.password = password;
+      }
+
+      @Override
+      public void handle(Callback[] callbacks)
+          throws UnsupportedCallbackException, SaslException {
+        NameCallback nc = null;
+        PasswordCallback pc = null;
+        AuthorizeCallback ac = null;
+
+        for (Callback callback : callbacks) {
+          if (callback instanceof NameCallback) {
+            nc = (NameCallback)callback;
+            assertEquals(user, nc.getName());
+          } else if (callback instanceof PasswordCallback) {
+            pc = (PasswordCallback)callback;
+            if (!password.equals(new String(pc.getPassword()))) {
+              throw new IllegalArgumentException("wrong password");
+            }
+          } else if (callback instanceof AuthorizeCallback) {
+            ac = (AuthorizeCallback)callback;
+            assertEquals(user, ac.getAuthorizationID());
+            assertEquals(user, ac.getAuthenticationID());
+            ac.setAuthorized(true);
+            ac.setAuthorizedID(ac.getAuthenticationID());
+          } else {
+            throw new UnsupportedCallbackException(callback,
+                "Unsupported SASL PLAIN Callback");
+          }
+        }
+        assertNotNull(nc);
+        assertNotNull(pc);
+        assertNotNull(ac);
+      }
+    }
+  }
+
   private static Pattern BadToken =
       Pattern.compile(".*DIGEST-MD5: digest response format violation.*");
   private static Pattern KrbFailed =

@ -462,6 +571,8 @@ static void testKerberosRpc(String principal, String keytab) throws Exception {
           "Failed to specify server's Kerberos principal name.*");
   private static Pattern Denied =
       Pattern.compile(".*Authorization .* is enabled .*");
+  private static Pattern NoDigest =
+      Pattern.compile(".*Server is not configured to do DIGEST auth.*");

   /*
    * simple server

@ -478,6 +589,9 @@ public void testSimpleServerWithTokens() throws Exception {
     // Tokens are ignored because client is reverted to simple
     assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE, true));
     assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, true));
+    forceSecretManager = true;
+    assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE, true));
+    assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, true));
   }

   @Test

@ -485,6 +599,9 @@ public void testSimpleServerWithInvalidTokens() throws Exception {
     // Tokens are ignored because client is reverted to simple
     assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE, false));
     assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, false));
+    forceSecretManager = true;
+    assertAuthEquals(SIMPLE, getAuthMethod(SIMPLE, SIMPLE, false));
+    assertAuthEquals(SIMPLE, getAuthMethod(KERBEROS, SIMPLE, false));
   }

   /*

@ -501,12 +618,19 @@ public void testKerberosServerWithTokens() throws Exception {
     // can use tokens regardless of auth
     assertAuthEquals(TOKEN, getAuthMethod(SIMPLE, KERBEROS, true));
     assertAuthEquals(TOKEN, getAuthMethod(KERBEROS, KERBEROS, true));
+    // can't fallback to simple when using kerberos w/o tokens
+    forceSecretManager = false;
+    assertAuthEquals(NoDigest, getAuthMethod(SIMPLE, KERBEROS, true));
+    assertAuthEquals(NoDigest, getAuthMethod(KERBEROS, KERBEROS, true));
   }

   @Test
   public void testKerberosServerWithInvalidTokens() throws Exception {
     assertAuthEquals(BadToken, getAuthMethod(SIMPLE, KERBEROS, false));
     assertAuthEquals(BadToken, getAuthMethod(KERBEROS, KERBEROS, false));
+    forceSecretManager = false;
+    assertAuthEquals(NoDigest, getAuthMethod(SIMPLE, KERBEROS, true));
+    assertAuthEquals(NoDigest, getAuthMethod(KERBEROS, KERBEROS, true));
   }

@ -539,21 +663,45 @@ private String internalGetAuthMethod(
       final boolean useToken,
       final boolean useValidToken) throws Exception {

-    Configuration serverConf = new Configuration(conf);
+    String currentUser = UserGroupInformation.getCurrentUser().getUserName();
+
+    final Configuration serverConf = new Configuration(conf);
     SecurityUtil.setAuthenticationMethod(serverAuth, serverConf);
     UserGroupInformation.setConfiguration(serverConf);

-    TestTokenSecretManager sm = new TestTokenSecretManager();
-    Server server = new RPC.Builder(serverConf).setProtocol(TestSaslProtocol.class)
+    final UserGroupInformation serverUgi =
+        UserGroupInformation.createRemoteUser(currentUser + "-SERVER");
+    serverUgi.setAuthenticationMethod(serverAuth);
+
+    final TestTokenSecretManager sm = new TestTokenSecretManager();
+    boolean useSecretManager = (serverAuth != SIMPLE);
+    if (forceSecretManager != null) {
+      useSecretManager &= forceSecretManager.booleanValue();
+    }
+    final SecretManager<?> serverSm = useSecretManager ? sm : null;
+
+    Server server = serverUgi.doAs(new PrivilegedExceptionAction<Server>() {
+      @Override
+      public Server run() throws IOException {
+        Server server = new RPC.Builder(serverConf)
+        .setProtocol(TestSaslProtocol.class)
         .setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0)
         .setNumHandlers(5).setVerbose(true)
-        .setSecretManager((serverAuth != SIMPLE) ? sm : null)
+        .setSecretManager(serverSm)
         .build();
         server.start();
+        return server;
+      }
+    });
+
+    final Configuration clientConf = new Configuration(conf);
+    SecurityUtil.setAuthenticationMethod(clientAuth, clientConf);
+    UserGroupInformation.setConfiguration(clientConf);

     final UserGroupInformation clientUgi =
-        UserGroupInformation.createRemoteUser(
-            UserGroupInformation.getCurrentUser().getUserName()+"-CLIENT");
+        UserGroupInformation.createRemoteUser(currentUser + "-CLIENT");
+    clientUgi.setAuthenticationMethod(clientAuth);

     final InetSocketAddress addr = NetUtils.getConnectAddress(server);
     if (useToken) {
       TestTokenIdentifier tokenId = new TestTokenIdentifier(

@ -568,10 +716,6 @@ private String internalGetAuthMethod(
       clientUgi.addToken(token);
     }

-    final Configuration clientConf = new Configuration(conf);
-    SecurityUtil.setAuthenticationMethod(clientAuth, clientConf);
-    UserGroupInformation.setConfiguration(clientConf);
-
     try {
       return clientUgi.doAs(new PrivilegedExceptionAction<String>() {
         @Override

@ -581,6 +725,12 @@ public String run() throws IOException {
             proxy = (TestSaslProtocol) RPC.getProxy(TestSaslProtocol.class,
                 TestSaslProtocol.versionID, addr, clientConf);

+            proxy.ping();
+            // verify sasl completed
+            if (serverAuth != SIMPLE) {
+              assertEquals(SaslRpcServer.SASL_PROPS.get(Sasl.QOP), "auth");
+            }
+
             // make sure the other side thinks we are who we said we are!!!
             assertEquals(clientUgi.getUserName(), proxy.getAuthUser());
             return proxy.getAuthMethod().toString();
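A note on why setupKerb() registers SaslPlainServer.SecurityProvider: Sasl.createSaslServer only succeeds if some installed java.security Provider advertises a factory for the requested mechanism, and the stock JDK appears to ship a PLAIN SaslClient but no PLAIN SaslServer, which is the gap the new provider fills. A minimal sketch of that lookup, assuming org.apache.hadoop.security.SaslPlainServer from this patch is on the classpath (the class name and the no-op handler are illustrative only):

import java.security.Security;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslServer;
import org.apache.hadoop.security.SaslPlainServer;

public class PlainProviderSketch {          // hypothetical class name
  public static void main(String[] args) throws Exception {
    // Without this registration, createSaslServer("PLAIN", ...) returns null.
    Security.addProvider(new SaslPlainServer.SecurityProvider());
    CallbackHandler noop = new CallbackHandler() {
      @Override
      public void handle(Callback[] callbacks) { /* illustration only */ }
    };
    SaslServer server =
        Sasl.createSaslServer("PLAIN", null, "localhost", null, noop);
    System.out.println(server != null ? "PLAIN server registered" : "not found");
  }
}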
@ -157,6 +157,8 @@ Trunk (Unreleased)
     HDFS-4152. Add a new class BlocksMapUpdateInfo for the parameter in
     INode.collectSubtreeBlocksAndClear(..). (Jing Zhao via szetszwo)

+    HDFS-4153. Add START_MSG/SHUTDOWN_MSG for JournalNode. (liang xie via atm)
+
   OPTIMIZATIONS

   BUG FIXES

@ -223,9 +225,6 @@ Trunk (Unreleased)
     HDFS-3614. Revert unused MiniDFSCluster constructor from HDFS-3049.
     (acmurthy via eli)

-    HDFS-3625. Fix TestBackupNode by properly initializing edit log during
-    startup. (Junping Du via todd)
-
     HDFS-3792. Fix two findbugs introduced by HDFS-3695 (todd)

     HDFS-3827. TestHASafeMode#assertSafemode method should be made static.

@ -249,6 +248,9 @@ Trunk (Unreleased)
     HDFS-4106. BPServiceActor#lastHeartbeat, lastBlockReport and
     lastDeletedReport should be volatile. (Jing Zhao via suresh)

+    HDFS-4165. Faulty sanity check in FsDirectory.unprotectedSetQuota.
+    (Binglin Chang via suresh)
+
   BREAKDOWN OF HDFS-3077 SUBTASKS

     HDFS-3077. Quorum-based protocol for reading and writing edit logs.

@ -459,6 +461,9 @@ Release 2.0.3-alpha - Unreleased
     HDFS-4046. Rename ChecksumTypeProto enum NULL since it is illegal in
     C/C++. (Binglin Chang via suresh)

+    HDFS-4048. Use ERROR instead of INFO for volume failure logs.
+    (Stephen Chu via eli)
+
   OPTIMIZATIONS

   BUG FIXES

@ -561,6 +566,17 @@ Release 2.0.3-alpha - Unreleased
     HDFS-3979. For hsync, datanode should wait for the local sync to complete
     before sending ack. (Lars Hofhansl via szetszwo)

+    HDFS-3625. Fix TestBackupNode by properly initializing edit log during
+    startup. (Junping Du via todd)
+
+    HDFS-4138. BackupNode startup fails due to uninitialized edit log.
+    (Kihwal Lee via shv)
+
+    HDFS-3810. Implement format() for BKJM (Ivan Kelly via umamahesh)
+
+    HDFS-4162. Some malformed and unquoted HTML strings are returned from
+    datanode web ui. (Darek Dagit via suresh)
+
 Release 2.0.2-alpha - 2012-09-07

   INCOMPATIBLE CHANGES

@ -1942,6 +1958,9 @@ Release 0.23.5 - UNRELEASED

   INCOMPATIBLE CHANGES

+    HDFS-4080. Add a separate logger for block state change logs to enable turning
+    off those logs. (Kihwal Lee via suresh)
+
   NEW FEATURES

   IMPROVEMENTS

@ -1950,6 +1969,8 @@ Release 0.23.5 - UNRELEASED

     HDFS-4075. Reduce recommissioning overhead (Kihwal Lee via daryn)

+    HDFS-3990. NN's health report has severe performance problems (daryn)
+
   BUG FIXES

     HDFS-3829. TestHftpURLTimeouts fails intermittently with JDK7 (Trevor
@ -39,6 +39,7 @@
 import org.apache.zookeeper.CreateMode;
 import org.apache.zookeeper.ZooDefs.Ids;
 import org.apache.zookeeper.AsyncCallback.StringCallback;
+import org.apache.zookeeper.ZKUtil;

 import java.util.Collection;
 import java.util.Collections;

@ -46,6 +47,7 @@
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.io.IOException;

 import java.net.URI;

@ -142,13 +144,16 @@ public class BookKeeperJournalManager implements JournalManager {
   private final Configuration conf;
   private final BookKeeper bkc;
   private final CurrentInprogress ci;
+  private final String basePath;
   private final String ledgerPath;
+  private final String versionPath;
   private final MaxTxId maxTxId;
   private final int ensembleSize;
   private final int quorumSize;
   private final String digestpw;
   private final CountDownLatch zkConnectLatch;
   private final NamespaceInfo nsInfo;
+  private boolean initialized = false;
   private LedgerHandle currentLedger = null;

   /**

@ -160,16 +165,16 @@ public BookKeeperJournalManager(Configuration conf, URI uri,
     this.nsInfo = nsInfo;

     String zkConnect = uri.getAuthority().replace(";", ",");
-    String zkPath = uri.getPath();
+    basePath = uri.getPath();
     ensembleSize = conf.getInt(BKJM_BOOKKEEPER_ENSEMBLE_SIZE,
                                BKJM_BOOKKEEPER_ENSEMBLE_SIZE_DEFAULT);
     quorumSize = conf.getInt(BKJM_BOOKKEEPER_QUORUM_SIZE,
                              BKJM_BOOKKEEPER_QUORUM_SIZE_DEFAULT);

-    ledgerPath = zkPath + "/ledgers";
-    String maxTxIdPath = zkPath + "/maxtxid";
-    String currentInprogressNodePath = zkPath + "/CurrentInprogress";
-    String versionPath = zkPath + "/version";
+    ledgerPath = basePath + "/ledgers";
+    String maxTxIdPath = basePath + "/maxtxid";
+    String currentInprogressNodePath = basePath + "/CurrentInprogress";
+    versionPath = basePath + "/version";
     digestpw = conf.get(BKJM_BOOKKEEPER_DIGEST_PW,
                         BKJM_BOOKKEEPER_DIGEST_PW_DEFAULT);

@ -180,47 +185,7 @@ public BookKeeperJournalManager(Configuration conf, URI uri,
       if (!zkConnectLatch.await(6000, TimeUnit.MILLISECONDS)) {
         throw new IOException("Error connecting to zookeeper");
       }
-      if (zkc.exists(zkPath, false) == null) {
-        zkc.create(zkPath, new byte[] {'0'},
-            Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
-      }
-
-      Stat versionStat = zkc.exists(versionPath, false);
-      if (versionStat != null) {
-        byte[] d = zkc.getData(versionPath, false, versionStat);
-        VersionProto.Builder builder = VersionProto.newBuilder();
-        TextFormat.merge(new String(d, UTF_8), builder);
-        if (!builder.isInitialized()) {
-          throw new IOException("Invalid/Incomplete data in znode");
-        }
-        VersionProto vp = builder.build();
-
-        // There's only one version at the moment
-        assert vp.getLayoutVersion() == BKJM_LAYOUT_VERSION;
-
-        NamespaceInfo readns = PBHelper.convert(vp.getNamespaceInfo());
-
-        if (nsInfo.getNamespaceID() != readns.getNamespaceID() ||
-            !nsInfo.clusterID.equals(readns.getClusterID()) ||
-            !nsInfo.getBlockPoolID().equals(readns.getBlockPoolID())) {
-          String err = String.format("Environment mismatch. Running process %s"
-              +", stored in ZK %s", nsInfo, readns);
-          LOG.error(err);
-          throw new IOException(err);
-        }
-      } else if (nsInfo.getNamespaceID() > 0) {
-        VersionProto.Builder builder = VersionProto.newBuilder();
-        builder.setNamespaceInfo(PBHelper.convert(nsInfo))
-          .setLayoutVersion(BKJM_LAYOUT_VERSION);
-        byte[] data = TextFormat.printToString(builder.build()).getBytes(UTF_8);
-        zkc.create(versionPath, data,
-            Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
-      }
-
-      if (zkc.exists(ledgerPath, false) == null) {
-        zkc.create(ledgerPath, new byte[] {'0'},
-            Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
-      }
       prepareBookKeeperEnv();
       bkc = new BookKeeper(new ClientConfiguration(), zkc);
     } catch (KeeperException e) {

@ -244,6 +209,7 @@ private void prepareBookKeeperEnv() throws IOException {
         BKJM_ZK_LEDGERS_AVAILABLE_PATH_DEFAULT);
     final CountDownLatch zkPathLatch = new CountDownLatch(1);

+    final AtomicBoolean success = new AtomicBoolean(false);
     StringCallback callback = new StringCallback() {
       @Override
       public void processResult(int rc, String path, Object ctx, String name) {

@ -251,22 +217,23 @@ public void processResult(int rc, String path, Object ctx, String name) {
             || KeeperException.Code.NODEEXISTS.intValue() == rc) {
           LOG.info("Successfully created bookie available path : "
               + zkAvailablePath);
-          zkPathLatch.countDown();
+          success.set(true);
         } else {
           KeeperException.Code code = KeeperException.Code.get(rc);
-          LOG
-              .error("Error : "
+          LOG.error("Error : "
                   + KeeperException.create(code, path).getMessage()
                   + ", failed to create bookie available path : "
                   + zkAvailablePath);
         }
+        zkPathLatch.countDown();
       }
     };
     ZkUtils.createFullPathOptimistic(zkc, zkAvailablePath, new byte[0],
         Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, callback, null);

     try {
-      if (!zkPathLatch.await(zkc.getSessionTimeout(), TimeUnit.MILLISECONDS)) {
+      if (!zkPathLatch.await(zkc.getSessionTimeout(), TimeUnit.MILLISECONDS)
+          || !success.get()) {
         throw new IOException("Couldn't create bookie available path :"
             + zkAvailablePath + ", timed out " + zkc.getSessionTimeout()
             + " millis");

@ -281,19 +248,101 @@ public void processResult(int rc, String path, Object ctx, String name) {

   @Override
   public void format(NamespaceInfo ns) throws IOException {
-    // Currently, BKJM automatically formats itself when first accessed.
-    // TODO: change over to explicit formatting so that the admin can
-    // clear out the BK storage when reformatting a cluster.
-    LOG.info("Not formatting " + this + " - BKJM does not currently " +
-        "support reformatting. If it has not been used before, it will" +
-        "be formatted automatically upon first use.");
+    try {
+      // delete old info
+      Stat baseStat = null;
+      Stat ledgerStat = null;
+      if ((baseStat = zkc.exists(basePath, false)) != null) {
+        if ((ledgerStat = zkc.exists(ledgerPath, false)) != null) {
+          for (EditLogLedgerMetadata l : getLedgerList(true)) {
+            try {
+              bkc.deleteLedger(l.getLedgerId());
+            } catch (BKException.BKNoSuchLedgerExistsException bke) {
+              LOG.warn("Ledger " + l.getLedgerId() + " does not exist;"
+                  + " Cannot delete.");
+            }
+          }
+        }
+        ZKUtil.deleteRecursive(zkc, basePath);
+      }
+
+      // should be clean now.
+      zkc.create(basePath, new byte[] {'0'},
+          Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+
+      VersionProto.Builder builder = VersionProto.newBuilder();
+      builder.setNamespaceInfo(PBHelper.convert(ns))
+        .setLayoutVersion(BKJM_LAYOUT_VERSION);
+
+      byte[] data = TextFormat.printToString(builder.build()).getBytes(UTF_8);
+      zkc.create(versionPath, data,
+          Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+
+      zkc.create(ledgerPath, new byte[] {'0'},
+          Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
+    } catch (KeeperException ke) {
+      LOG.error("Error accessing zookeeper to format", ke);
+      throw new IOException("Error accessing zookeeper to format", ke);
+    } catch (InterruptedException ie) {
+      Thread.currentThread().interrupt();
+      throw new IOException("Interrupted during format", ie);
+    } catch (BKException bke) {
+      throw new IOException("Error cleaning up ledgers during format", bke);
+    }
   }

   @Override
   public boolean hasSomeData() throws IOException {
-    // Don't confirm format on BKJM, since format() is currently a
-    // no-op anyway
-    return false;
+    try {
+      return zkc.exists(basePath, false) != null;
+    } catch (KeeperException ke) {
+      throw new IOException("Couldn't contact zookeeper", ke);
+    } catch (InterruptedException ie) {
+      Thread.currentThread().interrupt();
+      throw new IOException("Interrupted while checking for data", ie);
+    }
+  }
+
+  synchronized private void checkEnv() throws IOException {
+    if (!initialized) {
+      try {
+        Stat versionStat = zkc.exists(versionPath, false);
+        if (versionStat == null) {
+          throw new IOException("Environment not initialized. "
+              +"Have you forgotten to format?");
+        }
+        byte[] d = zkc.getData(versionPath, false, versionStat);
+
+        VersionProto.Builder builder = VersionProto.newBuilder();
+        TextFormat.merge(new String(d, UTF_8), builder);
+        if (!builder.isInitialized()) {
+          throw new IOException("Invalid/Incomplete data in znode");
+        }
+        VersionProto vp = builder.build();
+
+        // There's only one version at the moment
+        assert vp.getLayoutVersion() == BKJM_LAYOUT_VERSION;
+
+        NamespaceInfo readns = PBHelper.convert(vp.getNamespaceInfo());
+
+        if (nsInfo.getNamespaceID() != readns.getNamespaceID() ||
+            !nsInfo.clusterID.equals(readns.getClusterID()) ||
+            !nsInfo.getBlockPoolID().equals(readns.getBlockPoolID())) {
+          String err = String.format("Environment mismatch. Running process %s"
+              +", stored in ZK %s", nsInfo, readns);
+          LOG.error(err);
+          throw new IOException(err);
+        }
+
+        ci.init();
+        initialized = true;
+      } catch (KeeperException ke) {
+        throw new IOException("Cannot access ZooKeeper", ke);
+      } catch (InterruptedException ie) {
+        Thread.currentThread().interrupt();
+        throw new IOException("Interrupted while checking environment", ie);
      }
+    }
+  }

   /**

@ -307,6 +356,8 @@ public boolean hasSomeData() throws IOException {
    */
   @Override
   public EditLogOutputStream startLogSegment(long txId) throws IOException {
+    checkEnv();
+
     if (txId <= maxTxId.get()) {
       throw new IOException("We've already seen " + txId
           + ". A new stream cannot be created with it");

@ -384,6 +435,8 @@ private void cleanupLedger(LedgerHandle lh) {
   @Override
   public void finalizeLogSegment(long firstTxId, long lastTxId)
       throws IOException {
+    checkEnv();
+
     String inprogressPath = inprogressZNode(firstTxId);
     try {
       Stat inprogressStat = zkc.exists(inprogressPath, false);

@ -537,6 +590,8 @@ long getNumberOfTransactions(long fromTxId, boolean inProgressOk)

   @Override
   public void recoverUnfinalizedSegments() throws IOException {
+    checkEnv();
+
     synchronized (this) {
       try {
         List<String> children = zkc.getChildren(ledgerPath, false);

@ -589,6 +644,8 @@ public void recoverUnfinalizedSegments() throws IOException {
   @Override
   public void purgeLogsOlderThan(long minTxIdToKeep)
       throws IOException {
+    checkEnv();
+
     for (EditLogLedgerMetadata l : getLedgerList(false)) {
       if (l.getLastTxId() < minTxIdToKeep) {
         try {
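The format()/checkEnv() pair above follows a simple contract: format() writes a version znode under basePath, and every journal operation first verifies that znode exists before touching ledgers. A stripped-down sketch of the same guard using only the ZooKeeper client API (the znode path and class name below are hypothetical, for illustration only):

import java.io.IOException;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class VersionGuardSketch {                      // hypothetical class name
  private final ZooKeeper zk;
  private final String versionPath = "/myjournal/version"; // hypothetical path
  private volatile boolean initialized = false;

  VersionGuardSketch(ZooKeeper zk) {
    this.zk = zk;
  }

  // Called at the start of every journal operation, mirroring checkEnv().
  synchronized void checkEnv() throws IOException {
    if (initialized) {
      return;
    }
    try {
      Stat stat = zk.exists(versionPath, false);
      if (stat == null) {
        throw new IOException("Environment not initialized. "
            + "Have you forgotten to format?");
      }
      initialized = true;
    } catch (KeeperException ke) {
      throw new IOException("Cannot access ZooKeeper", ke);
    } catch (InterruptedException ie) {
      Thread.currentThread().interrupt();
      throw new IOException("Interrupted while checking environment", ie);
    }
  }
}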
@@ -56,6 +56,9 @@ class CurrentInprogress {
   CurrentInprogress(ZooKeeper zkc, String lockpath) throws IOException {
     this.currentInprogressNode = lockpath;
     this.zkc = zkc;
+  }
+
+  void init() throws IOException {
     try {
       Stat isCurrentInprogressNodeExists = zkc.exists(currentInprogressNode,
           false);
@@ -96,15 +99,14 @@ void update(String path) throws IOException {
         this.versionNumberForPermission);
     } catch (KeeperException e) {
       throw new IOException("Exception when setting the data "
-          + "[layout version number,hostname,inprogressNode path]= [" + content
-          + "] to CurrentInprogress. ", e);
+          + "[" + content + "] to CurrentInprogress. ", e);
     } catch (InterruptedException e) {
       throw new IOException("Interrupted while setting the data "
-          + "[layout version number,hostname,inprogressNode path]= [" + content
-          + "] to CurrentInprogress", e);
+          + "[" + content + "] to CurrentInprogress", e);
     }
-    LOG.info("Updated data[layout version number,hostname,inprogressNode path]"
-        + "= [" + content + "] to CurrentInprogress");
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Updated data[" + content + "] to CurrentInprogress");
+    }
   }
 
   /**
@@ -136,7 +138,7 @@ String read() throws IOException {
       }
       return builder.build().getPath();
     } else {
-      LOG.info("No data available in CurrentInprogress");
+      LOG.debug("No data available in CurrentInprogress");
     }
     return null;
   }
@@ -152,7 +154,7 @@ void clear() throws IOException {
       throw new IOException(
           "Interrupted when setting the data to CurrentInprogress node", e);
     }
-    LOG.info("Cleared the data from CurrentInprogress");
+    LOG.debug("Cleared the data from CurrentInprogress");
   }
 
 }
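The CurrentInprogress hunks above split construction from initialization: the ZooKeeper lookup that used to run in the constructor now lives in the new init(). A minimal usage sketch of the two-step lifecycle, written in the style of the updated tests; the ZooKeeper address and znode path are made up, and the caller is assumed to sit in the same package as this package-private class.

// A sketch only, not part of the patch.
import org.apache.zookeeper.ZooKeeper;

public class CurrentInprogressSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zkc = new ZooKeeper("localhost:2181", 3000, null);
    CurrentInprogress ci = new CurrentInprogress(zkc, "/hdfsjournal/CurrentInprogress");
    ci.init();                       // formerly done inside the constructor
    ci.update("inprogress_0000001"); // record the in-progress segment znode
    System.out.println(ci.read());   // returns the value recorded above
    ci.clear();
    zkc.close();
  }
}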
@@ -149,6 +149,7 @@ public void testWithConfiguringBKAvailablePath() throws Exception {
     bkjm = new BookKeeperJournalManager(conf,
         URI.create("bookkeeper://" + HOSTPORT + "/hdfsjournal-WithBKPath"),
         nsi);
+    bkjm.format(nsi);
     Assert.assertNotNull("Bookie available path : " + bkAvailablePath
         + " doesn't exists", zkc.exists(bkAvailablePath, false));
   }
@@ -166,6 +167,7 @@ public void testDefaultBKAvailablePath() throws Exception {
     bkjm = new BookKeeperJournalManager(conf,
         URI.create("bookkeeper://" + HOSTPORT + "/hdfsjournal-DefaultBKPath"),
         nsi);
+    bkjm.format(nsi);
     Assert.assertNotNull("Bookie available path : " + BK_ROOT_PATH
         + " doesn't exists", zkc.exists(BK_ROOT_PATH, false));
   }
@@ -29,8 +29,16 @@
 import java.io.IOException;
 import java.net.URI;
 import java.util.List;
+import java.util.ArrayList;
 import java.util.Random;
 
+import java.util.concurrent.Executors;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
 import org.apache.hadoop.conf.Configuration;
 
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
@@ -90,6 +98,7 @@ public void testSimpleWrite() throws Exception {
     NamespaceInfo nsi = newNSInfo();
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
         BKJMUtil.createJournalURI("/hdfsjournal-simplewrite"), nsi);
+    bkjm.format(nsi);
 
     EditLogOutputStream out = bkjm.startLogSegment(1);
     for (long i = 1 ; i <= 100; i++) {
@@ -112,6 +121,8 @@ public void testNumberOfTransactions() throws Exception {
 
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
         BKJMUtil.createJournalURI("/hdfsjournal-txncount"), nsi);
+    bkjm.format(nsi);
+
     EditLogOutputStream out = bkjm.startLogSegment(1);
     for (long i = 1 ; i <= 100; i++) {
       FSEditLogOp op = FSEditLogTestUtil.getNoOpInstance();
@@ -130,6 +141,7 @@ public void testNumberOfTransactionsWithGaps() throws Exception {
     NamespaceInfo nsi = newNSInfo();
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
         BKJMUtil.createJournalURI("/hdfsjournal-gaps"), nsi);
+    bkjm.format(nsi);
 
     long txid = 1;
     for (long i = 0; i < 3; i++) {
@@ -167,6 +179,7 @@ public void testNumberOfTransactionsWithInprogressAtEnd() throws Exception {
     NamespaceInfo nsi = newNSInfo();
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
         BKJMUtil.createJournalURI("/hdfsjournal-inprogressAtEnd"), nsi);
+    bkjm.format(nsi);
 
     long txid = 1;
     for (long i = 0; i < 3; i++) {
@@ -208,6 +221,7 @@ public void testWriteRestartFrom1() throws Exception {
     NamespaceInfo nsi = newNSInfo();
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
         BKJMUtil.createJournalURI("/hdfsjournal-restartFrom1"), nsi);
+    bkjm.format(nsi);
 
     long txid = 1;
     long start = txid;
@@ -266,6 +280,7 @@ public void testTwoWriters() throws Exception {
 
     BookKeeperJournalManager bkjm1 = new BookKeeperJournalManager(conf,
         BKJMUtil.createJournalURI("/hdfsjournal-dualWriter"), nsi);
+    bkjm1.format(nsi);
 
     BookKeeperJournalManager bkjm2 = new BookKeeperJournalManager(conf,
         BKJMUtil.createJournalURI("/hdfsjournal-dualWriter"), nsi);
@@ -288,6 +303,7 @@ public void testSimpleRead() throws Exception {
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
         BKJMUtil.createJournalURI("/hdfsjournal-simpleread"),
         nsi);
+    bkjm.format(nsi);
 
     final long numTransactions = 10000;
     EditLogOutputStream out = bkjm.startLogSegment(1);
@@ -315,6 +331,7 @@ public void testSimpleRecovery() throws Exception {
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
         BKJMUtil.createJournalURI("/hdfsjournal-simplerecovery"),
         nsi);
+    bkjm.format(nsi);
 
     EditLogOutputStream out = bkjm.startLogSegment(1);
     for (long i = 1 ; i <= 100; i++) {
@@ -365,6 +382,7 @@ public void testAllBookieFailure() throws Exception {
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
         BKJMUtil.createJournalURI("/hdfsjournal-allbookiefailure"),
         nsi);
+    bkjm.format(nsi);
     EditLogOutputStream out = bkjm.startLogSegment(txid);
 
     for (long i = 1 ; i <= 3; i++) {
@@ -450,6 +468,7 @@ public void testOneBookieFailure() throws Exception {
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
         BKJMUtil.createJournalURI("/hdfsjournal-onebookiefailure"),
         nsi);
+    bkjm.format(nsi);
 
     EditLogOutputStream out = bkjm.startLogSegment(txid);
     for (long i = 1 ; i <= 3; i++) {
@@ -500,6 +519,7 @@ public void testEmptyInprogressNode() throws Exception {
     NamespaceInfo nsi = newNSInfo();
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri,
         nsi);
+    bkjm.format(nsi);
 
     EditLogOutputStream out = bkjm.startLogSegment(1);
     for (long i = 1; i <= 100; i++) {
@@ -541,6 +561,7 @@ public void testCorruptInprogressNode() throws Exception {
     NamespaceInfo nsi = newNSInfo();
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri,
         nsi);
+    bkjm.format(nsi);
 
     EditLogOutputStream out = bkjm.startLogSegment(1);
     for (long i = 1; i <= 100; i++) {
@@ -583,6 +604,7 @@ public void testEmptyInprogressLedger() throws Exception {
     NamespaceInfo nsi = newNSInfo();
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri,
         nsi);
+    bkjm.format(nsi);
 
     EditLogOutputStream out = bkjm.startLogSegment(1);
     for (long i = 1; i <= 100; i++) {
@@ -622,6 +644,7 @@ public void testRefinalizeAlreadyFinalizedInprogress() throws Exception {
     NamespaceInfo nsi = newNSInfo();
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri,
         nsi);
+    bkjm.format(nsi);
 
     EditLogOutputStream out = bkjm.startLogSegment(1);
     for (long i = 1; i <= 100; i++) {
@@ -669,6 +692,7 @@ public void testEditLogFileNotExistsWhenReadingMetadata() throws Exception {
     NamespaceInfo nsi = newNSInfo();
     BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf, uri,
         nsi);
+    bkjm.format(nsi);
 
     try {
       // start new inprogress log segment with txid=1
@@ -697,6 +721,81 @@ public void testEditLogFileNotExistsWhenReadingMetadata() throws Exception {
     }
   }
 
+  private enum ThreadStatus {
+    COMPLETED, GOODEXCEPTION, BADEXCEPTION;
+  };
+
+  /**
+   * Tests that concurrent calls to format will still allow one to succeed.
+   */
+  @Test
+  public void testConcurrentFormat() throws Exception {
+    final URI uri = BKJMUtil.createJournalURI("/hdfsjournal-concurrentformat");
+    final NamespaceInfo nsi = newNSInfo();
+
+    // populate with data first
+    BookKeeperJournalManager bkjm
+      = new BookKeeperJournalManager(conf, uri, nsi);
+    bkjm.format(nsi);
+    for (int i = 1; i < 100*2; i += 2) {
+      bkjm.startLogSegment(i);
+      bkjm.finalizeLogSegment(i, i+1);
+    }
+    bkjm.close();
+
+    final int numThreads = 40;
+    List<Callable<ThreadStatus>> threads
+      = new ArrayList<Callable<ThreadStatus>>();
+    final CyclicBarrier barrier = new CyclicBarrier(numThreads);
+
+    for (int i = 0; i < numThreads; i++) {
+      threads.add(new Callable<ThreadStatus>() {
+        public ThreadStatus call() {
+          BookKeeperJournalManager bkjm = null;
+          try {
+            bkjm = new BookKeeperJournalManager(conf, uri, nsi);
+            barrier.await();
+            bkjm.format(nsi);
+            return ThreadStatus.COMPLETED;
+          } catch (IOException ioe) {
+            LOG.info("Exception formatting ", ioe);
+            return ThreadStatus.GOODEXCEPTION;
+          } catch (InterruptedException ie) {
+            LOG.error("Interrupted. Something is broken", ie);
+            Thread.currentThread().interrupt();
+            return ThreadStatus.BADEXCEPTION;
+          } catch (Exception e) {
+            LOG.error("Some other bad exception", e);
+            return ThreadStatus.BADEXCEPTION;
+          } finally {
+            if (bkjm != null) {
+              try {
+                bkjm.close();
+              } catch (IOException ioe) {
+                LOG.error("Error closing journal manager", ioe);
+              }
+            }
+          }
+        }
+      });
+    }
+    ExecutorService service = Executors.newFixedThreadPool(numThreads);
+    List<Future<ThreadStatus>> statuses = service.invokeAll(threads, 60,
+        TimeUnit.SECONDS);
+    int numCompleted = 0;
+    for (Future<ThreadStatus> s : statuses) {
+      assertTrue(s.isDone());
+      assertTrue("Thread threw invalid exception",
+          s.get() == ThreadStatus.COMPLETED
+          || s.get() == ThreadStatus.GOODEXCEPTION);
+      if (s.get() == ThreadStatus.COMPLETED) {
+        numCompleted++;
+      }
+    }
+    LOG.info("Completed " + numCompleted + " formats");
+    assertTrue("No thread managed to complete formatting", numCompleted > 0);
+  }
+
   private String startAndFinalizeLogSegment(BookKeeperJournalManager bkjm,
       int startTxid, int endTxid) throws IOException, KeeperException,
       InterruptedException {
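Condensed from the test hunks above: every BookKeeperJournalManager is now formatted before its first log segment is opened. A sketch of that lifecycle, reusing the helpers the test class is assumed to provide (conf, newNSInfo, BKJMUtil); the journal path below is made up.

    NamespaceInfo nsi = newNSInfo();
    BookKeeperJournalManager bkjm = new BookKeeperJournalManager(conf,
        BKJMUtil.createJournalURI("/hdfsjournal-lifecycle"), nsi);
    bkjm.format(nsi);                       // now required before startLogSegment
    EditLogOutputStream out = bkjm.startLogSegment(1);
    // ... write ops 1..100 to out and close it ...
    bkjm.finalizeLogSegment(1, 100);
    bkjm.close();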
@@ -118,6 +118,7 @@ public void teardown() throws Exception {
   public void testReadShouldReturnTheZnodePathAfterUpdate() throws Exception {
     String data = "inprogressNode";
     CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
+    ci.init();
     ci.update(data);
     String inprogressNodePath = ci.read();
     assertEquals("Not returning inprogressZnode", "inprogressNode",
@@ -131,6 +132,7 @@ public void testReadShouldReturnTheZnodePathAfterUpdate() throws Exception {
   @Test
   public void testReadShouldReturnNullAfterClear() throws Exception {
     CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
+    ci.init();
     ci.update("myInprogressZnode");
     ci.read();
     ci.clear();
@@ -146,6 +148,7 @@ public void testReadShouldReturnNullAfterClear() throws Exception {
   public void testUpdateShouldFailWithIOEIfVersionNumberChangedAfterRead()
       throws Exception {
     CurrentInprogress ci = new CurrentInprogress(zkc, CURRENT_NODE_PATH);
+    ci.init();
     ci.update("myInprogressZnode");
     assertEquals("Not returning myInprogressZnode", "myInprogressZnode", ci
         .read());
@@ -154,4 +157,4 @@ public void testUpdateShouldFailWithIOEIfVersionNumberChangedAfterRead()
     ci.update("myInprogressZnode");
   }
 
 }
@@ -38,7 +38,8 @@ public class DatanodeID implements Comparable<DatanodeID> {
   public static final DatanodeID[] EMPTY_ARRAY = {};
 
   private String ipAddr;     // IP address
-  private String hostName;   // hostname
+  private String hostName;   // hostname claimed by datanode
+  private String peerHostName; // hostname from the actual connection
   private String storageID;  // unique per cluster storageID
   private int xferPort;      // data streaming port
   private int infoPort;      // info server port
@@ -51,6 +52,7 @@ public DatanodeID(DatanodeID from) {
         from.getXferPort(),
         from.getInfoPort(),
         from.getIpcPort());
+    this.peerHostName = from.getPeerHostName();
   }
 
   /**
@@ -76,6 +78,10 @@ public void setIpAddr(String ipAddr) {
     this.ipAddr = ipAddr;
   }
 
+  public void setPeerHostName(String peerHostName) {
+    this.peerHostName = peerHostName;
+  }
+
   public void setStorageID(String storageID) {
     this.storageID = storageID;
   }
@@ -94,6 +100,13 @@ public String getHostName() {
     return hostName;
   }
 
+  /**
+   * @return hostname from the actual connection
+   */
+  public String getPeerHostName() {
+    return peerHostName;
+  }
+
   /**
    * @return IP:xferPort string
    */
@@ -202,6 +215,7 @@ public String toString() {
   public void updateRegInfo(DatanodeID nodeReg) {
     ipAddr = nodeReg.getIpAddr();
     hostName = nodeReg.getHostName();
+    peerHostName = nodeReg.getPeerHostName();
     xferPort = nodeReg.getXferPort();
     infoPort = nodeReg.getInfoPort();
     ipcPort = nodeReg.getIpcPort();
@@ -35,6 +35,7 @@
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -230,6 +231,7 @@ public void reportErrorOnFile(File f) {
   }
 
   public static void main(String[] args) throws Exception {
+    StringUtils.startupShutdownMessage(JournalNode.class, args, LOG);
     System.exit(ToolRunner.run(new JournalNode(), args));
   }
 }
@@ -233,7 +233,7 @@ public void initializeBlockRecovery(long recoveryId) {
     setBlockUCState(BlockUCState.UNDER_RECOVERY);
     blockRecoveryId = recoveryId;
     if (replicas.size() == 0) {
-      NameNode.stateChangeLog.warn("BLOCK*"
+      NameNode.blockStateChangeLog.warn("BLOCK*"
         + " BlockInfoUnderConstruction.initLeaseRecovery:"
         + " No blocks found, lease removed.");
     }
@@ -245,7 +245,7 @@ public void initializeBlockRecovery(long recoveryId) {
         primaryNodeIndex = j;
         DatanodeDescriptor primary = replicas.get(j).getExpectedLocation();
         primary.addBlockToBeRecovered(this);
-        NameNode.stateChangeLog.info("BLOCK* " + this
+        NameNode.blockStateChangeLog.info("BLOCK* " + this
           + " recovery started, primary=" + primary);
         return;
       }
@@ -83,6 +83,7 @@
 public class BlockManager {
 
   static final Log LOG = LogFactory.getLog(BlockManager.class);
+  static final Log blockLog = NameNode.blockStateChangeLog;
 
   /** Default load factor of map */
   public static final float DEFAULT_MAP_LOAD_FACTOR = 0.75f;
@@ -872,7 +873,7 @@ private BlocksWithLocations getBlocksWithLocations(final DatanodeID datanode,
       final long size) throws UnregisteredNodeException {
     final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
     if (node == null) {
-      NameNode.stateChangeLog.warn("BLOCK* getBlocks: "
+      blockLog.warn("BLOCK* getBlocks: "
           + "Asking for blocks from an unrecorded node " + datanode);
       throw new HadoopIllegalArgumentException(
           "Datanode " + datanode + " not found.");
@@ -950,7 +951,7 @@ private void addToInvalidates(Block b) {
       datanodes.append(node).append(" ");
     }
     if (datanodes.length() != 0) {
-      NameNode.stateChangeLog.info("BLOCK* addToInvalidates: " + b + " "
+      blockLog.info("BLOCK* addToInvalidates: " + b + " "
           + datanodes);
     }
   }
@@ -971,7 +972,7 @@ public void findAndMarkBlockAsCorrupt(final ExtendedBlock blk,
       // ignore the request for now. This could happen when BlockScanner
       // thread of Datanode reports bad block before Block reports are sent
       // by the Datanode on startup
-      NameNode.stateChangeLog.info("BLOCK* findAndMarkBlockAsCorrupt: "
+      blockLog.info("BLOCK* findAndMarkBlockAsCorrupt: "
           + blk + " not found");
       return;
     }
@@ -988,7 +989,7 @@ private void markBlockAsCorrupt(BlockToMarkCorrupt b,
 
     BlockCollection bc = b.corrupted.getBlockCollection();
     if (bc == null) {
-      NameNode.stateChangeLog.info("BLOCK markBlockAsCorrupt: " + b
+      blockLog.info("BLOCK markBlockAsCorrupt: " + b
           + " cannot be marked as corrupt as it does not belong to any file");
       addToInvalidates(b.corrupted, node);
       return;
@@ -1013,7 +1014,7 @@ private void markBlockAsCorrupt(BlockToMarkCorrupt b,
    */
   private void invalidateBlock(BlockToMarkCorrupt b, DatanodeInfo dn
       ) throws IOException {
-    NameNode.stateChangeLog.info("BLOCK* invalidateBlock: " + b + " on " + dn);
+    blockLog.info("BLOCK* invalidateBlock: " + b + " on " + dn);
     DatanodeDescriptor node = getDatanodeManager().getDatanode(dn);
     if (node == null) {
       throw new IOException("Cannot invalidate " + b
@@ -1023,7 +1024,7 @@ private void invalidateBlock(BlockToMarkCorrupt b, DatanodeInfo dn
     // Check how many copies we have of the block
     NumberReplicas nr = countNodes(b.stored);
     if (nr.replicasOnStaleNodes() > 0) {
-      NameNode.stateChangeLog.info("BLOCK* invalidateBlocks: postponing " +
+      blockLog.info("BLOCK* invalidateBlocks: postponing " +
           "invalidation of " + b + " on " + dn + " because " +
           nr.replicasOnStaleNodes() + " replica(s) are located on nodes " +
           "with potentially out-of-date block reports");
@@ -1033,12 +1034,12 @@ private void invalidateBlock(BlockToMarkCorrupt b, DatanodeInfo dn
       // If we have at least one copy on a live node, then we can delete it.
       addToInvalidates(b.corrupted, dn);
       removeStoredBlock(b.stored, node);
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug("BLOCK* invalidateBlocks: "
+      if(blockLog.isDebugEnabled()) {
+        blockLog.debug("BLOCK* invalidateBlocks: "
             + b + " on " + dn + " listed for deletion.");
       }
     } else {
-      NameNode.stateChangeLog.info("BLOCK* invalidateBlocks: " + b
+      blockLog.info("BLOCK* invalidateBlocks: " + b
           + " on " + dn + " is the only copy and was not deleted");
     }
   }
@@ -1160,7 +1161,7 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
             (blockHasEnoughRacks(block)) ) {
           neededReplications.remove(block, priority); // remove from neededReplications
           neededReplications.decrementReplicationIndex(priority);
-          NameNode.stateChangeLog.info("BLOCK* Removing " + block
+          blockLog.info("BLOCK* Removing " + block
               + " from neededReplications as it has enough replicas");
           continue;
         }
@@ -1235,7 +1236,7 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
           neededReplications.remove(block, priority); // remove from neededReplications
           neededReplications.decrementReplicationIndex(priority);
           rw.targets = null;
-          NameNode.stateChangeLog.info("BLOCK* Removing " + block
+          blockLog.info("BLOCK* Removing " + block
               + " from neededReplications as it has enough replicas");
           continue;
         }
@@ -1261,8 +1262,8 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
         // The reason we use 'pending' is so we can retry
         // replications that fail after an appropriate amount of time.
         pendingReplications.increment(block, targets.length);
-        if(NameNode.stateChangeLog.isDebugEnabled()) {
-          NameNode.stateChangeLog.debug(
+        if(blockLog.isDebugEnabled()) {
+          blockLog.debug(
               "BLOCK* block " + block
               + " is moved from neededReplications to pendingReplications");
         }
@@ -1278,7 +1279,7 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
       namesystem.writeUnlock();
     }
 
-    if (NameNode.stateChangeLog.isInfoEnabled()) {
+    if (blockLog.isInfoEnabled()) {
       // log which blocks have been scheduled for replication
       for(ReplicationWork rw : work){
         DatanodeDescriptor[] targets = rw.targets;
@@ -1288,13 +1289,13 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
           targetList.append(' ');
           targetList.append(targets[k]);
         }
-        NameNode.stateChangeLog.info("BLOCK* ask " + rw.srcNode
+        blockLog.info("BLOCK* ask " + rw.srcNode
            + " to replicate " + rw.block + " to " + targetList);
       }
     }
   }
-    if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug(
+    if(blockLog.isDebugEnabled()) {
+        blockLog.debug(
           "BLOCK* neededReplications = " + neededReplications.size()
           + " pendingReplications = " + pendingReplications.size());
     }
@@ -1504,7 +1505,7 @@ public void processReport(final DatanodeID nodeID, final String poolId,
     // To minimize startup time, we discard any second (or later) block reports
     // that we receive while still in startup phase.
     if (namesystem.isInStartupSafeMode() && !node.isFirstBlockReport()) {
-      NameNode.stateChangeLog.info("BLOCK* processReport: "
+      blockLog.info("BLOCK* processReport: "
           + "discarded non-initial block report from " + nodeID
           + " because namenode still in startup phase");
       return;
@@ -1536,7 +1537,7 @@ public void processReport(final DatanodeID nodeID, final String poolId,
 
     // Log the block report processing stats from Namenode perspective
     NameNode.getNameNodeMetrics().addBlockReport((int) (endTime - startTime));
-    NameNode.stateChangeLog.info("BLOCK* processReport: from "
+    blockLog.info("BLOCK* processReport: from "
         + nodeID + ", blocks: " + newReport.getNumberOfBlocks()
        + ", processing time: " + (endTime - startTime) + " msecs");
   }
@@ -1596,7 +1597,7 @@ private void processReport(final DatanodeDescriptor node,
       addStoredBlock(b, node, null, true);
     }
     for (Block b : toInvalidate) {
-      NameNode.stateChangeLog.info("BLOCK* processReport: "
+      blockLog.info("BLOCK* processReport: "
          + b + " on " + node + " size " + b.getNumBytes()
           + " does not belong to any file");
       addToInvalidates(b, node);
@@ -2034,7 +2035,7 @@ private Block addStoredBlock(final BlockInfo block,
     }
     if (storedBlock == null || storedBlock.getBlockCollection() == null) {
       // If this block does not belong to anyfile, then we are done.
-      NameNode.stateChangeLog.info("BLOCK* addStoredBlock: " + block + " on "
+      blockLog.info("BLOCK* addStoredBlock: " + block + " on "
           + node + " size " + block.getNumBytes()
           + " but it does not belong to any file");
       // we could add this block to invalidate set of this datanode.
@@ -2056,7 +2057,7 @@ private Block addStoredBlock(final BlockInfo block,
       }
     } else {
       curReplicaDelta = 0;
-      NameNode.stateChangeLog.warn("BLOCK* addStoredBlock: "
+      blockLog.warn("BLOCK* addStoredBlock: "
          + "Redundant addStoredBlock request received for " + storedBlock
           + " on " + node + " size " + storedBlock.getNumBytes());
     }
@@ -2115,7 +2116,7 @@ private Block addStoredBlock(final BlockInfo block,
   }
 
   private void logAddStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
-    if (!NameNode.stateChangeLog.isInfoEnabled()) {
+    if (!blockLog.isInfoEnabled()) {
       return;
     }
 
@@ -2126,7 +2127,7 @@ private void logAddStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
     storedBlock.appendStringTo(sb);
     sb.append(" size " )
       .append(storedBlock.getNumBytes());
-    NameNode.stateChangeLog.info(sb);
+    blockLog.info(sb);
   }
   /**
    * Invalidate corrupt replicas.
@@ -2153,7 +2154,7 @@ private void invalidateCorruptReplicas(BlockInfo blk) {
       try {
         invalidateBlock(new BlockToMarkCorrupt(blk, null), node);
       } catch (IOException e) {
-        NameNode.stateChangeLog.info("invalidateCorruptReplicas "
+        blockLog.info("invalidateCorruptReplicas "
            + "error in deleting bad block " + blk + " on " + node, e);
         gotException = true;
       }
@@ -2391,7 +2392,7 @@ private void chooseExcessReplicates(Collection<DatanodeDescriptor> nonExcess,
       // upon giving instructions to the namenode.
       //
       addToInvalidates(b, cur);
-      NameNode.stateChangeLog.info("BLOCK* chooseExcessReplicates: "
+      blockLog.info("BLOCK* chooseExcessReplicates: "
                 +"("+cur+", "+b+") is added to invalidated blocks set");
     }
   }
@@ -2405,8 +2406,8 @@ private void addToExcessReplicate(DatanodeInfo dn, Block block) {
     }
     if (excessBlocks.add(block)) {
      excessBlocksCount++;
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug("BLOCK* addToExcessReplicate:"
+      if(blockLog.isDebugEnabled()) {
+        blockLog.debug("BLOCK* addToExcessReplicate:"
            + " (" + dn + ", " + block
            + ") is added to excessReplicateMap");
      }
@@ -2418,15 +2419,15 @@ private void addToExcessReplicate(DatanodeInfo dn, Block block) {
    * removed block is still valid.
    */
   public void removeStoredBlock(Block block, DatanodeDescriptor node) {
-    if(NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("BLOCK* removeStoredBlock: "
+    if(blockLog.isDebugEnabled()) {
+      blockLog.debug("BLOCK* removeStoredBlock: "
          + block + " from " + node);
    }
     assert (namesystem.hasWriteLock());
     {
       if (!blocksMap.removeNode(block, node)) {
-        if(NameNode.stateChangeLog.isDebugEnabled()) {
-          NameNode.stateChangeLog.debug("BLOCK* removeStoredBlock: "
+        if(blockLog.isDebugEnabled()) {
+          blockLog.debug("BLOCK* removeStoredBlock: "
              + block + " has already been removed from node " + node);
        }
         return;
@@ -2453,8 +2454,8 @@ public void removeStoredBlock(Block block, DatanodeDescriptor node) {
       if (excessBlocks != null) {
         if (excessBlocks.remove(block)) {
           excessBlocksCount--;
-          if(NameNode.stateChangeLog.isDebugEnabled()) {
-            NameNode.stateChangeLog.debug("BLOCK* removeStoredBlock: "
+          if(blockLog.isDebugEnabled()) {
+            blockLog.debug("BLOCK* removeStoredBlock: "
                + block + " is removed from excessBlocks");
          }
           if (excessBlocks.size() == 0) {
@@ -2497,7 +2498,7 @@ void addBlock(DatanodeDescriptor node, Block block, String delHint)
     if (delHint != null && delHint.length() != 0) {
       delHintNode = datanodeManager.getDatanode(delHint);
       if (delHintNode == null) {
-        NameNode.stateChangeLog.warn("BLOCK* blockReceived: " + block
+        blockLog.warn("BLOCK* blockReceived: " + block
            + " is expected to be removed from an unrecorded node " + delHint);
      }
    }
@@ -2532,7 +2533,7 @@ private void processAndHandleReportedBlock(DatanodeDescriptor node, Block block,
       addStoredBlock(b, node, delHintNode, true);
     }
     for (Block b : toInvalidate) {
-      NameNode.stateChangeLog.info("BLOCK* addBlock: block "
+      blockLog.info("BLOCK* addBlock: block "
          + b + " on " + node + " size " + b.getNumBytes()
          + " does not belong to any file");
       addToInvalidates(b, node);
@@ -2558,7 +2559,7 @@ public void processIncrementalBlockReport(final DatanodeID nodeID,
     try {
       final DatanodeDescriptor node = datanodeManager.getDatanode(nodeID);
       if (node == null || !node.isAlive) {
-        NameNode.stateChangeLog
+        blockLog
            .warn("BLOCK* processIncrementalBlockReport"
                + " is received from dead or unregistered node "
                + nodeID);
@@ -2585,19 +2586,19 @@ public void processIncrementalBlockReport(final DatanodeID nodeID,
           String msg =
             "Unknown block status code reported by " + nodeID +
             ": " + rdbi;
-          NameNode.stateChangeLog.warn(msg);
+          blockLog.warn(msg);
           assert false : msg; // if assertions are enabled, throw.
           break;
         }
-        if (NameNode.stateChangeLog.isDebugEnabled()) {
-          NameNode.stateChangeLog.debug("BLOCK* block "
+        if (blockLog.isDebugEnabled()) {
+          blockLog.debug("BLOCK* block "
             + (rdbi.getStatus()) + ": " + rdbi.getBlock()
             + " is received from " + nodeID);
         }
       }
     } finally {
       namesystem.writeUnlock();
-      NameNode.stateChangeLog
+      blockLog
          .debug("*BLOCK* NameNode.processIncrementalBlockReport: " + "from "
              + nodeID
              + " receiving: " + receiving + ", "
@@ -2890,8 +2891,8 @@ private int invalidateWorkForOneNode(String nodeId) {
     } finally {
       namesystem.writeUnlock();
     }
-    if (NameNode.stateChangeLog.isInfoEnabled()) {
-      NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
+    if (blockLog.isInfoEnabled()) {
+      blockLog.info("BLOCK* " + getClass().getSimpleName()
          + ": ask " + dn + " to delete " + toInvalidate);
    }
     return toInvalidate.size();
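The BlockManager hunks above route the per-block messages to the new blockLog alias of NameNode.blockStateChangeLog instead of NameNode.stateChangeLog, so block state chatter can be tuned separately from the rest of the namenode state change log. A minimal sketch of doing that programmatically with the log4j 1.x API; the logger name "BlockStateChange" is an assumption about how blockStateChangeLog is registered, not something this diff shows.

// A sketch only, not part of the patch.
import org.apache.log4j.Level;
import org.apache.log4j.Logger;

public class QuietBlockStateLog {
  public static void main(String[] args) {
    // Raise the assumed block state change logger to WARN to suppress routine
    // per-block INFO/DEBUG traffic while leaving the regular namenode state
    // change log untouched.
    Logger.getLogger("BlockStateChange").setLevel(Level.WARN);
  }
}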
@@ -63,13 +63,13 @@ public void addToCorruptReplicasMap(Block blk, DatanodeDescriptor dn,
 
     if (!nodes.contains(dn)) {
       nodes.add(dn);
-      NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
+      NameNode.blockStateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
                                    blk.getBlockName() +
                                    " added as corrupt on " + dn +
                                    " by " + Server.getRemoteIp() +
                                    reasonText);
     } else {
-      NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
+      NameNode.blockStateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
                                    "duplicate requested for " +
                                    blk.getBlockName() + " to add as corrupt " +
                                    "on " + dn +
@@ -540,28 +540,16 @@ private void removeDecomNodeFromList(final List<DatanodeDescriptor> nodeList) {
   private static boolean checkInList(final DatanodeID node,
       final Set<String> hostsList,
       final boolean isExcludeList) {
-    final InetAddress iaddr;
-
-    try {
-      iaddr = InetAddress.getByName(node.getIpAddr());
-    } catch (UnknownHostException e) {
-      LOG.warn("Unknown IP: " + node.getIpAddr(), e);
-      return isExcludeList;
-    }
-
     // if include list is empty, host is in include list
     if ( (!isExcludeList) && (hostsList.isEmpty()) ){
       return true;
     }
-    return // compare ipaddress(:port)
-      (hostsList.contains(iaddr.getHostAddress().toString()))
-      || (hostsList.contains(iaddr.getHostAddress().toString() + ":"
-          + node.getXferPort()))
-      // compare hostname(:port)
-      || (hostsList.contains(iaddr.getHostName()))
-      || (hostsList.contains(iaddr.getHostName() + ":" + node.getXferPort()))
-      || ((node instanceof DatanodeInfo) && hostsList
-          .contains(((DatanodeInfo) node).getHostName()));
+    for (String name : getNodeNamesForHostFiltering(node)) {
+      if (hostsList.contains(name)) {
+        return true;
+      }
+    }
+    return false;
   }
 
   /**
@@ -644,16 +632,20 @@ private String newStorageID() {
    */
   public void registerDatanode(DatanodeRegistration nodeReg)
       throws DisallowedDatanodeException {
-    String dnAddress = Server.getRemoteAddress();
-    if (dnAddress == null) {
-      // Mostly called inside an RPC.
-      // But if not, use address passed by the data-node.
-      dnAddress = nodeReg.getIpAddr();
+    InetAddress dnAddress = Server.getRemoteIp();
+    if (dnAddress != null) {
+      // Mostly called inside an RPC, update ip and peer hostname
+      String hostname = dnAddress.getHostName();
+      String ip = dnAddress.getHostAddress();
+      if (hostname.equals(ip)) {
+        LOG.warn("Unresolved datanode registration from " + ip);
+        throw new DisallowedDatanodeException(nodeReg);
+      }
+      // update node registration with the ip and hostname from rpc request
+      nodeReg.setIpAddr(ip);
+      nodeReg.setPeerHostName(hostname);
     }
 
-    // Update the IP to the address of the RPC request that is
-    // registering this datanode.
-    nodeReg.setIpAddr(dnAddress);
     nodeReg.setExportedKeys(blockManager.getBlockKeys());
 
     // Checks if the node is not on the hosts list.  If it is not, then
@@ -1033,19 +1025,8 @@ public List<DatanodeDescriptor> getDatanodeListForReport(
       if ( (isDead && listDeadNodes) || (!isDead && listLiveNodes) ) {
           nodes.add(dn);
       }
-      // Remove any nodes we know about from the map
-      try {
-        InetAddress inet = InetAddress.getByName(dn.getIpAddr());
-        // compare hostname(:port)
-        mustList.remove(inet.getHostName());
-        mustList.remove(inet.getHostName()+":"+dn.getXferPort());
-        // compare ipaddress(:port)
-        mustList.remove(inet.getHostAddress().toString());
-        mustList.remove(inet.getHostAddress().toString()+ ":" +dn.getXferPort());
-      } catch (UnknownHostException e) {
-        mustList.remove(dn.getName());
-        mustList.remove(dn.getIpAddr());
-        LOG.warn(e);
+      for (String name : getNodeNamesForHostFiltering(dn)) {
+        mustList.remove(name);
       }
     }
   }
@@ -1066,6 +1047,25 @@ public List<DatanodeDescriptor> getDatanodeListForReport(
     return nodes;
   }
 
+  private static List<String> getNodeNamesForHostFiltering(DatanodeID node) {
+    String ip = node.getIpAddr();
+    String regHostName = node.getHostName();
+    int xferPort = node.getXferPort();
+
+    List<String> names = new ArrayList<String>();
+    names.add(ip);
+    names.add(ip + ":" + xferPort);
+    names.add(regHostName);
+    names.add(regHostName + ":" + xferPort);
+
+    String peerHostName = node.getPeerHostName();
+    if (peerHostName != null) {
+      names.add(peerHostName);
+      names.add(peerHostName + ":" + xferPort);
+    }
+    return names;
+  }
+
   private void setDatanodeDead(DatanodeDescriptor node) {
     node.setLastUpdate(0);
   }
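The DatanodeManager hunks above replace the ad-hoc include/exclude matching with getNodeNamesForHostFiltering, fed by the peer hostname recorded at registration. A standalone sketch (not the Hadoop class) of the same matching rule, with made-up addresses, showing that a list entry naming the peer hostname now matches:

// A sketch only, not part of the patch.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class HostFilterSketch {
  // Mirror of the rule above: every known name of the node, with and without
  // the transfer port, is a candidate for an include/exclude match.
  static List<String> namesFor(String ip, String regHost, String peerHost, int xferPort) {
    List<String> names = new ArrayList<String>(Arrays.asList(
        ip, ip + ":" + xferPort, regHost, regHost + ":" + xferPort));
    if (peerHost != null) {
      names.add(peerHost);
      names.add(peerHost + ":" + xferPort);
    }
    return names;
  }

  public static void main(String[] args) {
    Set<String> excludes = new HashSet<String>(Arrays.asList("dn7.rack1.example.com"));
    boolean listed = false;
    for (String name : namesFor("10.0.0.7", "dn7", "dn7.rack1.example.com", 50010)) {
      listed |= excludes.contains(name);
    }
    System.out.println("excluded? " + listed); // true: matched via the peer hostname
  }
}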
@@ -86,7 +86,7 @@ synchronized void add(final Block block, final DatanodeInfo datanode,
     if (set.add(block)) {
       numBlocks++;
       if (log) {
-        NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
+        NameNode.blockStateChangeLog.info("BLOCK* " + getClass().getSimpleName()
             + ": add " + block + " to " + datanode);
       }
     }
@@ -190,8 +190,8 @@ synchronized boolean add(Block block,
     int priLevel = getPriority(block, curReplicas, decomissionedReplicas,
                                expectedReplicas);
     if(priorityQueues.get(priLevel).add(block)) {
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug(
+      if(NameNode.blockStateChangeLog.isDebugEnabled()) {
+        NameNode.blockStateChangeLog.debug(
           "BLOCK* NameSystem.UnderReplicationBlock.add:"
           + block
           + " has only " + curReplicas
@@ -233,8 +233,8 @@ synchronized boolean remove(Block block,
   boolean remove(Block block, int priLevel) {
     if(priLevel >= 0 && priLevel < LEVEL
         && priorityQueues.get(priLevel).remove(block)) {
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug(
+      if(NameNode.blockStateChangeLog.isDebugEnabled()) {
+        NameNode.blockStateChangeLog.debug(
           "BLOCK* NameSystem.UnderReplicationBlock.remove: "
           + "Removing block " + block
           + " from priority queue "+ priLevel);
@@ -245,8 +245,8 @@ boolean remove(Block block, int priLevel) {
       // not found in the queue for the given priority level.
       for (int i = 0; i < LEVEL; i++) {
         if (priorityQueues.get(i).remove(block)) {
-          if(NameNode.stateChangeLog.isDebugEnabled()) {
-            NameNode.stateChangeLog.debug(
+          if(NameNode.blockStateChangeLog.isDebugEnabled()) {
+            NameNode.blockStateChangeLog.debug(
               "BLOCK* NameSystem.UnderReplicationBlock.remove: "
               + "Removing block " + block
              + " from priority queue "+ i);
@@ -296,8 +296,8 @@ synchronized void update(Block block, int curReplicas,
       remove(block, oldPri);
     }
     if(priorityQueues.get(curPri).add(block)) {
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug(
+      if(NameNode.blockStateChangeLog.isDebugEnabled()) {
+        NameNode.blockStateChangeLog.debug(
           "BLOCK* NameSystem.UnderReplicationBlock.update:"
           + block
          + " has only "+ curReplicas
@@ -408,15 +408,15 @@ public static void printPathWithLinks(String dir, JspWriter out,
         if (!parts[i].equals("")) {
           tempPath.append(parts[i]);
           out.print("<a href=\"browseDirectory.jsp" + "?dir="
-              + tempPath.toString() + "&namenodeInfoPort=" + namenodeInfoPort
+              + HtmlQuoting.quoteHtmlChars(tempPath.toString()) + "&namenodeInfoPort=" + namenodeInfoPort
              + getDelegationTokenUrlParam(tokenString)
              + getUrlParam(NAMENODE_ADDRESS, nnAddress));
-          out.print("\">" + parts[i] + "</a>" + Path.SEPARATOR);
+          out.print("\">" + HtmlQuoting.quoteHtmlChars(parts[i]) + "</a>" + Path.SEPARATOR);
           tempPath.append(Path.SEPARATOR);
         }
       }
       if(parts.length > 0) {
-        out.print(parts[parts.length-1]);
+        out.print(HtmlQuoting.quoteHtmlChars(parts[parts.length-1]));
       }
     }
     catch (UnsupportedEncodingException ex) {
@@ -431,16 +431,16 @@ public static void printGotoForm(JspWriter out,
       String nnAddress) throws IOException {
     out.print("<form action=\"browseDirectory.jsp\" method=\"get\" name=\"goto\">");
     out.print("Goto : ");
-    out.print("<input name=\"dir\" type=\"text\" width=\"50\" id\"dir\" value=\""+ file+"\">");
-    out.print("<input name=\"go\" type=\"submit\" value=\"go\">");
+    out.print("<input name=\"dir\" type=\"text\" width=\"50\" id=\"dir\" value=\""+ HtmlQuoting.quoteHtmlChars(file)+"\"/>");
+    out.print("<input name=\"go\" type=\"submit\" value=\"go\"/>");
     out.print("<input name=\"namenodeInfoPort\" type=\"hidden\" "
-        + "value=\"" + namenodeInfoPort + "\">");
+        + "value=\"" + namenodeInfoPort + "\"/>");
     if (UserGroupInformation.isSecurityEnabled()) {
       out.print("<input name=\"" + DELEGATION_PARAMETER_NAME
-                + "\" type=\"hidden\" value=\"" + tokenString + "\">");
+                + "\" type=\"hidden\" value=\"" + tokenString + "\"/>");
     }
     out.print("<input name=\""+ NAMENODE_ADDRESS +"\" type=\"hidden\" "
-        + "value=\"" + nnAddress + "\">");
+        + "value=\"" + nnAddress + "\"/>");
     out.print("</form>");
   }
@@ -433,7 +433,7 @@ public StorageState analyzeStorage(StartupOption startOpt, Storage storage)
 if (!root.exists()) {
 // storage directory does not exist
 if (startOpt != StartupOption.FORMAT) {
-LOG.info("Storage directory " + rootPath + " does not exist");
+LOG.warn("Storage directory " + rootPath + " does not exist");
 return StorageState.NON_EXISTENT;
 }
 LOG.info(rootPath + " does not exist. Creating ...");
@@ -442,15 +442,15 @@ public StorageState analyzeStorage(StartupOption startOpt, Storage storage)
 }
 // or is inaccessible
 if (!root.isDirectory()) {
-LOG.info(rootPath + "is not a directory");
+LOG.warn(rootPath + "is not a directory");
 return StorageState.NON_EXISTENT;
 }
 if (!root.canWrite()) {
-LOG.info("Cannot access storage directory " + rootPath);
+LOG.warn("Cannot access storage directory " + rootPath);
 return StorageState.NON_EXISTENT;
 }
 } catch(SecurityException ex) {
-LOG.info("Cannot access storage directory " + rootPath, ex);
+LOG.warn("Cannot access storage directory " + rootPath, ex);
 return StorageState.NON_EXISTENT;
 }

@@ -43,6 +43,7 @@
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.http.HtmlQuoting;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -119,7 +120,7 @@ static void generateDirectoryStructure(JspWriter out,
 String target = dir;
 final HdfsFileStatus targetStatus = dfs.getFileInfo(target);
 if (targetStatus == null) { // not exists
-out.print("<h3>File or directory : " + target + " does not exist</h3>");
+out.print("<h3>File or directory : " + StringEscapeUtils.escapeHtml(target) + " does not exist</h3>");
 JspHelper.printGotoForm(out, namenodeInfoPort, tokenString, target,
 nnAddr);
 } else {
@@ -203,7 +204,7 @@ static void generateDirectoryStructure(JspWriter out,
 + JspHelper.getDelegationTokenUrlParam(tokenString)
 + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
 cols[0] = "<a href=\"" + datanodeUrl + "\">"
-+ localFileName + "</a>";
++ HtmlQuoting.quoteHtmlChars(localFileName) + "</a>";
 cols[5] = lsDateFormat.format(new Date((files[i]
 .getModificationTime())));
 cols[6] = files[i].getPermission().toString();

@@ -137,7 +137,7 @@ synchronized List<FsVolumeImpl> checkDirs() {
 if (removedVols != null && removedVols.size() > 0) {
 // Replace volume list
 volumes = Collections.unmodifiableList(volumeList);
-FsDatasetImpl.LOG.info("Completed checkDirs. Removed " + removedVols.size()
+FsDatasetImpl.LOG.warn("Completed checkDirs. Removed " + removedVols.size()
 + " volumes. Current volumes: " + this);
 }

@@ -78,10 +78,6 @@ public class BackupNode extends NameNode {
 String nnHttpAddress;
 /** Checkpoint manager */
 Checkpointer checkpointManager;
-/** ClusterID to which BackupNode belongs to */
-String clusterId;
-/** Block pool Id of the peer namenode of this BackupNode */
-String blockPoolId;

 BackupNode(Configuration conf, NamenodeRole role) throws IOException {
 super(conf, role);
@@ -145,6 +141,7 @@ protected void initialize(Configuration conf) throws IOException {
 CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
 NamespaceInfo nsInfo = handshake(conf);
 super.initialize(conf);
+namesystem.setBlockPoolId(nsInfo.getBlockPoolID());

 if (false == namesystem.isInSafeMode()) {
 namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
@@ -154,9 +151,6 @@ protected void initialize(Configuration conf) throws IOException {
 // therefore lease hard limit should never expire.
 namesystem.leaseManager.setLeasePeriod(
 HdfsConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE);

-clusterId = nsInfo.getClusterID();
-blockPoolId = nsInfo.getBlockPoolID();
-
 // register with the active name-node
 registerWith(nsInfo);
@@ -219,7 +213,7 @@ void stop(boolean reportError) {
 }

 /* @Override */// NameNode
-public boolean setSafeMode(@SuppressWarnings("unused") SafeModeAction action)
+public boolean setSafeMode(SafeModeAction action)
 throws IOException {
 throw new UnsupportedActionException("setSafeMode");
 }
@@ -415,14 +409,6 @@ private static NamespaceInfo handshake(NamenodeProtocol namenode)
 return nsInfo;
 }

-String getBlockPoolId() {
-return blockPoolId;
-}
-
-String getClusterId() {
-return clusterId;
-}
-
 @Override
 protected NameNodeHAContext createHAContext() {
 return new BNHAContext();

@@ -1938,9 +1938,9 @@ INodeDirectory unprotectedSetQuota(String src, long nsQuota, long dsQuota)
 assert hasWriteLock();
 // sanity check
 if ((nsQuota < 0 && nsQuota != HdfsConstants.QUOTA_DONT_SET &&
-nsQuota < HdfsConstants.QUOTA_RESET) ||
+nsQuota != HdfsConstants.QUOTA_RESET) ||
 (dsQuota < 0 && dsQuota != HdfsConstants.QUOTA_DONT_SET &&
-dsQuota < HdfsConstants.QUOTA_RESET)) {
+dsQuota != HdfsConstants.QUOTA_RESET)) {
 throw new IllegalArgumentException("Illegal value for nsQuota or " +
 "dsQuota : " + nsQuota + " and " +
 dsQuota);

@@ -3550,7 +3550,7 @@ public long getTransactionsSinceLastCheckpoint() {
 @Metric({"TransactionsSinceLastLogRoll",
 "Number of transactions since last edit log roll"})
 public long getTransactionsSinceLastLogRoll() {
-if (isInStandbyState()) {
+if (isInStandbyState() || !getEditLog().isSegmentOpen()) {
 return 0;
 } else {
 return getEditLog().getLastWrittenTxId() -

@@ -227,6 +227,7 @@ public long getProtocolVersion(String protocol,
 public static final int DEFAULT_PORT = 8020;
 public static final Log LOG = LogFactory.getLog(NameNode.class.getName());
 public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.hdfs.StateChange");
+public static final Log blockStateChangeLog = LogFactory.getLog("BlockStateChange");
 public static final HAState ACTIVE_STATE = new ActiveState();
 public static final HAState STANDBY_STATE = new StandbyState();

@@ -132,6 +132,7 @@ class NameNodeRpcServer implements NamenodeProtocols {

 private static final Log LOG = NameNode.LOG;
 private static final Log stateChangeLog = NameNode.stateChangeLog;
+private static final Log blockStateChangeLog = NameNode.blockStateChangeLog;

 // Dependencies from other parts of NN.
 protected final FSNamesystem namesystem;
@@ -889,8 +890,8 @@ public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
 String poolId, StorageBlockReport[] reports) throws IOException {
 verifyRequest(nodeReg);
 BlockListAsLongs blist = new BlockListAsLongs(reports[0].getBlocks());
-if(stateChangeLog.isDebugEnabled()) {
-stateChangeLog.debug("*BLOCK* NameNode.blockReport: "
+if(blockStateChangeLog.isDebugEnabled()) {
+blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: "
 + "from " + nodeReg + " " + blist.getNumberOfBlocks()
 + " blocks");
 }
@@ -905,8 +906,8 @@ public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
 public void blockReceivedAndDeleted(DatanodeRegistration nodeReg, String poolId,
 StorageReceivedDeletedBlocks[] receivedAndDeletedBlocks) throws IOException {
 verifyRequest(nodeReg);
-if(stateChangeLog.isDebugEnabled()) {
-stateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: "
+if(blockStateChangeLog.isDebugEnabled()) {
+blockStateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: "
 +"from "+nodeReg+" "+receivedAndDeletedBlocks.length
 +" blocks.");
 }

@@ -46,6 +46,7 @@
 public class TestDatanodeDeath {
 {
 ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
+((Log4JLogger)NameNode.blockStateChangeLog).getLogger().setLevel(Level.ALL);
 ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
 ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
 ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);

@@ -17,12 +17,12 @@
 */
 package org.apache.hadoop.hdfs;

-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
+import static org.junit.Assert.*;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;

 import java.net.InetSocketAddress;
+import java.security.Permission;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -31,6 +31,7 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -46,6 +47,64 @@ public class TestDatanodeRegistration {

 public static final Log LOG = LogFactory.getLog(TestDatanodeRegistration.class);

+private static class MonitorDNS extends SecurityManager {
+int lookups = 0;
+@Override
+public void checkPermission(Permission perm) {}
+@Override
+public void checkConnect(String host, int port) {
+if (port == -1) {
+lookups++;
+}
+}
+}
+
+/**
+* Ensure the datanode manager does not do host lookup after registration,
+* especially for node reports.
+* @throws Exception
+*/
+@Test
+public void testDNSLookups() throws Exception {
+MonitorDNS sm = new MonitorDNS();
+System.setSecurityManager(sm);
+
+MiniDFSCluster cluster = null;
+try {
+HdfsConfiguration conf = new HdfsConfiguration();
+cluster = new MiniDFSCluster.Builder(conf).numDataNodes(8).build();
+cluster.waitActive();
+
+int initialLookups = sm.lookups;
+assertTrue("dns security manager is active", initialLookups != 0);
+
+DatanodeManager dm =
+cluster.getNamesystem().getBlockManager().getDatanodeManager();
+
+// make sure no lookups occur
+dm.refreshNodes(conf);
+assertEquals(initialLookups, sm.lookups);
+
+dm.refreshNodes(conf);
+assertEquals(initialLookups, sm.lookups);
+
+// ensure none of the reports trigger lookups
+dm.getDatanodeListForReport(DatanodeReportType.ALL);
+assertEquals(initialLookups, sm.lookups);
+
+dm.getDatanodeListForReport(DatanodeReportType.LIVE);
+assertEquals(initialLookups, sm.lookups);
+
+dm.getDatanodeListForReport(DatanodeReportType.DEAD);
+assertEquals(initialLookups, sm.lookups);
+} finally {
+if (cluster != null) {
+cluster.shutdown();
+}
+System.setSecurityManager(null);
+}
+}
+
 /**
 * Regression test for HDFS-894 ensures that, when datanodes
 * are restarted, the new IPC port is registered with the

@@ -51,6 +51,7 @@ public class TestFileAppend2 {

 {
 ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
+((Log4JLogger)NameNode.blockStateChangeLog).getLogger().setLevel(Level.ALL);
 ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
 ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
 ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);

@@ -19,13 +19,20 @@

 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.doAnswer;

 import java.io.IOException;
+import java.io.StringReader;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;

 import javax.servlet.ServletContext;
 import javax.servlet.http.HttpServletRequest;
+import javax.servlet.jsp.JspWriter;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.parsers.ParserConfigurationException;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -46,10 +53,17 @@
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
 import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.xml.sax.InputSource;
+import org.xml.sax.SAXException;

 public class TestJspHelper {

 private Configuration conf = new HdfsConfiguration();
+private String jspWriterOutput = "";

 public static class DummySecretManager extends
 AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
@@ -368,7 +382,33 @@ public void testGetProxyUgi() throws IOException {
 ae.getMessage());
 }
 }
+
+@Test
+public void testPrintGotoFormWritesValidXML() throws IOException,
+ParserConfigurationException, SAXException {
+JspWriter mockJspWriter = mock(JspWriter.class);
+ArgumentCaptor<String> arg = ArgumentCaptor.forClass(String.class);
+doAnswer(new Answer<Object>() {
+@Override
+public Object answer(InvocationOnMock invok) {
+Object[] args = invok.getArguments();
+jspWriterOutput += (String) args[0];
+return null;
+}
+}).when(mockJspWriter).print(arg.capture());
+
+jspWriterOutput = "";
+
+JspHelper.printGotoForm(mockJspWriter, 424242, "a token string",
+"foobar/file", "0.0.0.0");
+
+DocumentBuilder parser =
+DocumentBuilderFactory.newInstance().newDocumentBuilder();
+InputSource is = new InputSource();
+is.setCharacterStream(new StringReader(jspWriterOutput));
+parser.parse(is);
+}
+
 private HttpServletRequest getMockRequest(String remoteUser, String user, String doAs) {
 HttpServletRequest request = mock(HttpServletRequest.class);
 when(request.getParameter(UserParam.NAME)).thenReturn(user);

@@ -194,6 +194,9 @@ Release 2.0.3-alpha - Unreleased

    MAPREDUCE-1806. CombineFileInputFormat does not work with paths not on default FS. (Gera Shegalov via tucu)

+   MAPREDUCE-4777. In TestIFile, testIFileReaderWithCodec relies on
+   testIFileWriterWithCodec. (Sandy Ryza via tomwhite)
+
 Release 2.0.2-alpha - 2012-09-07

 INCOMPATIBLE CHANGES
@@ -584,6 +587,10 @@ Release 0.23.5 - UNRELEASED
    MAPREDUCE-4752. Reduce MR AM memory usage through String Interning (Robert
    Evans via tgraves)

+   MAPREDUCE-4266. remove Ant remnants from MR (tgraves via bobby)
+
+   MAPREDUCE-4666. JVM metrics for history server (jlowe via jeagles)
+
 OPTIMIZATIONS

 BUG FIXES
@@ -634,6 +641,15 @@ Release 0.23.5 - UNRELEASED

    MAPREDUCE-4771. KeyFieldBasedPartitioner not partitioning properly when
    configured (jlowe via bobby)

+   MAPREDUCE-4772. Fetch failures can take way too long for a map to be
+   restarted (bobby)
+
+   MAPREDUCE-4782. NLineInputFormat skips first line of last InputSplit
+   (Mark Fuhs via bobby)
+
+   MAPREDUCE-4774. JobImpl does not handle asynchronous task events in FAILED
+   state (jlowe via bobby)
+
 Release 0.23.4 - UNRELEASED

@@ -1,33 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!--
-Contains utilities that are common for the main and contrib builds.
--->
-<project name="build-utils">
-
-<!-- Load properties from build properties file, if available -->
-<dirname property="build-utils.basedir" file="${ant.file.build-utils}"/>
-<property file="${build-utils.basedir}/build.properties"/>
-
-<target name="forrest.check" unless="forrest.home">
-<fail message="'forrest.home' is not defined. Please pass -Dforrest.home=<base of Apache Forrest installation> to Ant on the command-line, or set forest.home in build properties file." />
-</target>
-
-</project>
File diff suppressed because it is too large

@@ -68,6 +68,7 @@
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.Phase;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
@@ -347,6 +348,9 @@ JobEventType.JOB_KILL, new KillTasksTransition())
 .addTransition(JobStateInternal.FAILED, JobStateInternal.FAILED,
 EnumSet.of(JobEventType.JOB_KILL,
 JobEventType.JOB_UPDATED_NODES,
+JobEventType.JOB_TASK_COMPLETED,
+JobEventType.JOB_TASK_ATTEMPT_COMPLETED,
+JobEventType.JOB_MAP_TASK_RESCHEDULED,
 JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE))

 // Transitions from KILLED state
@@ -1409,16 +1413,22 @@ public void transition(JobImpl job, JobEvent event) {
 fetchFailures = (fetchFailures == null) ? 1 : (fetchFailures+1);
 job.fetchFailuresMapping.put(mapId, fetchFailures);

-//get number of running reduces
-int runningReduceTasks = 0;
+//get number of shuffling reduces
+int shufflingReduceTasks = 0;
 for (TaskId taskId : job.reduceTasks) {
-if (TaskState.RUNNING.equals(job.tasks.get(taskId).getState())) {
-runningReduceTasks++;
+Task task = job.tasks.get(taskId);
+if (TaskState.RUNNING.equals(task.getState())) {
+for(TaskAttempt attempt : task.getAttempts().values()) {
+if(attempt.getReport().getPhase() == Phase.SHUFFLE) {
+shufflingReduceTasks++;
+break;
+}
+}
 }
 }

-float failureRate = runningReduceTasks == 0 ? 1.0f :
-(float) fetchFailures / runningReduceTasks;
+float failureRate = shufflingReduceTasks == 0 ? 1.0f :
+(float) fetchFailures / shufflingReduceTasks;
 // declare faulty if fetch-failures >= max-allowed-failures
 boolean isMapFaulty =
 (failureRate >= MAX_ALLOWED_FETCH_FAILURES_FRACTION);

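The hunk above now divides the accumulated fetch failures by the number of reduces still in the SHUFFLE phase instead of by all running reduces. A minimal sketch of that arithmetic, assuming a faultiness threshold of 0.5 (the real MAX_ALLOWED_FETCH_FAILURES_FRACTION constant is not shown in this hunk, so that value is an assumption), illustrates why a lost map is now declared faulty sooner once most reduces have moved past shuffle:

// Sketch only: illustrates the failure-rate arithmetic used in the hunk above.
// The 0.5 threshold is an assumed value, not necessarily the real constant.
public class FetchFailureRateSketch {
  static boolean isMapFaulty(int fetchFailures, int shufflingReduceTasks) {
    final float maxAllowedFetchFailuresFraction = 0.5f; // assumed threshold
    float failureRate = shufflingReduceTasks == 0
        ? 1.0f
        : (float) fetchFailures / shufflingReduceTasks;
    return failureRate >= maxAllowedFetchFailuresFraction;
  }

  public static void main(String[] args) {
    // One reduce still shuffling: a single fetch failure is enough.
    System.out.println(isMapFaulty(1, 1)); // true  (1.0 >= 0.5)
    // Old behaviour divided by all three running reduces: 1/3 < 0.5.
    System.out.println(isMapFaulty(1, 3)); // false (0.33 < 0.5)
  }
}
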
@@ -18,14 +18,19 @@

 package org.apache.hadoop.mapreduce.v2.app;

+import static org.junit.Assert.assertEquals;
+
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Iterator;

 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.Counters;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.Phase;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEventStatus;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
@@ -37,6 +42,7 @@
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobTaskAttemptFetchFailureEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.junit.Assert;
 import org.junit.Test;
@@ -254,6 +260,169 @@ public void testFetchFailureWithRecovery() throws Exception {
 events = job.getTaskAttemptCompletionEvents(0, 100);
 Assert.assertEquals("Num completion events not correct", 2, events.length);
 }
+
+@Test
+public void testFetchFailureMultipleReduces() throws Exception {
+MRApp app = new MRApp(1, 3, false, this.getClass().getName(), true);
+Configuration conf = new Configuration();
+// map -> reduce -> fetch-failure -> map retry is incompatible with
+// sequential, single-task-attempt approach in uber-AM, so disable:
+conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
+Job job = app.submit(conf);
+app.waitForState(job, JobState.RUNNING);
+//all maps would be running
+Assert.assertEquals("Num tasks not correct",
+4, job.getTasks().size());
+Iterator<Task> it = job.getTasks().values().iterator();
+Task mapTask = it.next();
+Task reduceTask = it.next();
+Task reduceTask2 = it.next();
+Task reduceTask3 = it.next();
+
+//wait for Task state move to RUNNING
+app.waitForState(mapTask, TaskState.RUNNING);
+TaskAttempt mapAttempt1 = mapTask.getAttempts().values().iterator().next();
+app.waitForState(mapAttempt1, TaskAttemptState.RUNNING);
+
+//send the done signal to the map attempt
+app.getContext().getEventHandler().handle(
+new TaskAttemptEvent(mapAttempt1.getID(),
+TaskAttemptEventType.TA_DONE));
+
+// wait for map success
+app.waitForState(mapTask, TaskState.SUCCEEDED);
+
+TaskAttemptCompletionEvent[] events =
+job.getTaskAttemptCompletionEvents(0, 100);
+Assert.assertEquals("Num completion events not correct",
+1, events.length);
+Assert.assertEquals("Event status not correct",
+TaskAttemptCompletionEventStatus.SUCCEEDED, events[0].getStatus());
+
+// wait for reduce to start running
+app.waitForState(reduceTask, TaskState.RUNNING);
+app.waitForState(reduceTask2, TaskState.RUNNING);
+app.waitForState(reduceTask3, TaskState.RUNNING);
+TaskAttempt reduceAttempt =
+reduceTask.getAttempts().values().iterator().next();
+app.waitForState(reduceAttempt, TaskAttemptState.RUNNING);
+
+updateStatus(app, reduceAttempt, Phase.SHUFFLE);
+
+TaskAttempt reduceAttempt2 =
+reduceTask2.getAttempts().values().iterator().next();
+app.waitForState(reduceAttempt2, TaskAttemptState.RUNNING);
+updateStatus(app, reduceAttempt2, Phase.SHUFFLE);
+
+TaskAttempt reduceAttempt3 =
+reduceTask3.getAttempts().values().iterator().next();
+app.waitForState(reduceAttempt3, TaskAttemptState.RUNNING);
+updateStatus(app, reduceAttempt3, Phase.SHUFFLE);
+
+//send 3 fetch failures from reduce to trigger map re execution
+sendFetchFailure(app, reduceAttempt, mapAttempt1);
+sendFetchFailure(app, reduceAttempt, mapAttempt1);
+sendFetchFailure(app, reduceAttempt, mapAttempt1);
+
+//We should not re-launch the map task yet
+assertEquals(TaskState.SUCCEEDED, mapTask.getState());
+updateStatus(app, reduceAttempt2, Phase.REDUCE);
+updateStatus(app, reduceAttempt3, Phase.REDUCE);
+
+sendFetchFailure(app, reduceAttempt, mapAttempt1);
+
+//wait for map Task state move back to RUNNING
+app.waitForState(mapTask, TaskState.RUNNING);
+
+//map attempt must have become FAILED
+Assert.assertEquals("Map TaskAttempt state not correct",
+TaskAttemptState.FAILED, mapAttempt1.getState());
+
+Assert.assertEquals("Num attempts in Map Task not correct",
+2, mapTask.getAttempts().size());
+
+Iterator<TaskAttempt> atIt = mapTask.getAttempts().values().iterator();
+atIt.next();
+TaskAttempt mapAttempt2 = atIt.next();
+
+app.waitForState(mapAttempt2, TaskAttemptState.RUNNING);
+//send the done signal to the second map attempt
+app.getContext().getEventHandler().handle(
+new TaskAttemptEvent(mapAttempt2.getID(),
+TaskAttemptEventType.TA_DONE));
+
+// wait for map success
+app.waitForState(mapTask, TaskState.SUCCEEDED);
+
+//send done to reduce
+app.getContext().getEventHandler().handle(
+new TaskAttemptEvent(reduceAttempt.getID(),
+TaskAttemptEventType.TA_DONE));
+
+//send done to reduce
+app.getContext().getEventHandler().handle(
+new TaskAttemptEvent(reduceAttempt2.getID(),
+TaskAttemptEventType.TA_DONE));
+
+//send done to reduce
+app.getContext().getEventHandler().handle(
+new TaskAttemptEvent(reduceAttempt3.getID(),
+TaskAttemptEventType.TA_DONE));
+
+app.waitForState(job, JobState.SUCCEEDED);
+
+//previous completion event now becomes obsolete
+Assert.assertEquals("Event status not correct",
+TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
+
+events = job.getTaskAttemptCompletionEvents(0, 100);
+Assert.assertEquals("Num completion events not correct",
+6, events.length);
+Assert.assertEquals("Event map attempt id not correct",
+mapAttempt1.getID(), events[0].getAttemptId());
+Assert.assertEquals("Event map attempt id not correct",
+mapAttempt1.getID(), events[1].getAttemptId());
+Assert.assertEquals("Event map attempt id not correct",
+mapAttempt2.getID(), events[2].getAttemptId());
+Assert.assertEquals("Event reduce attempt id not correct",
+reduceAttempt.getID(), events[3].getAttemptId());
+Assert.assertEquals("Event status not correct for map attempt1",
+TaskAttemptCompletionEventStatus.OBSOLETE, events[0].getStatus());
+Assert.assertEquals("Event status not correct for map attempt1",
+TaskAttemptCompletionEventStatus.FAILED, events[1].getStatus());
+Assert.assertEquals("Event status not correct for map attempt2",
+TaskAttemptCompletionEventStatus.SUCCEEDED, events[2].getStatus());
+Assert.assertEquals("Event status not correct for reduce attempt1",
+TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus());
+
+TaskAttemptCompletionEvent mapEvents[] =
+job.getMapAttemptCompletionEvents(0, 2);
+Assert.assertEquals("Incorrect number of map events", 2, mapEvents.length);
+Assert.assertArrayEquals("Unexpected map events",
+Arrays.copyOfRange(events, 0, 2), mapEvents);
+mapEvents = job.getMapAttemptCompletionEvents(2, 200);
+Assert.assertEquals("Incorrect number of map events", 1, mapEvents.length);
+Assert.assertEquals("Unexpected map event", events[2], mapEvents[0]);
+}
+
+private void updateStatus(MRApp app, TaskAttempt attempt, Phase phase) {
+TaskAttemptStatusUpdateEvent.TaskAttemptStatus status = new TaskAttemptStatusUpdateEvent.TaskAttemptStatus();
+status.counters = new Counters();
+status.fetchFailedMaps = new ArrayList<TaskAttemptId>();
+status.id = attempt.getID();
+status.mapFinishTime = 0;
+status.outputSize = 0;
+status.phase = phase;
+status.progress = 0.5f;
+status.shuffleFinishTime = 0;
+status.sortFinishTime = 0;
+status.stateString = "OK";
+status.taskState = attempt.getState();
+TaskAttemptStatusUpdateEvent event = new TaskAttemptStatusUpdateEvent(attempt.getID(),
+status);
+app.getContext().getEventHandler().handle(event);
+}
+
 private void sendFetchFailure(MRApp app, TaskAttempt reduceAttempt,
 TaskAttempt mapAttempt) {

@@ -27,6 +27,7 @@
 import static org.mockito.Mockito.when;

 import java.io.IOException;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Map;

@@ -42,6 +43,7 @@
 import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
 import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
@@ -51,10 +53,14 @@
 import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl.InitTransition;
 import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl.JobNoTasksCompletedTransition;
 import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
+import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.SystemClock;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.InlineDispatcher;
+import org.apache.hadoop.yarn.state.StateMachine;
+import org.apache.hadoop.yarn.state.StateMachineFactory;
 import org.apache.hadoop.yarn.util.Records;
 import org.junit.Assert;
 import org.junit.Test;
@@ -340,7 +346,7 @@ null, mock(JobTokenSecretManager.class), null, null, null,
 return isUber;
 }

-private InitTransition getInitTransition() {
+private static InitTransition getInitTransition() {
 InitTransition initTransition = new InitTransition() {
 @Override
 protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) {
@@ -350,4 +356,63 @@ protected TaskSplitMetaInfo[] createSplits(JobImpl job, JobId jobId) {
 };
 return initTransition;
 }
+
+@Test
+public void testTransitionsAtFailed() throws IOException {
+Configuration conf = new Configuration();
+JobID jobID = JobID.forName("job_1234567890000_0001");
+JobId jobId = TypeConverter.toYarn(jobID);
+OutputCommitter committer = mock(OutputCommitter.class);
+doThrow(new IOException("forcefail"))
+.when(committer).setupJob(any(JobContext.class));
+InlineDispatcher dispatcher = new InlineDispatcher();
+JobImpl job = new StubbedJob(jobId, Records
+.newRecord(ApplicationAttemptId.class), conf,
+dispatcher.getEventHandler(), committer, true, null);
+
+dispatcher.register(JobEventType.class, job);
+job.handle(new JobEvent(jobId, JobEventType.JOB_INIT));
+Assert.assertEquals(JobState.FAILED, job.getState());
+
+job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_COMPLETED));
+Assert.assertEquals(JobState.FAILED, job.getState());
+job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_ATTEMPT_COMPLETED));
+Assert.assertEquals(JobState.FAILED, job.getState());
+job.handle(new JobEvent(jobId, JobEventType.JOB_MAP_TASK_RESCHEDULED));
+Assert.assertEquals(JobState.FAILED, job.getState());
+job.handle(new JobEvent(jobId, JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE));
+Assert.assertEquals(JobState.FAILED, job.getState());
+}
+
+private static class StubbedJob extends JobImpl {
+//override the init transition
+private final InitTransition initTransition = getInitTransition();
+StateMachineFactory<JobImpl, JobStateInternal, JobEventType, JobEvent> localFactory
+= stateMachineFactory.addTransition(JobStateInternal.NEW,
+EnumSet.of(JobStateInternal.INITED, JobStateInternal.FAILED),
+JobEventType.JOB_INIT,
+// This is abusive.
+initTransition);
+
+private final StateMachine<JobStateInternal, JobEventType, JobEvent>
+localStateMachine;
+
+@Override
+protected StateMachine<JobStateInternal, JobEventType, JobEvent> getStateMachine() {
+return localStateMachine;
+}
+
+public StubbedJob(JobId jobId, ApplicationAttemptId applicationAttemptId,
+Configuration conf, EventHandler eventHandler,
+OutputCommitter committer, boolean newApiCommitter, String user) {
+super(jobId, applicationAttemptId, conf, eventHandler,
+null, new JobTokenSecretManager(), new Credentials(),
+new SystemClock(), null, MRAppMetrics.create(), committer,
+newApiCommitter, user, System.currentTimeMillis(), null, null);
+
+// This "this leak" is okay because the retained pointer is in an
+// instance variable.
+localStateMachine = localFactory.make(this);
+}
+}
 }

@@ -262,6 +262,9 @@ public interface MRJobConfig {
 public static final String SHUFFLE_FETCH_FAILURES = "mapreduce.reduce.shuffle.maxfetchfailures";

 public static final String SHUFFLE_NOTIFY_READERROR = "mapreduce.reduce.shuffle.notify.readerror";

+public static final String MAX_SHUFFLE_FETCH_RETRY_DELAY = "mapreduce.reduce.shuffle.retry-delay.max.ms";
+public static final long DEFAULT_MAX_SHUFFLE_FETCH_RETRY_DELAY = 60000;
+
 public static final String REDUCE_SKIP_INCR_PROC_COUNT = "mapreduce.reduce.skip.proc-count.auto-incr";

@@ -107,25 +107,14 @@ public static List<FileSplit> getSplitsForFile(FileStatus status,
 numLines++;
 length += num;
 if (numLines == numLinesPerSplit) {
-// NLineInputFormat uses LineRecordReader, which always reads
-// (and consumes) at least one character out of its upper split
-// boundary. So to make sure that each mapper gets N lines, we
-// move back the upper split limits of each split
-// by one character here.
-if (begin == 0) {
-splits.add(new FileSplit(fileName, begin, length - 1,
-new String[] {}));
-} else {
-splits.add(new FileSplit(fileName, begin - 1, length,
-new String[] {}));
-}
+splits.add(createFileSplit(fileName, begin, length));
 begin += length;
 length = 0;
 numLines = 0;
 }
 }
 if (numLines != 0) {
-splits.add(new FileSplit(fileName, begin, length, new String[]{}));
+splits.add(createFileSplit(fileName, begin, length));
 }
 } finally {
 if (lr != null) {
@@ -134,6 +123,23 @@ public static List<FileSplit> getSplitsForFile(FileStatus status,
 }
 return splits;
 }
+
+/**
+* NLineInputFormat uses LineRecordReader, which always reads
+* (and consumes) at least one character out of its upper split
+* boundary. So to make sure that each mapper gets N lines, we
+* move back the upper split limits of each split
+* by one character here.
+* @param fileName Path of file
+* @param begin the position of the first byte in the file to process
+* @param length number of bytes in InputSplit
+* @return FileSplit
+*/
+protected static FileSplit createFileSplit(Path fileName, long begin, long length) {
+return (begin == 0)
+? new FileSplit(fileName, begin, length - 1, new String[] {})
+: new FileSplit(fileName, begin - 1, length, new String[] {});
+}

 /**
 * Set the number of lines per split

|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.io.InputStream;
|
import java.io.InputStream;
|
||||||
import java.io.OutputStream;
|
import java.io.OutputStream;
|
||||||
|
import java.net.ConnectException;
|
||||||
import java.net.HttpURLConnection;
|
import java.net.HttpURLConnection;
|
||||||
import java.net.MalformedURLException;
|
import java.net.MalformedURLException;
|
||||||
import java.net.URL;
|
import java.net.URL;
|
||||||
|
@ -283,6 +284,7 @@ protected void copyFromHost(MapHost host) throws IOException {
|
||||||
SecureShuffleUtils.verifyReply(replyHash, encHash, jobTokenSecret);
|
SecureShuffleUtils.verifyReply(replyHash, encHash, jobTokenSecret);
|
||||||
LOG.info("for url="+msgToEncode+" sent hash and receievd reply");
|
LOG.info("for url="+msgToEncode+" sent hash and receievd reply");
|
||||||
} catch (IOException ie) {
|
} catch (IOException ie) {
|
||||||
|
boolean connectExcpt = ie instanceof ConnectException;
|
||||||
ioErrs.increment(1);
|
ioErrs.increment(1);
|
||||||
LOG.warn("Failed to connect to " + host + " with " + remaining.size() +
|
LOG.warn("Failed to connect to " + host + " with " + remaining.size() +
|
||||||
" map outputs", ie);
|
" map outputs", ie);
|
||||||
|
@ -291,14 +293,14 @@ protected void copyFromHost(MapHost host) throws IOException {
|
||||||
// indirectly penalizing the host
|
// indirectly penalizing the host
|
||||||
if (!connectSucceeded) {
|
if (!connectSucceeded) {
|
||||||
for(TaskAttemptID left: remaining) {
|
for(TaskAttemptID left: remaining) {
|
||||||
scheduler.copyFailed(left, host, connectSucceeded);
|
scheduler.copyFailed(left, host, connectSucceeded, connectExcpt);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// If we got a read error at this stage, it implies there was a problem
|
// If we got a read error at this stage, it implies there was a problem
|
||||||
// with the first map, typically lost map. So, penalize only that map
|
// with the first map, typically lost map. So, penalize only that map
|
||||||
// and add the rest
|
// and add the rest
|
||||||
TaskAttemptID firstMap = maps.get(0);
|
TaskAttemptID firstMap = maps.get(0);
|
||||||
scheduler.copyFailed(firstMap, host, connectSucceeded);
|
scheduler.copyFailed(firstMap, host, connectSucceeded, connectExcpt);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add back all the remaining maps, WITHOUT marking them as failed
|
// Add back all the remaining maps, WITHOUT marking them as failed
|
||||||
|
@ -322,7 +324,7 @@ protected void copyFromHost(MapHost host) throws IOException {
|
||||||
if(failedTasks != null && failedTasks.length > 0) {
|
if(failedTasks != null && failedTasks.length > 0) {
|
||||||
LOG.warn("copyMapOutput failed for tasks "+Arrays.toString(failedTasks));
|
LOG.warn("copyMapOutput failed for tasks "+Arrays.toString(failedTasks));
|
||||||
for(TaskAttemptID left: failedTasks) {
|
for(TaskAttemptID left: failedTasks) {
|
||||||
scheduler.copyFailed(left, host, true);
|
scheduler.copyFailed(left, host, true, false);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -89,6 +89,7 @@ protected Long initialValue() {
|
||||||
private DecimalFormat mbpsFormat = new DecimalFormat("0.00");
|
private DecimalFormat mbpsFormat = new DecimalFormat("0.00");
|
||||||
|
|
||||||
private boolean reportReadErrorImmediately = true;
|
private boolean reportReadErrorImmediately = true;
|
||||||
|
private long maxDelay = MRJobConfig.DEFAULT_MAX_SHUFFLE_FETCH_RETRY_DELAY;
|
||||||
|
|
||||||
public ShuffleScheduler(JobConf job, TaskStatus status,
|
public ShuffleScheduler(JobConf job, TaskStatus status,
|
||||||
ExceptionReporter reporter,
|
ExceptionReporter reporter,
|
||||||
|
@ -115,6 +116,9 @@ public ShuffleScheduler(JobConf job, TaskStatus status,
|
||||||
MRJobConfig.SHUFFLE_FETCH_FAILURES, REPORT_FAILURE_LIMIT);
|
MRJobConfig.SHUFFLE_FETCH_FAILURES, REPORT_FAILURE_LIMIT);
|
||||||
this.reportReadErrorImmediately = job.getBoolean(
|
this.reportReadErrorImmediately = job.getBoolean(
|
||||||
MRJobConfig.SHUFFLE_NOTIFY_READERROR, true);
|
MRJobConfig.SHUFFLE_NOTIFY_READERROR, true);
|
||||||
|
|
||||||
|
this.maxDelay = job.getLong(MRJobConfig.MAX_SHUFFLE_FETCH_RETRY_DELAY,
|
||||||
|
MRJobConfig.DEFAULT_MAX_SHUFFLE_FETCH_RETRY_DELAY);
|
||||||
}
|
}
|
||||||
|
|
||||||
public synchronized void copySucceeded(TaskAttemptID mapId,
|
public synchronized void copySucceeded(TaskAttemptID mapId,
|
||||||
|
@ -159,7 +163,7 @@ private void updateStatus() {
   }

   public synchronized void copyFailed(TaskAttemptID mapId, MapHost host,
-      boolean readError) {
+      boolean readError, boolean connectExcpt) {
     host.penalize();
     int failures = 1;
     if (failureCounts.containsKey(mapId)) {
@ -184,12 +188,15 @@ public synchronized void copyFailed(TaskAttemptID mapId, MapHost host,
       }
     }

-    checkAndInformJobTracker(failures, mapId, readError);
+    checkAndInformJobTracker(failures, mapId, readError, connectExcpt);

     checkReducerHealth();

     long delay = (long) (INITIAL_PENALTY *
         Math.pow(PENALTY_GROWTH_RATE, failures));
+    if (delay > maxDelay) {
+      delay = maxDelay;
+    }

     penalties.add(new Penalty(host, delay));

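
The delay applied to a penalized host therefore still grows geometrically with the number of failures, but it is now clamped to the configured maximum. A minimal standalone sketch of that computation (the base penalty and growth-rate values below are illustrative assumptions, not the constants defined in ShuffleScheduler):

    // Illustrative only: INITIAL_PENALTY and PENALTY_GROWTH_RATE values are assumed;
    // the capping logic mirrors the hunk above.
    long initialPenalty = 10_000L;   // assumed 10s base penalty
    double growthRate = 1.3;         // assumed growth factor
    long maxDelay = 60_000L;         // mapreduce.reduce.shuffle.retry-delay.max.ms default
    for (int failures = 1; failures <= 10; failures++) {
      long delay = (long) (initialPenalty * Math.pow(growthRate, failures));
      if (delay > maxDelay) {
        delay = maxDelay;            // clamp instead of backing off without bound
      }
      System.out.println(failures + " failure(s) -> " + delay + " ms");
    }
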
@ -200,8 +207,9 @@ public synchronized void copyFailed(TaskAttemptID mapId, MapHost host,
   // after every read error, if 'reportReadErrorImmediately' is true or
   // after every 'maxFetchFailuresBeforeReporting' failures
   private void checkAndInformJobTracker(
-      int failures, TaskAttemptID mapId, boolean readError) {
-    if ((reportReadErrorImmediately && readError)
+      int failures, TaskAttemptID mapId, boolean readError,
+      boolean connectExcpt) {
+    if (connectExcpt || (reportReadErrorImmediately && readError)
         || ((failures % maxFetchFailuresBeforeReporting) == 0)) {
       LOG.info("Reporting fetch failure for " + mapId + " to jobtracker.");
       status.addFetchFailedMap((org.apache.hadoop.mapred.TaskAttemptID) mapId);
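
Spelled out, the reporting rule now has three triggers: a refused connection, a read error while eager reporting is enabled, or hitting the periodic failure threshold. A hedged paraphrase of the condition as a standalone helper (the method name shouldReport exists only for this illustration):

    // Illustration of the condition inside checkAndInformJobTracker above.
    static boolean shouldReport(boolean connectExcpt, boolean readError,
        boolean reportReadErrorImmediately, int failures,
        int maxFetchFailuresBeforeReporting) {
      return connectExcpt                                        // host unreachable: report at once
          || (reportReadErrorImmediately && readError)           // read error with eager reporting on
          || (failures % maxFetchFailuresBeforeReporting == 0);  // periodic threshold
    }
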
@ -110,6 +110,14 @@
   </description>
 </property>

+<property>
+  <name>mapreduce.reduce.shuffle.retry-delay.max.ms</name>
+  <value>60000</value>
+  <description>The maximum number of ms the reducer will delay before retrying
+  to download map data.
+  </description>
+</property>
+
 <property>
   <name>mapreduce.reduce.shuffle.parallelcopies</name>
   <value>5</value>
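
If a job needs a different ceiling than the 60-second default, the property can also be set on the job configuration before submission. A small sketch (the 15-second value is an arbitrary example):

    // Sketch: cap shuffle retry delay at 15 seconds for a hypothetical job.
    // Uses org.apache.hadoop.conf.Configuration and org.apache.hadoop.mapreduce.Job.
    Configuration conf = new Configuration();
    conf.setLong("mapreduce.reduce.shuffle.retry-delay.max.ms", 15000L);
    Job job = Job.getInstance(conf);
    // ... configure mapper/reducer and submit as usual ...
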
@ -118,8 +118,8 @@ public void testCopyFromHostBogusHeader() throws Exception {
         encHash);

     verify(allErrs).increment(1);
-    verify(ss).copyFailed(map1ID, host, true);
-    verify(ss).copyFailed(map2ID, host, true);
+    verify(ss).copyFailed(map1ID, host, true, false);
+    verify(ss).copyFailed(map2ID, host, true, false);

     verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map1ID));
     verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map2ID));
@ -178,8 +178,8 @@ public void testCopyFromHostWait() throws Exception {
         .addRequestProperty(SecureShuffleUtils.HTTP_HEADER_URL_HASH,
         encHash);
     verify(allErrs, never()).increment(1);
-    verify(ss, never()).copyFailed(map1ID, host, true);
-    verify(ss, never()).copyFailed(map2ID, host, true);
+    verify(ss, never()).copyFailed(map1ID, host, true, false);
+    verify(ss, never()).copyFailed(map2ID, host, true, false);

     verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map1ID));
     verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map2ID));
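
Because copyFailed now takes four arguments, the Mockito verifications in these tests must spell out the new connectExcpt argument as well; a three-argument verify would no longer match any method on the mocked ShuffleScheduler. For example (reusing the names from the test above):

    // 'false' is the new connectExcpt argument; a read error is not a refused connection.
    verify(ss).copyFailed(map1ID, host, true, false);
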
@ -27,6 +27,8 @@
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.StringUtils;
@ -106,6 +108,8 @@ protected void doSecureLogin(Configuration conf) throws IOException {

   @Override
   public void start() {
+    DefaultMetricsSystem.initialize("JobHistoryServer");
+    JvmMetrics.initSingleton("JobHistoryServer", null);
     try {
       jhsDTSecretManager.startThreads();
     } catch(IOException io) {
@ -118,6 +122,7 @@ public void start() {
   @Override
   public void stop() {
     jhsDTSecretManager.stopThreads();
+    DefaultMetricsSystem.shutdown();
     super.stop();
   }

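
The JobHistoryServer now brackets its lifecycle with the metrics system: DefaultMetricsSystem.initialize plus the JVM metrics source on start, and DefaultMetricsSystem.shutdown on stop. A minimal sketch of the same pairing in an arbitrary service (the "MyService" name and the worker-thread comments are placeholders, not part of this change):

    // Sketch of the initialize/shutdown pairing used above.
    public void start() {
      DefaultMetricsSystem.initialize("MyService");      // register the metrics system
      JvmMetrics.initSingleton("MyService", null);       // publish JVM metrics for it
      // ... start worker threads ...
    }

    public void stop() {
      // ... stop worker threads ...
      DefaultMetricsSystem.shutdown();                   // unregister on the way down
    }
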
@ -56,6 +56,10 @@ public void testIFileReaderWithCodec() throws Exception {
     Path path = new Path(new Path("build/test.ifile"), "data");
     DefaultCodec codec = new GzipCodec();
     codec.setConf(conf);
+    IFile.Writer<Text, Text> writer =
+        new IFile.Writer<Text, Text>(conf, rfs, path, Text.class, Text.class,
+                                     codec, null);
+    writer.close();
     IFile.Reader<Text, Text> reader =
         new IFile.Reader<Text, Text>(conf, rfs, path, codec, null);
     reader.close();
@ -50,37 +50,40 @@ public void testFormat() throws Exception {
     Job job = Job.getInstance(conf);
     Path file = new Path(workDir, "test.txt");

-    int seed = new Random().nextInt();
-    Random random = new Random(seed);
-
     localFs.delete(workDir, true);
     FileInputFormat.setInputPaths(job, workDir);
     int numLinesPerMap = 5;
     NLineInputFormat.setNumLinesPerSplit(job, numLinesPerMap);
-    // for a variety of lengths
     for (int length = 0; length < MAX_LENGTH;
-         length += random.nextInt(MAX_LENGTH / 10) + 1) {
+         length += 1) {

       // create a file with length entries
       Writer writer = new OutputStreamWriter(localFs.create(file));
       try {
         for (int i = 0; i < length; i++) {
-          writer.write(Integer.toString(i));
+          writer.write(Integer.toString(i)+" some more text");
           writer.write("\n");
         }
       } finally {
         writer.close();
       }
-      checkFormat(job, numLinesPerMap);
+      int lastN = 0;
+      if (length != 0) {
+        lastN = length % 5;
+        if (lastN == 0) {
+          lastN = 5;
+        }
+      }
+      checkFormat(job, numLinesPerMap, lastN);
     }
   }

-  void checkFormat(Job job, int expectedN)
+  void checkFormat(Job job, int expectedN, int lastN)
       throws IOException, InterruptedException {
     NLineInputFormat format = new NLineInputFormat();
     List<InputSplit> splits = format.getSplits(job);
-    // check all splits except last one
     int count = 0;
-    for (int i = 0; i < splits.size() -1; i++) {
+    for (int i = 0; i < splits.size(); i++) {
       assertEquals("There are no split locations", 0,
           splits.get(i).getLocations().length);
       TaskAttemptContext context = MapReduceTestUtil.
@ -104,8 +107,13 @@ void checkFormat(Job job, int expectedN)
       } finally {
         reader.close();
       }
-      assertEquals("number of lines in split is " + expectedN ,
-                   expectedN, count);
+      if ( i == splits.size() - 1) {
+        assertEquals("number of lines in split(" + i + ") is wrong" ,
+                     lastN, count);
+      } else {
+        assertEquals("number of lines in split(" + i + ") is wrong" ,
+                     expectedN, count);
+      }
     }
   }

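
The reworked test now checks the last split separately: with five lines per split, a file of length lines yields full splits of five plus a final split of length % 5 lines (or five when the length divides evenly). A tiny standalone illustration of that arithmetic, independent of the test harness:

    // Expected line count of the last NLine split for a few file lengths,
    // mirroring the lastN computation added to the test above.
    int numLinesPerMap = 5;
    for (int length : new int[] {0, 3, 5, 7, 10, 12}) {
      int lastN = 0;
      if (length != 0) {
        lastN = length % numLinesPerMap;
        if (lastN == 0) {
          lastN = numLinesPerMap;   // e.g. 10 lines -> splits of 5 and 5
        }
      }
      System.out.println(length + " line(s) -> last split has " + lastN + " line(s)");
    }
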
@ -1,167 +0,0 @@
|
||||||
<!--
|
|
||||||
Licensed to the Apache Software Foundation (ASF) under one or more
|
|
||||||
contributor license agreements. See the NOTICE file distributed with
|
|
||||||
this work for additional information regarding copyright ownership.
|
|
||||||
The ASF licenses this file to You under the Apache License, Version 2.0
|
|
||||||
(the "License"); you may not use this file except in compliance with
|
|
||||||
the License. You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
-->
|
|
||||||
|
|
||||||
<ivy-module version="1.0" xmlns:m="http://ant.apache.org/ivy/maven">
|
|
||||||
<info organisation="org.apache.hadoop" module="${ant.project.name}" revision="${version}">
|
|
||||||
<license name="Apache 2.0"/>
|
|
||||||
<ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
|
|
||||||
<description>
|
|
||||||
Hadoop Core
|
|
||||||
</description>
|
|
||||||
</info>
|
|
||||||
<configurations defaultconfmapping="default">
|
|
||||||
<!--these match the Maven configurations-->
|
|
||||||
<conf name="default" extends="master,runtime"/>
|
|
||||||
<conf name="master" description="contains the artifact but no dependencies"/>
|
|
||||||
<conf name="compile" description="contains the artifact but no dependencies"/>
|
|
||||||
<conf name="runtime" description="runtime but not the artifact"/>
|
|
||||||
|
|
||||||
<!--
|
|
||||||
These public configurations contain the core dependencies for running hadoop client or server.
|
|
||||||
The server is effectively a superset of the client.
|
|
||||||
-->
|
|
||||||
<!--Private configurations. -->
|
|
||||||
|
|
||||||
<conf name="common" visibility="private" extends="compile" description="common artifacts"/>
|
|
||||||
<conf name="mapred" visibility="private" extends="compile,runtime" description="Mapred dependent artifacts"/>
|
|
||||||
<conf name="javadoc" visibility="private" description="artiracts required while performing doc generation" extends="common"/>
|
|
||||||
<conf name="test" extends="master" visibility="private" description="the classpath needed to run tests"/>
|
|
||||||
<conf name="package" extends="master" description="the classpath needed for packaging"/>
|
|
||||||
<conf name="system" extends="test" visibility="private" description="the classpath needed to run system tests"/>
|
|
||||||
|
|
||||||
<conf name="test-hdfswithmr" extends="test" visibility="private" description="the classpath needed to run tests"/>
|
|
||||||
|
|
||||||
<conf name="releaseaudit" visibility="private" description="Artifacts required for releaseaudit target"/>
|
|
||||||
|
|
||||||
<conf name="jdiff" visibility="private" extends="common"/>
|
|
||||||
<conf name="checkstyle" visibility="private"/>
|
|
||||||
|
|
||||||
</configurations>
|
|
||||||
|
|
||||||
<publications>
|
|
||||||
<!--get the artifact from our module name-->
|
|
||||||
<artifact conf="master"/>
|
|
||||||
</publications>
|
|
||||||
<dependencies>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-annotations" rev="${hadoop-common.version}" conf="compile->default"/>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-common"
|
|
||||||
rev="${hadoop-common.version}" conf="compile->default">
|
|
||||||
<artifact name="hadoop-common" ext="jar" />
|
|
||||||
<artifact name="hadoop-common" type="tests" ext="jar" m:classifier="tests" />
|
|
||||||
</dependency>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-hdfs"
|
|
||||||
rev="${hadoop-hdfs.version}" conf="compile->default"/>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-common-instrumented"
|
|
||||||
rev="${hadoop-common.version}" conf="system->default"/>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-hdfs-instrumented"
|
|
||||||
rev="${hadoop-hdfs.version}" conf="system->default"/>
|
|
||||||
<dependency org="commons-logging" name="commons-logging"
|
|
||||||
rev="${commons-logging.version}" conf="compile->master"/>
|
|
||||||
<dependency org="org.slf4j" name="slf4j-api" rev="${slf4j-api.version}"
|
|
||||||
conf="compile->master"/>
|
|
||||||
<dependency org="org.slf4j" name="slf4j-log4j12"
|
|
||||||
rev="${slf4j-log4j12.version}" conf="mapred->master"/>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-hdfs"
|
|
||||||
rev="${hadoop-hdfs.version}" conf="test->default">
|
|
||||||
<artifact name="hadoop-hdfs" type="tests" ext="jar" m:classifier="tests"/>
|
|
||||||
</dependency>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-common"
|
|
||||||
rev="${hadoop-common.version}" conf="test->default">
|
|
||||||
<artifact name="hadoop-common" type="tests" ext="jar" m:classifier="tests" />
|
|
||||||
</dependency>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-yarn-server-common"
|
|
||||||
rev="${yarn.version}" conf="compile->default"/>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-mapreduce-client-core"
|
|
||||||
rev="${yarn.version}" conf="compile->default"/>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-mapreduce-client-common"
|
|
||||||
rev="${yarn.version}" conf="compile->default"/>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-yarn-common"
|
|
||||||
rev="${yarn.version}" conf="compile->default"/>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-mapreduce-examples"
|
|
||||||
rev="${yarn.version}" conf="compile->default"/>
|
|
||||||
<dependency org="log4j" name="log4j" rev="${log4j.version}"
|
|
||||||
conf="compile->master"/>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-mapreduce-client-jobclient"
|
|
||||||
rev="${yarn.version}" conf="compile->default">
|
|
||||||
<artifact name="hadoop-mapreduce-client-jobclient" type="tests" ext="jar" m:classifier="tests"/>
|
|
||||||
</dependency>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-rumen"
|
|
||||||
rev="${hadoop-common.version}" conf="compile->default"/>
|
|
||||||
<dependency org="org.apache.hadoop" name="hadoop-archives"
|
|
||||||
rev="${hadoop-common.version}" conf="compile->default"/>
|
|
||||||
|
|
||||||
<dependency org="checkstyle" name="checkstyle" rev="${checkstyle.version}"
|
|
||||||
conf="checkstyle->default"/>
|
|
||||||
|
|
||||||
<dependency org="jdiff" name="jdiff" rev="${jdiff.version}"
|
|
||||||
conf="jdiff->default"/>
|
|
||||||
<dependency org="xerces" name="xerces" rev="${xerces.version}"
|
|
||||||
conf="jdiff->default"/>
|
|
||||||
|
|
||||||
<dependency org="org.apache.rat" name="apache-rat-tasks"
|
|
||||||
rev="${rats-lib.version}" conf="releaseaudit->default"/>
|
|
||||||
<dependency org="commons-lang" name="commons-lang"
|
|
||||||
rev="${commons-lang.version}" conf="releaseaudit->default"/>
|
|
||||||
<dependency org="commons-collections" name="commons-collections"
|
|
||||||
rev="${commons-collections.version}"
|
|
||||||
conf="releaseaudit->default"/>
|
|
||||||
|
|
||||||
<dependency org="org.apache.lucene" name="lucene-core"
|
|
||||||
rev="${lucene-core.version}" conf="javadoc->default"/>
|
|
||||||
<dependency org="org.apache.avro" name="avro-compiler" rev="${avro.version}"
|
|
||||||
conf="compile->master">
|
|
||||||
<exclude module="ant"/>
|
|
||||||
<exclude module="jetty"/>
|
|
||||||
<exclude module="slf4j-simple"/>
|
|
||||||
</dependency>
|
|
||||||
<dependency org="org.apache.avro" name="avro" rev="${avro.version}"
|
|
||||||
conf="compile->default">
|
|
||||||
<exclude module="ant"/>
|
|
||||||
<exclude module="jetty"/>
|
|
||||||
<exclude module="slf4j-simple"/>
|
|
||||||
</dependency>
|
|
||||||
<dependency org="junit" name="junit" rev="${junit.version}"
|
|
||||||
conf="test->default"/>
|
|
||||||
<dependency org="org.mockito" name="mockito-all" rev="${mockito-all.version}"
|
|
||||||
conf="test->default"/>
|
|
||||||
<dependency org="org.vafer" name="jdeb" rev="${jdeb.version}" conf="package->master"/>
|
|
||||||
<dependency org="org.mortbay.jetty" name="jetty-servlet-tester" rev="${jetty.version}"
|
|
||||||
conf="test->default"/>
|
|
||||||
|
|
||||||
<!-- dependency for rumen anonymization -->
|
|
||||||
<dependency org="org.codehaus.jackson" name="jackson-core-asl" rev="${jackson.version}"
|
|
||||||
conf="compile->default"/>
|
|
||||||
<dependency org="org.codehaus.jackson" name="jackson-mapper-asl" rev="${jackson.version}"
|
|
||||||
conf="compile->default"/>
|
|
||||||
|
|
||||||
<!-- dependency addition for the fault injection -->
|
|
||||||
<dependency org="org.aspectj" name="aspectjrt" rev="${aspectj.version}"
|
|
||||||
conf="compile->default"/>
|
|
||||||
<dependency org="org.aspectj" name="aspectjtools" rev="${aspectj.version}"
|
|
||||||
conf="compile->default"/>
|
|
||||||
|
|
||||||
<!-- Exclusions for transitive dependencies pulled in by log4j -->
|
|
||||||
<exclude org="com.sun.jdmk"/>
|
|
||||||
<exclude org="com.sun.jmx"/>
|
|
||||||
<exclude org="javax.jms"/>
|
|
||||||
<exclude org="javax.mail"/>
|
|
||||||
<exclude org="org.apache.hadoop" module="avro"/>
|
|
||||||
<exclude org="org.apache.commons" module="commons-daemon"/>
|
|
||||||
|
|
||||||
</dependencies>
|
|
||||||
|
|
||||||
</ivy-module>
|
|
|
@ -1,28 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements. See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License. You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.apache.hadoop</groupId>
-  <artifactId>hadoop-mapred-examples</artifactId>
-  <packaging>jar</packaging>
-  <version>@version</version>
-  <dependencies/>
-</project>
@ -1,34 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements. See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License. You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.apache.hadoop</groupId>
-  <artifactId>hadoop-mapred-instrumented</artifactId>
-  <packaging>jar</packaging>
-  <version>@version</version>
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <version>3.0.0-SNAPSHOT</version>
-    </dependency>
-  </dependencies>
-</project>
@ -1,34 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements. See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License. You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.apache.hadoop</groupId>
-  <artifactId>hadoop-mapred-test-instrumented</artifactId>
-  <packaging>jar</packaging>
-  <version>@version</version>
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-mapred</artifactId>
-      <version>@version</version>
-    </dependency>
-  </dependencies>
-</project>
@ -1,34 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements. See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License. You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.apache.hadoop</groupId>
-  <artifactId>hadoop-mapred</artifactId>
-  <packaging>jar</packaging>
-  <version>@version</version>
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <version>3.0.0-SNAPSHOT</version>
-    </dependency>
-  </dependencies>
-</project>
@ -1,34 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements. See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License. You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.apache.hadoop</groupId>
-  <artifactId>hadoop-mapred-test</artifactId>
-  <packaging>jar</packaging>
-  <version>@version</version>
-  <dependencies>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-mapred</artifactId>
-      <version>@version</version>
-    </dependency>
-  </dependencies>
-</project>
@ -1,28 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements. See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License. You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
-
-  <modelVersion>4.0.0</modelVersion>
-  <groupId>org.apache.hadoop</groupId>
-  <artifactId>hadoop-mapred-tools</artifactId>
-  <packaging>jar</packaging>
-  <version>@version</version>
-  <dependencies/>
-</project>
@ -1,70 +0,0 @@
|
||||||
<ivysettings>
|
|
||||||
|
|
||||||
<!--
|
|
||||||
Licensed to the Apache Software Foundation (ASF) under one or more
|
|
||||||
contributor license agreements. See the NOTICE file distributed with
|
|
||||||
this work for additional information regarding copyright ownership.
|
|
||||||
The ASF licenses this file to You under the Apache License, Version 2.0
|
|
||||||
(the "License"); you may not use this file except in compliance with
|
|
||||||
the License. You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
-->
|
|
||||||
|
|
||||||
<!--
|
|
||||||
see http://www.jayasoft.org/ivy/doc/configuration
|
|
||||||
-->
|
|
||||||
<!-- you can override this property to use mirrors
|
|
||||||
http://repo1.maven.org/maven2/
|
|
||||||
http://mirrors.dotsrc.org/maven2
|
|
||||||
http://ftp.ggi-project.org/pub/packages/maven2
|
|
||||||
http://mirrors.sunsite.dk/maven2
|
|
||||||
http://public.planetmirror.com/pub/maven2
|
|
||||||
http://ibiblio.lsu.edu/main/pub/packages/maven2
|
|
||||||
http://www.ibiblio.net/pub/packages/maven2
|
|
||||||
-->
|
|
||||||
<property name="repo.maven.org" value="http://repo1.maven.org/maven2/" override="false"/>
|
|
||||||
<property name="snapshot.apache.org" value="https://repository.apache.org/content/repositories/snapshots/" override="false"/>
|
|
||||||
<property name="maven2.pattern" value="[organisation]/[module]/[revision]/[module]-[revision](-[classifier])"/>
|
|
||||||
<property name="repo.dir" value="${user.home}/.m2/repository"/>
|
|
||||||
<property name="maven2.pattern.ext" value="${maven2.pattern}.[ext]"/>
|
|
||||||
<property name="resolvers" value="default" override="false"/>
|
|
||||||
<property name="force-resolve" value="false" override="false"/>
|
|
||||||
<settings defaultResolver="${resolvers}"/>
|
|
||||||
|
|
||||||
<resolvers>
|
|
||||||
<ibiblio name="maven2" root="${repo.maven.org}" pattern="${maven2.pattern.ext}" m2compatible="true" checkconsistency="false"/>
|
|
||||||
<ibiblio name="apache-snapshot" root="${snapshot.apache.org}" m2compatible="true"
|
|
||||||
checkmodified="true" changingPattern=".*SNAPSHOT" checkconsistency="false"/>
|
|
||||||
|
|
||||||
<filesystem name="fs" m2compatible="true" checkconsistency="false" force="${force-resolve}">
|
|
||||||
<artifact pattern="${repo.dir}/${maven2.pattern.ext}"/>
|
|
||||||
<ivy pattern="${repo.dir}/[organisation]/[module]/[revision]/[module]-[revision].pom"/>
|
|
||||||
</filesystem>
|
|
||||||
|
|
||||||
<chain name="default" dual="true" checkmodified="true" changingPattern=".*SNAPSHOT">
|
|
||||||
<resolver ref="apache-snapshot"/>
|
|
||||||
<resolver ref="maven2"/>
|
|
||||||
</chain>
|
|
||||||
|
|
||||||
<chain name="internal" dual="true">
|
|
||||||
<resolver ref="fs"/>
|
|
||||||
<resolver ref="apache-snapshot"/>
|
|
||||||
<resolver ref="maven2"/>
|
|
||||||
</chain>
|
|
||||||
|
|
||||||
<chain name="external">
|
|
||||||
<resolver ref="maven2"/>
|
|
||||||
</chain>
|
|
||||||
|
|
||||||
</resolvers>
|
|
||||||
<modules>
|
|
||||||
<module organisation="org.apache.hadoop" name="hadoop-*" resolver="${resolvers}"/>
|
|
||||||
</modules>
|
|
||||||
</ivysettings>
|
|
|
@ -1,86 +0,0 @@
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
#This properties file lists the versions of the various artifacts used by hadoop and components.
|
|
||||||
#It drives ivy and the generation of a maven POM
|
|
||||||
|
|
||||||
#These are the versions of our dependencies (in alphabetical order)
|
|
||||||
ant-task.version=2.0.10
|
|
||||||
|
|
||||||
#Aspectj depedency for Fault injection
|
|
||||||
#This property has to be updated synchronously with aop.xml
|
|
||||||
aspectj.version=1.6.5
|
|
||||||
|
|
||||||
avro.version=1.5.2
|
|
||||||
paranamer.version=2.2
|
|
||||||
checkstyle.version=4.2
|
|
||||||
|
|
||||||
commons-cli.version=1.2
|
|
||||||
commons-collections.version=3.1
|
|
||||||
commons-httpclient.version=3.1
|
|
||||||
commons-lang.version=2.5
|
|
||||||
commons-logging.version=1.1.1
|
|
||||||
commons-logging-api.version=1.1
|
|
||||||
commons-el.version=1.0
|
|
||||||
commons-fileupload.version=1.2
|
|
||||||
commons-io.version=1.4
|
|
||||||
commons-net.version=1.4.1
|
|
||||||
core.version=3.1.1
|
|
||||||
coreplugin.version=1.3.2
|
|
||||||
|
|
||||||
ftplet-api.version=1.0.0
|
|
||||||
ftpserver-core.version=1.0.0
|
|
||||||
ftpserver-deprecated.version=1.0.0-M2
|
|
||||||
|
|
||||||
hadoop-common.version=3.0.0-SNAPSHOT
|
|
||||||
hadoop-hdfs.version=3.0.0-SNAPSHOT
|
|
||||||
|
|
||||||
hsqldb.version=1.8.0.10
|
|
||||||
|
|
||||||
ivy.version=2.2.0
|
|
||||||
|
|
||||||
jasper.version=5.5.12
|
|
||||||
jdeb.version=0.8
|
|
||||||
jsp.version=2.1
|
|
||||||
jsp-api.version=5.5.12
|
|
||||||
jets3t.version=0.7.1
|
|
||||||
jetty.version=6.1.14
|
|
||||||
jetty-util.version=6.1.14
|
|
||||||
junit.version=4.8.1
|
|
||||||
jdiff.version=1.0.9
|
|
||||||
|
|
||||||
kfs.version=0.3
|
|
||||||
|
|
||||||
log4j.version=1.2.16
|
|
||||||
lucene-core.version=2.3.1
|
|
||||||
|
|
||||||
mina-core.version=2.0.0-M5
|
|
||||||
|
|
||||||
mockito-all.version=1.8.2
|
|
||||||
|
|
||||||
oro.version=2.0.8
|
|
||||||
|
|
||||||
rats-lib.version=0.6
|
|
||||||
|
|
||||||
servlet.version=4.0.6
|
|
||||||
servlet-api-2.5.version=6.1.14
|
|
||||||
servlet-api.version=2.5
|
|
||||||
slf4j-api.version=1.5.11
|
|
||||||
slf4j-log4j12.version=1.5.11
|
|
||||||
|
|
||||||
wagon-http.version=1.0-beta-2
|
|
||||||
xmlenc.version=0.52
|
|
||||||
xerces.version=1.4.4
|
|
||||||
|
|
||||||
jackson.version=1.8.8
|
|
||||||
yarn.version=3.0.0-SNAPSHOT
|
|
||||||
hadoop-mapreduce.version=3.0.0-SNAPSHOT
|
|
|
@ -1,168 +0,0 @@
|
||||||
### "Gridmix" Benchmark ###
|
|
||||||
|
|
||||||
Contents:
|
|
||||||
|
|
||||||
0 Overview
|
|
||||||
1 Getting Started
|
|
||||||
1.0 Build
|
|
||||||
1.1 Configure
|
|
||||||
1.2 Generate test data
|
|
||||||
2 Running
|
|
||||||
2.0 General
|
|
||||||
2.1 Non-Hod cluster
|
|
||||||
2.2 Hod
|
|
||||||
2.2.0 Static cluster
|
|
||||||
2.2.1 Hod cluster
|
|
||||||
|
|
||||||
|
|
||||||
* 0 Overview
|
|
||||||
|
|
||||||
The scripts in this package model a cluster workload. The workload is
|
|
||||||
simulated by generating random data and submitting map/reduce jobs that
|
|
||||||
mimic observed data-access patterns in user jobs. The full benchmark
|
|
||||||
generates approximately 2.5TB of (often compressed) input data operated on
|
|
||||||
by the following simulated jobs:
|
|
||||||
|
|
||||||
1) Three stage map/reduce job
|
|
||||||
Input: 500GB compressed (2TB uncompressed) SequenceFile
|
|
||||||
(k,v) = (5 words, 100 words)
|
|
||||||
hadoop-env: FIXCOMPSEQ
|
|
||||||
Compute1: keep 10% map, 40% reduce
|
|
||||||
Compute2: keep 100% map, 77% reduce
|
|
||||||
Input from Compute1
|
|
||||||
Compute3: keep 116% map, 91% reduce
|
|
||||||
Input from Compute2
|
|
||||||
Motivation: Many user workloads are implemented as pipelined map/reduce
|
|
||||||
jobs, including Pig workloads
|
|
||||||
|
|
||||||
2) Large sort of variable key/value size
|
|
||||||
Input: 500GB compressed (2TB uncompressed) SequenceFile
|
|
||||||
(k,v) = (5-10 words, 100-10000 words)
|
|
||||||
hadoop-env: VARCOMPSEQ
|
|
||||||
Compute: keep 100% map, 100% reduce
|
|
||||||
Motivation: Processing large, compressed datsets is common.
|
|
||||||
|
|
||||||
3) Reference select
|
|
||||||
Input: 500GB compressed (2TB uncompressed) SequenceFile
|
|
||||||
(k,v) = (5-10 words, 100-10000 words)
|
|
||||||
hadoop-env: VARCOMPSEQ
|
|
||||||
Compute: keep 0.2% map, 5% reduce
|
|
||||||
1 Reducer
|
|
||||||
Motivation: Sampling from a large, reference dataset is common.
|
|
||||||
|
|
||||||
4) Indirect Read
|
|
||||||
Input: 500GB compressed (2TB uncompressed) Text
|
|
||||||
(k,v) = (5 words, 20 words)
|
|
||||||
hadoop-env: FIXCOMPTEXT
|
|
||||||
Compute: keep 50% map, 100% reduce Each map reads 1 input file,
|
|
||||||
adding additional input files from the output of the
|
|
||||||
previous iteration for 10 iterations
|
|
||||||
Motivation: User jobs in the wild will often take input data without
|
|
||||||
consulting the framework. This simulates an iterative job
|
|
||||||
whose input data is all "indirect," i.e. given to the
|
|
||||||
framework sans locality metadata.
|
|
||||||
|
|
||||||
5) API text sort (java, pipes, streaming)
|
|
||||||
Input: 500GB uncompressed Text
|
|
||||||
(k,v) = (1-10 words, 0-200 words)
|
|
||||||
hadoop-env: VARINFLTEXT
|
|
||||||
Compute: keep 100% map, 100% reduce
|
|
||||||
Motivation: This benchmark should exercise each of the APIs to
|
|
||||||
map/reduce
|
|
||||||
|
|
||||||
Each of these jobs may be run individually or- using the scripts provided-
|
|
||||||
as a simulation of user activity sized to run in approximately 4 hours on a
|
|
||||||
480-500 node cluster using Hadoop 0.15.0. The benchmark runs a mix of small,
|
|
||||||
medium, and large jobs simultaneously, submitting each at fixed intervals.
|
|
||||||
|
|
||||||
Notes(1-4): Since input data are compressed, this means that each mapper
|
|
||||||
outputs a lot more bytes than it reads in, typically causing map output
|
|
||||||
spills.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
* 1 Getting Started
|
|
||||||
|
|
||||||
1.0 Build
|
|
||||||
|
|
||||||
1) Compile the examples, including the C++ sources:
|
|
||||||
> ant -Dcompile.c++=yes examples
|
|
||||||
2) Copy the pipe sort example to a location in the default filesystem
|
|
||||||
(usually HDFS, default /gridmix/programs)
|
|
||||||
> $HADOOP_PREFIX/hadoop dfs -mkdir $GRID_MIX_PROG
|
|
||||||
> $HADOOP_PREFIX/hadoop dfs -put build/c++-examples/$PLATFORM_STR/bin/pipes-sort $GRID_MIX_PROG
|
|
||||||
|
|
||||||
1.1 Configure
|
|
||||||
|
|
||||||
One must modify hadoop-env to supply the following information:
|
|
||||||
|
|
||||||
HADOOP_PREFIX The hadoop install location
|
|
||||||
GRID_MIX_HOME The location of these scripts
|
|
||||||
APP_JAR The location of the hadoop example
|
|
||||||
GRID_MIX_DATA The location of the datsets for these benchmarks
|
|
||||||
GRID_MIX_PROG The location of the pipe-sort example
|
|
||||||
|
|
||||||
Reasonable defaults are provided for all but HADOOP_PREFIX. The datasets used
|
|
||||||
by each of the respective benchmarks are recorded in the Input::hadoop-env
|
|
||||||
comment in section 0 and their location may be changed in hadoop-env. Note
|
|
||||||
that each job expects particular input data and the parameters given to it
|
|
||||||
must be changed in each script if a different InputFormat, keytype, or
|
|
||||||
valuetype is desired.
|
|
||||||
|
|
||||||
Note that NUM_OF_REDUCERS_FOR_*_JOB properties should be sized to the
|
|
||||||
cluster on which the benchmarks will be run. The default assumes a large
|
|
||||||
(450-500 node) cluster.
|
|
||||||
|
|
||||||
1.2 Generate test data
|
|
||||||
|
|
||||||
Test data is generated using the generateData.sh script. While one may
|
|
||||||
modify the structure and size of the data generated here, note that many of
|
|
||||||
the scripts- particularly for medium and small sized jobs- rely not only on
|
|
||||||
specific InputFormats and key/value types, but also on a particular
|
|
||||||
structure to the input data. Changing these values will likely be necessary
|
|
||||||
to run on small and medium-sized clusters, but any modifications must be
|
|
||||||
informed by an explicit familiarity with the underlying scripts.
|
|
||||||
|
|
||||||
It is sufficient to run the script without modification, though it may
|
|
||||||
require up to 4TB of free space in the default filesystem. Changing the size
|
|
||||||
of the input data (COMPRESSED_DATA_BYTES, UNCOMPRESSED_DATA_BYTES,
|
|
||||||
INDIRECT_DATA_BYTES) is safe. A 4x compression ratio for generated, block
|
|
||||||
compressed data is typical.
|
|
||||||
|
|
||||||
* 2 Running
|
|
||||||
|
|
||||||
2.0 General
|
|
||||||
|
|
||||||
The submissionScripts directory contains the high-level scripts submitting
|
|
||||||
sized jobs for the gridmix benchmark. Each submits $NUM_OF_*_JOBS_PER_CLASS
|
|
||||||
instances as specified in the gridmix-env script, where an instance is an
|
|
||||||
invocation of a script as in $JOBTYPE/$JOBTYPE.$CLASS (e.g.
|
|
||||||
javasort/text-sort.large). Each instance may submit one or more map/reduce
|
|
||||||
jobs.
|
|
||||||
|
|
||||||
There is a backoff script, submissionScripts/sleep_if_too_busy that can be
|
|
||||||
modified to define throttling criteria. By default, it simply counts running
|
|
||||||
java processes.
|
|
||||||
|
|
||||||
2.1 Non-Hod cluster
|
|
||||||
|
|
||||||
The submissionScripts/allToSameCluster script will invoke each of the other
|
|
||||||
submission scripts for the gridmix benchmark. Depending on how your cluster
|
|
||||||
manages job submission, these scripts may require modification. The details
|
|
||||||
are very context-dependent.
|
|
||||||
|
|
||||||
2.2 Hod
|
|
||||||
|
|
||||||
Note that there are options in hadoop-env that control jobs sumitted thruogh
|
|
||||||
Hod. One may specify the location of a config (HOD_CONFIG), the number of
|
|
||||||
nodes to allocate for classes of jobs, and any additional options one wants
|
|
||||||
to apply. The default includes an example for supplying a Hadoop tarball for
|
|
||||||
testing platform changes (see Hod documentation).
|
|
||||||
|
|
||||||
2.2.0 Static Cluster
|
|
||||||
|
|
||||||
> hod --hod.script=submissionScripts/allToSameCluster -m 500
|
|
||||||
|
|
||||||
2.2.1 Hod-allocated cluster
|
|
||||||
|
|
||||||
> ./submissionScripts/allThroughHod
|
|
|
@ -1,90 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
GRID_DIR=`dirname "$0"`
|
|
||||||
GRID_DIR=`cd "$GRID_DIR"; pwd`
|
|
||||||
source $GRID_DIR/gridmix-env
|
|
||||||
|
|
||||||
# Smaller data set is used by default.
|
|
||||||
COMPRESSED_DATA_BYTES=2147483648
|
|
||||||
UNCOMPRESSED_DATA_BYTES=536870912
|
|
||||||
INDIRECT_DATA_BYTES=58720256
|
|
||||||
|
|
||||||
# Number of partitions for output data
|
|
||||||
if [ -z ${NUM_MAPS} ] ; then
|
|
||||||
NUM_MAPS=100
|
|
||||||
fi
|
|
||||||
|
|
||||||
INDIRECT_DATA_FILES=200
|
|
||||||
|
|
||||||
# If the env var USE_REAL_DATASET is set, then use the params to generate the bigger (real) dataset.
|
|
||||||
if [ ! -z ${USE_REAL_DATASET} ] ; then
|
|
||||||
echo "Using real dataset"
|
|
||||||
# 2TB data compressing to approx 500GB
|
|
||||||
COMPRESSED_DATA_BYTES=2147483648000
|
|
||||||
# 500GB
|
|
||||||
UNCOMPRESSED_DATA_BYTES=536870912000
|
|
||||||
# Default approx 70MB per data file, compressed
|
|
||||||
INDIRECT_DATA_BYTES=58720256000
|
|
||||||
fi
|
|
||||||
|
|
||||||
${HADOOP_PREFIX}/bin/hadoop jar \
|
|
||||||
${EXAMPLE_JAR} randomtextwriter \
|
|
||||||
-D mapreduce.randomtextwriter.totalbytes=${COMPRESSED_DATA_BYTES} \
|
|
||||||
-D mapreduce.randomtextwriter.bytespermap=$((${COMPRESSED_DATA_BYTES} / ${NUM_MAPS})) \
|
|
||||||
-D mapreduce.randomtextwriter.minwordskey=5 \
|
|
||||||
-D mapreduce.randomtextwriter.maxwordskey=10 \
|
|
||||||
-D mapreduce.randomtextwriter.minwordsvalue=100 \
|
|
||||||
-D mapreduce.randomtextwriter.maxwordsvalue=10000 \
|
|
||||||
-D mapreduce.output.fileoutputformat.compress=true \
|
|
||||||
-D mapred.map.output.compression.type=BLOCK \
|
|
||||||
-outFormat org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat \
|
|
||||||
${VARCOMPSEQ} &
|
|
||||||
|
|
||||||
${HADOOP_PREFIX}/bin/hadoop jar \
|
|
||||||
${EXAMPLE_JAR} randomtextwriter \
|
|
||||||
-D mapreduce.randomtextwriter.totalbytes=${COMPRESSED_DATA_BYTES} \
|
|
||||||
-D mapreduce.randomtextwriter.bytespermap=$((${COMPRESSED_DATA_BYTES} / ${NUM_MAPS})) \
|
|
||||||
-D mapreduce.randomtextwriter.minwordskey=5 \
|
|
||||||
-D mapreduce.randomtextwriter.maxwordskey=5 \
|
|
||||||
-D mapreduce.randomtextwriter.minwordsvalue=100 \
|
|
||||||
-D mapreduce.randomtextwriter.maxwordsvalue=100 \
|
|
||||||
-D mapreduce.output.fileoutputformat.compress=true \
|
|
||||||
-D mapred.map.output.compression.type=BLOCK \
|
|
||||||
-outFormat org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat \
|
|
||||||
${FIXCOMPSEQ} &
|
|
||||||
|
|
||||||
${HADOOP_PREFIX}/bin/hadoop jar \
|
|
||||||
${EXAMPLE_JAR} randomtextwriter \
|
|
||||||
-D mapreduce.randomtextwriter.totalbytes=${UNCOMPRESSED_DATA_BYTES} \
|
|
||||||
-D mapreduce.randomtextwriter.bytespermap=$((${UNCOMPRESSED_DATA_BYTES} / ${NUM_MAPS})) \
|
|
||||||
-D mapreduce.randomtextwriter.minwordskey=1 \
|
|
||||||
-D mapreduce.randomtextwriter.maxwordskey=10 \
|
|
||||||
-D mapreduce.randomtextwriter.minwordsvalue=0 \
|
|
||||||
-D mapreduce.randomtextwriter.maxwordsvalue=200 \
|
|
||||||
-D mapreduce.output.fileoutputformat.compress=false \
|
|
||||||
-outFormat org.apache.hadoop.mapreduce.lib.output.TextOutputFormat \
|
|
||||||
${VARINFLTEXT} &
|
|
||||||
|
|
||||||
${HADOOP_PREFIX}/bin/hadoop jar \
|
|
||||||
${EXAMPLE_JAR} randomtextwriter \
|
|
||||||
-D mapreduce.randomtextwriter.totalbytes=${INDIRECT_DATA_BYTES} \
|
|
||||||
-D mapreduce.randomtextwriter.bytespermap=$((${INDIRECT_DATA_BYTES} / ${INDIRECT_DATA_FILES})) \
|
|
||||||
-D mapreduce.randomtextwriter.minwordskey=5 \
|
|
||||||
-D mapreduce.randomtextwriter.maxwordskey=5 \
|
|
||||||
-D mapreduce.randomtextwriter.minwordsvalue=20 \
|
|
||||||
-D mapreduce.randomtextwriter.maxwordsvalue=20 \
|
|
||||||
-D mapreduce.output.fileoutputformat.compress=true \
|
|
||||||
-D mapred.map.output.compression.type=BLOCK \
|
|
||||||
-outFormat org.apache.hadoop.mapreduce.lib.output.TextOutputFormat \
|
|
||||||
${FIXCOMPTEXT} &
|
|
|
@ -1,86 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
|
|
||||||
## Environment configuration
|
|
||||||
# Hadoop installation
|
|
||||||
# set var only if it has not already been set externally
|
|
||||||
if [ -z "${HADOOP_PREFIX}" ] ; then
|
|
||||||
export HADOOP_PREFIX=
|
|
||||||
fi
|
|
||||||
# Base directory for gridmix install
|
|
||||||
# set var only if it has not already been set externally
|
|
||||||
if [ -z "${GRID_MIX_HOME}" ] ; then
|
|
||||||
export GRID_MIX_HOME=${GRID_DIR}
|
|
||||||
fi
|
|
||||||
# Hadoop example jar
|
|
||||||
# set var only if it has not already been set externally
|
|
||||||
if [ -z "${EXAMPLE_JAR}" ] ; then
|
|
||||||
export EXAMPLE_JAR="${HADOOP_PREFIX}/hadoop-*examples.jar"
|
|
||||||
fi
|
|
||||||
# Hadoop test jar
|
|
||||||
# set var only if it has not already been set externally
|
|
||||||
if [ -z "${APP_JAR}" ] ; then
|
|
||||||
export APP_JAR="${HADOOP_PREFIX}/hadoop-*test.jar"
|
|
||||||
fi
|
|
||||||
# Hadoop streaming jar
|
|
||||||
# set var only if it has not already been set externally
|
|
||||||
if [ -z "${STREAM_JAR}" ] ; then
|
|
||||||
export STREAM_JAR="${HADOOP_PREFIX}/contrib/streaming/hadoop-*streaming.jar"
|
|
||||||
fi
|
|
||||||
# Location on default filesystem for writing gridmix data (usually HDFS)
|
|
||||||
# Default: /gridmix/data
|
|
||||||
# set var only if it has not already been set externally
|
|
||||||
if [ -z "${GRID_MIX_DATA}" ] ; then
|
|
||||||
export GRID_MIX_DATA=/gridmix/data
|
|
||||||
fi
|
|
||||||
# Location of executables in default filesystem (usually HDFS)
|
|
||||||
# Default: /gridmix/programs
|
|
||||||
# set var only if it has not already been set externally
|
|
||||||
if [ -z "${GRID_MIX_PROG}" ] ; then
|
|
||||||
export GRID_MIX_PROG=/gridmix/programs
|
|
||||||
fi
|
|
||||||
|
|
||||||
## Data sources
|
|
||||||
# Variable length key, value compressed SequenceFile
|
|
||||||
export VARCOMPSEQ=${GRID_MIX_DATA}/WebSimulationBlockCompressed
|
|
||||||
# Fixed length key, value compressed SequenceFile
|
|
||||||
export FIXCOMPSEQ=${GRID_MIX_DATA}/MonsterQueryBlockCompressed
|
|
||||||
# Variable length key, value uncompressed Text File
|
|
||||||
export VARINFLTEXT=${GRID_MIX_DATA}/SortUncompressed
|
|
||||||
# Fixed length key, value compressed Text File
|
|
||||||
export FIXCOMPTEXT=${GRID_MIX_DATA}/EntropySimulationCompressed
|
|
||||||
|
|
||||||
## Job sizing
|
|
||||||
export NUM_OF_LARGE_JOBS_FOR_ENTROPY_CLASS=5
|
|
||||||
export NUM_OF_LARGE_JOBS_PER_CLASS=3
|
|
||||||
export NUM_OF_MEDIUM_JOBS_PER_CLASS=20
|
|
||||||
export NUM_OF_SMALL_JOBS_PER_CLASS=40
|
|
||||||
|
|
||||||
export NUM_OF_REDUCERS_FOR_LARGE_JOB=370
|
|
||||||
export NUM_OF_REDUCERS_FOR_MEDIUM_JOB=170
|
|
||||||
export NUM_OF_REDUCERS_FOR_SMALL_JOB=15
|
|
||||||
|
|
||||||
## Throttling
|
|
||||||
export INTERVAL_BETWEEN_SUBMITION=20
|
|
||||||
|
|
||||||
## Hod
|
|
||||||
#export HOD_OPTIONS=""
|
|
||||||
|
|
||||||
export CLUSTER_DIR_BASE=$GRID_MIX_HOME/CLUSTER_DIR_BASE
|
|
||||||
export HOD_CONFIG=
|
|
||||||
export ALL_HOD_OPTIONS="-c ${HOD_CONFIG} ${HOD_OPTIONS}"
|
|
||||||
export SMALL_JOB_HOD_OPTIONS="$ALL_HOD_OPTIONS -n 5"
|
|
||||||
export MEDIUM_JOB_HOD_OPTIONS="$ALL_HOD_OPTIONS -n 50"
|
|
||||||
export LARGE_JOB_HOD_OPTIONS="$ALL_HOD_OPTIONS -n 100"
|
|
||||||
|
|
|
@ -1,25 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
GRID_DIR=`dirname "$0"`
|
|
||||||
GRID_DIR=`cd "$GRID_DIR"; pwd`
|
|
||||||
source $GRID_DIR/../gridmix-env
|
|
||||||
|
|
||||||
INDIR=${VARINFLTEXT}
|
|
||||||
|
|
||||||
Date=`date +%F-%H-%M-%S-%N`
|
|
||||||
OUTDIR=perf-out/sort-out-dir-large_$Date
|
|
||||||
${HADOOP_PREFIX}/bin/hadoop dfs -rmr $OUTDIR
|
|
||||||
|
|
||||||
${HADOOP_PREFIX}/bin/hadoop jar ${EXAMPLE_JAR} sort -m 1 -r $NUM_OF_REDUCERS_FOR_LARGE_JOB -inFormat org.apache.hadoop.mapred.KeyValueTextInputFormat -outFormat org.apache.hadoop.mapred.TextOutputFormat -outKey org.apache.hadoop.io.Text -outValue org.apache.hadoop.io.Text $INDIR $OUTDIR
|
|
||||||
|
|
|
@ -1,25 +0,0 @@
|
||||||
#!/usr/bin/env bash
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
GRID_DIR=`dirname "$0"`
|
|
||||||
GRID_DIR=`cd "$GRID_DIR"; pwd`
|
|
||||||
source $GRID_DIR/../gridmix-env
|
|
||||||
|
|
||||||
INDIR="${VARINFLTEXT}/{part-000*0,part-000*1,part-000*2}"
|
|
||||||
Date=`date +%F-%H-%M-%S-%N`
|
|
||||||
|
|
||||||
OUTDIR=perf-out/sort-out-dir-medium_$Date
|
|
||||||
${HADOOP_PREFIX}/bin/hadoop dfs -rmr $OUTDIR
|
|
||||||
|
|
||||||
${HADOOP_PREFIX}/bin/hadoop jar ${EXAMPLE_JAR} sort -m 1 -r $NUM_OF_REDUCERS_FOR_MEDIUM_JOB -inFormat org.apache.hadoop.mapred.KeyValueTextInputFormat -outFormat org.apache.hadoop.mapred.TextOutputFormat -outKey org.apache.hadoop.io.Text -outValue org.apache.hadoop.io.Text $INDIR $OUTDIR
|
|
||||||
|
|
|
@ -1,25 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

GRID_DIR=`dirname "$0"`
GRID_DIR=`cd "$GRID_DIR"; pwd`
source $GRID_DIR/../gridmix-env

INDIR="${VARINFLTEXT}/{part-00000,part-00001,part-00002}"
Date=`date +%F-%H-%M-%S-%N`

OUTDIR=perf-out/sort-out-dir-small_$Date
${HADOOP_PREFIX}/bin/hadoop dfs -rmr $OUTDIR

${HADOOP_PREFIX}/bin/hadoop jar ${EXAMPLE_JAR} sort -m 1 -r $NUM_OF_REDUCERS_FOR_SMALL_JOB -inFormat org.apache.hadoop.mapred.KeyValueTextInputFormat -outFormat org.apache.hadoop.mapred.TextOutputFormat -outKey org.apache.hadoop.io.Text -outValue org.apache.hadoop.io.Text $INDIR $OUTDIR
@ -1,37 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

GRID_DIR=`dirname "$0"`
GRID_DIR=`cd "$GRID_DIR"; pwd`
source $GRID_DIR/../gridmix-env

NUM_OF_REDUCERS=100
INDIR=${FIXCOMPTEXT}
Date=`date +%F-%H-%M-%S-%N`

OUTDIR=perf-out/maxent-out-dir-large_$Date
${HADOOP_PREFIX}/bin/hadoop dfs -rmr $OUTDIR

${HADOOP_PREFIX}/bin/hadoop jar $APP_JAR loadgen -keepmap 50 -keepred 100 -inFormatIndirect org.apache.hadoop.mapred.TextInputFormat -outFormat org.apache.hadoop.mapred.TextOutputFormat -outKey org.apache.hadoop.io.LongWritable -outValue org.apache.hadoop.io.Text -indir $INDIR -outdir $OUTDIR.1 -r $NUM_OF_REDUCERS

ITER=7
for ((i=1; i<$ITER; ++i))
do
  ${HADOOP_PREFIX}/bin/hadoop jar $APP_JAR loadgen -keepmap 50 -keepred 100 -inFormatIndirect org.apache.hadoop.mapred.TextInputFormat -outFormat org.apache.hadoop.mapred.TextOutputFormat -outKey org.apache.hadoop.io.LongWritable -outValue org.apache.hadoop.io.Text -indir $INDIR -indir $OUTDIR.$i -outdir $OUTDIR.$(($i+1)) -r $NUM_OF_REDUCERS
  # Capture the job's exit status before testing it, so a failed
  # iteration exits with the job's real return code rather than the
  # status of the test itself.
  ret=$?
  if [ $ret -ne 0 ]
  then exit $ret
  fi
  ${HADOOP_PREFIX}/bin/hadoop dfs -rmr $OUTDIR.$i
done

${HADOOP_PREFIX}/bin/hadoop dfs -rmr $OUTDIR.$ITER
@ -1,38 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

GRID_DIR=`dirname "$0"`
GRID_DIR=`cd "$GRID_DIR"; pwd`
source $GRID_DIR/../gridmix-env

NUM_OF_REDUCERS=$NUM_OF_REDUCERS_FOR_LARGE_JOB
INDIR=${FIXCOMPSEQ}
Date=`date +%F-%H-%M-%S-%N`

OUTDIR=perf-out/mq-out-dir-large_$Date.1
${HADOOP_PREFIX}/bin/hadoop dfs -rmr $OUTDIR

${HADOOP_PREFIX}/bin/hadoop jar $APP_JAR loadgen -keepmap 10 -keepred 40 -inFormat org.apache.hadoop.mapred.SequenceFileInputFormat -outFormat org.apache.hadoop.mapred.SequenceFileOutputFormat -outKey org.apache.hadoop.io.Text -outValue org.apache.hadoop.io.Text -indir $INDIR -outdir $OUTDIR -r $NUM_OF_REDUCERS

INDIR=$OUTDIR
OUTDIR=perf-out/mq-out-dir-large_$Date.2
${HADOOP_PREFIX}/bin/hadoop dfs -rmr $OUTDIR

${HADOOP_PREFIX}/bin/hadoop jar $APP_JAR loadgen -keepmap 100 -keepred 77 -inFormat org.apache.hadoop.mapred.SequenceFileInputFormat -outFormat org.apache.hadoop.mapred.SequenceFileOutputFormat -outKey org.apache.hadoop.io.Text -outValue org.apache.hadoop.io.Text -indir $INDIR -outdir $OUTDIR -r $NUM_OF_REDUCERS

INDIR=$OUTDIR
OUTDIR=perf-out/mq-out-dir-large_$Date.3
${HADOOP_PREFIX}/bin/hadoop dfs -rmr $OUTDIR

${HADOOP_PREFIX}/bin/hadoop jar $APP_JAR loadgen -keepmap 116 -keepred 91 -inFormat org.apache.hadoop.mapred.SequenceFileInputFormat -outFormat org.apache.hadoop.mapred.SequenceFileOutputFormat -outKey org.apache.hadoop.io.Text -outValue org.apache.hadoop.io.Text -indir $INDIR -outdir $OUTDIR -r $NUM_OF_REDUCERS
@ -1,38 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

GRID_DIR=`dirname "$0"`
GRID_DIR=`cd "$GRID_DIR"; pwd`
source $GRID_DIR/../gridmix-env

NUM_OF_REDUCERS=$NUM_OF_REDUCERS_FOR_MEDIUM_JOB
INDIR="${FIXCOMPSEQ}/{part-000*0,part-000*1,part-000*2}"
Date=`date +%F-%H-%M-%S-%N`

OUTDIR=perf-out/mq-out-dir-medium_$Date.1
${HADOOP_PREFIX}/bin/hadoop dfs -rmr $OUTDIR

${HADOOP_PREFIX}/bin/hadoop jar $APP_JAR loadgen -keepmap 10 -keepred 40 -inFormat org.apache.hadoop.mapred.SequenceFileInputFormat -outFormat org.apache.hadoop.mapred.SequenceFileOutputFormat -outKey org.apache.hadoop.io.Text -outValue org.apache.hadoop.io.Text -indir $INDIR -outdir $OUTDIR -r $NUM_OF_REDUCERS

INDIR=$OUTDIR
OUTDIR=perf-out/mq-out-dir-medium_$Date.2
${HADOOP_PREFIX}/bin/hadoop dfs -rmr $OUTDIR

${HADOOP_PREFIX}/bin/hadoop jar $APP_JAR loadgen -keepmap 100 -keepred 77 -inFormat org.apache.hadoop.mapred.SequenceFileInputFormat -outFormat org.apache.hadoop.mapred.SequenceFileOutputFormat -outKey org.apache.hadoop.io.Text -outValue org.apache.hadoop.io.Text -indir $INDIR -outdir $OUTDIR -r $NUM_OF_REDUCERS

INDIR=$OUTDIR
OUTDIR=perf-out/mq-out-dir-medium_$Date.3
${HADOOP_PREFIX}/bin/hadoop dfs -rmr $OUTDIR

${HADOOP_PREFIX}/bin/hadoop jar $APP_JAR loadgen -keepmap 116 -keepred 91 -inFormat org.apache.hadoop.mapred.SequenceFileInputFormat -outFormat org.apache.hadoop.mapred.SequenceFileOutputFormat -outKey org.apache.hadoop.io.Text -outValue org.apache.hadoop.io.Text -indir $INDIR -outdir $OUTDIR -r $NUM_OF_REDUCERS
@ -1,38 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

GRID_DIR=`dirname "$0"`
GRID_DIR=`cd "$GRID_DIR"; pwd`
source $GRID_DIR/../gridmix-env

NUM_OF_REDUCERS=$NUM_OF_REDUCERS_FOR_SMALL_JOB
INDIR="${FIXCOMPSEQ}/{part-00000,part-00001,part-00002}"
Date=`date +%F-%H-%M-%S-%N`

OUTDIR=perf-out/mq-out-dir-small_$Date.1
${HADOOP_PREFIX}/bin/hadoop dfs -rmr $OUTDIR

${HADOOP_PREFIX}/bin/hadoop jar $APP_JAR loadgen -keepmap 10 -keepred 40 -inFormat org.apache.hadoop.mapred.SequenceFileInputFormat -outFormat org.apache.hadoop.mapred.SequenceFileOutputFormat -outKey org.apache.hadoop.io.Text -outValue org.apache.hadoop.io.Text -indir $INDIR -outdir $OUTDIR -r $NUM_OF_REDUCERS

INDIR=$OUTDIR
OUTDIR=perf-out/mq-out-dir-small_$Date.2
${HADOOP_PREFIX}/bin/hadoop dfs -rmr $OUTDIR

${HADOOP_PREFIX}/bin/hadoop jar $APP_JAR loadgen -keepmap 100 -keepred 77 -inFormat org.apache.hadoop.mapred.SequenceFileInputFormat -outFormat org.apache.hadoop.mapred.SequenceFileOutputFormat -outKey org.apache.hadoop.io.Text -outValue org.apache.hadoop.io.Text -indir $INDIR -outdir $OUTDIR -r $NUM_OF_REDUCERS

INDIR=$OUTDIR
OUTDIR=perf-out/mq-out-dir-small_$Date.3
${HADOOP_PREFIX}/bin/hadoop dfs -rmr $OUTDIR

${HADOOP_PREFIX}/bin/hadoop jar $APP_JAR loadgen -keepmap 116 -keepred 91 -inFormat org.apache.hadoop.mapred.SequenceFileInputFormat -outFormat org.apache.hadoop.mapred.SequenceFileOutputFormat -outKey org.apache.hadoop.io.Text -outValue org.apache.hadoop.io.Text -indir $INDIR -outdir $OUTDIR -r $NUM_OF_REDUCERS
@ -1,27 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

GRID_DIR=`dirname "$0"`
GRID_DIR=`cd "$GRID_DIR"; pwd`
source $GRID_DIR/../gridmix-env

NUM_OF_REDUCERS=$NUM_OF_REDUCERS_FOR_LARGE_JOB
INDIR=${VARINFLTEXT}
Date=`date +%F-%H-%M-%S-%N`

OUTDIR=perf-out/pipe-out-dir-large_$Date
${HADOOP_PREFIX}/bin/hadoop dfs -rmr $OUTDIR

${HADOOP_PREFIX}/bin/hadoop pipes -input $INDIR -output $OUTDIR -inputformat org.apache.hadoop.mapred.KeyValueTextInputFormat -program ${GRID_MIX_PROG}/pipes-sort -reduces $NUM_OF_REDUCERS -jobconf mapreduce.job.output.key.class=org.apache.hadoop.io.Text,mapreduce.job.output.value.class=org.apache.hadoop.io.Text -writer org.apache.hadoop.mapred.TextOutputFormat
@ -1,27 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

GRID_DIR=`dirname "$0"`
GRID_DIR=`cd "$GRID_DIR"; pwd`
source $GRID_DIR/../gridmix-env

NUM_OF_REDUCERS=$NUM_OF_REDUCERS_FOR_MEDIUM_JOB
INDIR="${VARINFLTEXT}/{part-000*0,part-000*1,part-000*2}"
Date=`date +%F-%H-%M-%S-%N`

OUTDIR=perf-out/pipe-out-dir-medium_$Date
${HADOOP_PREFIX}/bin/hadoop dfs -rmr $OUTDIR

${HADOOP_PREFIX}/bin/hadoop pipes -input $INDIR -output $OUTDIR -inputformat org.apache.hadoop.mapred.KeyValueTextInputFormat -program ${GRID_MIX_PROG}/pipes-sort -reduces $NUM_OF_REDUCERS -jobconf mapreduce.job.output.key.class=org.apache.hadoop.io.Text,mapreduce.job.output.value.class=org.apache.hadoop.io.Text -writer org.apache.hadoop.mapred.TextOutputFormat
@ -1,27 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

GRID_DIR=`dirname "$0"`
GRID_DIR=`cd "$GRID_DIR"; pwd`
source $GRID_DIR/../gridmix-env

NUM_OF_REDUCERS=$NUM_OF_REDUCERS_FOR_SMALL_JOB
INDIR="${VARINFLTEXT}/{part-00000,part-00001,part-00002}"
Date=`date +%F-%H-%M-%S-%N`

OUTDIR=perf-out/pipe-out-dir-small_$Date
${HADOOP_PREFIX}/bin/hadoop dfs -rmr $OUTDIR

${HADOOP_PREFIX}/bin/hadoop pipes -input $INDIR -output $OUTDIR -inputformat org.apache.hadoop.mapred.KeyValueTextInputFormat -program ${GRID_MIX_PROG}/pipes-sort -reduces $NUM_OF_REDUCERS -jobconf mapreduce.job.output.key.class=org.apache.hadoop.io.Text,mapreduce.job.output.value.class=org.apache.hadoop.io.Text -writer org.apache.hadoop.mapred.TextOutputFormat
@ -1,27 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

GRID_DIR=`dirname "$0"`
GRID_DIR=`cd "$GRID_DIR"; pwd`
source $GRID_DIR/../gridmix-env

export NUM_OF_REDUCERS=$NUM_OF_REDUCERS_FOR_LARGE_JOB
export INDIR=${VARINFLTEXT}
Date=`date +%F-%H-%M-%S-%N`

export OUTDIR=perf-out/stream-out-dir-large_$Date
${HADOOP_PREFIX}/bin/hadoop dfs -rmr $OUTDIR

${HADOOP_PREFIX}/bin/hadoop jar ${STREAM_JAR} -input $INDIR -output $OUTDIR -mapper cat -reducer cat -numReduceTasks $NUM_OF_REDUCERS
@ -1,27 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

GRID_DIR=`dirname "$0"`
GRID_DIR=`cd "$GRID_DIR"; pwd`
source $GRID_DIR/../gridmix-env

NUM_OF_REDUCERS=$NUM_OF_REDUCERS_FOR_MEDIUM_JOB
INDIR="${VARINFLTEXT}/{part-000*0,part-000*1,part-000*2}"
Date=`date +%F-%H-%M-%S-%N`

OUTDIR=perf-out/stream-out-dir-medium_$Date
${HADOOP_PREFIX}/bin/hadoop dfs -rmr $OUTDIR

${HADOOP_PREFIX}/bin/hadoop jar ${STREAM_JAR} -input $INDIR -output $OUTDIR -mapper cat -reducer cat -numReduceTasks $NUM_OF_REDUCERS
@ -1,27 +0,0 @@
#!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

GRID_DIR=`dirname "$0"`
GRID_DIR=`cd "$GRID_DIR"; pwd`
source $GRID_DIR/../gridmix-env

NUM_OF_REDUCERS=$NUM_OF_REDUCERS_FOR_SMALL_JOB
INDIR="${VARINFLTEXT}/{part-00000,part-00001,part-00002}"
Date=`date +%F-%H-%M-%S-%N`

OUTDIR=perf-out/stream-out-dir-small_$Date
${HADOOP_PREFIX}/bin/hadoop dfs -rmr $OUTDIR

${HADOOP_PREFIX}/bin/hadoop jar ${STREAM_JAR} -input $INDIR -output $OUTDIR -mapper cat -reducer cat -numReduceTasks $NUM_OF_REDUCERS
Some files were not shown because too many files have changed in this diff.