Merge branch 'trunk' into HDFS-7240
This commit is contained in:
commit
52640fb884
|
@ -393,6 +393,10 @@
|
|||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-yarn-common</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-yarn-registry</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-yarn-server-common</artifactId>
|
||||
|
|
|
@ -66,4 +66,6 @@ public interface Constants {
|
|||
|
||||
static public final FsPermission PERMISSION_555 =
|
||||
new FsPermission((short) 0555);
|
||||
|
||||
String CONFIG_VIEWFS_RENAME_STRATEGY = "fs.viewfs.rename.strategy";
|
||||
}
|
||||
|
|
|
@ -126,7 +126,8 @@ public class ViewFileSystem extends FileSystem {
|
|||
Configuration config;
|
||||
InodeTree<FileSystem> fsState; // the fs state; ie the mount table
|
||||
Path homeDir = null;
|
||||
|
||||
// Default to rename within same mountpoint
|
||||
private RenameStrategy renameStrategy = RenameStrategy.SAME_MOUNTPOINT;
|
||||
/**
|
||||
* Make the path Absolute and get the path-part of a pathname.
|
||||
* Checks that URI matches this file system
|
||||
|
@ -207,6 +208,9 @@ public class ViewFileSystem extends FileSystem {
|
|||
}
|
||||
};
|
||||
workingDir = this.getHomeDirectory();
|
||||
renameStrategy = RenameStrategy.valueOf(
|
||||
conf.get(Constants.CONFIG_VIEWFS_RENAME_STRATEGY,
|
||||
RenameStrategy.SAME_MOUNTPOINT.toString()));
|
||||
} catch (URISyntaxException e) {
|
||||
throw new IOException("URISyntax exception: " + theUri);
|
||||
}
|
||||
|
@ -490,27 +494,55 @@ public class ViewFileSystem extends FileSystem {
|
|||
if (resDst.isInternalDir()) {
|
||||
throw readOnlyMountTable("rename", dst);
|
||||
}
|
||||
/**
|
||||
// Alternate 1: renames within same file system - valid but we disallow
|
||||
// Alternate 2: (as described in next para - valid but we have disallowed it
|
||||
//
|
||||
// Note we compare the URIs. the URIs include the link targets.
|
||||
// hence we allow renames across mount links as long as the mount links
|
||||
// point to the same target.
|
||||
if (!resSrc.targetFileSystem.getUri().equals(
|
||||
resDst.targetFileSystem.getUri())) {
|
||||
throw new IOException("Renames across Mount points not supported");
|
||||
|
||||
URI srcUri = resSrc.targetFileSystem.getUri();
|
||||
URI dstUri = resDst.targetFileSystem.getUri();
|
||||
|
||||
verifyRenameStrategy(srcUri, dstUri,
|
||||
resSrc.targetFileSystem == resDst.targetFileSystem, renameStrategy);
|
||||
|
||||
ChRootedFileSystem srcFS = (ChRootedFileSystem) resSrc.targetFileSystem;
|
||||
ChRootedFileSystem dstFS = (ChRootedFileSystem) resDst.targetFileSystem;
|
||||
return srcFS.getMyFs().rename(srcFS.fullPath(resSrc.remainingPath),
|
||||
dstFS.fullPath(resDst.remainingPath));
|
||||
}
|
||||
|
||||
static void verifyRenameStrategy(URI srcUri, URI dstUri,
|
||||
boolean isSrcDestSame, ViewFileSystem.RenameStrategy renameStrategy)
|
||||
throws IOException {
|
||||
switch (renameStrategy) {
|
||||
case SAME_FILESYSTEM_ACROSS_MOUNTPOINT:
|
||||
if (srcUri.getAuthority() != null) {
|
||||
if (!(srcUri.getScheme().equals(dstUri.getScheme()) && srcUri
|
||||
.getAuthority().equals(dstUri.getAuthority()))) {
|
||||
throw new IOException("Renames across Mount points not supported");
|
||||
}
|
||||
}
|
||||
|
||||
break;
|
||||
case SAME_TARGET_URI_ACROSS_MOUNTPOINT:
|
||||
// Alternate 2: Rename across mountpoints with same target.
|
||||
// i.e. Rename across alias mountpoints.
|
||||
//
|
||||
// Note we compare the URIs. the URIs include the link targets.
|
||||
// hence we allow renames across mount links as long as the mount links
|
||||
// point to the same target.
|
||||
if (!srcUri.equals(dstUri)) {
|
||||
throw new IOException("Renames across Mount points not supported");
|
||||
}
|
||||
|
||||
break;
|
||||
case SAME_MOUNTPOINT:
|
||||
//
|
||||
// Alternate 3 : renames ONLY within the the same mount links.
|
||||
//
|
||||
if (!isSrcDestSame) {
|
||||
throw new IOException("Renames across Mount points not supported");
|
||||
}
|
||||
break;
|
||||
default:
|
||||
throw new IllegalArgumentException ("Unexpected rename strategy");
|
||||
}
|
||||
*/
|
||||
|
||||
//
|
||||
// Alternate 3 : renames ONLY within the the same mount links.
|
||||
//
|
||||
if (resSrc.targetFileSystem !=resDst.targetFileSystem) {
|
||||
throw new IOException("Renames across Mount points not supported");
|
||||
}
|
||||
return resSrc.targetFileSystem.rename(resSrc.remainingPath,
|
||||
resDst.remainingPath);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -1241,4 +1273,9 @@ public class ViewFileSystem extends FileSystem {
|
|||
return allPolicies;
|
||||
}
|
||||
}
|
||||
|
||||
enum RenameStrategy {
|
||||
SAME_MOUNTPOINT, SAME_TARGET_URI_ACROSS_MOUNTPOINT,
|
||||
SAME_FILESYSTEM_ACROSS_MOUNTPOINT
|
||||
}
|
||||
}
|
||||
|
|
|
@ -157,7 +157,9 @@ public class ViewFs extends AbstractFileSystem {
|
|||
final Configuration config;
|
||||
InodeTree<AbstractFileSystem> fsState; // the fs state; ie the mount table
|
||||
Path homeDir = null;
|
||||
|
||||
private ViewFileSystem.RenameStrategy renameStrategy =
|
||||
ViewFileSystem.RenameStrategy.SAME_MOUNTPOINT;
|
||||
|
||||
static AccessControlException readOnlyMountTable(final String operation,
|
||||
final String p) {
|
||||
return new AccessControlException(
|
||||
|
@ -237,6 +239,9 @@ public class ViewFs extends AbstractFileSystem {
|
|||
// return MergeFs.createMergeFs(mergeFsURIList, config);
|
||||
}
|
||||
};
|
||||
renameStrategy = ViewFileSystem.RenameStrategy.valueOf(
|
||||
conf.get(Constants.CONFIG_VIEWFS_RENAME_STRATEGY,
|
||||
ViewFileSystem.RenameStrategy.SAME_MOUNTPOINT.toString()));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -495,37 +500,23 @@ public class ViewFs extends AbstractFileSystem {
|
|||
+ " is readOnly");
|
||||
}
|
||||
|
||||
InodeTree.ResolveResult<AbstractFileSystem> resDst =
|
||||
InodeTree.ResolveResult<AbstractFileSystem> resDst =
|
||||
fsState.resolve(getUriPath(dst), false);
|
||||
if (resDst.isInternalDir()) {
|
||||
throw new AccessControlException(
|
||||
"Cannot Rename within internal dirs of mount table: dest=" + dst
|
||||
+ " is readOnly");
|
||||
}
|
||||
|
||||
/**
|
||||
// Alternate 1: renames within same file system - valid but we disallow
|
||||
// Alternate 2: (as described in next para - valid but we have disallowed it
|
||||
//
|
||||
// Note we compare the URIs. the URIs include the link targets.
|
||||
// hence we allow renames across mount links as long as the mount links
|
||||
// point to the same target.
|
||||
if (!resSrc.targetFileSystem.getUri().equals(
|
||||
resDst.targetFileSystem.getUri())) {
|
||||
throw new IOException("Renames across Mount points not supported");
|
||||
}
|
||||
*/
|
||||
|
||||
//
|
||||
// Alternate 3 : renames ONLY within the the same mount links.
|
||||
//
|
||||
//Alternate 1: renames within same file system
|
||||
URI srcUri = resSrc.targetFileSystem.getUri();
|
||||
URI dstUri = resDst.targetFileSystem.getUri();
|
||||
ViewFileSystem.verifyRenameStrategy(srcUri, dstUri,
|
||||
resSrc.targetFileSystem == resDst.targetFileSystem, renameStrategy);
|
||||
|
||||
if (resSrc.targetFileSystem !=resDst.targetFileSystem) {
|
||||
throw new IOException("Renames across Mount points not supported");
|
||||
}
|
||||
|
||||
resSrc.targetFileSystem.renameInternal(resSrc.remainingPath,
|
||||
resDst.remainingPath, overwrite);
|
||||
ChRootedFs srcFS = (ChRootedFs) resSrc.targetFileSystem;
|
||||
ChRootedFs dstFS = (ChRootedFs) resDst.targetFileSystem;
|
||||
srcFS.getMyFs().renameInternal(srcFS.fullPath(resSrc.remainingPath),
|
||||
dstFS.fullPath(resDst.remainingPath), overwrite);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -178,8 +178,10 @@ public final class CodecUtil {
|
|||
}
|
||||
} catch (LinkageError | Exception e) {
|
||||
// Fallback to next coder if possible
|
||||
LOG.warn("Failed to create raw erasure encoder " + rawCoderName +
|
||||
", fallback to next codec if possible", e);
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Failed to create raw erasure encoder " + rawCoderName +
|
||||
", fallback to next codec if possible", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
throw new IllegalArgumentException("Fail to create raw erasure " +
|
||||
|
@ -198,12 +200,14 @@ public final class CodecUtil {
|
|||
}
|
||||
} catch (LinkageError | Exception e) {
|
||||
// Fallback to next coder if possible
|
||||
LOG.warn("Failed to create raw erasure decoder " + rawCoderName +
|
||||
", fallback to next codec if possible", e);
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Failed to create raw erasure decoder " + rawCoderName +
|
||||
", fallback to next codec if possible", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
throw new IllegalArgumentException("Fail to create raw erasure " +
|
||||
"encoder with given codec: " + codecName);
|
||||
"decoder with given codec: " + codecName);
|
||||
}
|
||||
|
||||
private static ErasureCodec createCodec(Configuration conf,
|
||||
|
|
|
@ -50,6 +50,11 @@ public final class ErasureCodeNative {
|
|||
}
|
||||
LOADING_FAILURE_REASON = problem;
|
||||
}
|
||||
|
||||
if (LOADING_FAILURE_REASON != null) {
|
||||
LOG.warn("ISA-L support is not available in your platform... " +
|
||||
"using builtin-java codec where applicable");
|
||||
}
|
||||
}
|
||||
|
||||
private ErasureCodeNative() {}
|
||||
|
|
|
@ -633,7 +633,8 @@ public class Client implements AutoCloseable {
|
|||
return false;
|
||||
}
|
||||
|
||||
private synchronized void setupConnection() throws IOException {
|
||||
private synchronized void setupConnection(
|
||||
UserGroupInformation ticket) throws IOException {
|
||||
short ioFailures = 0;
|
||||
short timeoutFailures = 0;
|
||||
while (true) {
|
||||
|
@ -661,24 +662,26 @@ public class Client implements AutoCloseable {
|
|||
* client, to ensure Server matching address of the client connection
|
||||
* to host name in principal passed.
|
||||
*/
|
||||
UserGroupInformation ticket = remoteId.getTicket();
|
||||
InetSocketAddress bindAddr = null;
|
||||
if (ticket != null && ticket.hasKerberosCredentials()) {
|
||||
KerberosInfo krbInfo =
|
||||
remoteId.getProtocol().getAnnotation(KerberosInfo.class);
|
||||
if (krbInfo != null && krbInfo.clientPrincipal() != null) {
|
||||
String host =
|
||||
SecurityUtil.getHostFromPrincipal(remoteId.getTicket().getUserName());
|
||||
|
||||
if (krbInfo != null) {
|
||||
String principal = ticket.getUserName();
|
||||
String host = SecurityUtil.getHostFromPrincipal(principal);
|
||||
// If host name is a valid local address then bind socket to it
|
||||
InetAddress localAddr = NetUtils.getLocalInetAddress(host);
|
||||
if (localAddr != null) {
|
||||
this.socket.setReuseAddress(true);
|
||||
this.socket.bind(new InetSocketAddress(localAddr, 0));
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Binding " + principal + " to " + localAddr);
|
||||
}
|
||||
bindAddr = new InetSocketAddress(localAddr, 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
NetUtils.connect(this.socket, server, connectionTimeout);
|
||||
NetUtils.connect(this.socket, server, bindAddr, connectionTimeout);
|
||||
this.socket.setSoTimeout(soTimeout);
|
||||
return;
|
||||
} catch (ConnectTimeoutException toe) {
|
||||
|
@ -762,7 +765,14 @@ public class Client implements AutoCloseable {
|
|||
AtomicBoolean fallbackToSimpleAuth) {
|
||||
if (socket != null || shouldCloseConnection.get()) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
UserGroupInformation ticket = remoteId.getTicket();
|
||||
if (ticket != null) {
|
||||
final UserGroupInformation realUser = ticket.getRealUser();
|
||||
if (realUser != null) {
|
||||
ticket = realUser;
|
||||
}
|
||||
}
|
||||
try {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Connecting to "+server);
|
||||
|
@ -774,14 +784,10 @@ public class Client implements AutoCloseable {
|
|||
short numRetries = 0;
|
||||
Random rand = null;
|
||||
while (true) {
|
||||
setupConnection();
|
||||
setupConnection(ticket);
|
||||
ipcStreams = new IpcStreams(socket, maxResponseLength);
|
||||
writeConnectionHeader(ipcStreams);
|
||||
if (authProtocol == AuthProtocol.SASL) {
|
||||
UserGroupInformation ticket = remoteId.getTicket();
|
||||
if (ticket.getRealUser() != null) {
|
||||
ticket = ticket.getRealUser();
|
||||
}
|
||||
try {
|
||||
authMethod = ticket
|
||||
.doAs(new PrivilegedExceptionAction<AuthMethod>() {
|
||||
|
|
|
@ -1678,10 +1678,7 @@ public class UserGroupInformation {
|
|||
* @return the user's name up to the first '/' or '@'.
|
||||
*/
|
||||
public String getShortUserName() {
|
||||
for (User p: subject.getPrincipals(User.class)) {
|
||||
return p.getShortName();
|
||||
}
|
||||
return null;
|
||||
return user.getShortName();
|
||||
}
|
||||
|
||||
public String getPrimaryGroupName() throws IOException {
|
||||
|
|
|
@ -0,0 +1,44 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.util;
|
||||
|
||||
/**
|
||||
* Fast thread-safe version of NumberFormat
|
||||
*/
|
||||
public class FastNumberFormat {
|
||||
|
||||
public static StringBuilder format(StringBuilder sb, long value, int minimumDigits) {
|
||||
if (value < 0) {
|
||||
sb.append('-');
|
||||
value = -value;
|
||||
}
|
||||
|
||||
long tmp = value;
|
||||
do {
|
||||
tmp /= 10;
|
||||
} while (--minimumDigits > 0 && tmp > 0);
|
||||
|
||||
for (int i = minimumDigits; i > 0; --i) {
|
||||
sb.append('0');
|
||||
}
|
||||
|
||||
sb.append(value);
|
||||
return sb;
|
||||
}
|
||||
}
|
|
@ -30,6 +30,11 @@ static void (*dlsym_EVP_CIPHER_CTX_free)(EVP_CIPHER_CTX *);
|
|||
static int (*dlsym_EVP_CIPHER_CTX_cleanup)(EVP_CIPHER_CTX *);
|
||||
static void (*dlsym_EVP_CIPHER_CTX_init)(EVP_CIPHER_CTX *);
|
||||
static int (*dlsym_EVP_CIPHER_CTX_set_padding)(EVP_CIPHER_CTX *, int);
|
||||
static int (*dlsym_EVP_CIPHER_CTX_test_flags)(const EVP_CIPHER_CTX *, int);
|
||||
static int (*dlsym_EVP_CIPHER_CTX_block_size)(const EVP_CIPHER_CTX *);
|
||||
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
|
||||
static int (*dlsym_EVP_CIPHER_CTX_encrypting)(const EVP_CIPHER_CTX *);
|
||||
#endif
|
||||
static int (*dlsym_EVP_CipherInit_ex)(EVP_CIPHER_CTX *, const EVP_CIPHER *, \
|
||||
ENGINE *, const unsigned char *, const unsigned char *, int);
|
||||
static int (*dlsym_EVP_CipherUpdate)(EVP_CIPHER_CTX *, unsigned char *, \
|
||||
|
@ -46,6 +51,11 @@ typedef void (__cdecl *__dlsym_EVP_CIPHER_CTX_free)(EVP_CIPHER_CTX *);
|
|||
typedef int (__cdecl *__dlsym_EVP_CIPHER_CTX_cleanup)(EVP_CIPHER_CTX *);
|
||||
typedef void (__cdecl *__dlsym_EVP_CIPHER_CTX_init)(EVP_CIPHER_CTX *);
|
||||
typedef int (__cdecl *__dlsym_EVP_CIPHER_CTX_set_padding)(EVP_CIPHER_CTX *, int);
|
||||
typedef int (__cdecl *__dlsym_EVP_CIPHER_CTX_test_flags)(const EVP_CIPHER_CTX *, int);
|
||||
typedef int (__cdecl *__dlsym_EVP_CIPHER_CTX_block_size)(const EVP_CIPHER_CTX *);
|
||||
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
|
||||
typedef int (__cdecl *__dlsym_EVP_CIPHER_CTX_encrypting)(const EVP_CIPHER_CTX *);
|
||||
#endif
|
||||
typedef int (__cdecl *__dlsym_EVP_CipherInit_ex)(EVP_CIPHER_CTX *, \
|
||||
const EVP_CIPHER *, ENGINE *, const unsigned char *, \
|
||||
const unsigned char *, int);
|
||||
|
@ -60,6 +70,11 @@ static __dlsym_EVP_CIPHER_CTX_free dlsym_EVP_CIPHER_CTX_free;
|
|||
static __dlsym_EVP_CIPHER_CTX_cleanup dlsym_EVP_CIPHER_CTX_cleanup;
|
||||
static __dlsym_EVP_CIPHER_CTX_init dlsym_EVP_CIPHER_CTX_init;
|
||||
static __dlsym_EVP_CIPHER_CTX_set_padding dlsym_EVP_CIPHER_CTX_set_padding;
|
||||
static __dlsym_EVP_CIPHER_CTX_test_flags dlsym_EVP_CIPHER_CTX_test_flags;
|
||||
static __dlsym_EVP_CIPHER_CTX_block_size dlsym_EVP_CIPHER_CTX_block_size;
|
||||
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
|
||||
static __dlsym_EVP_CIPHER_CTX_encrypting dlsym_EVP_CIPHER_CTX_encrypting;
|
||||
#endif
|
||||
static __dlsym_EVP_CipherInit_ex dlsym_EVP_CipherInit_ex;
|
||||
static __dlsym_EVP_CipherUpdate dlsym_EVP_CipherUpdate;
|
||||
static __dlsym_EVP_CipherFinal_ex dlsym_EVP_CipherFinal_ex;
|
||||
|
@ -114,6 +129,14 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_initIDs
|
|||
"EVP_CIPHER_CTX_init");
|
||||
LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_set_padding, env, openssl, \
|
||||
"EVP_CIPHER_CTX_set_padding");
|
||||
LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_test_flags, env, openssl, \
|
||||
"EVP_CIPHER_CTX_test_flags");
|
||||
LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_block_size, env, openssl, \
|
||||
"EVP_CIPHER_CTX_block_size");
|
||||
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
|
||||
LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CIPHER_CTX_encrypting, env, openssl, \
|
||||
"EVP_CIPHER_CTX_encrypting");
|
||||
#endif
|
||||
LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CipherInit_ex, env, openssl, \
|
||||
"EVP_CipherInit_ex");
|
||||
LOAD_DYNAMIC_SYMBOL(dlsym_EVP_CipherUpdate, env, openssl, \
|
||||
|
@ -135,6 +158,17 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_initIDs
|
|||
LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_set_padding, \
|
||||
dlsym_EVP_CIPHER_CTX_set_padding, env, \
|
||||
openssl, "EVP_CIPHER_CTX_set_padding");
|
||||
LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_test_flags, \
|
||||
dlsym_EVP_CIPHER_CTX_test_flags, env, \
|
||||
openssl, "EVP_CIPHER_CTX_test_flags");
|
||||
LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_block_size, \
|
||||
dlsym_EVP_CIPHER_CTX_block_size, env, \
|
||||
openssl, "EVP_CIPHER_CTX_block_size");
|
||||
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
|
||||
LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CIPHER_CTX_encrypting, \
|
||||
dlsym_EVP_CIPHER_CTX_encrypting, env, \
|
||||
openssl, "EVP_CIPHER_CTX_encrypting");
|
||||
#endif
|
||||
LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CipherInit_ex, dlsym_EVP_CipherInit_ex, \
|
||||
env, openssl, "EVP_CipherInit_ex");
|
||||
LOAD_DYNAMIC_SYMBOL(__dlsym_EVP_CipherUpdate, dlsym_EVP_CipherUpdate, \
|
||||
|
@ -253,14 +287,18 @@ JNIEXPORT jlong JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_init
|
|||
static int check_update_max_output_len(EVP_CIPHER_CTX *context, int input_len,
|
||||
int max_output_len)
|
||||
{
|
||||
if (context->flags & EVP_CIPH_NO_PADDING) {
|
||||
if ( dlsym_EVP_CIPHER_CTX_test_flags(context, EVP_CIPH_NO_PADDING) ) {
|
||||
if (max_output_len >= input_len) {
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
} else {
|
||||
int b = context->cipher->block_size;
|
||||
int b = dlsym_EVP_CIPHER_CTX_block_size(context);
|
||||
#if OPENSSL_VERSION_NUMBER < 0x10100000L
|
||||
if (context->encrypt) {
|
||||
#else
|
||||
if (dlsym_EVP_CIPHER_CTX_encrypting(context)) {
|
||||
#endif
|
||||
if (max_output_len >= input_len + b - 1) {
|
||||
return 1;
|
||||
}
|
||||
|
@ -307,10 +345,10 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_crypto_OpensslCipher_update
|
|||
static int check_doFinal_max_output_len(EVP_CIPHER_CTX *context,
|
||||
int max_output_len)
|
||||
{
|
||||
if (context->flags & EVP_CIPH_NO_PADDING) {
|
||||
if ( dlsym_EVP_CIPHER_CTX_test_flags(context, EVP_CIPH_NO_PADDING) ) {
|
||||
return 1;
|
||||
} else {
|
||||
int b = context->cipher->block_size;
|
||||
int b = dlsym_EVP_CIPHER_CTX_block_size(context);
|
||||
if (max_output_len >= b) {
|
||||
return 1;
|
||||
}
|
||||
|
|
|
@ -499,7 +499,15 @@
|
|||
name to use for the service when the client wishes to make an RPC call.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.azure.user.agent.prefix</name>
|
||||
<value>unknown</value>
|
||||
<description>
|
||||
WASB passes User-Agent header to the Azure back-end. The default value
|
||||
contains WASB version, Java Runtime version, Azure Client library version,
|
||||
and the value of the configuration option fs.azure.user.agent.prefix.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>hadoop.security.uid.cache.secs</name>
|
||||
|
@ -792,6 +800,15 @@
|
|||
(ie client side mount table:).</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.viewfs.rename.strategy</name>
|
||||
<value>SAME_MOUNTPOINT</value>
|
||||
<description>Allowed rename strategy to rename between multiple mountpoints.
|
||||
Allowed values are SAME_MOUNTPOINT,SAME_TARGET_URI_ACROSS_MOUNTPOINT and
|
||||
SAME_FILESYSTEM_ACROSS_MOUNTPOINT.
|
||||
</description>
|
||||
</property>
|
||||
|
||||
<property>
|
||||
<name>fs.AbstractFileSystem.ftp.impl</name>
|
||||
<value>org.apache.hadoop.fs.ftp.FtpFs</value>
|
||||
|
|
|
@ -95,6 +95,7 @@ public class TestCommonConfigurationFields extends TestConfigurationFieldsBase {
|
|||
xmlPropsToSkipCompare.add("nfs3.mountd.port");
|
||||
xmlPropsToSkipCompare.add("nfs3.server.port");
|
||||
xmlPropsToSkipCompare.add("test.fs.s3n.name");
|
||||
xmlPropsToSkipCompare.add("fs.viewfs.rename.strategy");
|
||||
|
||||
// S3N/S3A properties are in a different subtree.
|
||||
// - org.apache.hadoop.fs.s3native.S3NativeFileSystemConfigKeys
|
||||
|
@ -116,6 +117,7 @@ public class TestCommonConfigurationFields extends TestConfigurationFieldsBase {
|
|||
xmlPropsToSkipCompare.add("fs.azure.secure.mode");
|
||||
xmlPropsToSkipCompare.add("fs.azure.authorization");
|
||||
xmlPropsToSkipCompare.add("fs.azure.authorization.caching.enable");
|
||||
xmlPropsToSkipCompare.add("fs.azure.user.agent.prefix");
|
||||
|
||||
// Deprecated properties. These should eventually be removed from the
|
||||
// class.
|
||||
|
|
|
@ -20,6 +20,7 @@ package org.apache.hadoop.fs.contract;
|
|||
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
import org.apache.hadoop.fs.FileContext;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.LocatedFileStatus;
|
||||
|
@ -715,6 +716,21 @@ public class ContractTestUtils extends Assert {
|
|||
assertIsFile(filename, status);
|
||||
}
|
||||
|
||||
/**
|
||||
* Assert that a file exists and whose {@link FileStatus} entry
|
||||
* declares that this is a file and not a symlink or directory.
|
||||
*
|
||||
* @param fileContext filesystem to resolve path against
|
||||
* @param filename name of the file
|
||||
* @throws IOException IO problems during file operations
|
||||
*/
|
||||
public static void assertIsFile(FileContext fileContext, Path filename)
|
||||
throws IOException {
|
||||
assertPathExists(fileContext, "Expected file", filename);
|
||||
FileStatus status = fileContext.getFileStatus(filename);
|
||||
assertIsFile(filename, status);
|
||||
}
|
||||
|
||||
/**
|
||||
* Assert that a file exists and whose {@link FileStatus} entry
|
||||
* declares that this is a file and not a symlink or directory.
|
||||
|
@ -765,6 +781,25 @@ public class ContractTestUtils extends Assert {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Assert that a path exists -but make no assertions as to the
|
||||
* type of that entry.
|
||||
*
|
||||
* @param fileContext fileContext to examine
|
||||
* @param message message to include in the assertion failure message
|
||||
* @param path path in the filesystem
|
||||
* @throws FileNotFoundException raised if the path is missing
|
||||
* @throws IOException IO problems
|
||||
*/
|
||||
public static void assertPathExists(FileContext fileContext, String message,
|
||||
Path path) throws IOException {
|
||||
if (!fileContext.util().exists(path)) {
|
||||
//failure, report it
|
||||
throw new FileNotFoundException(
|
||||
message + ": not found " + path + " in " + path.getParent());
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Assert that a path does not exist.
|
||||
*
|
||||
|
@ -785,6 +820,25 @@ public class ContractTestUtils extends Assert {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Assert that a path does not exist.
|
||||
*
|
||||
* @param fileContext fileContext to examine
|
||||
* @param message message to include in the assertion failure message
|
||||
* @param path path in the filesystem
|
||||
* @throws IOException IO problems
|
||||
*/
|
||||
public static void assertPathDoesNotExist(FileContext fileContext,
|
||||
String message, Path path) throws IOException {
|
||||
try {
|
||||
FileStatus status = fileContext.getFileStatus(path);
|
||||
fail(message + ": unexpectedly found " + path + " as " + status);
|
||||
} catch (FileNotFoundException expected) {
|
||||
//this is expected
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Assert that a FileSystem.listStatus on a dir finds the subdir/child entry.
|
||||
* @param fs filesystem
|
||||
|
|
|
@ -41,6 +41,7 @@ import org.apache.hadoop.fs.Path;
|
|||
import org.apache.hadoop.fs.RemoteIterator;
|
||||
import org.apache.hadoop.fs.Trash;
|
||||
import org.apache.hadoop.fs.UnsupportedFileSystemException;
|
||||
import org.apache.hadoop.fs.contract.ContractTestUtils;
|
||||
import org.apache.hadoop.fs.permission.AclEntry;
|
||||
import org.apache.hadoop.fs.permission.AclStatus;
|
||||
import org.apache.hadoop.fs.permission.AclUtil;
|
||||
|
@ -51,6 +52,7 @@ import org.apache.hadoop.security.AccessControlException;
|
|||
import org.apache.hadoop.security.Credentials;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.junit.Assume;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
@ -366,28 +368,83 @@ abstract public class ViewFileSystemBaseTest {
|
|||
}
|
||||
|
||||
// rename across mount points that point to same target also fail
|
||||
@Test(expected=IOException.class)
|
||||
@Test
|
||||
public void testRenameAcrossMounts1() throws IOException {
|
||||
fileSystemTestHelper.createFile(fsView, "/user/foo");
|
||||
fsView.rename(new Path("/user/foo"), new Path("/user2/fooBarBar"));
|
||||
/* - code if we had wanted this to succeed
|
||||
Assert.assertFalse(fSys.exists(new Path("/user/foo")));
|
||||
Assert.assertFalse(fSysLocal.exists(new Path(targetTestRoot,"user/foo")));
|
||||
Assert.assertTrue(fSys.isFile(FileSystemTestHelper.getTestRootPath(fSys,"/user2/fooBarBar")));
|
||||
Assert.assertTrue(fSysLocal.isFile(new Path(targetTestRoot,"user/fooBarBar")));
|
||||
*/
|
||||
try {
|
||||
fsView.rename(new Path("/user/foo"), new Path("/user2/fooBarBar"));
|
||||
ContractTestUtils.fail("IOException is not thrown on rename operation");
|
||||
} catch (IOException e) {
|
||||
GenericTestUtils
|
||||
.assertExceptionContains("Renames across Mount points not supported",
|
||||
e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// rename across mount points fail if the mount link targets are different
|
||||
// even if the targets are part of the same target FS
|
||||
|
||||
@Test(expected=IOException.class)
|
||||
@Test
|
||||
public void testRenameAcrossMounts2() throws IOException {
|
||||
fileSystemTestHelper.createFile(fsView, "/user/foo");
|
||||
fsView.rename(new Path("/user/foo"), new Path("/data/fooBar"));
|
||||
try {
|
||||
fsView.rename(new Path("/user/foo"), new Path("/data/fooBar"));
|
||||
ContractTestUtils.fail("IOException is not thrown on rename operation");
|
||||
} catch (IOException e) {
|
||||
GenericTestUtils
|
||||
.assertExceptionContains("Renames across Mount points not supported",
|
||||
e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// RenameStrategy SAME_TARGET_URI_ACROSS_MOUNTPOINT enabled
|
||||
// to rename across mount points that point to same target URI
|
||||
@Test
|
||||
public void testRenameAcrossMounts3() throws IOException {
|
||||
Configuration conf2 = new Configuration(conf);
|
||||
conf2.set(Constants.CONFIG_VIEWFS_RENAME_STRATEGY,
|
||||
ViewFileSystem.RenameStrategy.SAME_TARGET_URI_ACROSS_MOUNTPOINT
|
||||
.toString());
|
||||
FileSystem fsView2 = FileSystem.newInstance(FsConstants.VIEWFS_URI, conf2);
|
||||
fileSystemTestHelper.createFile(fsView2, "/user/foo");
|
||||
fsView2.rename(new Path("/user/foo"), new Path("/user2/fooBarBar"));
|
||||
ContractTestUtils
|
||||
.assertPathDoesNotExist(fsView2, "src should not exist after rename",
|
||||
new Path("/user/foo"));
|
||||
ContractTestUtils
|
||||
.assertPathDoesNotExist(fsTarget, "src should not exist after rename",
|
||||
new Path(targetTestRoot, "user/foo"));
|
||||
ContractTestUtils.assertIsFile(fsView2,
|
||||
fileSystemTestHelper.getTestRootPath(fsView2, "/user2/fooBarBar"));
|
||||
ContractTestUtils
|
||||
.assertIsFile(fsTarget, new Path(targetTestRoot, "user/fooBarBar"));
|
||||
}
|
||||
|
||||
// RenameStrategy SAME_FILESYSTEM_ACROSS_MOUNTPOINT enabled
|
||||
// to rename across mount points where the mount link targets are different
|
||||
// but are part of the same target FS
|
||||
@Test
|
||||
public void testRenameAcrossMounts4() throws IOException {
|
||||
Configuration conf2 = new Configuration(conf);
|
||||
conf2.set(Constants.CONFIG_VIEWFS_RENAME_STRATEGY,
|
||||
ViewFileSystem.RenameStrategy.SAME_FILESYSTEM_ACROSS_MOUNTPOINT
|
||||
.toString());
|
||||
FileSystem fsView2 = FileSystem.newInstance(FsConstants.VIEWFS_URI, conf2);
|
||||
fileSystemTestHelper.createFile(fsView2, "/user/foo");
|
||||
fsView2.rename(new Path("/user/foo"), new Path("/data/fooBar"));
|
||||
ContractTestUtils
|
||||
.assertPathDoesNotExist(fsView2, "src should not exist after rename",
|
||||
new Path("/user/foo"));
|
||||
ContractTestUtils
|
||||
.assertPathDoesNotExist(fsTarget, "src should not exist after rename",
|
||||
new Path(targetTestRoot, "user/foo"));
|
||||
ContractTestUtils.assertIsFile(fsView2,
|
||||
fileSystemTestHelper.getTestRootPath(fsView2, "/data/fooBar"));
|
||||
ContractTestUtils
|
||||
.assertIsFile(fsTarget, new Path(targetTestRoot, "data/fooBar"));
|
||||
}
|
||||
|
||||
static protected boolean SupportsBlocks = false; // local fs use 1 block
|
||||
// override for HDFS
|
||||
@Test
|
||||
|
|
|
@ -58,6 +58,7 @@ import org.apache.hadoop.fs.FileStatus;
|
|||
import org.apache.hadoop.fs.FsConstants;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.UnresolvedLinkException;
|
||||
import org.apache.hadoop.fs.contract.ContractTestUtils;
|
||||
import org.apache.hadoop.fs.local.LocalConfigKeys;
|
||||
import org.apache.hadoop.fs.permission.AclEntry;
|
||||
import org.apache.hadoop.fs.permission.AclStatus;
|
||||
|
@ -66,6 +67,7 @@ import org.apache.hadoop.fs.viewfs.ViewFs.MountPoint;
|
|||
import org.apache.hadoop.security.AccessControlException;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
|
@ -345,33 +347,93 @@ abstract public class ViewFsBaseTest {
|
|||
}
|
||||
|
||||
// rename across mount points that point to same target also fail
|
||||
@Test(expected=IOException.class)
|
||||
@Test
|
||||
public void testRenameAcrossMounts1() throws IOException {
|
||||
fileContextTestHelper.createFile(fcView, "/user/foo");
|
||||
fcView.rename(new Path("/user/foo"), new Path("/user2/fooBarBar"));
|
||||
/* - code if we had wanted this to succeed
|
||||
Assert.assertFalse(exists(fc, new Path("/user/foo")));
|
||||
Assert.assertFalse(exists(fclocal, new Path(targetTestRoot,"user/foo")));
|
||||
Assert.assertTrue(isFile(fc,
|
||||
FileContextTestHelper.getTestRootPath(fc,"/user2/fooBarBar")));
|
||||
Assert.assertTrue(isFile(fclocal,
|
||||
new Path(targetTestRoot,"user/fooBarBar")));
|
||||
*/
|
||||
try {
|
||||
fcView.rename(new Path("/user/foo"), new Path("/user2/fooBarBar"));
|
||||
ContractTestUtils.fail("IOException is not thrown on rename operation");
|
||||
} catch (IOException e) {
|
||||
GenericTestUtils
|
||||
.assertExceptionContains("Renames across Mount points not supported",
|
||||
e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// rename across mount points fail if the mount link targets are different
|
||||
// even if the targets are part of the same target FS
|
||||
|
||||
@Test(expected=IOException.class)
|
||||
@Test
|
||||
public void testRenameAcrossMounts2() throws IOException {
|
||||
fileContextTestHelper.createFile(fcView, "/user/foo");
|
||||
fcView.rename(new Path("/user/foo"), new Path("/data/fooBar"));
|
||||
try {
|
||||
fcView.rename(new Path("/user/foo"), new Path("/data/fooBar"));
|
||||
ContractTestUtils.fail("IOException is not thrown on rename operation");
|
||||
} catch (IOException e) {
|
||||
GenericTestUtils
|
||||
.assertExceptionContains("Renames across Mount points not supported",
|
||||
e);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
// RenameStrategy SAME_TARGET_URI_ACROSS_MOUNTPOINT enabled
|
||||
// to rename across mount points that point to same target URI
|
||||
@Test
|
||||
public void testRenameAcrossMounts3() throws IOException {
|
||||
Configuration conf2 = new Configuration(conf);
|
||||
conf2.set(Constants.CONFIG_VIEWFS_RENAME_STRATEGY,
|
||||
ViewFileSystem.RenameStrategy.SAME_TARGET_URI_ACROSS_MOUNTPOINT
|
||||
.toString());
|
||||
|
||||
FileContext fcView2 =
|
||||
FileContext.getFileContext(FsConstants.VIEWFS_URI, conf2);
|
||||
String user1Path = "/user/foo";
|
||||
fileContextTestHelper.createFile(fcView2, user1Path);
|
||||
String user2Path = "/user2/fooBarBar";
|
||||
Path user2Dst = new Path(user2Path);
|
||||
fcView2.rename(new Path(user1Path), user2Dst);
|
||||
ContractTestUtils
|
||||
.assertPathDoesNotExist(fcView2, "src should not exist after rename",
|
||||
new Path(user1Path));
|
||||
ContractTestUtils
|
||||
.assertPathDoesNotExist(fcTarget, "src should not exist after rename",
|
||||
new Path(targetTestRoot, "user/foo"));
|
||||
ContractTestUtils.assertIsFile(fcView2,
|
||||
fileContextTestHelper.getTestRootPath(fcView2, user2Path));
|
||||
ContractTestUtils
|
||||
.assertIsFile(fcTarget, new Path(targetTestRoot, "user/fooBarBar"));
|
||||
}
|
||||
|
||||
// RenameStrategy SAME_FILESYSTEM_ACROSS_MOUNTPOINT enabled
|
||||
// to rename across mount points if the mount link targets are different
|
||||
// but are part of the same target FS
|
||||
@Test
|
||||
public void testRenameAcrossMounts4() throws IOException {
|
||||
Configuration conf2 = new Configuration(conf);
|
||||
conf2.set(Constants.CONFIG_VIEWFS_RENAME_STRATEGY,
|
||||
ViewFileSystem.RenameStrategy.SAME_FILESYSTEM_ACROSS_MOUNTPOINT
|
||||
.toString());
|
||||
FileContext fcView2 =
|
||||
FileContext.getFileContext(FsConstants.VIEWFS_URI, conf2);
|
||||
String userPath = "/user/foo";
|
||||
fileContextTestHelper.createFile(fcView2, userPath);
|
||||
String anotherMountPath = "/data/fooBar";
|
||||
Path anotherDst = new Path(anotherMountPath);
|
||||
fcView2.rename(new Path(userPath), anotherDst);
|
||||
|
||||
ContractTestUtils
|
||||
.assertPathDoesNotExist(fcView2, "src should not exist after rename",
|
||||
new Path(userPath));
|
||||
ContractTestUtils
|
||||
.assertPathDoesNotExist(fcTarget, "src should not exist after rename",
|
||||
new Path(targetTestRoot, "user/foo"));
|
||||
ContractTestUtils.assertIsFile(fcView2,
|
||||
fileContextTestHelper.getTestRootPath(fcView2, anotherMountPath));
|
||||
ContractTestUtils
|
||||
.assertIsFile(fcView2, new Path(targetTestRoot, "data/fooBar"));
|
||||
}
|
||||
|
||||
static protected boolean SupportsBlocks = false; // local fs use 1 block
|
||||
// override for HDFS
|
||||
@Test
|
||||
|
|
|
@ -39,9 +39,12 @@ import java.io.InputStream;
|
|||
import java.io.OutputStream;
|
||||
import java.lang.reflect.Method;
|
||||
import java.lang.reflect.Proxy;
|
||||
import java.net.InetAddress;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.ServerSocket;
|
||||
import java.net.Socket;
|
||||
import java.net.SocketAddress;
|
||||
import java.net.SocketException;
|
||||
import java.net.SocketTimeoutException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
|
@ -76,6 +79,7 @@ import org.apache.hadoop.ipc.Server.Connection;
|
|||
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
|
||||
import org.apache.hadoop.net.ConnectTimeoutException;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.apache.hadoop.security.KerberosInfo;
|
||||
import org.apache.hadoop.security.SecurityUtil;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
|
||||
|
@ -1484,6 +1488,78 @@ public class TestIPC {
|
|||
Assert.fail("didn't get limit exceeded");
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUserBinding() throws Exception {
|
||||
checkUserBinding(false);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testProxyUserBinding() throws Exception {
|
||||
checkUserBinding(true);
|
||||
}
|
||||
|
||||
private void checkUserBinding(boolean asProxy) throws Exception {
|
||||
Socket s;
|
||||
// don't attempt bind with no service host.
|
||||
s = checkConnect(null, asProxy);
|
||||
Mockito.verify(s, Mockito.never()).bind(Mockito.any(SocketAddress.class));
|
||||
|
||||
// don't attempt bind with service host not belonging to this host.
|
||||
s = checkConnect("1.2.3.4", asProxy);
|
||||
Mockito.verify(s, Mockito.never()).bind(Mockito.any(SocketAddress.class));
|
||||
|
||||
// do attempt bind when service host is this host.
|
||||
InetAddress addr = InetAddress.getLocalHost();
|
||||
s = checkConnect(addr.getHostAddress(), asProxy);
|
||||
Mockito.verify(s).bind(new InetSocketAddress(addr, 0));
|
||||
}
|
||||
|
||||
// dummy protocol that claims to support kerberos.
|
||||
@KerberosInfo(serverPrincipal = "server@REALM")
|
||||
private static class TestBindingProtocol {
|
||||
}
|
||||
|
||||
private Socket checkConnect(String addr, boolean asProxy) throws Exception {
|
||||
// create a fake ugi that claims to have kerberos credentials.
|
||||
StringBuilder principal = new StringBuilder();
|
||||
principal.append("client");
|
||||
if (addr != null) {
|
||||
principal.append("/").append(addr);
|
||||
}
|
||||
principal.append("@REALM");
|
||||
UserGroupInformation ugi =
|
||||
spy(UserGroupInformation.createRemoteUser(principal.toString()));
|
||||
Mockito.doReturn(true).when(ugi).hasKerberosCredentials();
|
||||
if (asProxy) {
|
||||
ugi = UserGroupInformation.createProxyUser("proxy", ugi);
|
||||
}
|
||||
|
||||
// create a mock socket that throws on connect.
|
||||
SocketException expectedConnectEx =
|
||||
new SocketException("Expected connect failure");
|
||||
Socket s = Mockito.mock(Socket.class);
|
||||
SocketFactory mockFactory = Mockito.mock(SocketFactory.class);
|
||||
Mockito.doReturn(s).when(mockFactory).createSocket();
|
||||
doThrow(expectedConnectEx).when(s).connect(
|
||||
Mockito.any(SocketAddress.class), Mockito.anyInt());
|
||||
|
||||
// do a dummy call and expect it to throw an exception on connect.
|
||||
// tests should verify if/how a bind occurred.
|
||||
try (Client client = new Client(LongWritable.class, conf, mockFactory)) {
|
||||
final InetSocketAddress sockAddr = new InetSocketAddress(0);
|
||||
final LongWritable param = new LongWritable(RANDOM.nextLong());
|
||||
final ConnectionId remoteId = new ConnectionId(
|
||||
sockAddr, TestBindingProtocol.class, ugi, 0,
|
||||
RetryPolicies.TRY_ONCE_THEN_FAIL, conf);
|
||||
client.call(RPC.RpcKind.RPC_BUILTIN, param, remoteId, null);
|
||||
fail("call didn't throw connect exception");
|
||||
} catch (SocketException se) {
|
||||
// ipc layer re-wraps exceptions, so check the cause.
|
||||
Assert.assertSame(expectedConnectEx, se.getCause());
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
private void doIpcVersionTest(
|
||||
byte[] requestData,
|
||||
byte[] expectedResponse) throws IOException {
|
||||
|
|
|
@ -1,132 +0,0 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.test;
|
||||
|
||||
import static org.mockito.Mockito.*;
|
||||
|
||||
/**
|
||||
* Helper class to create one-liner stubs, so that instead of: <pre>
|
||||
* SomeType someDescriptiveMock = mock(SomeType.class);
|
||||
* when(someDescriptiveMock.someMethod()).thenReturn(someValue);</pre>
|
||||
* <p>You can now do: <pre>
|
||||
* SomeType someDescriptiveMock = make(stub(SomeType.class)
|
||||
* .returning(someValue).from.someMethod());</pre>
|
||||
*/
|
||||
public class MockitoMaker {
|
||||
|
||||
/**
|
||||
* Create a mock object from a mocked method call.
|
||||
*
|
||||
* @param <T> type of mocked object
|
||||
* @param methodCall for mocked object
|
||||
* @return mocked object
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public static <T> T make(Object methodCall) {
|
||||
StubBuilder<T> sb = StubBuilder.current();
|
||||
when(methodCall).thenReturn(sb.firstReturn, sb.laterReturns);
|
||||
return (T) StubBuilder.current().from;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a stub builder of a mocked object.
|
||||
*
|
||||
* @param <T> type of the target object to be mocked
|
||||
* @param target class of the target object to be mocked
|
||||
* @return the stub builder of the mocked object
|
||||
*/
|
||||
public static <T> StubBuilder<T> stub(Class<T> target) {
|
||||
return new StubBuilder<T>(mock(target));
|
||||
}
|
||||
|
||||
/**
|
||||
* Builder class for stubs
|
||||
* @param <T> type of the object to be mocked
|
||||
*/
|
||||
public static class StubBuilder<T> {
|
||||
|
||||
/**
|
||||
* The target mock object
|
||||
*/
|
||||
public final T from;
|
||||
|
||||
// We want to be able to use this even when the tests are run in parallel.
|
||||
@SuppressWarnings("rawtypes")
|
||||
private static final ThreadLocal<StubBuilder> tls =
|
||||
new ThreadLocal<StubBuilder>() {
|
||||
@Override protected StubBuilder initialValue() {
|
||||
return new StubBuilder();
|
||||
}
|
||||
};
|
||||
|
||||
private Object firstReturn = null;
|
||||
private Object[] laterReturns = {};
|
||||
|
||||
/**
|
||||
* Default constructor for the initial stub builder
|
||||
*/
|
||||
public StubBuilder() {
|
||||
this.from = null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Construct a stub builder with a mock instance
|
||||
*
|
||||
* @param mockInstance the mock object
|
||||
*/
|
||||
public StubBuilder(T mockInstance) {
|
||||
tls.set(this);
|
||||
this.from = mockInstance;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the current stub builder from thread local
|
||||
*
|
||||
* @param <T>
|
||||
* @return the stub builder of the mocked object
|
||||
*/
|
||||
@SuppressWarnings("unchecked")
|
||||
public static <T> StubBuilder<T> current() {
|
||||
return tls.get();
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the return value for the current stub builder
|
||||
*
|
||||
* @param value the return value
|
||||
* @return the stub builder
|
||||
*/
|
||||
public StubBuilder<T> returning(Object value) {
|
||||
this.firstReturn = value;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the return values for the current stub builder
|
||||
*
|
||||
* @param value the first return value
|
||||
* @param values the return values for later invocations
|
||||
* @return the stub builder
|
||||
*/
|
||||
public StubBuilder<T> returning(Object value, Object... values) {
|
||||
this.firstReturn = value;
|
||||
this.laterReturns = values;
|
||||
return this;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -29,7 +29,6 @@ import static org.junit.Assert.*;
|
|||
|
||||
import static org.mockito.Mockito.*;
|
||||
|
||||
import static org.apache.hadoop.test.MockitoMaker.*;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
|
@ -83,13 +82,14 @@ public class TestDiskChecker {
|
|||
|
||||
private void _mkdirs(boolean exists, FsPermission before, FsPermission after)
|
||||
throws Throwable {
|
||||
File localDir = make(stub(File.class).returning(exists).from.exists());
|
||||
File localDir = mock(File.class);
|
||||
when(localDir.exists()).thenReturn(exists);
|
||||
when(localDir.mkdir()).thenReturn(true);
|
||||
Path dir = mock(Path.class); // use default stubs
|
||||
LocalFileSystem fs = make(stub(LocalFileSystem.class)
|
||||
.returning(localDir).from.pathToFile(dir));
|
||||
FileStatus stat = make(stub(FileStatus.class)
|
||||
.returning(after).from.getPermission());
|
||||
LocalFileSystem fs = mock(LocalFileSystem.class);
|
||||
when(fs.pathToFile(dir)).thenReturn(localDir);
|
||||
FileStatus stat = mock(FileStatus.class);
|
||||
when(stat.getPermission()).thenReturn(after);
|
||||
when(fs.getFileStatus(dir)).thenReturn(stat);
|
||||
|
||||
try {
|
||||
|
|
|
@ -0,0 +1,46 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.util;
|
||||
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.text.NumberFormat;
|
||||
|
||||
/**
|
||||
* Test for FastNumberFormat
|
||||
*/
|
||||
public class TestFastNumberFormat {
|
||||
private final int MIN_DIGITS = 6;
|
||||
|
||||
@Test(timeout = 1000)
|
||||
public void testLongWithPadding() throws Exception {
|
||||
NumberFormat numberFormat = NumberFormat.getInstance();
|
||||
numberFormat.setGroupingUsed(false);
|
||||
numberFormat.setMinimumIntegerDigits(6);
|
||||
long[] testLongs = {1, 23, 456, 7890, 12345, 678901, 2345689, 0, -0, -1,
|
||||
-23, -456, -7890, -12345, -678901, -2345689};
|
||||
for (long l: testLongs) {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
FastNumberFormat.format(sb, l, MIN_DIGITS);
|
||||
String fastNumberStr = sb.toString();
|
||||
Assert.assertEquals("Number formats should be equal",
|
||||
numberFormat.format(l), fastNumberStr);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -28,7 +28,7 @@
|
|||
|
||||
# HTTPFS temporary directory
|
||||
#
|
||||
# export HTTPFS_TEMP=${HADOOP_HOME}/temp
|
||||
# export HTTPFS_TEMP=${HADOOP_HDFS_HOME}/temp
|
||||
|
||||
# The HTTP port used by HTTPFS
|
||||
#
|
||||
|
|
|
@ -71,8 +71,6 @@ public class HttpFSServerWebServer {
|
|||
HttpFSServerWebServer(Configuration conf, Configuration sslConf) throws
|
||||
Exception {
|
||||
// Override configuration with deprecated environment variables.
|
||||
deprecateEnv("HTTPFS_TEMP", conf, HttpServer2.HTTP_TEMP_DIR_KEY,
|
||||
HTTPFS_SITE_XML);
|
||||
deprecateEnv("HTTPFS_HTTP_HOSTNAME", conf, HTTP_HOSTNAME_KEY,
|
||||
HTTPFS_SITE_XML);
|
||||
deprecateEnv("HTTPFS_HTTP_PORT", conf, HTTP_PORT_KEY,
|
||||
|
|
16
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/shellprofile.d/hadoop-httpfs.sh
Normal file → Executable file
16
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/libexec/shellprofile.d/hadoop-httpfs.sh
Normal file → Executable file
|
@ -37,14 +37,18 @@ function hdfs_subcommand_httpfs
|
|||
# shellcheck disable=SC2034
|
||||
|
||||
hadoop_add_param HADOOP_OPTS "-Dhttpfs.home.dir" \
|
||||
"-Dhttpfs.home.dir=${HADOOP_HOME}"
|
||||
"-Dhttpfs.home.dir=${HTTPFS_HOME:-${HADOOP_HDFS_HOME}}"
|
||||
hadoop_add_param HADOOP_OPTS "-Dhttpfs.config.dir" \
|
||||
"-Dhttpfs.config.dir=${HTTPFS_CONFIG:-${HADOOP_CONF_DIR}}"
|
||||
hadoop_add_param HADOOP_OPTS "-Dhttpfs.log.dir" \
|
||||
"-Dhttpfs.log.dir=${HTTPFS_LOG:-${HADOOP_LOG_DIR}}"
|
||||
|
||||
if [[ "${HADOOP_DAEMON_MODE}" == "default" ]] ||
|
||||
[[ "${HADOOP_DAEMON_MODE}" == "start" ]]; then
|
||||
hadoop_mkdir "${HTTPFS_TEMP:-${HADOOP_HOME}/temp}"
|
||||
fi
|
||||
}
|
||||
local temp_dir=${HTTPFS_TEMP:-${HADOOP_HDFS_HOME}/temp}
|
||||
hadoop_add_param HADOOP_OPTS "-Dhttpfs.temp.dir" \
|
||||
"-Dhttpfs.temp.dir=${temp_dir}"
|
||||
case ${HADOOP_DAEMON_MODE} in
|
||||
start|default)
|
||||
hadoop_mkdir "${temp_dir}"
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
|
|
@ -149,7 +149,6 @@ HTTPFS_MAX_THREADS | hadoop.http.max.threads | httpfs-site.xml
|
|||
HTTPFS_SSL_ENABLED | httpfs.ssl.enabled | httpfs-site.xml
|
||||
HTTPFS_SSL_KEYSTORE_FILE | ssl.server.keystore.location | ssl-server.xml
|
||||
HTTPFS_SSL_KEYSTORE_PASS | ssl.server.keystore.password | ssl-server.xml
|
||||
HTTPFS_TEMP | hadoop.http.temp.dir | httpfs-site.xml
|
||||
|
||||
HTTP Default Services
|
||||
---------------------
|
||||
|
|
|
@ -73,6 +73,9 @@ import com.google.common.collect.Maps;
|
|||
import java.util.Properties;
|
||||
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
|
||||
|
||||
/**
|
||||
* Main test class for HttpFSServer.
|
||||
*/
|
||||
public class TestHttpFSServer extends HFSTestCase {
|
||||
|
||||
@Test
|
||||
|
@ -82,15 +85,20 @@ public class TestHttpFSServer extends HFSTestCase {
|
|||
String dir = TestDirHelper.getTestDir().getAbsolutePath();
|
||||
|
||||
Configuration httpfsConf = new Configuration(false);
|
||||
HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
|
||||
HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir,
|
||||
httpfsConf);
|
||||
server.init();
|
||||
server.destroy();
|
||||
}
|
||||
|
||||
public static class MockGroups implements Service,Groups {
|
||||
/**
|
||||
* Mock groups.
|
||||
*/
|
||||
public static class MockGroups implements Service, Groups {
|
||||
|
||||
@Override
|
||||
public void init(org.apache.hadoop.lib.server.Server server) throws ServiceException {
|
||||
public void init(org.apache.hadoop.lib.server.Server server)
|
||||
throws ServiceException {
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -112,8 +120,10 @@ public class TestHttpFSServer extends HFSTestCase {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void serverStatusChange(org.apache.hadoop.lib.server.Server.Status oldStatus,
|
||||
org.apache.hadoop.lib.server.Server.Status newStatus) throws ServiceException {
|
||||
public void serverStatusChange(
|
||||
org.apache.hadoop.lib.server.Server.Status oldStatus,
|
||||
org.apache.hadoop.lib.server.Server.Status newStatus)
|
||||
throws ServiceException {
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -300,25 +310,30 @@ public class TestHttpFSServer extends HFSTestCase {
|
|||
createHttpFSServer(false, false);
|
||||
|
||||
URL url = new URL(TestJettyHelper.getJettyURL(),
|
||||
MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation", "nobody"));
|
||||
MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation",
|
||||
"nobody"));
|
||||
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
|
||||
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_UNAUTHORIZED);
|
||||
Assert.assertEquals(conn.getResponseCode(),
|
||||
HttpURLConnection.HTTP_UNAUTHORIZED);
|
||||
|
||||
url = new URL(TestJettyHelper.getJettyURL(),
|
||||
MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation",
|
||||
HadoopUsersConfTestHelper.getHadoopUsers()[0]));
|
||||
MessageFormat.format("/webhdfs/v1?user.name={0}&op=instrumentation",
|
||||
HadoopUsersConfTestHelper.getHadoopUsers()[0]));
|
||||
conn = (HttpURLConnection) url.openConnection();
|
||||
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
|
||||
BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
|
||||
BufferedReader reader = new BufferedReader(
|
||||
new InputStreamReader(conn.getInputStream()));
|
||||
String line = reader.readLine();
|
||||
reader.close();
|
||||
Assert.assertTrue(line.contains("\"counters\":{"));
|
||||
|
||||
url = new URL(TestJettyHelper.getJettyURL(),
|
||||
MessageFormat.format("/webhdfs/v1/foo?user.name={0}&op=instrumentation",
|
||||
HadoopUsersConfTestHelper.getHadoopUsers()[0]));
|
||||
MessageFormat.format(
|
||||
"/webhdfs/v1/foo?user.name={0}&op=instrumentation",
|
||||
HadoopUsersConfTestHelper.getHadoopUsers()[0]));
|
||||
conn = (HttpURLConnection) url.openConnection();
|
||||
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
|
||||
Assert.assertEquals(conn.getResponseCode(),
|
||||
HttpURLConnection.HTTP_BAD_REQUEST);
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -330,10 +345,12 @@ public class TestHttpFSServer extends HFSTestCase {
|
|||
|
||||
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
|
||||
URL url = new URL(TestJettyHelper.getJettyURL(),
|
||||
MessageFormat.format("/webhdfs/v1/?user.name={0}&op=liststatus", user));
|
||||
MessageFormat.format("/webhdfs/v1/?user.name={0}&op=liststatus",
|
||||
user));
|
||||
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
|
||||
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
|
||||
BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
|
||||
BufferedReader reader = new BufferedReader(
|
||||
new InputStreamReader(conn.getInputStream()));
|
||||
reader.readLine();
|
||||
reader.close();
|
||||
}
|
||||
|
@ -369,10 +386,12 @@ public class TestHttpFSServer extends HFSTestCase {
|
|||
|
||||
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
|
||||
URL url = new URL(TestJettyHelper.getJettyURL(),
|
||||
MessageFormat.format("/webhdfs/v1/tmp?user.name={0}&op=liststatus&filter=f*", user));
|
||||
MessageFormat.format(
|
||||
"/webhdfs/v1/tmp?user.name={0}&op=liststatus&filter=f*", user));
|
||||
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
|
||||
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
|
||||
BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
|
||||
BufferedReader reader = new BufferedReader(
|
||||
new InputStreamReader(conn.getInputStream()));
|
||||
reader.readLine();
|
||||
reader.close();
|
||||
}
|
||||
|
@ -384,15 +403,14 @@ public class TestHttpFSServer extends HFSTestCase {
|
|||
* @param perms The permission field, if any (may be null)
|
||||
* @throws Exception
|
||||
*/
|
||||
private void createWithHttp ( String filename, String perms )
|
||||
throws Exception {
|
||||
private void createWithHttp(String filename, String perms) throws Exception {
|
||||
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
|
||||
// Remove leading / from filename
|
||||
if ( filename.charAt(0) == '/' ) {
|
||||
if (filename.charAt(0) == '/') {
|
||||
filename = filename.substring(1);
|
||||
}
|
||||
String pathOps;
|
||||
if ( perms == null ) {
|
||||
if (perms == null) {
|
||||
pathOps = MessageFormat.format(
|
||||
"/webhdfs/v1/{0}?user.name={1}&op=CREATE",
|
||||
filename, user);
|
||||
|
@ -422,7 +440,7 @@ public class TestHttpFSServer extends HFSTestCase {
|
|||
throws Exception {
|
||||
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
|
||||
// Remove leading / from filename
|
||||
if ( filename.charAt(0) == '/' ) {
|
||||
if (filename.charAt(0) == '/') {
|
||||
filename = filename.substring(1);
|
||||
}
|
||||
String pathOps = MessageFormat.format(
|
||||
|
@ -449,7 +467,7 @@ public class TestHttpFSServer extends HFSTestCase {
|
|||
String params) throws Exception {
|
||||
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
|
||||
// Remove leading / from filename
|
||||
if ( filename.charAt(0) == '/' ) {
|
||||
if (filename.charAt(0) == '/') {
|
||||
filename = filename.substring(1);
|
||||
}
|
||||
String pathOps = MessageFormat.format(
|
||||
|
@ -471,7 +489,7 @@ public class TestHttpFSServer extends HFSTestCase {
|
|||
* @return The value of 'permission' in statusJson
|
||||
* @throws Exception
|
||||
*/
|
||||
private String getPerms ( String statusJson ) throws Exception {
|
||||
private String getPerms(String statusJson) throws Exception {
|
||||
JSONParser parser = new JSONParser();
|
||||
JSONObject jsonObject = (JSONObject) parser.parse(statusJson);
|
||||
JSONObject details = (JSONObject) jsonObject.get("FileStatus");
|
||||
|
@ -499,20 +517,20 @@ public class TestHttpFSServer extends HFSTestCase {
|
|||
* @return A List of Strings which are the elements of the ACL entries
|
||||
* @throws Exception
|
||||
*/
|
||||
private List<String> getAclEntries ( String statusJson ) throws Exception {
|
||||
private List<String> getAclEntries(String statusJson) throws Exception {
|
||||
List<String> entries = new ArrayList<String>();
|
||||
JSONParser parser = new JSONParser();
|
||||
JSONObject jsonObject = (JSONObject) parser.parse(statusJson);
|
||||
JSONObject details = (JSONObject) jsonObject.get("AclStatus");
|
||||
JSONArray jsonEntries = (JSONArray) details.get("entries");
|
||||
if ( jsonEntries != null ) {
|
||||
if (jsonEntries != null) {
|
||||
for (Object e : jsonEntries) {
|
||||
entries.add(e.toString());
|
||||
}
|
||||
}
|
||||
return entries;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Parse xAttrs from JSON result of GETXATTRS call, return xAttrs Map.
|
||||
* @param statusJson JSON from GETXATTRS
|
||||
|
@ -533,8 +551,8 @@ public class TestHttpFSServer extends HFSTestCase {
|
|||
}
|
||||
return xAttrs;
|
||||
}
|
||||
|
||||
/** Decode xattr value from string */
|
||||
|
||||
/** Decode xattr value from string. */
|
||||
private byte[] decodeXAttrValue(String value) throws IOException {
|
||||
if (value != null) {
|
||||
return XAttrCodec.decodeValue(value);
|
||||
|
@ -574,7 +592,7 @@ public class TestHttpFSServer extends HFSTestCase {
|
|||
statusJson = getStatus("/perm/p-321", "GETFILESTATUS");
|
||||
Assert.assertTrue("321".equals(getPerms(statusJson)));
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Validate XAttr get/set/remove calls.
|
||||
*/
|
||||
|
@ -594,12 +612,12 @@ public class TestHttpFSServer extends HFSTestCase {
|
|||
|
||||
FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
|
||||
fs.mkdirs(new Path(dir));
|
||||
|
||||
createWithHttp(path,null);
|
||||
|
||||
createWithHttp(path, null);
|
||||
String statusJson = getStatus(path, "GETXATTRS");
|
||||
Map<String, byte[]> xAttrs = getXAttrs(statusJson);
|
||||
Assert.assertEquals(0, xAttrs.size());
|
||||
|
||||
|
||||
// Set two xattrs
|
||||
putCmd(path, "SETXATTR", setXAttrParam(name1, value1));
|
||||
putCmd(path, "SETXATTR", setXAttrParam(name2, value2));
|
||||
|
@ -608,25 +626,26 @@ public class TestHttpFSServer extends HFSTestCase {
|
|||
Assert.assertEquals(2, xAttrs.size());
|
||||
Assert.assertArrayEquals(value1, xAttrs.get(name1));
|
||||
Assert.assertArrayEquals(value2, xAttrs.get(name2));
|
||||
|
||||
|
||||
// Remove one xattr
|
||||
putCmd(path, "REMOVEXATTR", "xattr.name=" + name1);
|
||||
statusJson = getStatus(path, "GETXATTRS");
|
||||
xAttrs = getXAttrs(statusJson);
|
||||
Assert.assertEquals(1, xAttrs.size());
|
||||
Assert.assertArrayEquals(value2, xAttrs.get(name2));
|
||||
|
||||
|
||||
// Remove another xattr, then there is no xattr
|
||||
putCmd(path, "REMOVEXATTR", "xattr.name=" + name2);
|
||||
statusJson = getStatus(path, "GETXATTRS");
|
||||
xAttrs = getXAttrs(statusJson);
|
||||
Assert.assertEquals(0, xAttrs.size());
|
||||
}
|
||||
|
||||
/** Params for setting an xAttr */
|
||||
public static String setXAttrParam(String name, byte[] value) throws IOException {
|
||||
|
||||
/** Params for setting an xAttr. */
|
||||
public static String setXAttrParam(String name, byte[] value)
|
||||
throws IOException {
|
||||
return "xattr.name=" + name + "&xattr.value=" + XAttrCodec.encodeValue(
|
||||
value, XAttrCodec.HEX) + "&encoding=hex&flag=create";
|
||||
value, XAttrCodec.HEX) + "&encoding=hex&flag=create";
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -791,7 +810,9 @@ public class TestHttpFSServer extends HFSTestCase {
|
|||
|
||||
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
|
||||
URL url = new URL(TestJettyHelper.getJettyURL(),
|
||||
MessageFormat.format("/webhdfs/v1/tmp/foo?user.name={0}&op=open&offset=1&length=2", user));
|
||||
MessageFormat.format(
|
||||
"/webhdfs/v1/tmp/foo?user.name={0}&op=open&offset=1&length=2",
|
||||
user));
|
||||
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
|
||||
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
|
||||
InputStream is = conn.getInputStream();
|
||||
|
@ -809,12 +830,13 @@ public class TestHttpFSServer extends HFSTestCase {
|
|||
|
||||
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
|
||||
URL url = new URL(TestJettyHelper.getJettyURL(),
|
||||
MessageFormat.format("/webhdfs/v1/foo?user.name={0}", user));
|
||||
MessageFormat.format("/webhdfs/v1/foo?user.name={0}", user));
|
||||
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
|
||||
conn.setDoInput(true);
|
||||
conn.setDoOutput(true);
|
||||
conn.setRequestMethod("PUT");
|
||||
Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_BAD_REQUEST);
|
||||
Assert.assertEquals(conn.getResponseCode(),
|
||||
HttpURLConnection.HTTP_BAD_REQUEST);
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
|
@ -110,12 +110,12 @@ public class TestHttpFSServerNoACLs extends HTestCase {
|
|||
|
||||
// HDFS configuration
|
||||
File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
|
||||
if ( !hadoopConfDir.mkdirs() ) {
|
||||
if (!hadoopConfDir.mkdirs()) {
|
||||
throw new IOException();
|
||||
}
|
||||
|
||||
String fsDefaultName =
|
||||
nnConf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
|
||||
nnConf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
|
||||
Configuration conf = new Configuration(false);
|
||||
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
|
||||
|
||||
|
@ -146,7 +146,7 @@ public class TestHttpFSServerNoACLs extends HTestCase {
|
|||
|
||||
ClassLoader cl = Thread.currentThread().getContextClassLoader();
|
||||
URL url = cl.getResource("webapp");
|
||||
if ( url == null ) {
|
||||
if (url == null) {
|
||||
throw new IOException();
|
||||
}
|
||||
WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
|
||||
|
@ -168,7 +168,7 @@ public class TestHttpFSServerNoACLs extends HTestCase {
|
|||
throws Exception {
|
||||
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
|
||||
// Remove leading / from filename
|
||||
if ( filename.charAt(0) == '/' ) {
|
||||
if (filename.charAt(0) == '/') {
|
||||
filename = filename.substring(1);
|
||||
}
|
||||
String pathOps = MessageFormat.format(
|
||||
|
@ -179,7 +179,7 @@ public class TestHttpFSServerNoACLs extends HTestCase {
|
|||
conn.connect();
|
||||
int resp = conn.getResponseCode();
|
||||
BufferedReader reader;
|
||||
if ( expectOK ) {
|
||||
if (expectOK) {
|
||||
Assert.assertEquals(HttpURLConnection.HTTP_OK, resp);
|
||||
reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
|
||||
String res = reader.readLine();
|
||||
|
@ -204,7 +204,7 @@ public class TestHttpFSServerNoACLs extends HTestCase {
|
|||
String params, boolean expectOK) throws Exception {
|
||||
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
|
||||
// Remove leading / from filename
|
||||
if ( filename.charAt(0) == '/' ) {
|
||||
if (filename.charAt(0) == '/') {
|
||||
filename = filename.substring(1);
|
||||
}
|
||||
String pathOps = MessageFormat.format(
|
||||
|
@ -216,7 +216,7 @@ public class TestHttpFSServerNoACLs extends HTestCase {
|
|||
conn.setRequestMethod("PUT");
|
||||
conn.connect();
|
||||
int resp = conn.getResponseCode();
|
||||
if ( expectOK ) {
|
||||
if (expectOK) {
|
||||
Assert.assertEquals(HttpURLConnection.HTTP_OK, resp);
|
||||
} else {
|
||||
Assert.assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR, resp);
|
||||
|
@ -229,6 +229,7 @@ public class TestHttpFSServerNoACLs extends HTestCase {
|
|||
}
|
||||
|
||||
/**
|
||||
* Test without ACLs.
|
||||
* Ensure that
|
||||
* <ol>
|
||||
* <li>GETFILESTATUS and LISTSTATUS work happily</li>
|
||||
|
|
|
@ -111,7 +111,7 @@ public class TestHttpFSServerNoXAttrs extends HTestCase {
|
|||
|
||||
// HDFS configuration
|
||||
File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
|
||||
if ( !hadoopConfDir.mkdirs() ) {
|
||||
if (!hadoopConfDir.mkdirs()) {
|
||||
throw new IOException();
|
||||
}
|
||||
|
||||
|
@ -147,7 +147,7 @@ public class TestHttpFSServerNoXAttrs extends HTestCase {
|
|||
|
||||
ClassLoader cl = Thread.currentThread().getContextClassLoader();
|
||||
URL url = cl.getResource("webapp");
|
||||
if ( url == null ) {
|
||||
if (url == null) {
|
||||
throw new IOException();
|
||||
}
|
||||
WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
|
||||
|
@ -168,7 +168,7 @@ public class TestHttpFSServerNoXAttrs extends HTestCase {
|
|||
throws Exception {
|
||||
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
|
||||
// Remove leading / from filename
|
||||
if ( filename.charAt(0) == '/' ) {
|
||||
if (filename.charAt(0) == '/') {
|
||||
filename = filename.substring(1);
|
||||
}
|
||||
String pathOps = MessageFormat.format(
|
||||
|
@ -197,7 +197,7 @@ public class TestHttpFSServerNoXAttrs extends HTestCase {
|
|||
String params) throws Exception {
|
||||
String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
|
||||
// Remove leading / from filename
|
||||
if ( filename.charAt(0) == '/' ) {
|
||||
if (filename.charAt(0) == '/') {
|
||||
filename = filename.substring(1);
|
||||
}
|
||||
String pathOps = MessageFormat.format(
|
||||
|
@ -245,4 +245,4 @@ public class TestHttpFSServerNoXAttrs extends HTestCase {
|
|||
putCmd(path, "SETXATTR", TestHttpFSServer.setXAttrParam(name1, value1));
|
||||
putCmd(path, "REMOVEXATTR", "xattr.name=" + name1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -49,6 +49,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;

@ -121,6 +122,7 @@ public class Dispatcher {

  /** The maximum number of concurrent blocks moves at a datanode */
  private final int maxConcurrentMovesPerNode;
  private final int maxMoverThreads;

  private final long getBlocksSize;
  private final long getBlocksMinBlockSize;

@ -139,11 +141,13 @@ public class Dispatcher {
  static class Allocator {
    private final int max;
    private int count = 0;
    private int lotSize = 1;

    Allocator(int max) {
      this.max = max;
    }

    /** Allocate specified number of items */
    synchronized int allocate(int n) {
      final int remaining = max - count;
      if (remaining <= 0) {

@ -155,9 +159,19 @@ public class Dispatcher {
      }
    }

    /** Allocate a single lot of items */
    int allocate() {
      return allocate(lotSize);
    }

    synchronized void reset() {
      count = 0;
    }

    /** Set the lot size */
    synchronized void setLotSize(int lotSize) {
      this.lotSize = lotSize;
    }
  }

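The Allocator added above hands out a shared budget of mover threads in fixed-size lots, one lot per target, until the budget runs out. The sketch below is a standalone re-statement of that behaviour for illustration only; it fills in the portion of allocate(int) that the hunk above truncates, under the natural assumption that it grants min(remaining, n) items.

/** Illustration only: lot-based allocation as used by Dispatcher.Allocator. */
public class LotAllocatorDemo {
  static class Allocator {
    private final int max;
    private int count = 0;
    private int lotSize = 1;

    Allocator(int max) { this.max = max; }

    synchronized int allocate(int n) {
      final int remaining = max - count;
      if (remaining <= 0) {
        return 0;
      }
      final int allocated = remaining < n ? remaining : n;
      count += allocated;
      return allocated;
    }

    int allocate() { return allocate(lotSize); }

    synchronized void reset() { count = 0; }

    synchronized void setLotSize(int lotSize) { this.lotSize = lotSize; }
  }

  public static void main(String[] args) {
    Allocator a = new Allocator(10);   // e.g. a global budget of 10 threads
    a.setLotSize(4);                   // threads granted per target
    System.out.println(a.allocate());  // 4
    System.out.println(a.allocate());  // 4
    System.out.println(a.allocate());  // 2 -- only the remainder is left
    System.out.println(a.allocate());  // 0 -- budget exhausted
    a.reset();                         // start of the next iteration
  }
}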
  private static class GlobalBlockMap {

@ -1017,6 +1031,7 @@ public class Dispatcher {
    this.dispatchExecutor = dispatcherThreads == 0? null
        : Executors.newFixedThreadPool(dispatcherThreads);
    this.moverThreadAllocator = new Allocator(moverThreads);
    this.maxMoverThreads = moverThreads;
    this.maxConcurrentMovesPerNode = maxConcurrentMovesPerNode;

    this.getBlocksSize = getBlocksSize;

@ -1116,7 +1131,7 @@ public class Dispatcher {
    final DDatanode targetDn = p.target.getDDatanode();
    ExecutorService moveExecutor = targetDn.getMoveExecutor();
    if (moveExecutor == null) {
      final int nThreads = moverThreadAllocator.allocate(maxConcurrentMovesPerNode);
      final int nThreads = moverThreadAllocator.allocate();
      if (nThreads > 0) {
        moveExecutor = targetDn.initMoveExecutor(nThreads);
      }

@ -1166,6 +1181,25 @@ public class Dispatcher {
      LOG.debug("Disperse Interval sec = " +
          concurrentThreads / BALANCER_NUM_RPC_PER_SEC);
    }

    // Determine the size of each mover thread pool per target
    int threadsPerTarget = maxMoverThreads/targets.size();
    if (threadsPerTarget == 0) {
      // Some scheduled moves will get ignored as some targets won't have
      // any threads allocated.
      moverThreadAllocator.setLotSize(1);
      LOG.warn(DFSConfigKeys.DFS_BALANCER_MOVERTHREADS_KEY + "=" +
          maxMoverThreads + " is too small for moving blocks to " +
          targets.size() + " targets. Balancing may be slower.");
    } else {
      if (threadsPerTarget > maxConcurrentMovesPerNode) {
        threadsPerTarget = maxConcurrentMovesPerNode;
        LOG.info("Limiting threads per target to the specified max.");
      }
      moverThreadAllocator.setLotSize(threadsPerTarget);
      LOG.info("Allocating " + threadsPerTarget + " threads per target.");
    }

    long dSec = 0;
    final Iterator<Source> i = sources.iterator();
    for (int j = 0; j < futures.length; j++) {
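The sizing logic above divides the global mover-thread budget evenly across the targets of one balancer iteration and caps the result at the per-node move limit. A small worked example with assumed numbers (not taken from the commit):

/**
 * With dfs.balancer.moverThreads = 1000, 200 targets and 5 max concurrent
 * moves per node: 1000 / 200 = 5, within the per-node cap, so each target
 * executor gets 5 threads. With 2000 targets the quotient would be 0, the
 * lot size falls back to 1, and some targets get no threads this iteration.
 */
public class MoverThreadSizingDemo {
  public static void main(String[] args) {
    int maxMoverThreads = 1000;
    int numTargets = 200;
    int maxConcurrentMovesPerNode = 5;

    int threadsPerTarget = maxMoverThreads / numTargets;
    if (threadsPerTarget == 0) {
      threadsPerTarget = 1; // lot size of 1; some scheduled moves are skipped
    } else if (threadsPerTarget > maxConcurrentMovesPerNode) {
      threadsPerTarget = maxConcurrentMovesPerNode;
    }
    System.out.println(threadsPerTarget); // prints 5
  }
}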
@ -331,11 +331,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
  }

  public void resetBlocks() {
    setCapacity(0);
    setRemaining(0);
    setBlockPoolUsed(0);
    setDfsUsed(0);
    setXceiverCount(0);
    updateStorageStats(this.getStorageReports(), 0L, 0L, 0, 0, null);
    this.invalidateBlocks.clear();
    this.volumeFailures = 0;
    // pendingCached, cached, and pendingUncached are protected by the

@ -384,6 +380,16 @@ public class DatanodeDescriptor extends DatanodeInfo {
  public void updateHeartbeatState(StorageReport[] reports, long cacheCapacity,
      long cacheUsed, int xceiverCount, int volFailures,
      VolumeFailureSummary volumeFailureSummary) {
    updateStorageStats(reports, cacheCapacity, cacheUsed, xceiverCount,
        volFailures, volumeFailureSummary);
    setLastUpdate(Time.now());
    setLastUpdateMonotonic(Time.monotonicNow());
    rollBlocksScheduled(getLastUpdateMonotonic());
  }

  private void updateStorageStats(StorageReport[] reports, long cacheCapacity,
      long cacheUsed, int xceiverCount, int volFailures,
      VolumeFailureSummary volumeFailureSummary) {
    long totalCapacity = 0;
    long totalRemaining = 0;
    long totalBlockPoolUsed = 0;

@ -434,8 +440,6 @@ public class DatanodeDescriptor extends DatanodeInfo {
    setCacheCapacity(cacheCapacity);
    setCacheUsed(cacheUsed);
    setXceiverCount(xceiverCount);
    setLastUpdate(Time.now());
    setLastUpdateMonotonic(Time.monotonicNow());
    this.volumeFailures = volFailures;
    this.volumeFailureSummary = volumeFailureSummary;
    for (StorageReport report : reports) {

@ -451,7 +455,6 @@ public class DatanodeDescriptor extends DatanodeInfo {
      totalDfsUsed += report.getDfsUsed();
      totalNonDfsUsed += report.getNonDfsUsed();
    }
    rollBlocksScheduled(getLastUpdateMonotonic());

    // Update total metrics for the node.
    setCapacity(totalCapacity);
@ -1332,7 +1332,7 @@ public class DataNode extends ReconfigurableBase

  // used only for testing
  @VisibleForTesting
  void setHeartbeatsDisabledForTests(
  public void setHeartbeatsDisabledForTests(
      boolean heartbeatsDisabledForTests) {
    this.heartbeatsDisabledForTests = heartbeatsDisabledForTests;
  }
@ -1424,13 +1424,27 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
          minBytesRcvd + ", " + maxBytesRcvd + "].");
    }

    long bytesOnDisk = rbw.getBytesOnDisk();
    long blockDataLength = rbw.getReplicaInfo().getBlockDataLength();
    if (bytesOnDisk != blockDataLength) {
      LOG.info("Resetting bytesOnDisk to match blockDataLength (={}) for " +
          "replica {}", blockDataLength, rbw);
      bytesOnDisk = blockDataLength;
      rbw.setLastChecksumAndDataLen(bytesOnDisk, null);
    }

    if (bytesOnDisk < bytesAcked) {
      throw new ReplicaNotFoundException("Found fewer bytesOnDisk than " +
          "bytesAcked for replica " + rbw);
    }

    FsVolumeReference ref = rbw.getReplicaInfo()
        .getVolume().obtainReference();
    try {
      // Truncate the potentially corrupt portion.
      // If the source was client and the last node in the pipeline was lost,
      // any corrupt data written after the acked length can go unnoticed.
      if (numBytes > bytesAcked) {
      if (bytesOnDisk > bytesAcked) {
        rbw.getReplicaInfo().truncateBlock(bytesAcked);
        rbw.setNumBytes(bytesAcked);
        rbw.setLastChecksumAndDataLen(bytesAcked, null);

@ -2460,8 +2474,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {

    //check replica bytes on disk.
    if (replica.getBytesOnDisk() < replica.getVisibleLength()) {
      throw new IOException("THIS IS NOT SUPPOSED TO HAPPEN:"
          + " getBytesOnDisk() < getVisibleLength(), rip=" + replica);
      throw new IOException("getBytesOnDisk() < getVisibleLength(), rip="
          + replica);
    }

    //check the replica's files
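The recoverRbw() changes above first trust the actual length of the block file over the replica's stale in-memory counter, then refuse recovery when even the file is shorter than what was acknowledged to the client, and finally truncate anything written beyond the acked length. A minimal standalone sketch of that ordering, with made-up byte counts:

/** Illustration of the recovery invariant enforced above (values invented). */
public class RbwRecoveryCheckDemo {
  public static void main(String[] args) {
    long bytesAcked = 4096;        // bytes the client has had acknowledged
    long bytesOnDisk = 3072;       // stale in-memory view of the replica length
    long blockDataLength = 4608;   // actual length of the block file

    // Step 1: trust the file, not the stale in-memory counter.
    if (bytesOnDisk != blockDataLength) {
      bytesOnDisk = blockDataLength;
    }
    // Step 2: anything shorter than the acked length is unrecoverable.
    if (bytesOnDisk < bytesAcked) {
      throw new IllegalStateException("fewer bytesOnDisk than bytesAcked");
    }
    // Step 3: bytes beyond the acked length are suspect and are cut off.
    if (bytesOnDisk > bytesAcked) {
      bytesOnDisk = bytesAcked;    // stands in for truncateBlock(bytesAcked)
    }
    System.out.println("recovered length = " + bytesOnDisk); // 4096
  }
}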
@ -209,9 +209,8 @@ public class EncryptionZoneManager {
    if (!hasCreatedEncryptionZone()) {
      return null;
    }
    List<INode> inodes = iip.getReadOnlyINodes();
    for (int i = inodes.size() - 1; i >= 0; i--) {
      final INode inode = inodes.get(i);
    for (int i = iip.length() - 1; i >= 0; i--) {
      final INode inode = iip.getINode(i);
      if (inode != null) {
        final EncryptionZoneInt ezi = encryptionZones.get(inode.getId());
        if (ezi != null) {

@ -355,9 +355,8 @@ final class FSDirErasureCodingOp {
    Preconditions.checkNotNull(iip, "INodes cannot be null");
    fsd.readLock();
    try {
      List<INode> inodes = iip.getReadOnlyINodes();
      for (int i = inodes.size() - 1; i >= 0; i--) {
        final INode inode = inodes.get(i);
      for (int i = iip.length() - 1; i >= 0; i--) {
        final INode inode = iip.getINode(i);
        if (inode == null) {
          continue;
        }
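Both hunks above replace the materialized getReadOnlyINodes() list with a direct indexed walk over the resolved path, scanning from the deepest inode toward the root so that the closest enclosing encryption zone or erasure coding policy wins. A standalone sketch of that traversal over a plain array; the names and flags are invented for illustration:

/** Sketch of the deepest-first ancestor walk used above, over a plain array. */
public class AncestorWalkDemo {
  public static void main(String[] args) {
    // Hypothetical resolved components of /data/projects/reports, where only
    // "projects" carries the attribute being looked up.
    String[] inodes = {"/", "data", "projects", "reports"};
    boolean[] hasAttribute = {false, false, true, false};

    // Walk from the last (deepest) component toward the root and stop at the
    // first hit, so the closest enclosing ancestor is returned.
    for (int i = inodes.length - 1; i >= 0; i--) {
      if (hasAttribute[i]) {
        System.out.println("nearest ancestor with attribute: " + inodes[i]);
        break;
      }
    }
  }
}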
@ -283,12 +283,14 @@ public abstract class INodeWithAdditionalFields extends INode

  protected void removeFeature(Feature f) {
    int size = features.length;
    Preconditions.checkState(size > 0, "Feature "
        + f.getClass().getSimpleName() + " not found.");
    if (size == 0) {
      throwFeatureNotFoundException(f);
    }

    if (size == 1) {
      Preconditions.checkState(features[0] == f, "Feature "
          + f.getClass().getSimpleName() + " not found.");
      if (features[0] != f) {
        throwFeatureNotFoundException(f);
      }
      features = EMPTY_FEATURE;
      return;
    }

@ -307,14 +309,22 @@ public abstract class INodeWithAdditionalFields extends INode
      }
    }

    Preconditions.checkState(!overflow && j == size - 1, "Feature "
        + f.getClass().getSimpleName() + " not found.");
    if (overflow || j != size - 1) {
      throwFeatureNotFoundException(f);
    }
    features = arr;
  }

  private void throwFeatureNotFoundException(Feature f) {
    throw new IllegalStateException(
        "Feature " + f.getClass().getSimpleName() + " not found.");
  }

  protected <T extends Feature> T getFeature(Class<? extends Feature> clazz) {
    Preconditions.checkArgument(clazz != null);
    for (Feature f : features) {
    final int size = features.length;
    for (int i=0; i < size; i++) {
      Feature f = features[i];
      if (clazz.isAssignableFrom(f.getClass())) {
        @SuppressWarnings("unchecked")
        T ret = (T) f;
@ -18,9 +18,6 @@
package org.apache.hadoop.hdfs.server.namenode;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.NoSuchElementException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

@ -336,17 +333,9 @@ public class INodesInPath {
   * otherwise, i < 0, return the (length + i)-th inode.
   */
  public INode getINode(int i) {
    if (inodes == null || inodes.length == 0) {
      throw new NoSuchElementException("inodes is null or empty");
    }
    int index = i >= 0 ? i : inodes.length + i;
    if (index < inodes.length && index >= 0) {
      return inodes[index];
    } else {
      throw new NoSuchElementException("inodes.length == " + inodes.length);
    }
    return inodes[(i < 0) ? inodes.length + i : i];
  }

  /** @return the last inode. */
  public INode getLastINode() {
    return getINode(-1);

@ -384,10 +373,6 @@ public class INodesInPath {
    return inodes.length;
  }

  public List<INode> getReadOnlyINodes() {
    return Collections.unmodifiableList(Arrays.asList(inodes));
  }

  public INode[] getINodesArray() {
    INode[] retArr = new INode[inodes.length];
    System.arraycopy(inodes, 0, retArr, 0, inodes.length);
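The simplified getINode() above keeps the convention that a negative index counts back from the end of the resolved path, which is what getLastINode() relies on. A tiny worked example, using a plain string array as a stand-in for the inode array:

/** Worked example of the index convention kept by getINode above. */
public class NegativeIndexDemo {
  public static void main(String[] args) {
    // Hypothetical resolved components of /a/b/c: root, a, b, c.
    String[] inodes = {"/", "a", "b", "c"};
    int i = -1; // the argument getLastINode() passes
    String inode = inodes[(i < 0) ? inodes.length + i : i];
    System.out.println(inode); // "c": -1 maps to length - 1, the last inode
  }
}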
@ -520,9 +520,13 @@ public class DFSAdmin extends FsShell {
        + " (" + StringUtils.byteDesc(remaining) + ")");
    System.out.println("DFS Used: " + used
        + " (" + StringUtils.byteDesc(used) + ")");
    double dfsUsedPercent = 0;
    if (presentCapacity != 0) {
      dfsUsedPercent = used/(double)presentCapacity;
    }
    System.out.println("DFS Used%: "
        + StringUtils.formatPercent(used/(double)presentCapacity, 2));
        + StringUtils.formatPercent(dfsUsedPercent, 2));

    /* These counts are not always upto date. They are updated after
     * iteration of an internal list. Should be updated in a few seconds to
     * minutes. Use "-metaSave" to list of all such blocks and accurate
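The dfsadmin change above guards the DFS Used% computation against a zero present capacity (for example, before any datanode has reported in), which could otherwise produce NaN in the report. A small sketch of both branches with assumed byte counts:

/** Behaviour of the DFS Used% guard above, with assumed numbers. */
public class UsedPercentDemo {
  public static void main(String[] args) {
    System.out.println(usedPercent(25L << 30, 100L << 30)); // 0.25
    System.out.println(usedPercent(0, 0));                  // 0.0, not NaN
  }

  static double usedPercent(long used, long presentCapacity) {
    double dfsUsedPercent = 0;
    if (presentCapacity != 0) {
      dfsUsedPercent = used / (double) presentCapacity;
    }
    return dfsUsedPercent;
  }
}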
@ -38,6 +38,7 @@ import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;

@ -45,6 +46,7 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;

@ -247,4 +249,24 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
    Assert.assertTrue("File checksum not matching!",
        fileChecksumViaViewFs.equals(fileChecksumViaTargetFs));
  }

  // Rename should fail across different file systems
  @Test
  public void testRenameAccorssFilesystem() throws IOException {
    // /data is a mount point on nn1
    Path mountDataRootPath = new Path("/data");
    // /mountOnNn2 is a mount point on nn2
    Path fsTargetFilePath = new Path("/mountOnNn2");
    Path filePath = new Path(mountDataRootPath + "/ttest");
    Path hdfFilepath = new Path(fsTargetFilePath + "/ttest2");
    fsView.create(filePath);
    try {
      fsView.rename(filePath, hdfFilepath);
      ContractTestUtils.fail("Should throw IOE on renames across filesystems");
    } catch (IOException e) {
      GenericTestUtils
          .assertExceptionContains("Renames across Mount points not supported",
              e);
    }
  }
}
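The new test above pins down the default behaviour: a rename between paths that resolve to different physical file systems is rejected with "Renames across Mount points not supported". As a hedged sketch only, a rename strategy can be selected through configuration; the key name, strategy value, mount-table links and namenode URIs below are illustrative assumptions rather than values taken from this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;

public class ViewFsRenameStrategyDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumed mount table: /data and /mountOnNn2 backed by two namenodes,
    // mirroring the test above (URIs are placeholders).
    conf.set("fs.viewfs.mounttable.default.link./data", "hdfs://nn1/data");
    conf.set("fs.viewfs.mounttable.default.link./mountOnNn2", "hdfs://nn2/m");
    // Under the default (most restrictive) strategy the rename below throws;
    // a more permissive strategy may allow renames across mount points that
    // share the same target file system.
    conf.set("fs.viewfs.rename.strategy", "SAME_MOUNTPOINT");
    FileSystem viewFs = FileSystem.get(FsConstants.VIEWFS_URI, conf);
    viewFs.rename(new Path("/data/ttest"), new Path("/mountOnNn2/ttest2"));
  }
}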
@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
import org.apache.hadoop.hdfs.server.datanode.Replica;

@ -302,6 +303,15 @@ public class FsDatasetImplTestUtils implements FsDatasetTestUtils {
    rbw.getBlockFile().createNewFile();
    rbw.getMetaFile().createNewFile();
    dataset.volumeMap.add(bpid, rbw);

    FileIoProvider fileIoProvider = rbw.getFileIoProvider();

    try (RandomAccessFile blockRAF = fileIoProvider.getRandomAccessFile(
        volume, rbw.getBlockFile(), "rw")) {
      //extend blockFile
      blockRAF.setLength(eb.getNumBytes());
    }
    saveMetaFileHeader(rbw.getMetaFile());
    return rbw;
  }
@ -19,8 +19,10 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

@ -36,12 +38,14 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
import org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException;
import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipeline;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
import org.junit.Assert;

@ -154,7 +158,7 @@ public class TestWriteToReplica {

    ExtendedBlock[] blocks = new ExtendedBlock[] {
        new ExtendedBlock(bpid, 1, 1, 2001), new ExtendedBlock(bpid, 2, 1, 2002),
        new ExtendedBlock(bpid, 3, 1, 2003), new ExtendedBlock(bpid, 4, 1, 2004),
        new ExtendedBlock(bpid, 3, 2, 2003), new ExtendedBlock(bpid, 4, 1, 2004),
        new ExtendedBlock(bpid, 5, 1, 2005), new ExtendedBlock(bpid, 6, 1, 2006)
    };

@ -552,7 +556,52 @@ public class TestWriteToReplica {
      cluster.shutdown();
    }
  }

  /**
   * Test that we can successfully recover a {@link ReplicaBeingWritten}
   * which has inconsistent metadata (bytes were written to disk but bytesOnDisk
   * was not updated) but that recovery fails when the block is actually
   * corrupt (bytes are not present on disk).
   */
  @Test
  public void testRecoverInconsistentRbw() throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetImpl fsDataset = (FsDatasetImpl)DataNodeTestUtils.getFSDataset(dn);

    // set up replicasMap
    String bpid = cluster.getNamesystem().getBlockPoolId();
    ExtendedBlock[] blocks = setup(bpid, cluster.getFsDatasetTestUtils(dn));

    ReplicaBeingWritten rbw = (ReplicaBeingWritten)fsDataset.
        getReplicaInfo(bpid, blocks[RBW].getBlockId());
    long bytesOnDisk = rbw.getBytesOnDisk();
    // simulate an inconsistent replica length update by reducing in-memory
    // value of on disk length
    rbw.setLastChecksumAndDataLen(bytesOnDisk - 1, null);
    fsDataset.recoverRbw(blocks[RBW], blocks[RBW].getGenerationStamp(), 0L,
        rbw.getNumBytes());
    // after the recovery, on disk length should equal acknowledged length.
    Assert.assertTrue(rbw.getBytesOnDisk() == rbw.getBytesAcked());

    // reduce on disk length again; this time actually truncate the file to
    // simulate the data not being present
    rbw.setLastChecksumAndDataLen(bytesOnDisk - 1, null);
    try (RandomAccessFile blockRAF = rbw.getFileIoProvider().
        getRandomAccessFile(rbw.getVolume(), rbw.getBlockFile(), "rw")) {
      // truncate blockFile
      blockRAF.setLength(bytesOnDisk - 1);
      fsDataset.recoverRbw(blocks[RBW], blocks[RBW].getGenerationStamp(), 0L,
          rbw.getNumBytes());
      fail("recovery should have failed");
    } catch (ReplicaNotFoundException rnfe) {
      GenericTestUtils.assertExceptionContains("Found fewer bytesOnDisk than " +
          "bytesAcked for replica", rnfe);
    }
  }

  /**
   * Compare the replica map before and after the restart
   **/
@ -17,9 +17,11 @@
 */
package org.apache.hadoop.hdfs.server.namenode;

import com.google.common.base.Supplier;
import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.io.IOException;

@ -36,6 +38,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.protocol.BlockType;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

@ -52,6 +55,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Test;

@ -178,4 +182,53 @@ public class TestDeadDatanode {
        .getDatanodeDescriptor().equals(clientNode));
  }
}

  @Test
  public void testNonDFSUsedONDeadNodeReReg() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
        6 * 1000);
    long CAPACITY = 5000L;
    long[] capacities = new long[] { 4 * CAPACITY, 4 * CAPACITY };
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
          .simulatedCapacities(capacities).build();
      long initialCapacity = cluster.getNamesystem(0).getCapacityTotal();
      assertTrue(initialCapacity > 0);
      DataNode dn1 = cluster.getDataNodes().get(0);
      DataNode dn2 = cluster.getDataNodes().get(1);
      final DatanodeDescriptor dn2Desc = cluster.getNamesystem(0)
          .getBlockManager().getDatanodeManager()
          .getDatanode(dn2.getDatanodeId());
      dn1.setHeartbeatsDisabledForTests(true);
      cluster.setDataNodeDead(dn1.getDatanodeId());
      assertEquals("Capacity shouldn't include DeadNode", dn2Desc.getCapacity(),
          cluster.getNamesystem(0).getCapacityTotal());
      assertEquals("NonDFS-used shouldn't include DeadNode",
          dn2Desc.getNonDfsUsed(),
          cluster.getNamesystem(0).getNonDfsUsedSpace());
      // Wait for re-registration and heartbeat
      dn1.setHeartbeatsDisabledForTests(false);
      final DatanodeDescriptor dn1Desc = cluster.getNamesystem(0)
          .getBlockManager().getDatanodeManager()
          .getDatanode(dn1.getDatanodeId());
      GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override public Boolean get() {
          return dn1Desc.isAlive() && dn1Desc.isHeartbeatedSinceRegistration();
        }
      }, 100, 5000);
      assertEquals("Capacity should be 0 after all DNs dead", initialCapacity,
          cluster.getNamesystem(0).getCapacityTotal());
      long nonDfsAfterReg = cluster.getNamesystem(0).getNonDfsUsedSpace();
      assertEquals("NonDFS should include actual DN NonDFSUsed",
          dn1Desc.getNonDfsUsed() + dn2Desc.getNonDfsUsed(), nonDfsAfterReg);
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
}
@ -310,9 +310,8 @@ public class TestSnapshotPathINodes {
  }

  private int getNumNonNull(INodesInPath iip) {
    List<INode> inodes = iip.getReadOnlyINodes();
    for (int i = inodes.size() - 1; i >= 0; i--) {
      if (inodes.get(i) != null) {
    for (int i = iip.length() - 1; i >= 0; i--) {
      if (iip.getINode(i) != null) {
        return i+1;
      }
    }
@ -7203,7 +7203,130 @@
|
|||
</comparator>
|
||||
</comparators>
|
||||
</test>
|
||||
|
||||
|
||||
<test> <!-- TESTED -->
|
||||
<description>stat: Test for hdfs:// path - user/group name for directory</description>
|
||||
<test-commands>
|
||||
<command>-fs NAMENODE -mkdir hdfs:///dirtest</command>
|
||||
<command>-fs NAMENODE -chown hadoop:hadoopgrp hdfs:///dirtest</command>
|
||||
<command>-fs NAMENODE -stat "%u-%g" hdfs:///dirtest</command>
|
||||
</test-commands>
|
||||
<cleanup-commands>
|
||||
<command>-fs NAMENODE -rm -r hdfs:///dirtest</command>
|
||||
</cleanup-commands>
|
||||
<comparators>
|
||||
<comparator>
|
||||
<type>TokenComparator</type>
|
||||
<expected-output>hadoop-hadoopgrp</expected-output>
|
||||
</comparator>
|
||||
</comparators>
|
||||
</test>
|
||||
|
||||
<test> <!-- TESTED -->
|
||||
<description>stat: Test for hdfs:// path - user/group name for file</description>
|
||||
<test-commands>
|
||||
<command>-fs NAMENODE -put CLITEST_DATA/data60bytes hdfs:///data60bytes</command>
|
||||
<command>-fs NAMENODE -chown hadoop:hadoopgrp hdfs:////data60bytes</command>
|
||||
<command>-fs NAMENODE -stat "%u-%g" hdfs:////data60bytes</command>
|
||||
</test-commands>
|
||||
<cleanup-commands>
|
||||
<command>-fs NAMENODE -rm -r hdfs:///data60bytes</command>
|
||||
</cleanup-commands>
|
||||
<comparators>
|
||||
<comparator>
|
||||
<type>TokenComparator</type>
|
||||
<expected-output>hadoop-hadoopgrp</expected-output>
|
||||
</comparator>
|
||||
</comparators>
|
||||
</test>
|
||||
|
||||
<test> <!-- TESTED -->
|
||||
<description>stat: Test for hdfs:// path - user/group name for multiple files</description>
|
||||
<test-commands>
|
||||
<command>-fs NAMENODE -put CLITEST_DATA/data60bytes hdfs:///data60bytes</command>
|
||||
<command>-fs NAMENODE -put CLITEST_DATA/data30bytes hdfs:///data30bytes</command>
|
||||
<command>-fs NAMENODE -chown hadoop:hadoopgrp hdfs:///data60bytes</command>
|
||||
<command>-fs NAMENODE -chown hdfs:hdfs hdfs:///data30bytes</command>
|
||||
<command>-fs NAMENODE -stat "%u-%g" hdfs:///data*</command>
|
||||
</test-commands>
|
||||
<cleanup-commands>
|
||||
<command>-fs NAMENODE -rm -r hdfs:///data60bytes</command>
|
||||
<command>-fs NAMENODE -rm -r hdfs:////data30bytes</command>
|
||||
</cleanup-commands>
|
||||
<comparators>
|
||||
<comparator>
|
||||
<type>TokenComparator</type>
|
||||
<expected-output>hadoop-hadoopgrp</expected-output>
|
||||
</comparator>
|
||||
<comparator>
|
||||
<type>TokenComparator</type>
|
||||
<expected-output>hdfs-hdfs</expected-output>
|
||||
</comparator>
|
||||
</comparators>
|
||||
</test>
|
||||
|
||||
<test> <!-- TESTED -->
|
||||
<description>stat: Test for Namenode's path - user/group name for directory</description>
|
||||
<test-commands>
|
||||
<command>-fs NAMENODE -mkdir /dir0</command>
|
||||
<command>-fs NAMENODE -chown hadoop:hadoopgrp NAMENODE/dir0/</command>
|
||||
<command>-fs NAMENODE -stat "%u-%g" NAMENODE/dir0/</command>
|
||||
</test-commands>
|
||||
<cleanup-commands>
|
||||
<command>-fs NAMENODE -rm -r NAMENODE/dir0</command>
|
||||
</cleanup-commands>
|
||||
<comparators>
|
||||
<comparator>
|
||||
<type>TokenComparator</type>
|
||||
<expected-output>hadoop-hadoopgrp</expected-output>
|
||||
</comparator>
|
||||
</comparators>
|
||||
</test>
|
||||
|
||||
<test> <!-- TESTED -->
|
||||
<description>stat: Test for Namenode's path - user/group name for file </description>
|
||||
<test-commands>
|
||||
<command>-fs NAMENODE -mkdir /dir0</command>
|
||||
<command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/dir0/data15bytes</command>
|
||||
<command>-fs NAMENODE -chown hadoop:hadoopgrp NAMENODE/dir0/data15bytes</command>
|
||||
<command>-fs NAMENODE -stat "%u-%g" NAMENODE/dir0/data15bytes</command>
|
||||
</test-commands>
|
||||
<cleanup-commands>
|
||||
<command>-fs NAMENODE -rm -r NAMENODE/dir0</command>
|
||||
</cleanup-commands>
|
||||
<comparators>
|
||||
<comparator>
|
||||
<type>TokenComparator</type>
|
||||
<expected-output>hadoop-hadoopgrp</expected-output>
|
||||
</comparator>
|
||||
</comparators>
|
||||
</test>
|
||||
|
||||
<test> <!-- TESTED -->
|
||||
<description>stat: Test for Namenode's path - user/group name for multiple files </description>
|
||||
<test-commands>
|
||||
<command>-fs NAMENODE -mkdir /dir0</command>
|
||||
<command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/dir0/data15bytes</command>
|
||||
<command>-fs NAMENODE -put CLITEST_DATA/data30bytes NAMENODE/dir0/data30bytes</command>
|
||||
<command>-fs NAMENODE -chown hadoop:hadoopgrp NAMENODE/dir0/data15bytes</command>
|
||||
<command>-fs NAMENODE -chown hdfs:hdfs NAMENODE/dir0/data30bytes</command>
|
||||
<command>-fs NAMENODE -stat "%u-%g" NAMENODE/dir0/data*</command>
|
||||
</test-commands>
|
||||
<cleanup-commands>
|
||||
<command>-fs NAMENODE -rm -r NAMENODE/dir0</command>
|
||||
</cleanup-commands>
|
||||
<comparators>
|
||||
<comparator>
|
||||
<type>TokenComparator</type>
|
||||
<expected-output>hadoop-hadoopgrp</expected-output>
|
||||
</comparator>
|
||||
<comparator>
|
||||
<type>TokenComparator</type>
|
||||
<expected-output>hdfs-hdfs</expected-output>
|
||||
</comparator>
|
||||
</comparators>
|
||||
</test>
|
||||
|
||||
<!-- Tests for tail -->
|
||||
<test> <!-- TESTED -->
|
||||
<description>tail: contents of file(absolute path)</description>
|
||||
|
|
|
@ -23,7 +23,6 @@ import org.apache.hadoop.mapreduce.v2.app.job.Task;
|
|||
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
|
||||
|
||||
import static org.apache.hadoop.test.MetricsAsserts.*;
|
||||
import static org.apache.hadoop.test.MockitoMaker.*;
|
||||
|
||||
import org.junit.Test;
|
||||
|
||||
|
@ -33,10 +32,10 @@ public class TestMRAppMetrics {
|
|||
|
||||
@Test public void testNames() {
|
||||
Job job = mock(Job.class);
|
||||
Task mapTask = make(stub(Task.class).returning(TaskType.MAP).
|
||||
from.getType());
|
||||
Task reduceTask = make(stub(Task.class).returning(TaskType.REDUCE).
|
||||
from.getType());
|
||||
Task mapTask = mock(Task.class);
|
||||
when(mapTask.getType()).thenReturn(TaskType.MAP);
|
||||
Task reduceTask = mock(Task.class);
|
||||
when(reduceTask.getType()).thenReturn(TaskType.REDUCE);
|
||||
MRAppMetrics metrics = MRAppMetrics.create();
|
||||
|
||||
metrics.submittedJob(job);
|
||||
|
|
|
@ -2414,10 +2414,7 @@ public class TestRMContainerAllocator {
|
|||
conf.setInt(
|
||||
MRJobConfig.MR_AM_IGNORE_BLACKLISTING_BLACKLISTED_NODE_PERECENT, -1);
|
||||
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
MyResourceManager rm1 = new MyResourceManager(conf, memStore);
|
||||
MyResourceManager rm1 = new MyResourceManager(conf);
|
||||
rm1.start();
|
||||
|
||||
// Submit the application
|
||||
|
@ -2504,7 +2501,7 @@ public class TestRMContainerAllocator {
|
|||
assertBlacklistAdditionsAndRemovals(0, 0, rm1);
|
||||
|
||||
// Phase-2 start 2nd RM is up
|
||||
MyResourceManager rm2 = new MyResourceManager(conf, memStore);
|
||||
MyResourceManager rm2 = new MyResourceManager(conf, rm1.getRMStateStore());
|
||||
rm2.start();
|
||||
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
|
||||
allocator.updateSchedulerProxy(rm2);
|
||||
|
|
|
@ -27,7 +27,6 @@ import org.junit.Test;
|
|||
import static org.junit.Assert.*;
|
||||
|
||||
import static org.mockito.Mockito.*;
|
||||
import static org.apache.hadoop.test.MockitoMaker.*;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.BlockLocation;
|
||||
|
@ -80,12 +79,14 @@ public class TestMRCJCFileInputFormat {
|
|||
@Test
|
||||
public void testNumInputFiles() throws Exception {
|
||||
Configuration conf = spy(new Configuration());
|
||||
Job job = make(stub(Job.class).returning(conf).from.getConfiguration());
|
||||
FileStatus stat = make(stub(FileStatus.class).returning(0L).from.getLen());
|
||||
Job mockedJob = mock(Job.class);
|
||||
when(mockedJob.getConfiguration()).thenReturn(conf);
|
||||
FileStatus stat = mock(FileStatus.class);
|
||||
when(stat.getLen()).thenReturn(0L);
|
||||
TextInputFormat ispy = spy(new TextInputFormat());
|
||||
doReturn(Arrays.asList(stat)).when(ispy).listStatus(job);
|
||||
doReturn(Arrays.asList(stat)).when(ispy).listStatus(mockedJob);
|
||||
|
||||
ispy.getSplits(job);
|
||||
ispy.getSplits(mockedJob);
|
||||
verify(conf).setLong(FileInputFormat.NUM_INPUT_FILES, 1);
|
||||
}
|
||||
|
||||
|
|
|
@ -20,14 +20,14 @@ package org.apache.hadoop.mapred;
|
|||
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
|
||||
import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
|
||||
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
|
||||
import static org.apache.hadoop.test.MockitoMaker.make;
|
||||
import static org.apache.hadoop.test.MockitoMaker.stub;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.jboss.netty.buffer.ChannelBuffers.wrappedBuffer;
|
||||
import static org.jboss.netty.handler.codec.http.HttpResponseStatus.OK;
|
||||
import static org.jboss.netty.handler.codec.http.HttpVersion.HTTP_1_1;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assume.assumeTrue;
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
import java.io.DataInputStream;
|
||||
import java.io.EOFException;
|
||||
|
@ -188,8 +188,8 @@ public class TestShuffleHandler {
|
|||
public void testShuffleMetrics() throws Exception {
|
||||
MetricsSystem ms = new MetricsSystemImpl();
|
||||
ShuffleHandler sh = new ShuffleHandler(ms);
|
||||
ChannelFuture cf = make(stub(ChannelFuture.class).
|
||||
returning(true, false).from.isSuccess());
|
||||
ChannelFuture cf = mock(ChannelFuture.class);
|
||||
when(cf.isSuccess()).thenReturn(true).thenReturn(false);
|
||||
|
||||
sh.metrics.shuffleConnections.incr();
|
||||
sh.metrics.shuffleOutputBytes.incr(1*MiB);
|
||||
|
@ -1080,10 +1080,10 @@ public class TestShuffleHandler {
|
|||
new ArrayList<ShuffleHandler.ReduceMapFileCount>();
|
||||
|
||||
final ChannelHandlerContext mockCtx =
|
||||
Mockito.mock(ChannelHandlerContext.class);
|
||||
final MessageEvent mockEvt = Mockito.mock(MessageEvent.class);
|
||||
final Channel mockCh = Mockito.mock(AbstractChannel.class);
|
||||
final ChannelPipeline mockPipeline = Mockito.mock(ChannelPipeline.class);
|
||||
mock(ChannelHandlerContext.class);
|
||||
final MessageEvent mockEvt = mock(MessageEvent.class);
|
||||
final Channel mockCh = mock(AbstractChannel.class);
|
||||
final ChannelPipeline mockPipeline = mock(ChannelPipeline.class);
|
||||
|
||||
// Mock HttpRequest and ChannelFuture
|
||||
final HttpRequest mockHttpRequest = createMockHttpRequest();
|
||||
|
@ -1094,16 +1094,16 @@ public class TestShuffleHandler {
|
|||
|
||||
// Mock Netty Channel Context and Channel behavior
|
||||
Mockito.doReturn(mockCh).when(mockCtx).getChannel();
|
||||
Mockito.when(mockCh.getPipeline()).thenReturn(mockPipeline);
|
||||
Mockito.when(mockPipeline.get(
|
||||
when(mockCh.getPipeline()).thenReturn(mockPipeline);
|
||||
when(mockPipeline.get(
|
||||
Mockito.any(String.class))).thenReturn(timerHandler);
|
||||
Mockito.when(mockCtx.getChannel()).thenReturn(mockCh);
|
||||
when(mockCtx.getChannel()).thenReturn(mockCh);
|
||||
Mockito.doReturn(mockFuture).when(mockCh).write(Mockito.any(Object.class));
|
||||
Mockito.when(mockCh.write(Object.class)).thenReturn(mockFuture);
|
||||
when(mockCh.write(Object.class)).thenReturn(mockFuture);
|
||||
|
||||
//Mock MessageEvent behavior
|
||||
Mockito.doReturn(mockCh).when(mockEvt).getChannel();
|
||||
Mockito.when(mockEvt.getChannel()).thenReturn(mockCh);
|
||||
when(mockEvt.getChannel()).thenReturn(mockCh);
|
||||
Mockito.doReturn(mockHttpRequest).when(mockEvt).getMessage();
|
||||
|
||||
final ShuffleHandler sh = new MockShuffleHandler();
|
||||
|
@ -1127,8 +1127,8 @@ public class TestShuffleHandler {
|
|||
|
||||
public ChannelFuture createMockChannelFuture(Channel mockCh,
|
||||
final List<ShuffleHandler.ReduceMapFileCount> listenerList) {
|
||||
final ChannelFuture mockFuture = Mockito.mock(ChannelFuture.class);
|
||||
Mockito.when(mockFuture.getChannel()).thenReturn(mockCh);
|
||||
final ChannelFuture mockFuture = mock(ChannelFuture.class);
|
||||
when(mockFuture.getChannel()).thenReturn(mockCh);
|
||||
Mockito.doReturn(true).when(mockFuture).isSuccess();
|
||||
Mockito.doAnswer(new Answer() {
|
||||
@Override
|
||||
|
@ -1146,7 +1146,7 @@ public class TestShuffleHandler {
|
|||
}
|
||||
|
||||
public HttpRequest createMockHttpRequest() {
|
||||
HttpRequest mockHttpRequest = Mockito.mock(HttpRequest.class);
|
||||
HttpRequest mockHttpRequest = mock(HttpRequest.class);
|
||||
Mockito.doReturn(HttpMethod.GET).when(mockHttpRequest).getMethod();
|
||||
Mockito.doAnswer(new Answer() {
|
||||
@Override
|
||||
|
|
|
@ -57,6 +57,7 @@ import org.apache.hadoop.fs.azure.metrics.ResponseReceivedMetricUpdater;
|
|||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.fs.permission.PermissionStatus;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.util.VersionInfo;
|
||||
import org.eclipse.jetty.util.ajax.JSON;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
@ -71,6 +72,10 @@ import com.microsoft.azure.storage.StorageCredentialsAccountAndKey;
|
|||
import com.microsoft.azure.storage.StorageCredentialsSharedAccessSignature;
|
||||
import com.microsoft.azure.storage.StorageErrorCode;
|
||||
import com.microsoft.azure.storage.StorageException;
|
||||
import com.microsoft.azure.storage.Constants;
|
||||
import com.microsoft.azure.storage.StorageEvent;
|
||||
import com.microsoft.azure.storage.core.BaseRequest;
|
||||
import com.microsoft.azure.storage.SendingRequestEvent;
|
||||
import com.microsoft.azure.storage.blob.BlobListingDetails;
|
||||
import com.microsoft.azure.storage.blob.BlobProperties;
|
||||
import com.microsoft.azure.storage.blob.BlobRequestOptions;
|
||||
|
@ -83,13 +88,13 @@ import com.microsoft.azure.storage.core.Utility;
|
|||
|
||||
/**
|
||||
* Core implementation of Windows Azure Filesystem for Hadoop.
|
||||
* Provides the bridging logic between Hadoop's abstract filesystem and Azure Storage
|
||||
* Provides the bridging logic between Hadoop's abstract filesystem and Azure Storage
|
||||
*
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
@VisibleForTesting
|
||||
public class AzureNativeFileSystemStore implements NativeFileSystemStore {
|
||||
|
||||
|
||||
/**
|
||||
* Configuration knob on whether we do block-level MD5 validation on
|
||||
* upload/download.
|
||||
|
@ -102,6 +107,12 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
|
|||
static final String DEFAULT_STORAGE_EMULATOR_ACCOUNT_NAME = "storageemulator";
|
||||
static final String STORAGE_EMULATOR_ACCOUNT_NAME_PROPERTY_NAME = "fs.azure.storage.emulator.account.name";
|
||||
|
||||
/**
|
||||
* Configuration for User-Agent field.
|
||||
*/
|
||||
static final String USER_AGENT_ID_KEY = "fs.azure.user.agent.prefix";
|
||||
static final String USER_AGENT_ID_DEFAULT = "unknown";
|
||||
|
||||
public static final Logger LOG = LoggerFactory.getLogger(AzureNativeFileSystemStore.class);
|
||||
|
||||
private StorageInterface storageInteractionLayer;
|
||||
|
@ -133,15 +144,15 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
|
|||
private static final String KEY_MAX_BACKOFF_INTERVAL = "fs.azure.io.retry.max.backoff.interval";
|
||||
private static final String KEY_BACKOFF_INTERVAL = "fs.azure.io.retry.backoff.interval";
|
||||
private static final String KEY_MAX_IO_RETRIES = "fs.azure.io.retry.max.retries";
|
||||
|
||||
private static final String KEY_COPYBLOB_MIN_BACKOFF_INTERVAL =
|
||||
|
||||
private static final String KEY_COPYBLOB_MIN_BACKOFF_INTERVAL =
|
||||
"fs.azure.io.copyblob.retry.min.backoff.interval";
|
||||
private static final String KEY_COPYBLOB_MAX_BACKOFF_INTERVAL =
|
||||
private static final String KEY_COPYBLOB_MAX_BACKOFF_INTERVAL =
|
||||
"fs.azure.io.copyblob.retry.max.backoff.interval";
|
||||
private static final String KEY_COPYBLOB_BACKOFF_INTERVAL =
|
||||
private static final String KEY_COPYBLOB_BACKOFF_INTERVAL =
|
||||
"fs.azure.io.copyblob.retry.backoff.interval";
|
||||
private static final String KEY_COPYBLOB_MAX_IO_RETRIES =
|
||||
"fs.azure.io.copyblob.retry.max.retries";
|
||||
private static final String KEY_COPYBLOB_MAX_IO_RETRIES =
|
||||
"fs.azure.io.copyblob.retry.max.retries";
|
||||
|
||||
private static final String KEY_SELF_THROTTLE_ENABLE = "fs.azure.selfthrottling.enable";
|
||||
private static final String KEY_SELF_THROTTLE_READ_FACTOR = "fs.azure.selfthrottling.read.factor";
|
||||
|
@ -188,7 +199,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
|
|||
* The set of directories where we should store files as page blobs.
|
||||
*/
|
||||
private Set<String> pageBlobDirs;
|
||||
|
||||
|
||||
/**
|
||||
* Configuration key to indicate the set of directories in WASB where
|
||||
* we should do atomic folder rename synchronized with createNonRecursive.
|
||||
|
@ -232,11 +243,11 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
|
|||
private static final int DEFAULT_MAX_BACKOFF_INTERVAL = 30 * 1000; // 30s
|
||||
private static final int DEFAULT_BACKOFF_INTERVAL = 1 * 1000; // 1s
|
||||
private static final int DEFAULT_MAX_RETRY_ATTEMPTS = 15;
|
||||
|
||||
|
||||
private static final int DEFAULT_COPYBLOB_MIN_BACKOFF_INTERVAL = 3 * 1000;
|
||||
private static final int DEFAULT_COPYBLOB_MAX_BACKOFF_INTERVAL = 90 * 1000;
|
||||
private static final int DEFAULT_COPYBLOB_BACKOFF_INTERVAL = 30 * 1000;
|
||||
private static final int DEFAULT_COPYBLOB_MAX_RETRY_ATTEMPTS = 15;
|
||||
private static final int DEFAULT_COPYBLOB_MAX_RETRY_ATTEMPTS = 15;
|
||||
|
||||
// Self-throttling defaults. Allowed range = (0,1.0]
|
||||
// Value of 1.0 means no self-throttling.
|
||||
|
@ -306,6 +317,9 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
|
|||
private boolean useSecureMode = false;
|
||||
private boolean useLocalSasKeyMode = false;
|
||||
|
||||
// User-Agent
|
||||
private String userAgentId;
|
||||
|
||||
private String delegationToken;
|
||||
|
||||
/** The error message template when container is not accessible. */
|
||||
|
@ -319,7 +333,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
|
|||
* A test hook interface that can modify the operation context we use for
|
||||
* Azure Storage operations, e.g. to inject errors.
|
||||
*/
|
||||
@VisibleForTesting
|
||||
@VisibleForTesting
|
||||
interface TestHookOperationContext {
|
||||
OperationContext modifyOperationContext(OperationContext original);
|
||||
}
|
||||
|
@ -336,11 +350,11 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
|
|||
/**
|
||||
* Add a test hook to modify the operation context we use for Azure Storage
|
||||
* operations.
|
||||
*
|
||||
*
|
||||
* @param testHook
|
||||
* The test hook, or null to unset previous hooks.
|
||||
*/
|
||||
@VisibleForTesting
|
||||
@VisibleForTesting
|
||||
void addTestHookToOperationContext(TestHookOperationContext testHook) {
|
||||
this.testHookOperationContext = testHook;
|
||||
}
|
||||
|
@ -358,7 +372,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
|
|||
/**
|
||||
* Creates a JSON serializer that can serialize a PermissionStatus object into
|
||||
* the JSON string we want in the blob metadata.
|
||||
*
|
||||
*
|
||||
* @return The JSON serializer.
|
||||
*/
|
||||
private static JSON createPermissionJsonSerializer() {
|
||||
|
@ -425,7 +439,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
|
|||
|
||||
/**
|
||||
* Check if concurrent reads and writes on the same blob are allowed.
|
||||
*
|
||||
*
|
||||
* @return true if concurrent reads and OOB writes has been configured, false
|
||||
* otherwise.
|
||||
*/
|
||||
|
@ -437,11 +451,11 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
|
|||
* Method for the URI and configuration object necessary to create a storage
|
||||
* session with an Azure session. It parses the scheme to ensure it matches
|
||||
* the storage protocol supported by this file system.
|
||||
*
|
||||
*
|
||||
* @param uri - URI for target storage blob.
|
||||
* @param conf - reference to configuration object.
|
||||
* @param instrumentation - the metrics source that will keep track of operations here.
|
||||
*
|
||||
*
|
||||
* @throws IllegalArgumentException if URI or job object is null, or invalid scheme.
|
||||
*/
|
||||
@Override
|
||||
|
@ -504,6 +518,9 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
|
|||
pageBlobDirs = getDirectorySet(KEY_PAGE_BLOB_DIRECTORIES);
|
||||
LOG.debug("Page blob directories: {}", setToString(pageBlobDirs));
|
||||
|
||||
// User-agent
|
||||
userAgentId = conf.get(USER_AGENT_ID_KEY, USER_AGENT_ID_DEFAULT);
|
||||
|
||||
// Extract directories that should have atomic rename applied.
|
||||
atomicRenameDirs = getDirectorySet(KEY_ATOMIC_RENAME_DIRECTORIES);
|
||||
String hbaseRoot;
|
||||
|
@ -539,7 +556,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
|
|||
|
||||
/**
|
||||
* Method to extract the account name from an Azure URI.
|
||||
*
|
||||
*
|
||||
* @param uri
|
||||
* -- WASB blob URI
|
||||
* @returns accountName -- the account name for the URI.
|
||||
|
@ -590,7 +607,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
|
|||
|
||||
/**
|
||||
* Method to extract the container name from an Azure URI.
|
||||
*
|
||||
*
|
||||
* @param uri
|
||||
* -- WASB blob URI
|
||||
* @returns containerName -- the container name for the URI. May be null.
|
||||
|
@ -641,7 +658,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
/**
* Get the appropriate return the appropriate scheme for communicating with
* Azure depending on whether wasb or wasbs is specified in the target URI.
*
* return scheme - HTTPS or HTTP as appropriate.
*/
private String getHTTPScheme() {

@ -663,7 +680,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
/**
* Set the configuration parameters for this client storage session with
* Azure.
*
* @throws AzureException
*/
private void configureAzureStorageSession() throws AzureException {
@ -763,10 +780,10 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {

/**
* Connect to Azure storage using anonymous credentials.
*
* @param uri
* - URI to target blob (R/O access to public blob)
*
* @throws StorageException
* raised on errors communicating with Azure storage.
* @throws IOException

@ -893,7 +910,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
STORAGE_EMULATOR_ACCOUNT_NAME_PROPERTY_NAME,
DEFAULT_STORAGE_EMULATOR_ACCOUNT_NAME));
}

@VisibleForTesting
public static String getAccountKeyFromConfiguration(String accountName,
Configuration conf) throws KeyProviderException {
@ -930,7 +947,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* Establish a session with Azure blob storage based on the target URI. The
* method determines whether or not the URI target contains an explicit
* account or an implicit default cluster-wide account.
*
* @throws AzureException
* @throws IOException
*/

@ -983,7 +1000,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
instrumentation.setAccountName(accountName);
String containerName = getContainerFromAuthority(sessionUri);
instrumentation.setContainerName(containerName);

// Check whether this is a storage emulator account.
if (isStorageEmulatorAccount(accountName)) {
// It is an emulator account, connect to it with no credentials.

@ -1086,7 +1103,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
*/
private String verifyAndConvertToStandardFormat(String rawDir) throws URISyntaxException {
URI asUri = new URI(rawDir);
if (asUri.getAuthority() == null
|| asUri.getAuthority().toLowerCase(Locale.ENGLISH).equalsIgnoreCase(
sessionUri.getAuthority().toLowerCase(Locale.ENGLISH))) {
// Applies to me.
@ -1167,8 +1184,8 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
return false;
}

/**
* This should be called from any method that does any modifications to the
* underlying container: it makes sure to put the WASB current version in the

@ -1364,11 +1381,11 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* could mean either:
* (1) container=mycontainer; blob=myblob.txt
* (2) container=$root; blob=mycontainer/myblob.txt
*
* To avoid this type of ambiguity the Azure blob storage prevents
* arbitrary path under $root. For a simple and more consistent user
* experience it was decided to eliminate the opportunity for creating
* such paths by making the $root container read-only under WASB.
*/

// Check that no attempt is made to write to blobs on default
@ -1445,7 +1462,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {

/**
* Default permission to use when no permission metadata is found.
*
* @return The default permission to use.
*/
private static PermissionStatus defaultPermissionNoBlobMetadata() {

@ -1688,7 +1705,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {

/**
* Private method to check for authenticated access.
*
* @ returns boolean -- true if access is credentialed and authenticated and
* false otherwise.
*/

@ -1708,7 +1725,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* original file system object was constructed with a short- or long-form URI.
* If the root directory is non-null the URI in the file constructor was in
* the long form.
*
* @param includeMetadata
* if set, the listed items will have their metadata populated
* already.

@ -1717,7 +1734,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
*
* @returns blobItems : iterable collection of blob items.
* @throws URISyntaxException
*
*/
private Iterable<ListBlobItem> listRootBlobs(boolean includeMetadata,
boolean useFlatBlobListing) throws StorageException, URISyntaxException {
@ -1736,7 +1753,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* the directory depending on whether the original file system object was
* constructed with a short- or long-form URI. If the root directory is
* non-null the URI in the file constructor was in the long form.
*
* @param aPrefix
* : string name representing the prefix of containing blobs.
* @param includeMetadata

@ -1744,10 +1761,10 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* already.
* @param useFlatBlobListing
* if set the list is flat, otherwise it is hierarchical.
*
* @returns blobItems : iterable collection of blob items.
* @throws URISyntaxException
*
*/
private Iterable<ListBlobItem> listRootBlobs(String aPrefix, boolean includeMetadata,
boolean useFlatBlobListing) throws StorageException, URISyntaxException {
@ -1769,7 +1786,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* constructed with a short- or long-form URI. It also uses the specified flat
* or hierarchical option, listing details options, request options, and
* operation context.
*
* @param aPrefix
* string name representing the prefix of containing blobs.
* @param useFlatBlobListing

@ -1784,7 +1801,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* - context of the current operation
* @returns blobItems : iterable collection of blob items.
* @throws URISyntaxException
*
*/
private Iterable<ListBlobItem> listRootBlobs(String aPrefix, boolean useFlatBlobListing,
EnumSet<BlobListingDetails> listingDetails, BlobRequestOptions options,

@ -1804,13 +1821,13 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* get the block blob reference depending on whether the original file system
* object was constructed with a short- or long-form URI. If the root
* directory is non-null the URI in the file constructor was in the long form.
*
* @param aKey
* : a key used to query Azure for the block blob.
* @returns blob : a reference to the Azure block blob corresponding to the
* key.
* @throws URISyntaxException
*
*/
private CloudBlobWrapper getBlobReference(String aKey)
throws StorageException, URISyntaxException {
@ -1831,10 +1848,10 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* This private method normalizes the key by stripping the container name from
* the path and returns a path relative to the root directory of the
* container.
*
* @param keyUri
* - adjust this key to a path relative to the root directory
*
* @returns normKey
*/
private String normalizeKey(URI keyUri) {

@ -1853,11 +1870,11 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* This private method normalizes the key by stripping the container name from
* the path and returns a path relative to the root directory of the
* container.
*
* @param blob
* - adjust the key to this blob to a path relative to the root
* directory
*
* @returns normKey
*/
private String normalizeKey(CloudBlobWrapper blob) {

@ -1868,11 +1885,11 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* This private method normalizes the key by stripping the container name from
* the path and returns a path relative to the root directory of the
* container.
*
* @param directory
* - adjust the key to this directory to a path relative to the root
* directory
*
* @returns normKey
*/
private String normalizeKey(CloudBlobDirectoryWrapper directory) {
@ -1889,7 +1906,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* operation that has listeners hooked to it that will update the metrics for
* this file system. This method does not bind to receive send request
* callbacks by default.
*
* @return The OperationContext object to use.
*/
private OperationContext getInstrumentedContext() {

@ -1900,16 +1917,27 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
/**
* Creates a new OperationContext for the Azure Storage operation that has
* listeners hooked to it that will update the metrics for this file system.
*
* @param bindConcurrentOOBIo
* - bind to intercept send request call backs to handle OOB I/O.
*
* @return The OperationContext object to use.
*/
private OperationContext getInstrumentedContext(boolean bindConcurrentOOBIo) {

OperationContext operationContext = new OperationContext();

// Set User-Agent
operationContext.getSendingRequestEventHandler().addListener(new StorageEvent<SendingRequestEvent>() {
@Override
public void eventOccurred(SendingRequestEvent eventArg) {
HttpURLConnection connection = (HttpURLConnection) eventArg.getConnectionObject();
String userAgentInfo = String.format(Utility.LOCALE_US, "WASB/%s (%s) %s",
VersionInfo.getVersion(), userAgentId, BaseRequest.getUserAgent());
connection.setRequestProperty(Constants.HeaderConstants.USER_AGENT, userAgentInfo);
}
});

if (selfThrottlingEnabled) {
SelfThrottlingIntercept.hook(operationContext, selfThrottlingReadFactor,
selfThrottlingWriteFactor);
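For reference, a rough sketch of the header value that the listener above assembles. This is not part of the patch; the version, prefix, and SDK strings below are invented placeholders, and the real values come from VersionInfo.getVersion(), the configured fs.azure.user.agent.prefix, and the Azure SDK's BaseRequest.getUserAgent().

```java
// Illustrative only -- actual values depend on the build and the Azure SDK in use.
String version = "3.0.0-beta1";                            // stand-in for VersionInfo.getVersion()
String userAgentId = "MyCluster";                          // stand-in for fs.azure.user.agent.prefix
String sdkAgent = "Azure-Storage/5.4.0 (JavaJRE 1.8.0)";   // stand-in for BaseRequest.getUserAgent()
String userAgentInfo = String.format("WASB/%s (%s) %s", version, userAgentId, sdkAgent);
// -> "WASB/3.0.0-beta1 (MyCluster) Azure-Storage/5.4.0 (JavaJRE 1.8.0)"
```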
@ -2096,7 +2124,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
/**
* Searches the given list of {@link FileMetadata} objects for a directory
* with the given key.
*
* @param list
* The list to search.
* @param key

@ -2229,7 +2257,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* Build up a metadata list of blobs in an Azure blob directory. This method
* uses a in-order first traversal of blob directory structures to maintain
* the sorted order of the blob names.
*
* @param aCloudBlobDirectory Azure blob directory
* @param aFileMetadataList a list of file metadata objects for each
* non-directory blob.
@ -2564,7 +2592,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
//
// Copy blob operation in Azure storage is very costly. It will be highly
// likely throttled during Azure storage gc. Short term fix will be using
// a more intensive exponential retry policy when the cluster is getting
// throttled.
try {
dstBlob.startCopyFromBlob(srcBlob, null, getInstrumentedContext());

@ -2585,10 +2613,10 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
int copyBlobMaxRetries = sessionConfiguration.getInt(
KEY_COPYBLOB_MAX_IO_RETRIES,
DEFAULT_COPYBLOB_MAX_RETRY_ATTEMPTS);

BlobRequestOptions options = new BlobRequestOptions();
options.setRetryPolicyFactory(new RetryExponentialRetry(
copyBlobMinBackoff, copyBlobDeltaBackoff, copyBlobMaxBackoff,
copyBlobMaxRetries));
dstBlob.startCopyFromBlob(srcBlob, options, getInstrumentedContext());
} else {

@ -2794,7 +2822,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
bandwidthGaugeUpdater = null;
}
}

// Finalizer to ensure complete shutdown
@Override
protected void finalize() throws Throwable {
@ -358,7 +358,7 @@ final class BlockBlobInputStream extends InputStream implements Seekable {
* Gets the current capacity of the stream.
*/
public synchronized int capacity() {
return length - offset;
return length;
}

/**
@ -192,6 +192,19 @@ The configuration option `fs.azure.page.blob.extension.size` is the page blob
extension size. This defines the amount to extend a page blob if it starts to
get full. It must be 128MB or greater, specified as an integer number of bytes.

### Custom User-Agent
WASB passes a User-Agent header to the Azure back-end. The default value
contains the WASB version, Java Runtime version, Azure Client library version, and the
value of the configuration option `fs.azure.user.agent.prefix`. A customized User-Agent
header enables better troubleshooting and analysis by the Azure service.

```xml
<property>
  <name>fs.azure.user.agent.prefix</name>
  <value>Identifier</value>
</property>
```

### Atomic Folder Rename

Azure storage stores files as a flat key/value store without formal support
@ -43,8 +43,11 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.contract.ContractTestUtils.NanoTimer;

import static org.junit.Assert.*;
import static org.junit.Assume.*;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeNotNull;

import static org.apache.hadoop.test.LambdaTestUtils.*;
@ -194,6 +197,49 @@ public class TestBlockBlobInputStream extends AbstractWasbTestBase {
|
|||
createTestFileAndSetLength();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void test_0200_BasicReadTestV2() throws Exception {
|
||||
assumeHugeFileExists();
|
||||
|
||||
try (
|
||||
FSDataInputStream inputStreamV1
|
||||
= accountUsingInputStreamV1.getFileSystem().open(TEST_FILE_PATH);
|
||||
|
||||
FSDataInputStream inputStreamV2
|
||||
= accountUsingInputStreamV2.getFileSystem().open(TEST_FILE_PATH);
|
||||
) {
|
||||
byte[] bufferV1 = new byte[3 * MEGABYTE];
|
||||
byte[] bufferV2 = new byte[bufferV1.length];
|
||||
|
||||
// v1 forward seek and read a kilobyte into first kilobyte of bufferV1
|
||||
inputStreamV1.seek(5 * MEGABYTE);
|
||||
int numBytesReadV1 = inputStreamV1.read(bufferV1, 0, KILOBYTE);
|
||||
assertEquals(numBytesReadV1, KILOBYTE);
|
||||
|
||||
// v2 forward seek and read a kilobyte into first kilobyte of bufferV2
|
||||
inputStreamV2.seek(5 * MEGABYTE);
|
||||
int numBytesReadV2 = inputStreamV2.read(bufferV2, 0, KILOBYTE);
|
||||
assertEquals(numBytesReadV2, KILOBYTE);
|
||||
|
||||
assertArrayEquals(bufferV1, bufferV2);
|
||||
|
||||
int len = MEGABYTE;
|
||||
int offset = bufferV1.length - len;
|
||||
|
||||
// v1 reverse seek and read a megabyte into last megabyte of bufferV1
|
||||
inputStreamV1.seek(3 * MEGABYTE);
|
||||
numBytesReadV1 = inputStreamV1.read(bufferV1, offset, len);
|
||||
assertEquals(numBytesReadV1, len);
|
||||
|
||||
// v2 reverse seek and read a megabyte into last megabyte of bufferV2
|
||||
inputStreamV2.seek(3 * MEGABYTE);
|
||||
numBytesReadV2 = inputStreamV2.read(bufferV2, offset, len);
|
||||
assertEquals(numBytesReadV2, len);
|
||||
|
||||
assertArrayEquals(bufferV1, bufferV2);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates the implementation of InputStream.markSupported.
|
||||
* @throws IOException
|
||||
|
|
|
@ -566,4 +566,52 @@ public class TestWasbUriAndConfiguration {
|
|||
CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, null);
|
||||
assertEquals(newPath, effectivePath);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testUserAgentConfig() throws Exception {
|
||||
// Set the user agent
|
||||
try {
|
||||
testAccount = AzureBlobStorageTestAccount.createMock();
|
||||
Configuration conf = testAccount.getFileSystem().getConf();
|
||||
String authority = testAccount.getFileSystem().getUri().getAuthority();
|
||||
URI defaultUri = new URI("wasbs", authority, null, null, null);
|
||||
conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
|
||||
conf.set("fs.AbstractFileSystem.wasbs.impl", "org.apache.hadoop.fs.azure.Wasbs");
|
||||
|
||||
conf.set(AzureNativeFileSystemStore.USER_AGENT_ID_KEY, "TestClient");
|
||||
|
||||
FileSystem fs = FileSystem.get(conf);
|
||||
AbstractFileSystem afs = FileContext.getFileContext(conf).getDefaultFileSystem();
|
||||
|
||||
assertTrue(afs instanceof Wasbs);
|
||||
assertEquals(-1, afs.getUri().getPort());
|
||||
assertEquals("wasbs", afs.getUri().getScheme());
|
||||
|
||||
} finally {
|
||||
testAccount.cleanup();
|
||||
FileSystem.closeAll();
|
||||
}
|
||||
|
||||
// Unset the user agent
|
||||
try {
|
||||
testAccount = AzureBlobStorageTestAccount.createMock();
|
||||
Configuration conf = testAccount.getFileSystem().getConf();
|
||||
String authority = testAccount.getFileSystem().getUri().getAuthority();
|
||||
URI defaultUri = new URI("wasbs", authority, null, null, null);
|
||||
conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
|
||||
conf.set("fs.AbstractFileSystem.wasbs.impl", "org.apache.hadoop.fs.azure.Wasbs");
|
||||
|
||||
conf.unset(AzureNativeFileSystemStore.USER_AGENT_ID_KEY);
|
||||
|
||||
FileSystem fs = FileSystem.get(conf);
|
||||
AbstractFileSystem afs = FileContext.getFileContext(conf).getDefaultFileSystem();
|
||||
assertTrue(afs instanceof Wasbs);
|
||||
assertEquals(-1, afs.getUri().getPort());
|
||||
assertEquals("wasbs", afs.getUri().getScheme());
|
||||
|
||||
} finally {
|
||||
testAccount.cleanup();
|
||||
FileSystem.closeAll();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -34,6 +34,11 @@
<value>true</value>
</property>

<property>
<name>fs.azure.user.agent.prefix</name>
<value>MSFT</value>
</property>

<!-- Save the above configuration properties in a separate file named -->
<!-- azure-auth-keys.xml in the same directory as this file. -->
<!-- DO NOT ADD azure-auth-keys.xml TO REVISION CONTROL. The keys to your -->
@ -217,7 +217,7 @@ Command Line Options
|
|||
|
||||
Flag | Description | Notes
|
||||
----------------- | ------------------------------------ | --------
|
||||
`-p[rbugpcaxt]` | Preserve r: replication number b: block size u: user g: group p: permission c: checksum-type a: ACL x: XAttr t: timestamp | When `-update` is specified, status updates will **not** be synchronized unless the file sizes also differ (i.e. unless the file is re-created). If -pa is specified, DistCp preserves the permissions also because ACLs are a super-set of permissions. The option -pr is only valid if both source and target directory are not erasure coded.
|
||||
`-p[rbugpcaxt]` | Preserve r: replication number b: block size u: user g: group p: permission c: checksum-type a: ACL x: XAttr t: timestamp | When `-update` is specified, status updates will **not** be synchronized unless the file sizes also differ (i.e. unless the file is re-created). If -pa is specified, DistCp preserves the permissions also because ACLs are a super-set of permissions. The option -pr is only valid if both source and target directory are not erasure coded. **Note:** If -p option's are not specified, then by default block size is preserved.
|
||||
`-i` | Ignore failures | As explained in the Appendix, this option will keep more accurate statistics about the copy than the default case. It also preserves logs from failed copies, which can be valuable for debugging. Finally, a failing map will not cause the job to fail before all splits are attempted.
|
||||
`-log <logdir>` | Write logs to \<logdir\> | DistCp keeps logs of each file it attempts to copy as map output. If a map fails, the log output will not be retained if it is re-executed.
|
||||
`-m <num_maps>` | Maximum number of simultaneous copies | Specify the number of maps to copy data. Note that more maps may not necessarily improve throughput.
|
||||
|
|
|
@ -420,6 +420,7 @@ namespace HadoopPipes {
|
|||
}
|
||||
|
||||
string createDigest(string &password, string& msg) {
|
||||
#if OPENSSL_VERSION_NUMBER < 0x10100000L
|
||||
HMAC_CTX ctx;
|
||||
unsigned char digest[EVP_MAX_MD_SIZE];
|
||||
HMAC_Init(&ctx, (const unsigned char *)password.c_str(),
|
||||
|
@ -428,7 +429,16 @@ namespace HadoopPipes {
|
|||
unsigned int digestLen;
|
||||
HMAC_Final(&ctx, digest, &digestLen);
|
||||
HMAC_cleanup(&ctx);
|
||||
|
||||
#else
|
||||
HMAC_CTX *ctx = HMAC_CTX_new();
|
||||
unsigned char digest[EVP_MAX_MD_SIZE];
|
||||
HMAC_Init_ex(ctx, (const unsigned char *)password.c_str(),
|
||||
password.length(), EVP_sha1(), NULL);
|
||||
HMAC_Update(ctx, (const unsigned char *)msg.c_str(), msg.length());
|
||||
unsigned int digestLen;
|
||||
HMAC_Final(ctx, digest, &digestLen);
|
||||
HMAC_CTX_free(ctx);
|
||||
#endif
|
||||
//now apply base64 encoding
|
||||
BIO *bmem, *b64;
|
||||
BUF_MEM *bptr;
|
||||
|
|
|
@ -395,18 +395,28 @@ public class SLSRunner extends Configured implements Tool {
|
|||
String queue = jsonJob.get("job.queue.name").toString();
|
||||
increaseQueueAppNum(queue);
|
||||
|
||||
String oldAppId = (String)jsonJob.get("job.id");
|
||||
if (oldAppId == null) {
|
||||
oldAppId = Integer.toString(AM_ID);
|
||||
}
|
||||
|
||||
String amType = (String)jsonJob.get("am.type");
|
||||
if (amType == null) {
|
||||
amType = SLSUtils.DEFAULT_JOB_TYPE;
|
||||
}
|
||||
|
||||
runNewAM(amType, user, queue, oldAppId, jobStartTime, jobFinishTime,
|
||||
getTaskContainers(jsonJob), null, getAMContainerResource(jsonJob));
|
||||
int jobCount = 1;
|
||||
if (jsonJob.containsKey("job.count")) {
|
||||
jobCount = Integer.parseInt(jsonJob.get("job.count").toString());
|
||||
}
|
||||
jobCount = Math.max(jobCount, 1);
|
||||
|
||||
String oldAppId = (String)jsonJob.get("job.id");
|
||||
// Job id is generated automatically if this job configuration allows
|
||||
// multiple job instances
|
||||
if(jobCount > 1) {
|
||||
oldAppId = null;
|
||||
}
|
||||
|
||||
for (int i = 0; i < jobCount; i++) {
|
||||
runNewAM(amType, user, queue, oldAppId, jobStartTime, jobFinishTime,
|
||||
getTaskContainers(jsonJob), null, getAMContainerResource(jsonJob));
|
||||
}
|
||||
}
|
||||
|
||||
private List<ContainerSimulator> getTaskContainers(Map jsonJob)
|
||||
|
@ -732,6 +742,10 @@ public class SLSRunner extends Configured implements Tool {
|
|||
SLSConfiguration.AM_HEARTBEAT_INTERVAL_MS,
|
||||
SLSConfiguration.AM_HEARTBEAT_INTERVAL_MS_DEFAULT);
|
||||
boolean isTracked = trackedApps.contains(oldJobId);
|
||||
|
||||
if (oldJobId == null) {
|
||||
oldJobId = Integer.toString(AM_ID);
|
||||
}
|
||||
AM_ID++;
|
||||
|
||||
amSim.init(heartbeatInterval, containerList, rm, this, jobStartTimeMS,
|
||||
|
|
|
@ -336,8 +336,9 @@ Here we provide an example format of the sls json file, which contains 2 jobs. T
"job.start.ms" : 0, // job start time
"job.end.ms" : 95375, // job finish time, optional, the default value is 0
"job.queue.name" : "sls_queue_1", // the queue job will be submitted to
"job.id" : "job_1", // the job id used to track the job, optional, the default value is an zero-based integer increasing with number of jobs
"job.id" : "job_1", // the job id used to track the job, optional. The default value, an zero-based integer increasing with number of jobs, is used if this is not specified or job.count > 1
"job.user" : "default", // user, optional, the default value is "default"
"job.count" : 1, // number of jobs, optional, the default value is 1
"job.tasks" : [ {
"count": 1, // number of tasks, optional, the default value is 1
"container.host" : "/default-rack/node1", // host the container asks for
@ -18,18 +18,13 @@
|
|||
|
||||
package org.apache.hadoop.yarn.api.records;
|
||||
|
||||
import java.text.NumberFormat;
|
||||
import java.util.Iterator;
|
||||
import java.util.NoSuchElementException;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Private;
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Public;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Stable;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Unstable;
|
||||
import org.apache.hadoop.util.FastNumberFormat;
|
||||
import org.apache.hadoop.yarn.util.Records;
|
||||
|
||||
import com.google.common.base.Splitter;
|
||||
|
||||
/**
|
||||
* <p><code>ApplicationAttemptId</code> denotes the particular <em>attempt</em>
|
||||
* of an <code>ApplicationMaster</code> for a given {@link ApplicationId}.</p>
|
||||
|
@ -42,12 +37,14 @@ import com.google.common.base.Splitter;
|
|||
@Stable
|
||||
public abstract class ApplicationAttemptId implements
|
||||
Comparable<ApplicationAttemptId> {
|
||||
private static Splitter _spliter = Splitter.on('_').trimResults();
|
||||
|
||||
@Private
|
||||
@Unstable
|
||||
public static final String appAttemptIdStrPrefix = "appattempt";
|
||||
|
||||
private static final String APP_ATTEMPT_ID_PREFIX = appAttemptIdStrPrefix
|
||||
+ '_';
|
||||
|
||||
@Public
|
||||
@Unstable
|
||||
public static ApplicationAttemptId newInstance(ApplicationId appId,
|
||||
|
@ -84,16 +81,8 @@ public abstract class ApplicationAttemptId implements
|
|||
@Unstable
|
||||
protected abstract void setAttemptId(int attemptId);
|
||||
|
||||
static final ThreadLocal<NumberFormat> attemptIdFormat =
|
||||
new ThreadLocal<NumberFormat>() {
|
||||
@Override
|
||||
public NumberFormat initialValue() {
|
||||
NumberFormat fmt = NumberFormat.getInstance();
|
||||
fmt.setGroupingUsed(false);
|
||||
fmt.setMinimumIntegerDigits(6);
|
||||
return fmt;
|
||||
}
|
||||
};
|
||||
private static final int ATTEMPT_ID_MIN_DIGITS = 6;
|
||||
private static final int APP_ID_MIN_DIGITS = 4;
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
|
@ -135,12 +124,14 @@ public abstract class ApplicationAttemptId implements
|
|||
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder sb = new StringBuilder(appAttemptIdStrPrefix);
|
||||
sb.append("_");
|
||||
sb.append(this.getApplicationId().getClusterTimestamp()).append("_");
|
||||
sb.append(ApplicationId.appIdFormat.get().format(
|
||||
this.getApplicationId().getId()));
|
||||
sb.append("_").append(attemptIdFormat.get().format(getAttemptId()));
|
||||
StringBuilder sb = new StringBuilder(64);
|
||||
sb.append(APP_ATTEMPT_ID_PREFIX);
|
||||
ApplicationId appId = getApplicationId();
|
||||
sb.append(appId.getClusterTimestamp());
|
||||
sb.append('_');
|
||||
FastNumberFormat.format(sb, appId.getId(), APP_ID_MIN_DIGITS);
|
||||
sb.append('_');
|
||||
FastNumberFormat.format(sb, getAttemptId(), ATTEMPT_ID_MIN_DIGITS);
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
|
@ -148,29 +139,33 @@ public abstract class ApplicationAttemptId implements
|
|||
|
||||
@Public
|
||||
@Stable
|
||||
public static ApplicationAttemptId fromString(String applicationAttemptIdStr) {
|
||||
Iterator<String> it = _spliter.split(applicationAttemptIdStr).iterator();
|
||||
if (!it.next().equals(appAttemptIdStrPrefix)) {
|
||||
public static ApplicationAttemptId fromString(String appAttemptIdStr) {
|
||||
if (!appAttemptIdStr.startsWith(APP_ATTEMPT_ID_PREFIX)) {
|
||||
throw new IllegalArgumentException("Invalid AppAttemptId prefix: "
|
||||
+ applicationAttemptIdStr);
|
||||
+ appAttemptIdStr);
|
||||
}
|
||||
try {
|
||||
return toApplicationAttemptId(it);
|
||||
int pos1 = APP_ATTEMPT_ID_PREFIX.length() - 1;
|
||||
int pos2 = appAttemptIdStr.indexOf('_', pos1 + 1);
|
||||
if (pos2 < 0) {
|
||||
throw new IllegalArgumentException("Invalid AppAttemptId: "
|
||||
+ appAttemptIdStr);
|
||||
}
|
||||
long rmId = Long.parseLong(appAttemptIdStr.substring(pos1 + 1, pos2));
|
||||
int pos3 = appAttemptIdStr.indexOf('_', pos2 + 1);
|
||||
if (pos3 < 0) {
|
||||
throw new IllegalArgumentException("Invalid AppAttemptId: "
|
||||
+ appAttemptIdStr);
|
||||
}
|
||||
int appId = Integer.parseInt(appAttemptIdStr.substring(pos2 + 1, pos3));
|
||||
ApplicationId applicationId = ApplicationId.newInstance(rmId, appId);
|
||||
int attemptId = Integer.parseInt(appAttemptIdStr.substring(pos3 + 1));
|
||||
ApplicationAttemptId applicationAttemptId =
|
||||
ApplicationAttemptId.newInstance(applicationId, attemptId);
|
||||
return applicationAttemptId;
|
||||
} catch (NumberFormatException n) {
|
||||
throw new IllegalArgumentException("Invalid AppAttemptId: "
|
||||
+ applicationAttemptIdStr, n);
|
||||
} catch (NoSuchElementException e) {
|
||||
throw new IllegalArgumentException("Invalid AppAttemptId: "
|
||||
+ applicationAttemptIdStr, e);
|
||||
+ appAttemptIdStr, n);
|
||||
}
|
||||
}
|
||||
|
||||
private static ApplicationAttemptId toApplicationAttemptId(
|
||||
Iterator<String> it) throws NumberFormatException {
|
||||
ApplicationId appId = ApplicationId.newInstance(Long.parseLong(it.next()),
|
||||
Integer.parseInt(it.next()));
|
||||
ApplicationAttemptId appAttemptId =
|
||||
ApplicationAttemptId.newInstance(appId, Integer.parseInt(it.next()));
|
||||
return appAttemptId;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -18,18 +18,13 @@
|
|||
|
||||
package org.apache.hadoop.yarn.api.records;
|
||||
|
||||
import java.text.NumberFormat;
|
||||
import java.util.Iterator;
|
||||
import java.util.NoSuchElementException;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Private;
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Public;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Stable;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Unstable;
|
||||
import org.apache.hadoop.util.FastNumberFormat;
|
||||
import org.apache.hadoop.yarn.util.Records;
|
||||
|
||||
import com.google.common.base.Splitter;
|
||||
|
||||
/**
|
||||
* <p><code>ApplicationId</code> represents the <em>globally unique</em>
|
||||
* identifier for an application.</p>
|
||||
|
@ -42,12 +37,13 @@ import com.google.common.base.Splitter;
|
|||
@Public
|
||||
@Stable
|
||||
public abstract class ApplicationId implements Comparable<ApplicationId> {
|
||||
private static Splitter _spliter = Splitter.on('_').trimResults();
|
||||
|
||||
@Private
|
||||
@Unstable
|
||||
public static final String appIdStrPrefix = "application";
|
||||
|
||||
private static final String APPLICATION_ID_PREFIX = appIdStrPrefix + '_';
|
||||
|
||||
@Public
|
||||
@Unstable
|
||||
public static ApplicationId newInstance(long clusterTimestamp, int id) {
|
||||
|
@ -87,16 +83,7 @@ public abstract class ApplicationId implements Comparable<ApplicationId> {
|
|||
|
||||
protected abstract void build();
|
||||
|
||||
static final ThreadLocal<NumberFormat> appIdFormat =
|
||||
new ThreadLocal<NumberFormat>() {
|
||||
@Override
|
||||
public NumberFormat initialValue() {
|
||||
NumberFormat fmt = NumberFormat.getInstance();
|
||||
fmt.setGroupingUsed(false);
|
||||
fmt.setMinimumIntegerDigits(4);
|
||||
return fmt;
|
||||
}
|
||||
};
|
||||
private static final int APP_ID_MIN_DIGITS = 4;
|
||||
|
||||
@Override
|
||||
public int compareTo(ApplicationId other) {
|
||||
|
@ -110,37 +97,38 @@ public abstract class ApplicationId implements Comparable<ApplicationId> {
|
|||
|
||||
@Override
|
||||
public String toString() {
|
||||
return appIdStrPrefix + "_" + this.getClusterTimestamp() + "_" + appIdFormat
|
||||
.get().format(getId());
|
||||
}
|
||||
|
||||
private static ApplicationId toApplicationId(
|
||||
Iterator<String> it) throws NumberFormatException {
|
||||
ApplicationId appId = ApplicationId.newInstance(Long.parseLong(it.next()),
|
||||
Integer.parseInt(it.next()));
|
||||
return appId;
|
||||
StringBuilder sb = new StringBuilder(64);
|
||||
sb.append(APPLICATION_ID_PREFIX);
|
||||
sb.append(getClusterTimestamp());
|
||||
sb.append('_');
|
||||
FastNumberFormat.format(sb, getId(), APP_ID_MIN_DIGITS);
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
@Public
|
||||
@Stable
|
||||
public static ApplicationId fromString(String appIdStr) {
|
||||
Iterator<String> it = _spliter.split((appIdStr)).iterator();
|
||||
if (!it.next().equals(appIdStrPrefix)) {
|
||||
if (!appIdStr.startsWith(APPLICATION_ID_PREFIX)) {
|
||||
throw new IllegalArgumentException("Invalid ApplicationId prefix: "
|
||||
+ appIdStr + ". The valid ApplicationId should start with prefix "
|
||||
+ appIdStrPrefix);
|
||||
}
|
||||
try {
|
||||
return toApplicationId(it);
|
||||
int pos1 = APPLICATION_ID_PREFIX.length() - 1;
|
||||
int pos2 = appIdStr.indexOf('_', pos1 + 1);
|
||||
if (pos2 < 0) {
|
||||
throw new IllegalArgumentException("Invalid ApplicationId: "
|
||||
+ appIdStr);
|
||||
}
|
||||
long rmId = Long.parseLong(appIdStr.substring(pos1 + 1, pos2));
|
||||
int appId = Integer.parseInt(appIdStr.substring(pos2 + 1));
|
||||
ApplicationId applicationId = ApplicationId.newInstance(rmId, appId);
|
||||
return applicationId;
|
||||
} catch (NumberFormatException n) {
|
||||
throw new IllegalArgumentException("Invalid ApplicationId: "
|
||||
+ appIdStr, n);
|
||||
} catch (NoSuchElementException e) {
|
||||
throw new IllegalArgumentException("Invalid ApplicationId: "
|
||||
+ appIdStr, e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
// Generated by eclipse.
|
||||
|
|
|
@ -18,16 +18,11 @@
|
|||
|
||||
package org.apache.hadoop.yarn.api.records;
|
||||
|
||||
import com.google.common.base.Splitter;
|
||||
|
||||
import java.text.NumberFormat;
|
||||
import java.util.Iterator;
|
||||
import java.util.NoSuchElementException;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Private;
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Public;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Stable;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Unstable;
|
||||
import org.apache.hadoop.util.FastNumberFormat;
|
||||
import org.apache.hadoop.yarn.util.Records;
|
||||
|
||||
/**
|
||||
|
@ -38,8 +33,7 @@ import org.apache.hadoop.yarn.util.Records;
|
|||
@Stable
|
||||
public abstract class ContainerId implements Comparable<ContainerId>{
|
||||
public static final long CONTAINER_ID_BITMASK = 0xffffffffffL;
|
||||
private static final Splitter _SPLITTER = Splitter.on('_').trimResults();
|
||||
private static final String CONTAINER_PREFIX = "container";
|
||||
private static final String CONTAINER_PREFIX = "container_";
|
||||
private static final String EPOCH_PREFIX = "e";
|
||||
|
||||
@Public
|
||||
|
@ -115,29 +109,13 @@ public abstract class ContainerId implements Comparable<ContainerId>{
|
|||
protected abstract void setContainerId(long id);
|
||||
|
||||
|
||||
// TODO: fail the app submission if attempts are more than 10 or something
|
||||
private static final ThreadLocal<NumberFormat> appAttemptIdAndEpochFormat =
|
||||
new ThreadLocal<NumberFormat>() {
|
||||
@Override
|
||||
public NumberFormat initialValue() {
|
||||
NumberFormat fmt = NumberFormat.getInstance();
|
||||
fmt.setGroupingUsed(false);
|
||||
fmt.setMinimumIntegerDigits(2);
|
||||
return fmt;
|
||||
}
|
||||
};
|
||||
// TODO: Why thread local?
|
||||
// ^ NumberFormat instances are not threadsafe
|
||||
private static final ThreadLocal<NumberFormat> containerIdFormat =
|
||||
new ThreadLocal<NumberFormat>() {
|
||||
@Override
|
||||
public NumberFormat initialValue() {
|
||||
NumberFormat fmt = NumberFormat.getInstance();
|
||||
fmt.setGroupingUsed(false);
|
||||
fmt.setMinimumIntegerDigits(6);
|
||||
return fmt;
|
||||
}
|
||||
};
|
||||
private static final int APP_ID_MIN_DIGITS = 4;
|
||||
|
||||
private static final int ATTEMPT_ID_MIN_DIGITS = 2;
|
||||
|
||||
private static final int EPOCH_MIN_DIGITS = 2;
|
||||
|
||||
private static final int CONTAINER_ID_MIN_DIGITS = 6;
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
|
@ -185,72 +163,85 @@ public abstract class ContainerId implements Comparable<ContainerId>{
|
|||
*/
|
||||
@Override
|
||||
public String toString() {
|
||||
StringBuilder sb = new StringBuilder();
|
||||
sb.append(CONTAINER_PREFIX + "_");
|
||||
StringBuilder sb = new StringBuilder(64);
|
||||
sb.append(CONTAINER_PREFIX);
|
||||
long epoch = getContainerId() >> 40;
|
||||
if (epoch > 0) {
|
||||
sb.append(EPOCH_PREFIX)
|
||||
.append(appAttemptIdAndEpochFormat.get().format(epoch)).append("_");;
|
||||
sb.append(EPOCH_PREFIX);
|
||||
FastNumberFormat.format(sb, epoch, EPOCH_MIN_DIGITS);
|
||||
sb.append('_');
|
||||
}
|
||||
ApplicationId appId = getApplicationAttemptId().getApplicationId();
|
||||
sb.append(appId.getClusterTimestamp()).append("_");
|
||||
sb.append(ApplicationId.appIdFormat.get().format(appId.getId()))
|
||||
.append("_");
|
||||
sb.append(
|
||||
appAttemptIdAndEpochFormat.get().format(
|
||||
getApplicationAttemptId().getAttemptId())).append("_");
|
||||
sb.append(containerIdFormat.get()
|
||||
.format(CONTAINER_ID_BITMASK & getContainerId()));
|
||||
sb.append(appId.getClusterTimestamp());
|
||||
sb.append('_');
|
||||
FastNumberFormat.format(sb, appId.getId(), APP_ID_MIN_DIGITS);
|
||||
sb.append('_');
|
||||
FastNumberFormat.format(sb, getApplicationAttemptId().getAttemptId(),
|
||||
ATTEMPT_ID_MIN_DIGITS);
|
||||
sb.append('_');
|
||||
FastNumberFormat.format(sb, CONTAINER_ID_BITMASK & getContainerId(),
|
||||
CONTAINER_ID_MIN_DIGITS);
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
@Public
|
||||
@Stable
|
||||
public static ContainerId fromString(String containerIdStr) {
|
||||
Iterator<String> it = _SPLITTER.split(containerIdStr).iterator();
|
||||
if (!it.next().equals(CONTAINER_PREFIX)) {
|
||||
if (!containerIdStr.startsWith(CONTAINER_PREFIX)) {
|
||||
throw new IllegalArgumentException("Invalid ContainerId prefix: "
|
||||
+ containerIdStr);
|
||||
}
|
||||
try {
|
||||
String epochOrClusterTimestampStr = it.next();
|
||||
int pos1 = CONTAINER_PREFIX.length() - 1;
|
||||
|
||||
long epoch = 0;
|
||||
ApplicationAttemptId appAttemptID = null;
|
||||
if (epochOrClusterTimestampStr.startsWith(EPOCH_PREFIX)) {
|
||||
String epochStr = epochOrClusterTimestampStr;
|
||||
epoch = Integer.parseInt(epochStr.substring(EPOCH_PREFIX.length()));
|
||||
appAttemptID = toApplicationAttemptId(it);
|
||||
} else {
|
||||
String clusterTimestampStr = epochOrClusterTimestampStr;
|
||||
long clusterTimestamp = Long.parseLong(clusterTimestampStr);
|
||||
appAttemptID = toApplicationAttemptId(clusterTimestamp, it);
|
||||
if (containerIdStr.regionMatches(pos1 + 1, EPOCH_PREFIX, 0,
|
||||
EPOCH_PREFIX.length())) {
|
||||
int pos2 = containerIdStr.indexOf('_', pos1 + 1);
|
||||
if (pos2 < 0) {
|
||||
throw new IllegalArgumentException("Invalid ContainerId: "
|
||||
+ containerIdStr);
|
||||
}
|
||||
String epochStr = containerIdStr.substring(
|
||||
pos1 + 1 + EPOCH_PREFIX.length(), pos2);
|
||||
epoch = Integer.parseInt(epochStr);
|
||||
// rewind the current position
|
||||
pos1 = pos2;
|
||||
}
|
||||
long id = Long.parseLong(it.next());
|
||||
int pos2 = containerIdStr.indexOf('_', pos1 + 1);
|
||||
if (pos2 < 0) {
|
||||
throw new IllegalArgumentException("Invalid ContainerId: "
|
||||
+ containerIdStr);
|
||||
}
|
||||
long clusterTimestamp = Long.parseLong(
|
||||
containerIdStr.substring(pos1 + 1, pos2));
|
||||
|
||||
int pos3 = containerIdStr.indexOf('_', pos2 + 1);
|
||||
if (pos3 < 0) {
|
||||
throw new IllegalArgumentException("Invalid ContainerId: "
|
||||
+ containerIdStr);
|
||||
}
|
||||
int appId = Integer.parseInt(containerIdStr.substring(pos2 + 1, pos3));
|
||||
ApplicationId applicationId = ApplicationId.newInstance(clusterTimestamp,
|
||||
appId);
|
||||
int pos4 = containerIdStr.indexOf('_', pos3 + 1);
|
||||
if (pos4 < 0) {
|
||||
throw new IllegalArgumentException("Invalid ContainerId: "
|
||||
+ containerIdStr);
|
||||
}
|
||||
int attemptId = Integer.parseInt(
|
||||
containerIdStr.substring(pos3 + 1, pos4));
|
||||
ApplicationAttemptId appAttemptId =
|
||||
ApplicationAttemptId.newInstance(applicationId, attemptId);
|
||||
long id = Long.parseLong(containerIdStr.substring(pos4 + 1));
|
||||
long cid = (epoch << 40) | id;
|
||||
ContainerId containerId = ContainerId.newContainerId(appAttemptID, cid);
|
||||
ContainerId containerId = ContainerId.newContainerId(appAttemptId, cid);
|
||||
return containerId;
|
||||
} catch (NumberFormatException n) {
|
||||
throw new IllegalArgumentException("Invalid ContainerId: "
|
||||
+ containerIdStr, n);
|
||||
} catch (NoSuchElementException e) {
|
||||
throw new IllegalArgumentException("Invalid ContainerId: "
|
||||
+ containerIdStr, e);
|
||||
}
|
||||
}
|
||||
|
||||
private static ApplicationAttemptId toApplicationAttemptId(
|
||||
Iterator<String> it) throws NumberFormatException {
|
||||
return toApplicationAttemptId(Long.parseLong(it.next()), it);
|
||||
}
|
||||
|
||||
private static ApplicationAttemptId toApplicationAttemptId(
|
||||
long clusterTimestamp, Iterator<String> it) throws NumberFormatException {
|
||||
ApplicationId appId = ApplicationId.newInstance(clusterTimestamp,
|
||||
Integer.parseInt(it.next()));
|
||||
ApplicationAttemptId appAttemptId =
|
||||
ApplicationAttemptId.newInstance(appId, Integer.parseInt(it.next()));
|
||||
return appAttemptId;
|
||||
}
|
||||
|
||||
protected abstract void build();
|
||||
}
|
||||
|
|
|
@ -19,11 +19,11 @@
|
|||
package org.apache.hadoop.yarn.api.records;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.text.NumberFormat;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Private;
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Public;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Unstable;
|
||||
import org.apache.hadoop.util.FastNumberFormat;
|
||||
import org.apache.hadoop.yarn.util.Records;
|
||||
|
||||
/**
|
||||
|
@ -89,16 +89,7 @@ public abstract class ReservationId implements Comparable<ReservationId> {
|
|||
|
||||
protected abstract void build();
|
||||
|
||||
static final ThreadLocal<NumberFormat> reservIdFormat =
|
||||
new ThreadLocal<NumberFormat>() {
|
||||
@Override
|
||||
public NumberFormat initialValue() {
|
||||
NumberFormat fmt = NumberFormat.getInstance();
|
||||
fmt.setGroupingUsed(false);
|
||||
fmt.setMinimumIntegerDigits(4);
|
||||
return fmt;
|
||||
}
|
||||
};
|
||||
private static final int RESERVATION_ID_MIN_DIGITS = 4;
|
||||
|
||||
@Override
|
||||
public int compareTo(ReservationId other) {
|
||||
|
@ -112,8 +103,12 @@ public abstract class ReservationId implements Comparable<ReservationId> {
|
|||
|
||||
@Override
|
||||
public String toString() {
|
||||
return reserveIdStrPrefix + this.getClusterTimestamp() + "_"
|
||||
+ reservIdFormat.get().format(getId());
|
||||
StringBuilder sb = new StringBuilder(64);
|
||||
sb.append(reserveIdStrPrefix);
|
||||
sb.append(getClusterTimestamp());
|
||||
sb.append('_');
|
||||
FastNumberFormat.format(sb, getId(), RESERVATION_ID_MIN_DIGITS);
|
||||
return sb.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -772,10 +772,10 @@ public class AMRMClientImpl<T extends ContainerRequest> extends AMRMClient<T> {
return;
}

// Don't support specifying >= 2 node labels in a node label expression now
// Don't support specifying > 1 node labels in a node label expression now
if (exp.contains("&&") || exp.contains("||")) {
throw new InvalidContainerRequestException(
"Cannot specify more than two node labels"
"Cannot specify more than one node label"
+ " in a single node label expression");
}
}
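As an illustration of the reworded check above, a minimal sketch of how it surfaces to AMRMClient callers. This is not part of the patch; the resource, priority, and label values are made-up, and amrmClient is assumed to be an already-started AMRMClient<ContainerRequest> inside an ApplicationMaster.

```java
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;

// Inside an ApplicationMaster, after amrmClient.start():
ContainerRequest singleLabel = new ContainerRequest(
    Resource.newInstance(1024, 1), null, null, Priority.newInstance(0),
    true, "gpu");
amrmClient.addContainerRequest(singleLabel);   // one label: passes the check

ContainerRequest multiLabel = new ContainerRequest(
    Resource.newInstance(1024, 1), null, null, Priority.newInstance(0),
    true, "gpu && ssd");
amrmClient.addContainerRequest(multiLabel);    // contains "&&": rejected with
                                               // InvalidContainerRequestException
```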
@ -222,6 +222,14 @@ extends ContainerLaunchContext {
|
|||
throw new NullPointerException(
|
||||
"Null resource URL for local resource " + rsrcEntry.getKey() + " : "
|
||||
+ rsrcEntry.getValue());
|
||||
} else if (rsrcEntry.getValue().getType() == null) {
|
||||
throw new NullPointerException(
|
||||
"Null resource type for local resource " + rsrcEntry.getKey() + " : "
|
||||
+ rsrcEntry.getValue());
|
||||
} else if (rsrcEntry.getValue().getVisibility() == null) {
|
||||
throw new NullPointerException(
|
||||
"Null resource visibility for local resource " + rsrcEntry.getKey() + " : "
|
||||
+ rsrcEntry.getValue());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -25,6 +25,7 @@ import java.util.HashMap;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.io.DataOutputBuffer;
|
||||
import org.apache.hadoop.security.Credentials;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
|
||||
|
@ -32,6 +33,7 @@ import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
|
|||
import org.apache.hadoop.yarn.api.records.LocalResource;
|
||||
import org.apache.hadoop.yarn.api.records.LocalResourceType;
|
||||
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
|
||||
import org.apache.hadoop.yarn.api.records.URL;
|
||||
import org.apache.hadoop.yarn.factories.RecordFactory;
|
||||
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
|
||||
import org.junit.Assert;
|
||||
|
@ -95,4 +97,54 @@ public class TestApplicationClientProtocolRecords {
|
|||
Assert.assertTrue(e.getMessage().contains("Null resource URL for local resource"));
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* This test validates the scenario in which the client sets a null value for
|
||||
* local resource type.
|
||||
*/
|
||||
@Test
|
||||
public void testCLCPBImplNullResourceType() throws IOException {
|
||||
RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
|
||||
try {
|
||||
LocalResource resource = recordFactory.newRecordInstance(LocalResource.class);
|
||||
resource.setResource(URL.fromPath(new Path(".")));
|
||||
resource.setSize(-1);
|
||||
resource.setVisibility(LocalResourceVisibility.APPLICATION);
|
||||
resource.setType(null);
|
||||
resource.setTimestamp(System.currentTimeMillis());
|
||||
Map<String, LocalResource> localResources =
|
||||
new HashMap<String, LocalResource>();
|
||||
localResources.put("null_type_resource", resource);
|
||||
ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
|
||||
containerLaunchContext.setLocalResources(localResources);
|
||||
Assert.fail("Setting an invalid local resource should be an error!");
|
||||
} catch (NullPointerException e) {
|
||||
Assert.assertTrue(e.getMessage().contains("Null resource type for local resource"));
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* This test validates the scenario in which the client sets a null value for
|
||||
* local resource type.
|
||||
*/
|
||||
@Test
|
||||
public void testCLCPBImplNullResourceVisibility() throws IOException {
|
||||
RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
|
||||
try {
|
||||
LocalResource resource = recordFactory.newRecordInstance(LocalResource.class);
|
||||
resource.setResource(URL.fromPath(new Path(".")));
|
||||
resource.setSize(-1);
|
||||
resource.setVisibility(null);
|
||||
resource.setType(LocalResourceType.FILE);
|
||||
resource.setTimestamp(System.currentTimeMillis());
|
||||
Map<String, LocalResource> localResources =
|
||||
new HashMap<String, LocalResource>();
|
||||
localResources.put("null_visibility_resource", resource);
|
||||
ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class);
|
||||
containerLaunchContext.setLocalResources(localResources);
|
||||
Assert.fail("Setting an invalid local resource should be an error!");
|
||||
} catch (NullPointerException e) {
|
||||
Assert.assertTrue(e.getMessage().contains("Null resource visibility for local resource"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -213,6 +213,6 @@ public class RegistryPathUtils {
* @return a string suitable for use in registry paths.
*/
public static String encodeYarnID(String yarnId) {
return yarnId.replace("_", "-");
return yarnId.replace("container", "ctr").replace("_", "-");
}
}
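A small worked example of this change; the container ID below is a made-up value. The old form only swapped underscores, while the new form also shortens the "container" prefix, which keeps the encoded ID shorter when it is later used as a DNS-style name.

```java
String yarnId = "container_e17_1500000000000_0001_01_000002";   // example ID only
// before this change: "container-e17-1500000000000-0001-01-000002"
// after this change:  "ctr-e17-1500000000000-0001-01-000002"
String encoded = RegistryPathUtils.encodeYarnID(yarnId);
```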
@ -19,7 +19,7 @@
|
|||
package org.apache.hadoop.registry.client.types;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
|
||||
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
|
||||
import com.fasterxml.jackson.annotation.JsonInclude;
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
@ -46,7 +46,7 @@ import java.util.Map;
|
|||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Evolving
|
||||
@JsonIgnoreProperties(ignoreUnknown = true)
|
||||
@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
|
||||
@JsonInclude(JsonInclude.Include.NON_NULL)
|
||||
public final class Endpoint implements Cloneable {
|
||||
|
||||
/**
|
||||
|
|
|
@ -20,7 +20,7 @@ package org.apache.hadoop.registry.client.types;
|
|||
|
||||
import com.fasterxml.jackson.annotation.JsonAnyGetter;
|
||||
import com.fasterxml.jackson.annotation.JsonAnySetter;
|
||||
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
|
||||
import com.fasterxml.jackson.annotation.JsonInclude;
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
@ -37,7 +37,7 @@ import java.util.Map;
|
|||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Evolving
|
||||
@JsonSerialize(include = JsonSerialize.Inclusion.NON_NULL)
|
||||
@JsonInclude(JsonInclude.Include.NON_NULL)
|
||||
public class ServiceRecord implements Cloneable {
|
||||
|
||||
/**
|
||||
|
|
|
@ -51,6 +51,10 @@
|
|||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-yarn-api</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-yarn-registry</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>javax.xml.bind</groupId>
|
||||
<artifactId>jaxb-api</artifactId>
|
||||
|
|
|
@ -1018,6 +1018,12 @@ public class ContainerManagerImpl extends CompositeService implements
|
|||
if (rsrc.getValue() == null || rsrc.getValue().getResource() == null) {
|
||||
throw new YarnException(
|
||||
"Null resource URL for local resource " + rsrc.getKey() + " : " + rsrc.getValue());
|
||||
} else if (rsrc.getValue().getType() == null) {
|
||||
throw new YarnException(
|
||||
"Null resource type for local resource " + rsrc.getKey() + " : " + rsrc.getValue());
|
||||
} else if (rsrc.getValue().getVisibility() == null) {
|
||||
throw new YarnException(
|
||||
"Null resource visibility for local resource " + rsrc.getKey() + " : " + rsrc.getValue());
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
|
|||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.authorize.AccessControlList;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
|
@ -101,6 +102,11 @@ import static org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.r
|
|||
* property.
|
||||
* </li>
|
||||
* <li>
|
||||
* {@code YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_HOSTNAME} sets the
|
||||
* hostname to be used by the Docker container. If not specified, a
|
||||
* hostname will be derived from the container ID.
|
||||
* </li>
|
||||
* <li>
|
||||
* {@code YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER}
|
||||
* controls whether the Docker container is a privileged container. In order
|
||||
* to use privileged containers, the
|
||||
|
@ -134,6 +140,10 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
|
|||
"^(([a-zA-Z0-9.-]+)(:\\d+)?/)?([a-z0-9_./-]+)(:[\\w.-]+)?$";
|
||||
private static final Pattern dockerImagePattern =
|
||||
Pattern.compile(DOCKER_IMAGE_PATTERN);
|
||||
public static final String HOSTNAME_PATTERN =
|
||||
"^[a-zA-Z0-9][a-zA-Z0-9_.-]+$";
|
||||
private static final Pattern hostnamePattern = Pattern.compile(
|
||||
HOSTNAME_PATTERN);
|
||||
|
||||
@InterfaceAudience.Private
|
||||
public static final String ENV_DOCKER_CONTAINER_IMAGE =
|
||||
|
@ -147,6 +157,10 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
|
|||
@InterfaceAudience.Private
|
||||
public static final String ENV_DOCKER_CONTAINER_NETWORK =
|
||||
"YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK";
|
||||
@InterfaceAudience.Private
|
||||
public static final String ENV_DOCKER_CONTAINER_HOSTNAME =
|
||||
"YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_HOSTNAME";
|
||||
@InterfaceAudience.Private
|
||||
public static final String ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER =
|
||||
"YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER";
|
||||
@InterfaceAudience.Private
|
||||
|
@ -211,9 +225,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
|
|||
this.privilegedOperationExecutor = privilegedOperationExecutor;
|
||||
|
||||
if (cGroupsHandler == null) {
|
||||
if (LOG.isInfoEnabled()) {
|
||||
LOG.info("cGroupsHandler is null - cgroups not in use.");
|
||||
}
|
||||
LOG.info("cGroupsHandler is null - cgroups not in use.");
|
||||
} else {
|
||||
this.cGroupsHandler = cGroupsHandler;
|
||||
}
|
||||
|
@ -267,6 +279,29 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
throw new ContainerExecutionException(msg);
}

public static void validateHostname(String hostname) throws
ContainerExecutionException {
if (hostname != null && !hostname.isEmpty()) {
if (!hostnamePattern.matcher(hostname).matches()) {
throw new ContainerExecutionException("Hostname '" + hostname
+ "' doesn't match docker hostname pattern");
}
}
}

/** Set a DNS friendly hostname. */
private void setHostname(DockerRunCommand runCommand, String
containerIdStr, String name)
throws ContainerExecutionException {
if (name == null || name.isEmpty()) {
name = RegistryPathUtils.encodeYarnID(containerIdStr);
validateHostname(name);
}

LOG.info("setting hostname in container to: " + name);
runCommand.setHostname(name);
}

/**
* If CGROUPS in enabled and not set to none, then set the CGROUP parent for
* the command instance.
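A brief sketch of how the two paths above behave; the hostnames here are invented examples checked against HOSTNAME_PATTERN, and the fallback path reuses encodeYarnID exactly as the hunk shows.

```java
// Hostname supplied via YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_HOSTNAME:
DockerLinuxContainerRuntime.validateHostname("worker-07.test");  // matches the pattern, accepted
// DockerLinuxContainerRuntime.validateHostname("-worker");      // leading '-' would throw
//                                                               // ContainerExecutionException

// No hostname supplied: setHostname() derives one from the container ID via
// RegistryPathUtils.encodeYarnID(containerIdStr) and validates it the same way.
```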
@ -343,10 +378,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
return false;
}

if (LOG.isInfoEnabled()) {
LOG.info("Privileged container requested for : " + container
.getContainerId().toString());
}
LOG.info("Privileged container requested for : " + container
.getContainerId().toString());

//Ok, so we have been asked to run a privileged container. Security
// checks need to be run. Each violation is an error.

@ -375,10 +408,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
throw new ContainerExecutionException(message);
}

if (LOG.isInfoEnabled()) {
LOG.info("All checks pass. Launching privileged container for : "
+ container.getContainerId().toString());
}
LOG.info("All checks pass. Launching privileged container for : "
+ container.getContainerId().toString());

return true;
}
@ -413,6 +444,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
.getEnvironment();
String imageName = environment.get(ENV_DOCKER_CONTAINER_IMAGE);
String network = environment.get(ENV_DOCKER_CONTAINER_NETWORK);
String hostname = environment.get(ENV_DOCKER_CONTAINER_HOSTNAME);

if(network == null || network.isEmpty()) {
network = defaultNetwork;

@ -420,6 +452,8 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {

validateContainerNetworkType(network);

validateHostname(hostname);

validateImageName(imageName);

String containerIdStr = container.getContainerId().toString();
@ -450,12 +484,13 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
runAsUser, imageName)
.detachOnRun()
.setContainerWorkDir(containerWorkDir.toString())
.setNetworkType(network)
.setCapabilities(capabilities)
.setNetworkType(network);
setHostname(runCommand, containerIdStr, hostname);
runCommand.setCapabilities(capabilities)
.addMountLocation(CGROUPS_ROOT_DIRECTORY,
CGROUPS_ROOT_DIRECTORY + ":ro", false);
List<String> allDirs = new ArrayList<>(containerLocalDirs);

List<String> allDirs = new ArrayList<>(containerLocalDirs);
allDirs.addAll(filecacheDirs);
allDirs.add(containerWorkDir.toString());
allDirs.addAll(containerLogDirs);

@ -493,9 +528,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime {
ENV_DOCKER_CONTAINER_RUN_OVERRIDE_DISABLE);

if (disableOverride != null && disableOverride.equals("true")) {
if (LOG.isInfoEnabled()) {
LOG.info("command override disabled");
}
LOG.info("command override disabled");
} else {
List<String> overrideCommands = new ArrayList<>();
Path launchDst =
@ -91,6 +91,12 @@ public class DockerRunCommand extends DockerCommand {

return this;
}

public DockerRunCommand setHostname(String hostname) {
super.addCommandArguments("--hostname=" + hostname);
return this;
}

public DockerRunCommand addDevice(String sourceDevice, String
destinationDevice) {
super.addCommandArguments("--device=" + sourceDevice + ":" +
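A simplified stand-in (not the real DockerRunCommand class) showing how the new setHostname call chains with the other builder methods to contribute a --hostname flag to the generated docker run line:

import java.util.StringJoiner;

public class DockerRunSketch {
  // Accumulates docker run arguments in order, mirroring addCommandArguments().
  private final StringJoiner args = new StringJoiner(" ", "run ", "");

  public DockerRunSketch setNetworkType(String network) {
    args.add("--net=" + network);
    return this;
  }

  public DockerRunSketch setHostname(String hostname) {
    args.add("--hostname=" + hostname);
    return this;
  }

  @Override
  public String toString() {
    return args.toString();
  }

  public static void main(String[] argv) {
    // Prints: run --net=host --hostname=test.host.name
    System.out.println(
        new DockerRunSketch().setNetworkType("host").setHostname("test.host.name"));
  }
}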
@ -1215,6 +1215,7 @@ char* sanitize_docker_command(const char *line) {
{"rm", no_argument, 0, 'r' },
{"workdir", required_argument, 0, 'w' },
{"net", required_argument, 0, 'e' },
{"hostname", required_argument, 0, 'h' },
{"cgroup-parent", required_argument, 0, 'g' },
{"privileged", no_argument, 0, 'p' },
{"cap-add", required_argument, 0, 'a' },

@ -1256,6 +1257,9 @@ char* sanitize_docker_command(const char *line) {
case 'e':
quote_and_append_arg(&output, &output_size, "--net=", optarg);
break;
case 'h':
quote_and_append_arg(&output, &output_size, "--hostname=", optarg);
break;
case 'v':
quote_and_append_arg(&output, &output_size, "-v ", optarg);
break;
@ -1088,17 +1088,17 @@ void test_trim_function() {
|
|||
void test_sanitize_docker_command() {
|
||||
|
||||
char *input[] = {
|
||||
"run --name=cname --user=nobody -d --workdir=/yarn/local/cdir --privileged --rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true --cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --cap-drop=ALL --cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP --cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID --cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE --cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v /yarn/local/cdir:/yarn/local/cdir -v /yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu bash /yarn/local/usercache/test/appcache/aid/cid/launch_container.sh",
|
||||
"run --name=$CID --user=nobody -d --workdir=/yarn/local/cdir --privileged --rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true --cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --cap-drop=ALL --cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP --cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID --cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE --cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v /yarn/local/cdir:/yarn/local/cdir -v /yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu bash /yarn/local/usercache/test/appcache/aid/cid/launch_container.sh",
|
||||
"run --name=cname --user=nobody -d --workdir=/yarn/local/cdir --privileged --rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true --cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --cap-drop=ALL --cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP --cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID --cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE --cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v /yarn/local/cdir:/yarn/local/cdir -v /yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu || touch /tmp/file # bash /yarn/local/usercache/test/appcache/aid/cid/launch_container.sh",
|
||||
"run --name=cname --user=nobody -d --workdir=/yarn/local/cdir --privileged --rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true --cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --cap-drop=ALL --cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP --cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID --cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE --cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v /yarn/local/cdir:/yarn/local/cdir -v /yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu' || touch /tmp/file # bash /yarn/local/usercache/test/appcache/aid/cid/launch_container.sh",
|
||||
"run --name=cname --user=nobody -d --workdir=/yarn/local/cdir --privileged --rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true --cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --hostname=test.host.name --cap-drop=ALL --cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP --cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID --cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE --cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v /yarn/local/cdir:/yarn/local/cdir -v /yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu bash /yarn/local/usercache/test/appcache/aid/cid/launch_container.sh",
|
||||
"run --name=$CID --user=nobody -d --workdir=/yarn/local/cdir --privileged --rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true --cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --hostname=test.host.name --cap-drop=ALL --cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP --cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID --cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE --cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v /yarn/local/cdir:/yarn/local/cdir -v /yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu bash /yarn/local/usercache/test/appcache/aid/cid/launch_container.sh",
|
||||
"run --name=cname --user=nobody -d --workdir=/yarn/local/cdir --privileged --rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true --cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --hostname=test.host.name --cap-drop=ALL --cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP --cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID --cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE --cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v /yarn/local/cdir:/yarn/local/cdir -v /yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu || touch /tmp/file # bash /yarn/local/usercache/test/appcache/aid/cid/launch_container.sh",
|
||||
"run --name=cname --user=nobody -d --workdir=/yarn/local/cdir --privileged --rm --device=/sys/fs/cgroup/device:/sys/fs/cgroup/device --detach=true --cgroup-parent=/sys/fs/cgroup/cpu/yarn/cid --net=host --hostname=test.host.name --cap-drop=ALL --cap-add=SYS_CHROOT --cap-add=MKNOD --cap-add=SETFCAP --cap-add=SETPCAP --cap-add=FSETID --cap-add=CHOWN --cap-add=AUDIT_WRITE --cap-add=SETGID --cap-add=NET_RAW --cap-add=FOWNER --cap-add=SETUID --cap-add=DAC_OVERRIDE --cap-add=KILL --cap-add=NET_BIND_SERVICE -v /sys/fs/cgroup:/sys/fs/cgroup:ro -v /yarn/local/cdir:/yarn/local/cdir -v /yarn/local/usercache/test/:/yarn/local/usercache/test/ ubuntu' || touch /tmp/file # bash /yarn/local/usercache/test/appcache/aid/cid/launch_container.sh",
|
||||
"run ''''''''"
|
||||
};
|
||||
char *expected_output[] = {
|
||||
"run --name='cname' --user='nobody' -d --workdir='/yarn/local/cdir' --privileged --rm --device='/sys/fs/cgroup/device:/sys/fs/cgroup/device' --detach='true' --cgroup-parent='/sys/fs/cgroup/cpu/yarn/cid' --net='host' --cap-drop='ALL' --cap-add='SYS_CHROOT' --cap-add='MKNOD' --cap-add='SETFCAP' --cap-add='SETPCAP' --cap-add='FSETID' --cap-add='CHOWN' --cap-add='AUDIT_WRITE' --cap-add='SETGID' --cap-add='NET_RAW' --cap-add='FOWNER' --cap-add='SETUID' --cap-add='DAC_OVERRIDE' --cap-add='KILL' --cap-add='NET_BIND_SERVICE' -v '/sys/fs/cgroup:/sys/fs/cgroup:ro' -v '/yarn/local/cdir:/yarn/local/cdir' -v '/yarn/local/usercache/test/:/yarn/local/usercache/test/' 'ubuntu' 'bash' '/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh' ",
|
||||
"run --name='$CID' --user='nobody' -d --workdir='/yarn/local/cdir' --privileged --rm --device='/sys/fs/cgroup/device:/sys/fs/cgroup/device' --detach='true' --cgroup-parent='/sys/fs/cgroup/cpu/yarn/cid' --net='host' --cap-drop='ALL' --cap-add='SYS_CHROOT' --cap-add='MKNOD' --cap-add='SETFCAP' --cap-add='SETPCAP' --cap-add='FSETID' --cap-add='CHOWN' --cap-add='AUDIT_WRITE' --cap-add='SETGID' --cap-add='NET_RAW' --cap-add='FOWNER' --cap-add='SETUID' --cap-add='DAC_OVERRIDE' --cap-add='KILL' --cap-add='NET_BIND_SERVICE' -v '/sys/fs/cgroup:/sys/fs/cgroup:ro' -v '/yarn/local/cdir:/yarn/local/cdir' -v '/yarn/local/usercache/test/:/yarn/local/usercache/test/' 'ubuntu' 'bash' '/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh' ",
|
||||
"run --name='cname' --user='nobody' -d --workdir='/yarn/local/cdir' --privileged --rm --device='/sys/fs/cgroup/device:/sys/fs/cgroup/device' --detach='true' --cgroup-parent='/sys/fs/cgroup/cpu/yarn/cid' --net='host' --cap-drop='ALL' --cap-add='SYS_CHROOT' --cap-add='MKNOD' --cap-add='SETFCAP' --cap-add='SETPCAP' --cap-add='FSETID' --cap-add='CHOWN' --cap-add='AUDIT_WRITE' --cap-add='SETGID' --cap-add='NET_RAW' --cap-add='FOWNER' --cap-add='SETUID' --cap-add='DAC_OVERRIDE' --cap-add='KILL' --cap-add='NET_BIND_SERVICE' -v '/sys/fs/cgroup:/sys/fs/cgroup:ro' -v '/yarn/local/cdir:/yarn/local/cdir' -v '/yarn/local/usercache/test/:/yarn/local/usercache/test/' 'ubuntu' '||' 'touch' '/tmp/file' '#' 'bash' '/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh' ",
|
||||
"run --name='cname' --user='nobody' -d --workdir='/yarn/local/cdir' --privileged --rm --device='/sys/fs/cgroup/device:/sys/fs/cgroup/device' --detach='true' --cgroup-parent='/sys/fs/cgroup/cpu/yarn/cid' --net='host' --cap-drop='ALL' --cap-add='SYS_CHROOT' --cap-add='MKNOD' --cap-add='SETFCAP' --cap-add='SETPCAP' --cap-add='FSETID' --cap-add='CHOWN' --cap-add='AUDIT_WRITE' --cap-add='SETGID' --cap-add='NET_RAW' --cap-add='FOWNER' --cap-add='SETUID' --cap-add='DAC_OVERRIDE' --cap-add='KILL' --cap-add='NET_BIND_SERVICE' -v '/sys/fs/cgroup:/sys/fs/cgroup:ro' -v '/yarn/local/cdir:/yarn/local/cdir' -v '/yarn/local/usercache/test/:/yarn/local/usercache/test/' 'ubuntu'\"'\"'' '||' 'touch' '/tmp/file' '#' 'bash' '/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh' ",
|
||||
"run --name='cname' --user='nobody' -d --workdir='/yarn/local/cdir' --privileged --rm --device='/sys/fs/cgroup/device:/sys/fs/cgroup/device' --detach='true' --cgroup-parent='/sys/fs/cgroup/cpu/yarn/cid' --net='host' --hostname='test.host.name' --cap-drop='ALL' --cap-add='SYS_CHROOT' --cap-add='MKNOD' --cap-add='SETFCAP' --cap-add='SETPCAP' --cap-add='FSETID' --cap-add='CHOWN' --cap-add='AUDIT_WRITE' --cap-add='SETGID' --cap-add='NET_RAW' --cap-add='FOWNER' --cap-add='SETUID' --cap-add='DAC_OVERRIDE' --cap-add='KILL' --cap-add='NET_BIND_SERVICE' -v '/sys/fs/cgroup:/sys/fs/cgroup:ro' -v '/yarn/local/cdir:/yarn/local/cdir' -v '/yarn/local/usercache/test/:/yarn/local/usercache/test/' 'ubuntu' 'bash' '/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh' ",
|
||||
"run --name='$CID' --user='nobody' -d --workdir='/yarn/local/cdir' --privileged --rm --device='/sys/fs/cgroup/device:/sys/fs/cgroup/device' --detach='true' --cgroup-parent='/sys/fs/cgroup/cpu/yarn/cid' --net='host' --hostname='test.host.name' --cap-drop='ALL' --cap-add='SYS_CHROOT' --cap-add='MKNOD' --cap-add='SETFCAP' --cap-add='SETPCAP' --cap-add='FSETID' --cap-add='CHOWN' --cap-add='AUDIT_WRITE' --cap-add='SETGID' --cap-add='NET_RAW' --cap-add='FOWNER' --cap-add='SETUID' --cap-add='DAC_OVERRIDE' --cap-add='KILL' --cap-add='NET_BIND_SERVICE' -v '/sys/fs/cgroup:/sys/fs/cgroup:ro' -v '/yarn/local/cdir:/yarn/local/cdir' -v '/yarn/local/usercache/test/:/yarn/local/usercache/test/' 'ubuntu' 'bash' '/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh' ",
|
||||
"run --name='cname' --user='nobody' -d --workdir='/yarn/local/cdir' --privileged --rm --device='/sys/fs/cgroup/device:/sys/fs/cgroup/device' --detach='true' --cgroup-parent='/sys/fs/cgroup/cpu/yarn/cid' --net='host' --hostname='test.host.name' --cap-drop='ALL' --cap-add='SYS_CHROOT' --cap-add='MKNOD' --cap-add='SETFCAP' --cap-add='SETPCAP' --cap-add='FSETID' --cap-add='CHOWN' --cap-add='AUDIT_WRITE' --cap-add='SETGID' --cap-add='NET_RAW' --cap-add='FOWNER' --cap-add='SETUID' --cap-add='DAC_OVERRIDE' --cap-add='KILL' --cap-add='NET_BIND_SERVICE' -v '/sys/fs/cgroup:/sys/fs/cgroup:ro' -v '/yarn/local/cdir:/yarn/local/cdir' -v '/yarn/local/usercache/test/:/yarn/local/usercache/test/' 'ubuntu' '||' 'touch' '/tmp/file' '#' 'bash' '/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh' ",
|
||||
"run --name='cname' --user='nobody' -d --workdir='/yarn/local/cdir' --privileged --rm --device='/sys/fs/cgroup/device:/sys/fs/cgroup/device' --detach='true' --cgroup-parent='/sys/fs/cgroup/cpu/yarn/cid' --net='host' --hostname='test.host.name' --cap-drop='ALL' --cap-add='SYS_CHROOT' --cap-add='MKNOD' --cap-add='SETFCAP' --cap-add='SETPCAP' --cap-add='FSETID' --cap-add='CHOWN' --cap-add='AUDIT_WRITE' --cap-add='SETGID' --cap-add='NET_RAW' --cap-add='FOWNER' --cap-add='SETUID' --cap-add='DAC_OVERRIDE' --cap-add='KILL' --cap-add='NET_BIND_SERVICE' -v '/sys/fs/cgroup:/sys/fs/cgroup:ro' -v '/yarn/local/cdir:/yarn/local/cdir' -v '/yarn/local/usercache/test/:/yarn/local/usercache/test/' 'ubuntu'\"'\"'' '||' 'touch' '/tmp/file' '#' 'bash' '/yarn/local/usercache/test/appcache/aid/cid/launch_container.sh' ",
|
||||
"run ''\"'\"''\"'\"''\"'\"''\"'\"''\"'\"''\"'\"''\"'\"''\"'\"'' ",
|
||||
};
|
||||
|
||||
|
|
|
@ -1899,4 +1899,94 @@ public class TestContainerManager extends BaseContainerManagerTest {
|
|||
Assert.assertTrue(response.getFailedRequests().get(cId).getMessage()
|
||||
.contains("Null resource URL for local resource"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testStartContainerFailureWithNullTypeLocalResource()
|
||||
throws Exception {
|
||||
containerManager.start();
|
||||
LocalResource rsrc_alpha =
|
||||
recordFactory.newRecordInstance(LocalResource.class);
|
||||
rsrc_alpha.setResource(URL.fromPath(new Path("./")));
|
||||
rsrc_alpha.setSize(-1);
|
||||
rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
|
||||
rsrc_alpha.setType(null);
|
||||
rsrc_alpha.setTimestamp(System.currentTimeMillis());
|
||||
Map<String, LocalResource> localResources =
|
||||
new HashMap<String, LocalResource>();
|
||||
localResources.put("null_type_resource", rsrc_alpha);
|
||||
ContainerLaunchContext containerLaunchContext =
|
||||
recordFactory.newRecordInstance(ContainerLaunchContext.class);
|
||||
ContainerLaunchContext spyContainerLaunchContext =
|
||||
Mockito.spy(containerLaunchContext);
|
||||
Mockito.when(spyContainerLaunchContext.getLocalResources())
|
||||
.thenReturn(localResources);
|
||||
|
||||
ContainerId cId = createContainerId(0);
|
||||
String user = "start_container_fail";
|
||||
Token containerToken =
|
||||
createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(),
|
||||
user, context.getContainerTokenSecretManager());
|
||||
StartContainerRequest request = StartContainerRequest
|
||||
.newInstance(spyContainerLaunchContext, containerToken);
|
||||
|
||||
// start containers
|
||||
List<StartContainerRequest> startRequest =
|
||||
new ArrayList<StartContainerRequest>();
|
||||
startRequest.add(request);
|
||||
StartContainersRequest requestList =
|
||||
StartContainersRequest.newInstance(startRequest);
|
||||
|
||||
StartContainersResponse response =
|
||||
containerManager.startContainers(requestList);
|
||||
Assert.assertTrue(response.getFailedRequests().size() == 1);
|
||||
Assert.assertTrue(response.getSuccessfullyStartedContainers().size() == 0);
|
||||
Assert.assertTrue(response.getFailedRequests().containsKey(cId));
|
||||
Assert.assertTrue(response.getFailedRequests().get(cId).getMessage()
|
||||
.contains("Null resource type for local resource"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testStartContainerFailureWithNullVisibilityLocalResource()
|
||||
throws Exception {
|
||||
containerManager.start();
|
||||
LocalResource rsrc_alpha =
|
||||
recordFactory.newRecordInstance(LocalResource.class);
|
||||
rsrc_alpha.setResource(URL.fromPath(new Path("./")));
|
||||
rsrc_alpha.setSize(-1);
|
||||
rsrc_alpha.setVisibility(null);
|
||||
rsrc_alpha.setType(LocalResourceType.FILE);
|
||||
rsrc_alpha.setTimestamp(System.currentTimeMillis());
|
||||
Map<String, LocalResource> localResources =
|
||||
new HashMap<String, LocalResource>();
|
||||
localResources.put("null_visibility_resource", rsrc_alpha);
|
||||
ContainerLaunchContext containerLaunchContext =
|
||||
recordFactory.newRecordInstance(ContainerLaunchContext.class);
|
||||
ContainerLaunchContext spyContainerLaunchContext =
|
||||
Mockito.spy(containerLaunchContext);
|
||||
Mockito.when(spyContainerLaunchContext.getLocalResources())
|
||||
.thenReturn(localResources);
|
||||
|
||||
ContainerId cId = createContainerId(0);
|
||||
String user = "start_container_fail";
|
||||
Token containerToken =
|
||||
createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(),
|
||||
user, context.getContainerTokenSecretManager());
|
||||
StartContainerRequest request = StartContainerRequest
|
||||
.newInstance(spyContainerLaunchContext, containerToken);
|
||||
|
||||
// start containers
|
||||
List<StartContainerRequest> startRequest =
|
||||
new ArrayList<StartContainerRequest>();
|
||||
startRequest.add(request);
|
||||
StartContainersRequest requestList =
|
||||
StartContainersRequest.newInstance(startRequest);
|
||||
|
||||
StartContainersResponse response =
|
||||
containerManager.startContainers(requestList);
|
||||
Assert.assertTrue(response.getFailedRequests().size() == 1);
|
||||
Assert.assertTrue(response.getSuccessfullyStartedContainers().size() == 0);
|
||||
Assert.assertTrue(response.getFailedRequests().containsKey(cId));
|
||||
Assert.assertTrue(response.getFailedRequests().get(cId).getMessage()
|
||||
.contains("Null resource visibility for local resource"));
|
||||
}
|
||||
}
|
||||
|
|
|
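For contrast with the negative cases above, a hedged sketch of a fully-specified LocalResource; per the new assertions, a null URL, type, or visibility is rejected as a failed start request. The path and timestamp here are placeholders, and the helper class name is invented.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.util.Records;

public class LocalResourceSketch {
  public static LocalResource wellFormed() {
    LocalResource rsrc = Records.newRecord(LocalResource.class);
    rsrc.setResource(URL.fromPath(new Path("/tmp/example.jar"))); // non-null URL
    rsrc.setType(LocalResourceType.FILE);                          // non-null type
    rsrc.setVisibility(LocalResourceVisibility.APPLICATION);       // non-null visibility
    rsrc.setSize(-1);
    rsrc.setTimestamp(System.currentTimeMillis());
    return rsrc;
  }
}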
@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFactory;
|
|||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileUtil;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerId;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
|
@ -69,6 +70,7 @@ public class TestDockerContainerRuntime {
|
|||
private PrivilegedOperationExecutor mockExecutor;
|
||||
private CGroupsHandler mockCGroupsHandler;
|
||||
private String containerId;
|
||||
private String defaultHostname;
|
||||
private Container container;
|
||||
private ContainerId cId;
|
||||
private ContainerLaunchContext context;
|
||||
|
@ -108,6 +110,7 @@ public class TestDockerContainerRuntime {
|
|||
.mock(PrivilegedOperationExecutor.class);
|
||||
mockCGroupsHandler = Mockito.mock(CGroupsHandler.class);
|
||||
containerId = "container_id";
|
||||
defaultHostname = RegistryPathUtils.encodeYarnID(containerId);
|
||||
container = mock(Container.class);
|
||||
cId = mock(ContainerId.class);
|
||||
context = mock(ContainerLaunchContext.class);
|
||||
|
@ -287,6 +290,7 @@ public class TestDockerContainerRuntime {
|
|||
.append("--user=%2$s -d ")
|
||||
.append("--workdir=%3$s ")
|
||||
.append("--net=host ")
|
||||
.append("--hostname=" + defaultHostname + " ")
|
||||
.append(getExpectedTestCapabilitiesArgumentString())
|
||||
.append(getExpectedCGroupsMountString())
|
||||
.append("-v %4$s:%4$s ")
|
||||
|
@ -365,7 +369,7 @@ public class TestDockerContainerRuntime {
|
|||
String disallowedNetwork = "sdn" + Integer.toString(randEngine.nextInt());
|
||||
|
||||
try {
|
||||
env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK",
|
||||
env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_NETWORK,
|
||||
disallowedNetwork);
|
||||
runtime.launchContainer(builder.build());
|
||||
Assert.fail("Network was expected to be disallowed: " +
|
||||
|
@ -378,8 +382,11 @@ public class TestDockerContainerRuntime {
|
|||
.DEFAULT_NM_DOCKER_ALLOWED_CONTAINER_NETWORKS.length;
|
||||
String allowedNetwork = YarnConfiguration
|
||||
.DEFAULT_NM_DOCKER_ALLOWED_CONTAINER_NETWORKS[randEngine.nextInt(size)];
|
||||
env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK",
|
||||
env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_NETWORK,
|
||||
allowedNetwork);
|
||||
String expectedHostname = "test.hostname";
|
||||
env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_HOSTNAME,
|
||||
expectedHostname);
|
||||
|
||||
//this should cause no failures.
|
||||
|
||||
|
@ -393,6 +400,7 @@ public class TestDockerContainerRuntime {
|
|||
new StringBuffer("run --name=%1$s ").append("--user=%2$s -d ")
|
||||
.append("--workdir=%3$s ")
|
||||
.append("--net=" + allowedNetwork + " ")
|
||||
.append("--hostname=" + expectedHostname + " ")
|
||||
.append(getExpectedTestCapabilitiesArgumentString())
|
||||
.append(getExpectedCGroupsMountString())
|
||||
.append("-v %4$s:%4$s ").append("-v %5$s:%5$s ")
|
||||
|
@ -448,6 +456,7 @@ public class TestDockerContainerRuntime {
|
|||
new StringBuffer("run --name=%1$s ").append("--user=%2$s -d ")
|
||||
.append("--workdir=%3$s ")
|
||||
.append("--net=" + customNetwork1 + " ")
|
||||
.append("--hostname=" + defaultHostname + " ")
|
||||
.append(getExpectedTestCapabilitiesArgumentString())
|
||||
.append(getExpectedCGroupsMountString())
|
||||
.append("-v %4$s:%4$s ").append("-v %5$s:%5$s ")
|
||||
|
@ -471,7 +480,7 @@ public class TestDockerContainerRuntime {
|
|||
//now set an explicit (non-default) allowedNetwork and ensure that it is
|
||||
// used.
|
||||
|
||||
env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK",
|
||||
env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_NETWORK,
|
||||
customNetwork2);
|
||||
runtime.launchContainer(builder.build());
|
||||
|
||||
|
@ -485,6 +494,7 @@ public class TestDockerContainerRuntime {
|
|||
new StringBuffer("run --name=%1$s ").append("--user=%2$s -d ")
|
||||
.append("--workdir=%3$s ")
|
||||
.append("--net=" + customNetwork2 + " ")
|
||||
.append("--hostname=" + defaultHostname + " ")
|
||||
.append(getExpectedTestCapabilitiesArgumentString())
|
||||
.append(getExpectedCGroupsMountString())
|
||||
.append("-v %4$s:%4$s ").append("-v %5$s:%5$s ")
|
||||
|
@ -505,7 +515,7 @@ public class TestDockerContainerRuntime {
|
|||
|
||||
//disallowed network should trigger a launch failure
|
||||
|
||||
env.put("YARN_CONTAINER_RUNTIME_DOCKER_CONTAINER_NETWORK",
|
||||
env.put(DockerLinuxContainerRuntime.ENV_DOCKER_CONTAINER_NETWORK,
|
||||
customNetwork3);
|
||||
try {
|
||||
runtime.launchContainer(builder.build());
|
||||
|
@ -524,8 +534,8 @@ public class TestDockerContainerRuntime {
|
|||
mockExecutor, mockCGroupsHandler);
|
||||
runtime.initialize(conf);
|
||||
|
||||
env.put("YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER",
|
||||
"invalid-value");
|
||||
env.put(DockerLinuxContainerRuntime
|
||||
.ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER, "invalid-value");
|
||||
runtime.launchContainer(builder.build());
|
||||
|
||||
PrivilegedOperation op = capturePrivilegedOperationAndVerifyArgs();
|
||||
|
@ -552,8 +562,8 @@ public class TestDockerContainerRuntime {
|
|||
mockExecutor, mockCGroupsHandler);
|
||||
runtime.initialize(conf);
|
||||
|
||||
env.put("YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER",
|
||||
"true");
|
||||
env.put(DockerLinuxContainerRuntime
|
||||
.ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER, "true");
|
||||
|
||||
try {
|
||||
runtime.launchContainer(builder.build());
|
||||
|
@ -575,8 +585,8 @@ public class TestDockerContainerRuntime {
|
|||
mockExecutor, mockCGroupsHandler);
|
||||
runtime.initialize(conf);
|
||||
|
||||
env.put("YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER",
|
||||
"true");
|
||||
env.put(DockerLinuxContainerRuntime
|
||||
.ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER, "true");
|
||||
//By default
|
||||
// yarn.nodemanager.runtime.linux.docker.privileged-containers.acl
|
||||
// is empty. So we expect this launch to fail.
|
||||
|
@ -605,8 +615,8 @@ public class TestDockerContainerRuntime {
|
|||
mockExecutor, mockCGroupsHandler);
|
||||
runtime.initialize(conf);
|
||||
|
||||
env.put("YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER",
|
||||
"true");
|
||||
env.put(DockerLinuxContainerRuntime
|
||||
.ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER, "true");
|
||||
|
||||
try {
|
||||
runtime.launchContainer(builder.build());
|
||||
|
@ -632,8 +642,8 @@ public class TestDockerContainerRuntime {
|
|||
mockExecutor, mockCGroupsHandler);
|
||||
runtime.initialize(conf);
|
||||
|
||||
env.put("YARN_CONTAINER_RUNTIME_DOCKER_RUN_PRIVILEGED_CONTAINER",
|
||||
"true");
|
||||
env.put(DockerLinuxContainerRuntime
|
||||
.ENV_DOCKER_CONTAINER_RUN_PRIVILEGED_CONTAINER, "true");
|
||||
|
||||
runtime.launchContainer(builder.build());
|
||||
PrivilegedOperation op = capturePrivilegedOperationAndVerifyArgs();
|
||||
|
@ -927,4 +937,24 @@ public class TestDockerContainerRuntime {
}
}
}

@Test
public void testDockerHostnamePattern() throws Exception {
String[] validNames = {"ab", "a.b.c.d", "a1-b.cd.ef", "0AB.", "C_D-"};

String[] invalidNames = {"a", "a#.b.c", "-a.b.c", "a@b.c", "a/b/c"};

for (String name : validNames) {
DockerLinuxContainerRuntime.validateHostname(name);
}

for (String name : invalidNames) {
try {
DockerLinuxContainerRuntime.validateHostname(name);
Assert.fail(name + " is an invalid hostname and should fail the regex");
} catch (ContainerExecutionException ce) {
continue;
}
}
}
}
@ -57,7 +57,7 @@ public class ActiveStandbyElectorBasedElectorService extends AbstractService
|
|||
new HAServiceProtocol.StateChangeRequestInfo(
|
||||
HAServiceProtocol.RequestSource.REQUEST_BY_ZKFC);
|
||||
|
||||
private RMContext rmContext;
|
||||
private ResourceManager rm;
|
||||
|
||||
private byte[] localActiveNodeInfo;
|
||||
private ActiveStandbyElector elector;
|
||||
|
@ -66,9 +66,9 @@ public class ActiveStandbyElectorBasedElectorService extends AbstractService
|
|||
@VisibleForTesting
|
||||
final Object zkDisconnectLock = new Object();
|
||||
|
||||
ActiveStandbyElectorBasedElectorService(RMContext rmContext) {
|
||||
ActiveStandbyElectorBasedElectorService(ResourceManager rm) {
|
||||
super(ActiveStandbyElectorBasedElectorService.class.getName());
|
||||
this.rmContext = rmContext;
|
||||
this.rm = rm;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -140,7 +140,7 @@ public class ActiveStandbyElectorBasedElectorService extends AbstractService
|
|||
cancelDisconnectTimer();
|
||||
|
||||
try {
|
||||
rmContext.getRMAdminService().transitionToActive(req);
|
||||
rm.getRMContext().getRMAdminService().transitionToActive(req);
|
||||
} catch (Exception e) {
|
||||
throw new ServiceFailedException("RM could not transition to Active", e);
|
||||
}
|
||||
|
@ -151,7 +151,7 @@ public class ActiveStandbyElectorBasedElectorService extends AbstractService
|
|||
cancelDisconnectTimer();
|
||||
|
||||
try {
|
||||
rmContext.getRMAdminService().transitionToStandby(req);
|
||||
rm.getRMContext().getRMAdminService().transitionToStandby(req);
|
||||
} catch (Exception e) {
|
||||
LOG.error("RM could not transition to Standby", e);
|
||||
}
|
||||
|
@ -205,7 +205,7 @@ public class ActiveStandbyElectorBasedElectorService extends AbstractService
|
|||
@SuppressWarnings(value = "unchecked")
|
||||
@Override
|
||||
public void notifyFatalError(String errorMessage) {
|
||||
rmContext.getDispatcher().getEventHandler().handle(
|
||||
rm.getRMContext().getDispatcher().getEventHandler().handle(
|
||||
new RMFatalEvent(RMFatalEventType.EMBEDDED_ELECTOR_FAILED,
|
||||
errorMessage));
|
||||
}
|
||||
|
|
|
@ -102,7 +102,6 @@ public class AdminService extends CompositeService implements
|
|||
|
||||
private static final Log LOG = LogFactory.getLog(AdminService.class);
|
||||
|
||||
private final RMContext rmContext;
|
||||
private final ResourceManager rm;
|
||||
private String rmId;
|
||||
|
||||
|
@ -123,16 +122,16 @@ public class AdminService extends CompositeService implements
|
|||
@VisibleForTesting
|
||||
boolean isCentralizedNodeLabelConfiguration = true;
|
||||
|
||||
public AdminService(ResourceManager rm, RMContext rmContext) {
|
||||
public AdminService(ResourceManager rm) {
|
||||
super(AdminService.class.getName());
|
||||
this.rm = rm;
|
||||
this.rmContext = rmContext;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void serviceInit(Configuration conf) throws Exception {
|
||||
autoFailoverEnabled =
|
||||
rmContext.isHAEnabled() && HAUtil.isAutomaticFailoverEnabled(conf);
|
||||
rm.getRMContext().isHAEnabled()
|
||||
&& HAUtil.isAutomaticFailoverEnabled(conf);
|
||||
|
||||
masterServiceBindAddress = conf.getSocketAddr(
|
||||
YarnConfiguration.RM_BIND_HOST,
|
||||
|
@ -189,7 +188,7 @@ public class AdminService extends CompositeService implements
|
|||
RMPolicyProvider.getInstance());
|
||||
}
|
||||
|
||||
if (rmContext.isHAEnabled()) {
|
||||
if (rm.getRMContext().isHAEnabled()) {
|
||||
RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
|
||||
ProtobufRpcEngine.class);
|
||||
|
||||
|
@ -265,7 +264,7 @@ public class AdminService extends CompositeService implements
|
|||
}
|
||||
|
||||
private synchronized boolean isRMActive() {
|
||||
return HAServiceState.ACTIVE == rmContext.getHAServiceState();
|
||||
return HAServiceState.ACTIVE == rm.getRMContext().getHAServiceState();
|
||||
}
|
||||
|
||||
private void throwStandbyException() throws StandbyException {
|
||||
|
@ -304,7 +303,7 @@ public class AdminService extends CompositeService implements
|
|||
// call all refresh*s for active RM to get the updated configurations.
|
||||
refreshAll();
|
||||
} catch (Exception e) {
|
||||
rmContext
|
||||
rm.getRMContext()
|
||||
.getDispatcher()
|
||||
.getEventHandler()
|
||||
.handle(
|
||||
|
@ -363,7 +362,7 @@ public class AdminService extends CompositeService implements
|
|||
@Override
|
||||
public synchronized HAServiceStatus getServiceStatus() throws IOException {
|
||||
checkAccess("getServiceState");
|
||||
HAServiceState haState = rmContext.getHAServiceState();
|
||||
HAServiceState haState = rm.getRMContext().getHAServiceState();
|
||||
HAServiceStatus ret = new HAServiceStatus(haState);
|
||||
if (isRMActive() || haState == HAServiceProtocol.HAServiceState.STANDBY) {
|
||||
ret.setReadyToBecomeActive();
|
||||
|
@ -395,11 +394,12 @@ public class AdminService extends CompositeService implements
|
|||
}
|
||||
|
||||
private void refreshQueues() throws IOException, YarnException {
|
||||
rmContext.getScheduler().reinitialize(getConfig(), this.rmContext);
|
||||
rm.getRMContext().getScheduler().reinitialize(getConfig(),
|
||||
this.rm.getRMContext());
|
||||
// refresh the reservation system
|
||||
ReservationSystem rSystem = rmContext.getReservationSystem();
|
||||
ReservationSystem rSystem = rm.getRMContext().getReservationSystem();
|
||||
if (rSystem != null) {
|
||||
rSystem.reinitialize(getConfig(), rmContext);
|
||||
rSystem.reinitialize(getConfig(), rm.getRMContext());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -418,14 +418,14 @@ public class AdminService extends CompositeService implements
|
|||
YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
|
||||
switch (request.getDecommissionType()) {
|
||||
case NORMAL:
|
||||
rmContext.getNodesListManager().refreshNodes(conf);
|
||||
rm.getRMContext().getNodesListManager().refreshNodes(conf);
|
||||
break;
|
||||
case GRACEFUL:
|
||||
rmContext.getNodesListManager().refreshNodesGracefully(
|
||||
rm.getRMContext().getNodesListManager().refreshNodesGracefully(
|
||||
conf, request.getDecommissionTimeout());
|
||||
break;
|
||||
case FORCEFUL:
|
||||
rmContext.getNodesListManager().refreshNodesForcefully();
|
||||
rm.getRMContext().getNodesListManager().refreshNodesForcefully();
|
||||
break;
|
||||
}
|
||||
RMAuditLogger.logSuccess(user.getShortUserName(), operation,
|
||||
|
@ -440,7 +440,7 @@ public class AdminService extends CompositeService implements
|
|||
Configuration conf =
|
||||
getConfiguration(new Configuration(false),
|
||||
YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
|
||||
rmContext.getNodesListManager().refreshNodes(conf);
|
||||
rm.getRMContext().getNodesListManager().refreshNodes(conf);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -559,10 +559,11 @@ public class AdminService extends CompositeService implements
|
|||
Configuration conf =
|
||||
getConfiguration(new Configuration(false),
|
||||
YarnConfiguration.HADOOP_POLICY_CONFIGURATION_FILE);
|
||||
rmContext.getClientRMService().refreshServiceAcls(conf, policyProvider);
|
||||
rmContext.getApplicationMasterService().refreshServiceAcls(
|
||||
rm.getRMContext().getClientRMService().refreshServiceAcls(conf,
|
||||
policyProvider);
|
||||
rm.getRMContext().getApplicationMasterService().refreshServiceAcls(
|
||||
conf, policyProvider);
|
||||
rmContext.getResourceTrackerService().refreshServiceAcls(
|
||||
rm.getRMContext().getResourceTrackerService().refreshServiceAcls(
|
||||
conf, policyProvider);
|
||||
}
|
||||
|
||||
|
@ -601,7 +602,7 @@ public class AdminService extends CompositeService implements
|
|||
// if any invalid nodes, throw exception instead of partially updating
|
||||
// valid nodes.
|
||||
for (NodeId nodeId : nodeIds) {
|
||||
RMNode node = this.rmContext.getRMNodes().get(nodeId);
|
||||
RMNode node = this.rm.getRMContext().getRMNodes().get(nodeId);
|
||||
if (node == null) {
|
||||
LOG.error("Resource update get failed on all nodes due to change "
|
||||
+ "resource on an unrecognized node: " + nodeId);
|
||||
|
@ -619,14 +620,14 @@ public class AdminService extends CompositeService implements
|
|||
for (Map.Entry<NodeId, ResourceOption> entry : nodeResourceMap.entrySet()) {
|
||||
ResourceOption newResourceOption = entry.getValue();
|
||||
NodeId nodeId = entry.getKey();
|
||||
RMNode node = this.rmContext.getRMNodes().get(nodeId);
|
||||
RMNode node = this.rm.getRMContext().getRMNodes().get(nodeId);
|
||||
|
||||
if (node == null) {
|
||||
LOG.warn("Resource update get failed on an unrecognized node: " + nodeId);
|
||||
allSuccess = false;
|
||||
} else {
|
||||
// update resource to RMNode
|
||||
this.rmContext.getDispatcher().getEventHandler()
|
||||
this.rm.getRMContext().getDispatcher().getEventHandler()
|
||||
.handle(new RMNodeResourceUpdateEvent(nodeId, newResourceOption));
|
||||
LOG.info("Update resource on node(" + node.getNodeID()
|
||||
+ ") with resource(" + newResourceOption.toString() + ")");
|
||||
|
@ -661,7 +662,8 @@ public class AdminService extends CompositeService implements
|
|||
DynamicResourceConfiguration newConf;
|
||||
|
||||
InputStream drInputStream =
|
||||
this.rmContext.getConfigurationProvider().getConfigurationInputStream(
|
||||
this.rm.getRMContext().getConfigurationProvider()
|
||||
.getConfigurationInputStream(
|
||||
configuration, YarnConfiguration.DR_CONFIGURATION_FILE);
|
||||
|
||||
if (drInputStream != null) {
|
||||
|
@ -679,7 +681,7 @@ public class AdminService extends CompositeService implements
|
|||
updateNodeResource(updateRequest);
|
||||
}
|
||||
// refresh dynamic resource in ResourceTrackerService
|
||||
this.rmContext.getResourceTrackerService().
|
||||
this.rm.getRMContext().getResourceTrackerService().
|
||||
updateDynamicResourceConfiguration(newConf);
|
||||
RMAuditLogger.logSuccess(user.getShortUserName(), operation,
|
||||
"AdminService");
|
||||
|
@ -692,7 +694,8 @@ public class AdminService extends CompositeService implements
|
|||
private synchronized Configuration getConfiguration(Configuration conf,
|
||||
String... confFileNames) throws YarnException, IOException {
|
||||
for (String confFileName : confFileNames) {
|
||||
InputStream confFileInputStream = this.rmContext.getConfigurationProvider()
|
||||
InputStream confFileInputStream =
|
||||
this.rm.getRMContext().getConfigurationProvider()
|
||||
.getConfigurationInputStream(conf, confFileName);
|
||||
if (confFileInputStream != null) {
|
||||
conf.addResource(confFileInputStream);
|
||||
|
@ -746,7 +749,7 @@ public class AdminService extends CompositeService implements
|
|||
AddToClusterNodeLabelsResponse response =
|
||||
recordFactory.newRecordInstance(AddToClusterNodeLabelsResponse.class);
|
||||
try {
|
||||
rmContext.getNodeLabelManager()
|
||||
rm.getRMContext().getNodeLabelManager()
|
||||
.addToCluserNodeLabels(request.getNodeLabels());
|
||||
RMAuditLogger.logSuccess(user.getShortUserName(), operation,
|
||||
"AdminService");
|
||||
|
@ -769,7 +772,8 @@ public class AdminService extends CompositeService implements
|
|||
RemoveFromClusterNodeLabelsResponse response =
|
||||
recordFactory.newRecordInstance(RemoveFromClusterNodeLabelsResponse.class);
|
||||
try {
|
||||
rmContext.getNodeLabelManager().removeFromClusterNodeLabels(request.getNodeLabels());
|
||||
rm.getRMContext().getNodeLabelManager()
|
||||
.removeFromClusterNodeLabels(request.getNodeLabels());
|
||||
RMAuditLogger
|
||||
.logSuccess(user.getShortUserName(), operation, "AdminService");
|
||||
return response;
|
||||
|
@ -805,19 +809,20 @@ public class AdminService extends CompositeService implements
|
|||
boolean isKnown = false;
|
||||
// both active and inactive nodes are recognized as known nodes
|
||||
if (requestedNode.getPort() != 0) {
|
||||
if (rmContext.getRMNodes().containsKey(requestedNode)
|
||||
|| rmContext.getInactiveRMNodes().containsKey(requestedNode)) {
|
||||
if (rm.getRMContext().getRMNodes().containsKey(requestedNode) || rm
|
||||
.getRMContext().getInactiveRMNodes().containsKey(requestedNode)) {
|
||||
isKnown = true;
|
||||
}
|
||||
} else {
|
||||
for (NodeId knownNode : rmContext.getRMNodes().keySet()) {
|
||||
for (NodeId knownNode : rm.getRMContext().getRMNodes().keySet()) {
|
||||
if (knownNode.getHost().equals(requestedNode.getHost())) {
|
||||
isKnown = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!isKnown) {
|
||||
for (NodeId knownNode : rmContext.getInactiveRMNodes().keySet()) {
|
||||
for (NodeId knownNode : rm.getRMContext().getInactiveRMNodes()
|
||||
.keySet()) {
|
||||
if (knownNode.getHost().equals(requestedNode.getHost())) {
|
||||
isKnown = true;
|
||||
break;
|
||||
|
@ -841,7 +846,7 @@ public class AdminService extends CompositeService implements
|
|||
}
|
||||
}
|
||||
try {
|
||||
rmContext.getNodeLabelManager().replaceLabelsOnNode(
|
||||
rm.getRMContext().getNodeLabelManager().replaceLabelsOnNode(
|
||||
request.getNodeToLabels());
|
||||
RMAuditLogger
|
||||
.logSuccess(user.getShortUserName(), operation, "AdminService");
|
||||
|
@ -878,7 +883,7 @@ public class AdminService extends CompositeService implements
|
|||
|
||||
checkRMStatus(user.getShortUserName(), operation, msg);
|
||||
|
||||
Set<NodeId> decommissioningNodes = rmContext.getNodesListManager()
|
||||
Set<NodeId> decommissioningNodes = rm.getRMContext().getNodesListManager()
|
||||
.checkForDecommissioningNodes();
|
||||
RMAuditLogger.logSuccess(user.getShortUserName(), operation,
|
||||
"AdminService");
|
||||
|
@ -914,6 +919,6 @@ public class AdminService extends CompositeService implements
|
|||
getConfiguration(new Configuration(false),
|
||||
YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);
|
||||
|
||||
rmContext.getScheduler().setClusterMaxPriority(conf);
|
||||
rm.getRMContext().getScheduler().setClusterMaxPriority(conf);
|
||||
}
|
||||
}
|
||||
|
|
|
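The AdminService and elector hunks above follow one pattern: hold the long-lived ResourceManager and fetch the current RMContext on each call instead of caching a context reference at construction time. A minimal sketch of that ownership pattern, with invented stand-in names rather than the real YARN classes:

public class ContextOwnershipSketch {

  interface Context { boolean isHAEnabled(); }

  static class Owner {                 // stands in for ResourceManager
    private volatile Context current = () -> true;
    Context getContext() { return current; }
    void replaceContext(Context next) { current = next; } // e.g. on a failover transition
  }

  static class DependentService {      // stands in for AdminService / elector services
    private final Owner owner;
    DependentService(Owner owner) { this.owner = owner; }

    boolean haEnabled() {
      // Always goes through the owner, so a recreated context is picked up.
      return owner.getContext().isHAEnabled();
    }
  }

  public static void main(String[] args) {
    Owner rm = new Owner();
    DependentService admin = new DependentService(rm);
    System.out.println(admin.haEnabled()); // true
    rm.replaceContext(() -> false);        // context swapped, service still reads current state
    System.out.println(admin.haEnabled()); // false
  }
}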
@ -45,14 +45,12 @@ public class CuratorBasedElectorService extends AbstractService
|
|||
LogFactory.getLog(CuratorBasedElectorService.class);
|
||||
private LeaderLatch leaderLatch;
|
||||
private CuratorFramework curator;
|
||||
private RMContext rmContext;
|
||||
private String latchPath;
|
||||
private String rmId;
|
||||
private ResourceManager rm;
|
||||
|
||||
public CuratorBasedElectorService(RMContext rmContext, ResourceManager rm) {
|
||||
public CuratorBasedElectorService(ResourceManager rm) {
|
||||
super(CuratorBasedElectorService.class.getName());
|
||||
this.rmContext = rmContext;
|
||||
this.rm = rm;
|
||||
}
|
||||
|
||||
|
@ -102,7 +100,8 @@ public class CuratorBasedElectorService extends AbstractService
|
|||
public void isLeader() {
|
||||
LOG.info(rmId + "is elected leader, transitioning to active");
|
||||
try {
|
||||
rmContext.getRMAdminService().transitionToActive(
|
||||
rm.getRMContext().getRMAdminService()
|
||||
.transitionToActive(
|
||||
new HAServiceProtocol.StateChangeRequestInfo(
|
||||
HAServiceProtocol.RequestSource.REQUEST_BY_ZKFC));
|
||||
} catch (Exception e) {
|
||||
|
@ -123,7 +122,8 @@ public class CuratorBasedElectorService extends AbstractService
|
|||
public void notLeader() {
|
||||
LOG.info(rmId + " relinquish leadership");
|
||||
try {
|
||||
rmContext.getRMAdminService().transitionToStandby(
|
||||
rm.getRMContext().getRMAdminService()
|
||||
.transitionToStandby(
|
||||
new HAServiceProtocol.StateChangeRequestInfo(
|
||||
HAServiceProtocol.RequestSource.REQUEST_BY_ZKFC));
|
||||
} catch (Exception e) {
|
||||
|
|
|
@ -42,20 +42,20 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.monitor.RMAppLifetime
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed.QueueLimitCalculator;
import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer;
import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
import org.apache.hadoop.yarn.server.resourcemanager.security.RMDelegationTokenSecretManager;
import org.apache.hadoop.yarn.server.resourcemanager.timelineservice.RMTimelineCollectorManager;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.SystemClock;

/**
* The RMActiveServiceContext is the class that maintains all the
* RMActiveService contexts.This is expected to be used only by ResourceManager
* and RMContext.
* The RMActiveServiceContext is the class that maintains <b>Active</b> service
* context. Services that need to run only on the Active RM. This is expected to
* be used only by RMContext.
*/
@Private
@Unstable
@ -94,7 +94,6 @@ public class RMActiveServiceContext {
|
|||
private NodesListManager nodesListManager;
|
||||
private ResourceTrackerService resourceTrackerService;
|
||||
private ApplicationMasterService applicationMasterService;
|
||||
private RMTimelineCollectorManager timelineCollectorManager;
|
||||
|
||||
private RMNodeLabelsManager nodeLabelManager;
|
||||
private RMDelegatedNodeLabelsUpdater rmDelegatedNodeLabelsUpdater;
|
||||
|
@ -107,6 +106,7 @@ public class RMActiveServiceContext {
|
|||
private PlacementManager queuePlacementManager = null;
|
||||
|
||||
private RMAppLifetimeMonitor rmAppLifetimeMonitor;
|
||||
private QueueLimitCalculator queueLimitCalculator;
|
||||
|
||||
public RMActiveServiceContext() {
|
||||
queuePlacementManager = new PlacementManager();
|
||||
|
@ -372,19 +372,6 @@ public class RMActiveServiceContext {
|
|||
return this.isWorkPreservingRecoveryEnabled;
|
||||
}
|
||||
|
||||
@Private
|
||||
@Unstable
|
||||
public RMTimelineCollectorManager getRMTimelineCollectorManager() {
|
||||
return timelineCollectorManager;
|
||||
}
|
||||
|
||||
@Private
|
||||
@Unstable
|
||||
public void setRMTimelineCollectorManager(
|
||||
RMTimelineCollectorManager collectorManager) {
|
||||
this.timelineCollectorManager = collectorManager;
|
||||
}
|
||||
|
||||
@Private
|
||||
@Unstable
|
||||
public long getEpoch() {
|
||||
|
@ -483,4 +470,17 @@ public class RMActiveServiceContext {
|
|||
public RMAppLifetimeMonitor getRMAppLifetimeMonitor() {
|
||||
return this.rmAppLifetimeMonitor;
|
||||
}
|
||||
|
||||
@Private
|
||||
@Unstable
|
||||
public QueueLimitCalculator getNodeManagerQueueLimitCalculator() {
|
||||
return this.queueLimitCalculator;
|
||||
}
|
||||
|
||||
@Private
|
||||
@Unstable
|
||||
public void setContainerQueueLimitCalculator(
|
||||
QueueLimitCalculator limitCalculator) {
|
||||
this.queueLimitCalculator = limitCalculator;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -24,7 +24,6 @@ import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.yarn.LocalConfigurationProvider;
import org.apache.hadoop.yarn.api.records.ApplicationId;

@ -57,37 +56,39 @@ import org.apache.hadoop.yarn.util.Clock;

import com.google.common.annotations.VisibleForTesting;

/**
* RMContextImpl class holds two services context.
* <ul>
* <li>serviceContext : These services called as <b>Always On</b> services.
* Services that need to run always irrespective of the HA state of the RM.</li>
* <li>activeServiceCotext : Active services context. Services that need to run
* only on the Active RM.</li>
* </ul>
* <p>
* <b>Note:</b> If any new service to be added to context, add it to a right
* context as per above description.
*/
public class RMContextImpl implements RMContext {

private Dispatcher rmDispatcher;

private boolean isHAEnabled;

private HAServiceState haServiceState =
HAServiceProtocol.HAServiceState.INITIALIZING;

private AdminService adminService;

private ConfigurationProvider configurationProvider;
/**
* RM service contexts which runs through out RM life span. These are created
* once during start of RM.
*/
private RMServiceContext serviceContext;

/**
* RM Active service context. This will be recreated for every transition from
* ACTIVE->STANDBY.
*/
private RMActiveServiceContext activeServiceContext;

private Configuration yarnConfiguration;

private RMApplicationHistoryWriter rmApplicationHistoryWriter;
private SystemMetricsPublisher systemMetricsPublisher;
private EmbeddedElector elector;

private QueueLimitCalculator queueLimitCalculator;

private final Object haServiceStateLock = new Object();

private ResourceManager resourceManager;
/**
* Default constructor. To be used in conjunction with setter methods for
* individual fields.
*/
public RMContextImpl() {
this.serviceContext = new RMServiceContext();
this.activeServiceContext = new RMActiveServiceContext();
}

@VisibleForTesting
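A minimal sketch (invented names, not the real YARN classes) of the two-context split the new RMContextImpl Javadoc describes: always-on state lives for the JVM lifetime, active-only state is rebuilt on every ACTIVE to STANDBY transition, and the facade only delegates.

public class TwoContextSketch {

  static class ServiceContext {        // always-on services, created once per JVM
    String haState = "INITIALIZING";
  }

  static class ActiveServiceContext {  // active-only services, rebuilt on each transition
    final long epoch;
    ActiveServiceContext(long epoch) { this.epoch = epoch; }
  }

  static class ContextFacade {         // stands in for RMContextImpl
    private final ServiceContext serviceContext = new ServiceContext();
    private ActiveServiceContext active = new ActiveServiceContext(0);

    String getHAServiceState() { return serviceContext.haState; }
    long getEpoch() { return active.epoch; }

    void transitionToStandbyAndBack() {
      // Always-on context survives; active context is replaced wholesale.
      active = new ActiveServiceContext(active.epoch + 1);
    }
  }

  public static void main(String[] args) {
    ContextFacade ctx = new ContextFacade();
    ctx.transitionToStandbyAndBack();
    System.out.println(ctx.getEpoch());          // 1: active-only state was rebuilt
    System.out.println(ctx.getHAServiceState()); // INITIALIZING: always-on state kept
  }
}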
@ -138,19 +139,154 @@ public class RMContextImpl implements RMContext {
|
|||
clientToAMTokenSecretManager, null);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Dispatcher getDispatcher() {
|
||||
return this.rmDispatcher;
|
||||
/**
|
||||
* RM service contexts which runs through out JVM life span. These are created
|
||||
* once during start of RM.
|
||||
* @return serviceContext of RM
|
||||
*/
|
||||
@Private
|
||||
@Unstable
|
||||
public RMServiceContext getServiceContext() {
|
||||
return serviceContext;
|
||||
}
|
||||
|
||||
/**
|
||||
* <b>Note:</b> setting service context clears all services embedded with it.
|
||||
* @param context rm service context
|
||||
*/
|
||||
@Private
|
||||
@Unstable
|
||||
public void setServiceContext(RMServiceContext context) {
|
||||
this.serviceContext = context;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setLeaderElectorService(EmbeddedElector elector) {
|
||||
this.elector = elector;
|
||||
public ResourceManager getResourceManager() {
|
||||
return serviceContext.getResourceManager();
|
||||
}
|
||||
|
||||
public void setResourceManager(ResourceManager rm) {
|
||||
serviceContext.setResourceManager(rm);
|
||||
}
|
||||
|
||||
@Override
|
||||
public EmbeddedElector getLeaderElectorService() {
|
||||
return this.elector;
|
||||
return serviceContext.getLeaderElectorService();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setLeaderElectorService(EmbeddedElector elector) {
|
||||
serviceContext.setLeaderElectorService(elector);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Dispatcher getDispatcher() {
|
||||
return serviceContext.getDispatcher();
|
||||
}
|
||||
|
||||
void setDispatcher(Dispatcher dispatcher) {
|
||||
serviceContext.setDispatcher(dispatcher);
|
||||
}
|
||||
|
||||
@Override
|
||||
public AdminService getRMAdminService() {
|
||||
return serviceContext.getRMAdminService();
|
||||
}
|
||||
|
||||
void setRMAdminService(AdminService adminService) {
|
||||
serviceContext.setRMAdminService(adminService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isHAEnabled() {
|
||||
return serviceContext.isHAEnabled();
|
||||
}
|
||||
|
||||
void setHAEnabled(boolean isHAEnabled) {
|
||||
serviceContext.setHAEnabled(isHAEnabled);
|
||||
}
|
||||
|
||||
@Override
|
||||
public HAServiceState getHAServiceState() {
|
||||
return serviceContext.getHAServiceState();
|
||||
}
|
||||
|
||||
void setHAServiceState(HAServiceState serviceState) {
|
||||
serviceContext.setHAServiceState(serviceState);
|
||||
}
|
||||
|
||||
@Override
|
||||
public RMApplicationHistoryWriter getRMApplicationHistoryWriter() {
|
||||
return serviceContext.getRMApplicationHistoryWriter();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setRMApplicationHistoryWriter(
|
||||
RMApplicationHistoryWriter rmApplicationHistoryWriter) {
|
||||
serviceContext.setRMApplicationHistoryWriter(rmApplicationHistoryWriter);
|
||||
}
|
||||
|
||||
@Override
|
||||
public SystemMetricsPublisher getSystemMetricsPublisher() {
|
||||
return serviceContext.getSystemMetricsPublisher();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setSystemMetricsPublisher(
|
||||
SystemMetricsPublisher metricsPublisher) {
|
||||
serviceContext.setSystemMetricsPublisher(metricsPublisher);
|
||||
}
|
||||
|
||||
@Override
|
||||
public RMTimelineCollectorManager getRMTimelineCollectorManager() {
|
||||
return serviceContext.getRMTimelineCollectorManager();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setRMTimelineCollectorManager(
|
||||
RMTimelineCollectorManager timelineCollectorManager) {
|
||||
serviceContext.setRMTimelineCollectorManager(timelineCollectorManager);
|
||||
}
|
||||
|
||||
@Override
|
||||
public ConfigurationProvider getConfigurationProvider() {
|
||||
return serviceContext.getConfigurationProvider();
|
||||
}
|
||||
|
||||
public void setConfigurationProvider(
|
||||
ConfigurationProvider configurationProvider) {
|
||||
serviceContext.setConfigurationProvider(configurationProvider);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Configuration getYarnConfiguration() {
|
||||
return serviceContext.getYarnConfiguration();
|
||||
}
|
||||
|
||||
public void setYarnConfiguration(Configuration yarnConfiguration) {
|
||||
serviceContext.setYarnConfiguration(yarnConfiguration);
|
||||
}
|
||||
|
||||
public String getHAZookeeperConnectionState() {
|
||||
return serviceContext.getHAZookeeperConnectionState();
|
||||
}
|
||||
|
||||
// ==========================================================================
|
||||
/**
|
||||
* RM Active service context. This will be recreated for every transition from
|
||||
* ACTIVE to STANDBY.
|
||||
* @return activeServiceContext of active services
|
||||
*/
|
||||
@Private
|
||||
@Unstable
|
||||
public RMActiveServiceContext getActiveServiceContext() {
|
||||
return activeServiceContext;
|
||||
}
|
||||
|
||||
@Private
|
||||
@Unstable
|
||||
void setActiveServiceContext(RMActiveServiceContext activeServiceContext) {
|
||||
this.activeServiceContext = activeServiceContext;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -228,11 +364,6 @@ public class RMContextImpl implements RMContext {
|
|||
return activeServiceContext.getClientToAMTokenSecretManager();
|
||||
}
|
||||
|
||||
@Override
|
||||
public AdminService getRMAdminService() {
|
||||
return this.adminService;
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
public void setStateStore(RMStateStore store) {
|
||||
activeServiceContext.setStateStore(store);
|
||||
|
@ -253,24 +384,6 @@ public class RMContextImpl implements RMContext {
|
|||
return activeServiceContext.getResourceTrackerService();
|
||||
}
|
||||
|
||||
void setHAEnabled(boolean isHAEnabled) {
|
||||
this.isHAEnabled = isHAEnabled;
|
||||
}
|
||||
|
||||
void setHAServiceState(HAServiceState serviceState) {
|
||||
synchronized (haServiceStateLock) {
|
||||
this.haServiceState = serviceState;
|
||||
}
|
||||
}
|
||||
|
||||
void setDispatcher(Dispatcher dispatcher) {
|
||||
this.rmDispatcher = dispatcher;
|
||||
}
|
||||
|
||||
void setRMAdminService(AdminService adminService) {
|
||||
this.adminService = adminService;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setClientRMService(ClientRMService clientRMService) {
|
||||
activeServiceContext.setClientRMService(clientRMService);
|
||||
|
@ -348,18 +461,6 @@ public class RMContextImpl implements RMContext {
|
|||
activeServiceContext.setResourceTrackerService(resourceTrackerService);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isHAEnabled() {
|
||||
return isHAEnabled;
|
||||
}
|
||||
|
||||
@Override
|
||||
public HAServiceState getHAServiceState() {
|
||||
synchronized (haServiceStateLock) {
|
||||
return haServiceState;
|
||||
}
|
||||
}
|
||||
|
||||
public void setWorkPreservingRecoveryEnabled(boolean enabled) {
|
||||
activeServiceContext.setWorkPreservingRecoveryEnabled(enabled);
|
||||
}
|
||||
|
@ -369,50 +470,6 @@ public class RMContextImpl implements RMContext {
|
|||
return activeServiceContext.isWorkPreservingRecoveryEnabled();
|
||||
}
|
||||
|
||||
@Override
|
||||
public RMApplicationHistoryWriter getRMApplicationHistoryWriter() {
|
||||
return this.rmApplicationHistoryWriter;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setRMTimelineCollectorManager(
|
||||
RMTimelineCollectorManager timelineCollectorManager) {
|
||||
activeServiceContext.setRMTimelineCollectorManager(
|
||||
timelineCollectorManager);
|
||||
}
|
||||
|
||||
@Override
|
||||
public RMTimelineCollectorManager getRMTimelineCollectorManager() {
|
||||
return activeServiceContext.getRMTimelineCollectorManager();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setSystemMetricsPublisher(
|
||||
SystemMetricsPublisher metricsPublisher) {
|
||||
this.systemMetricsPublisher = metricsPublisher;
|
||||
}
|
||||
|
||||
@Override
|
||||
public SystemMetricsPublisher getSystemMetricsPublisher() {
|
||||
return this.systemMetricsPublisher;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setRMApplicationHistoryWriter(
|
||||
RMApplicationHistoryWriter rmApplicationHistoryWriter) {
|
||||
this.rmApplicationHistoryWriter = rmApplicationHistoryWriter;
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public ConfigurationProvider getConfigurationProvider() {
|
||||
return this.configurationProvider;
|
||||
}
|
||||
|
||||
public void setConfigurationProvider(
|
||||
ConfigurationProvider configurationProvider) {
|
||||
this.configurationProvider = configurationProvider;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getEpoch() {
|
||||
|
@ -463,27 +520,6 @@ public class RMContextImpl implements RMContext {
|
|||
return activeServiceContext.getSystemCredentialsForApps();
|
||||
}
|
||||
|
||||
@Private
|
||||
@Unstable
|
||||
public RMActiveServiceContext getActiveServiceContext() {
|
||||
return activeServiceContext;
|
||||
}
|
||||
|
||||
@Private
|
||||
@Unstable
|
||||
void setActiveServiceContext(RMActiveServiceContext activeServiceContext) {
|
||||
this.activeServiceContext = activeServiceContext;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Configuration getYarnConfiguration() {
|
||||
return this.yarnConfiguration;
|
||||
}
|
||||
|
||||
public void setYarnConfiguration(Configuration yarnConfiguration) {
|
||||
this.yarnConfiguration=yarnConfiguration;
|
||||
}
|
||||
|
||||
@Override
|
||||
public PlacementManager getQueuePlacementManager() {
|
||||
return this.activeServiceContext.getQueuePlacementManager();
|
||||
|
@ -496,12 +532,12 @@ public class RMContextImpl implements RMContext {
|
|||
|
||||
@Override
|
||||
public QueueLimitCalculator getNodeManagerQueueLimitCalculator() {
|
||||
return this.queueLimitCalculator;
|
||||
return activeServiceContext.getNodeManagerQueueLimitCalculator();
|
||||
}
|
||||
|
||||
public void setContainerQueueLimitCalculator(
|
||||
QueueLimitCalculator limitCalculator) {
|
||||
this.queueLimitCalculator = limitCalculator;
|
||||
activeServiceContext.setContainerQueueLimitCalculator(limitCalculator);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -515,21 +551,5 @@ public class RMContextImpl implements RMContext {
|
|||
return this.activeServiceContext.getRMAppLifetimeMonitor();
|
||||
}
|
||||
|
||||
public String getHAZookeeperConnectionState() {
|
||||
if (elector == null) {
|
||||
return "Could not find leader elector. Verify both HA and automatic " +
|
||||
"failover are enabled.";
|
||||
} else {
|
||||
return elector.getZookeeperConnectionState();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public ResourceManager getResourceManager() {
|
||||
return resourceManager;
|
||||
}
|
||||
|
||||
public void setResourceManager(ResourceManager rm) {
|
||||
this.resourceManager = rm;
|
||||
}
|
||||
// Note: Read java doc before adding any services over here.
|
||||
}

@@ -0,0 +1,162 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.resourcemanager;

import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.yarn.conf.ConfigurationProvider;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
import org.apache.hadoop.yarn.server.resourcemanager.timelineservice.RMTimelineCollectorManager;

/**
 * RMServiceContext class maintains "Always On" services. Services that need to
 * run always irrespective of the HA state of the RM. This is created during
 * initialization of RMContextImpl.
 * <p>
 * <b>Note:</b> If any services to be added in this class, make sure service
 * will be running always irrespective of the HA state of the RM
 */
@Private
@Unstable
public class RMServiceContext {

  private Dispatcher rmDispatcher;
  private boolean isHAEnabled;
  private HAServiceState haServiceState =
      HAServiceProtocol.HAServiceState.INITIALIZING;
  private AdminService adminService;
  private ConfigurationProvider configurationProvider;
  private Configuration yarnConfiguration;
  private RMApplicationHistoryWriter rmApplicationHistoryWriter;
  private SystemMetricsPublisher systemMetricsPublisher;
  private EmbeddedElector elector;
  private final Object haServiceStateLock = new Object();
  private ResourceManager resourceManager;
  private RMTimelineCollectorManager timelineCollectorManager;

  public ResourceManager getResourceManager() {
    return resourceManager;
  }

  public void setResourceManager(ResourceManager rm) {
    this.resourceManager = rm;
  }

  public ConfigurationProvider getConfigurationProvider() {
    return this.configurationProvider;
  }

  public void setConfigurationProvider(
      ConfigurationProvider configurationProvider) {
    this.configurationProvider = configurationProvider;
  }

  public Dispatcher getDispatcher() {
    return this.rmDispatcher;
  }

  void setDispatcher(Dispatcher dispatcher) {
    this.rmDispatcher = dispatcher;
  }

  public EmbeddedElector getLeaderElectorService() {
    return this.elector;
  }

  public void setLeaderElectorService(EmbeddedElector embeddedElector) {
    this.elector = embeddedElector;
  }

  public AdminService getRMAdminService() {
    return this.adminService;
  }

  void setRMAdminService(AdminService service) {
    this.adminService = service;
  }

  void setHAEnabled(boolean rmHAEnabled) {
    this.isHAEnabled = rmHAEnabled;
  }

  public boolean isHAEnabled() {
    return isHAEnabled;
  }

  public HAServiceState getHAServiceState() {
    synchronized (haServiceStateLock) {
      return haServiceState;
    }
  }

  void setHAServiceState(HAServiceState serviceState) {
    synchronized (haServiceStateLock) {
      this.haServiceState = serviceState;
    }
  }

  public RMApplicationHistoryWriter getRMApplicationHistoryWriter() {
    return this.rmApplicationHistoryWriter;
  }

  public void setRMApplicationHistoryWriter(
      RMApplicationHistoryWriter applicationHistoryWriter) {
    this.rmApplicationHistoryWriter = applicationHistoryWriter;
  }

  public void setSystemMetricsPublisher(
      SystemMetricsPublisher metricsPublisher) {
    this.systemMetricsPublisher = metricsPublisher;
  }

  public SystemMetricsPublisher getSystemMetricsPublisher() {
    return this.systemMetricsPublisher;
  }

  public Configuration getYarnConfiguration() {
    return this.yarnConfiguration;
  }

  public void setYarnConfiguration(Configuration yarnConfiguration) {
    this.yarnConfiguration = yarnConfiguration;
  }

  public RMTimelineCollectorManager getRMTimelineCollectorManager() {
    return timelineCollectorManager;
  }

  public void setRMTimelineCollectorManager(
      RMTimelineCollectorManager collectorManager) {
    this.timelineCollectorManager = collectorManager;
  }

  public String getHAZookeeperConnectionState() {
    if (elector == null) {
      return "Could not find leader elector. Verify both HA and automatic "
          + "failover are enabled.";
    } else {
      return elector.getZookeeperConnectionState();
    }
  }
}
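One plausible way this class is consumed is during the RM's transition to standby, where a fresh context is created but the existing always-on context is carried over (the resetRMContext change later in this diff does exactly that via setServiceContext/getServiceContext). A hedged sketch of that idea with stand-in types:

// ServiceContextSketch stands in for RMServiceContext and is not the real class.
class ServiceContextSketch { }

class ContextSketch {
  private ServiceContextSketch serviceContext = new ServiceContextSketch();
  ServiceContextSketch getServiceContext() { return serviceContext; }
  void setServiceContext(ServiceContextSketch ctx) { this.serviceContext = ctx; }
}

class StandbyTransitionSketch {
  ContextSketch reset(ContextSketch old) {
    ContextSketch fresh = new ContextSketch();
    // Always-on services move to the new context; active-only state is rebuilt.
    fresh.setServiceContext(old.getServiceContext());
    return fresh;
  }
}
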
@ -115,7 +115,6 @@ import org.eclipse.jetty.webapp.WebAppContext;
|
|||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.PrintStream;
|
||||
|
@ -345,9 +344,9 @@ public class ResourceManager extends CompositeService implements Recoverable {
|
|||
YarnConfiguration.DEFAULT_CURATOR_LEADER_ELECTOR_ENABLED);
|
||||
if (curatorEnabled) {
|
||||
this.curator = createAndStartCurator(conf);
|
||||
elector = new CuratorBasedElectorService(rmContext, this);
|
||||
elector = new CuratorBasedElectorService(this);
|
||||
} else {
|
||||
elector = new ActiveStandbyElectorBasedElectorService(rmContext);
|
||||
elector = new ActiveStandbyElectorBasedElectorService(this);
|
||||
}
|
||||
return elector;
|
||||
}
|
||||
|
@ -497,7 +496,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
|
|||
}
|
||||
|
||||
private RMTimelineCollectorManager createRMTimelineCollectorManager() {
|
||||
return new RMTimelineCollectorManager(rmContext);
|
||||
return new RMTimelineCollectorManager(this);
|
||||
}
|
||||
|
||||
protected SystemMetricsPublisher createSystemMetricsPublisher() {
|
||||
|
@ -508,7 +507,8 @@ public class ResourceManager extends CompositeService implements Recoverable {
|
|||
// we're dealing with the v.2.x publisher
|
||||
LOG.info("system metrics publisher with the timeline service V2 is " +
|
||||
"configured");
|
||||
publisher = new TimelineServiceV2Publisher(rmContext);
|
||||
publisher = new TimelineServiceV2Publisher(
|
||||
rmContext.getRMTimelineCollectorManager());
|
||||
} else {
|
||||
// we're dealing with the v.1.x publisher
|
||||
LOG.info("system metrics publisher with the timeline service V1 is " +
|
||||
|
@ -560,7 +560,6 @@ public class ResourceManager extends CompositeService implements Recoverable {
|
|||
private ApplicationMasterLauncher applicationMasterLauncher;
|
||||
private ContainerAllocationExpirer containerAllocationExpirer;
|
||||
private ResourceManager rm;
|
||||
private RMActiveServiceContext activeServiceContext;
|
||||
private boolean fromActive = false;
|
||||
private StandByTransitionRunnable standByTransitionRunnable;
|
||||
|
||||
|
@ -573,9 +572,6 @@ public class ResourceManager extends CompositeService implements Recoverable {
|
|||
protected void serviceInit(Configuration configuration) throws Exception {
|
||||
standByTransitionRunnable = new StandByTransitionRunnable();
|
||||
|
||||
activeServiceContext = new RMActiveServiceContext();
|
||||
rmContext.setActiveServiceContext(activeServiceContext);
|
||||
|
||||
rmSecretManagerService = createRMSecretManagerService();
|
||||
addService(rmSecretManagerService);
|
||||
|
||||
|
@ -1149,7 +1145,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
|
|||
ClusterMetrics.destroy();
|
||||
QueueMetrics.clearQueueMetrics();
|
||||
if (initialize) {
|
||||
resetDispatcher();
|
||||
resetRMContext();
|
||||
createAndInitActiveServices(true);
|
||||
}
|
||||
}
|
||||
|
@ -1294,7 +1290,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
|
|||
}
|
||||
|
||||
protected AdminService createAdminService() {
|
||||
return new AdminService(this, rmContext);
|
||||
return new AdminService(this);
|
||||
}
|
||||
|
||||
protected RMSecretManagerService createRMSecretManagerService() {
|
||||
|
@ -1417,17 +1413,24 @@ public class ResourceManager extends CompositeService implements Recoverable {
|
|||
return dispatcher;
|
||||
}
|
||||
|
||||
private void resetDispatcher() {
|
||||
private void resetRMContext() {
|
||||
RMContextImpl rmContextImpl = new RMContextImpl();
|
||||
// transfer service context to new RM service Context
|
||||
rmContextImpl.setServiceContext(rmContext.getServiceContext());
|
||||
|
||||
// reset dispatcher
|
||||
Dispatcher dispatcher = setupDispatcher();
|
||||
((Service)dispatcher).init(this.conf);
|
||||
((Service)dispatcher).start();
|
||||
removeService((Service)rmDispatcher);
|
||||
((Service) dispatcher).init(this.conf);
|
||||
((Service) dispatcher).start();
|
||||
removeService((Service) rmDispatcher);
|
||||
// Need to stop previous rmDispatcher before assigning new dispatcher
|
||||
// otherwise causes "AsyncDispatcher event handler" thread leak
|
||||
((Service) rmDispatcher).stop();
|
||||
rmDispatcher = dispatcher;
|
||||
addIfService(rmDispatcher);
|
||||
rmContext.setDispatcher(rmDispatcher);
|
||||
rmContextImpl.setDispatcher(dispatcher);
|
||||
|
||||
rmContext = rmContextImpl;
|
||||
}
|
||||
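The comment about the "AsyncDispatcher event handler" thread leak above points at the ordering that matters here: bring the replacement dispatcher up, stop the old one, and only then publish the new reference. A generic sketch of that pattern (SimpleService is a stand-in, not org.apache.hadoop.service.Service):

interface SimpleService {
  void init();
  void start();
  void stop();
}

class DispatcherSwapSketch {
  private SimpleService dispatcher;

  void swapDispatcher(SimpleService replacement) {
    replacement.init();
    replacement.start();        // bring the new dispatcher up first
    if (dispatcher != null) {
      dispatcher.stop();        // stop the old one so its handler thread exits
    }
    dispatcher = replacement;   // only then expose the new reference
  }
}
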
|
||||
private void setSchedulerRecoveryStartAndWaitTime(RMState state,
|
||||
|
|
|
@ -48,7 +48,6 @@ import org.apache.hadoop.yarn.event.EventHandler;
|
|||
import org.apache.hadoop.yarn.server.metrics.AppAttemptMetricsConstants;
|
||||
import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
|
||||
import org.apache.hadoop.yarn.server.metrics.ContainerMetricsConstants;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
|
||||
|
@ -75,9 +74,10 @@ public class TimelineServiceV2Publisher extends AbstractSystemMetricsPublisher {
|
|||
private RMTimelineCollectorManager rmTimelineCollectorManager;
|
||||
private boolean publishContainerEvents;
|
||||
|
||||
public TimelineServiceV2Publisher(RMContext rmContext) {
|
||||
public TimelineServiceV2Publisher(
|
||||
RMTimelineCollectorManager timelineCollectorManager) {
|
||||
super("TimelineserviceV2Publisher");
|
||||
rmTimelineCollectorManager = rmContext.getRMTimelineCollectorManager();
|
||||
rmTimelineCollectorManager = timelineCollectorManager;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
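The constructor change above narrows the publisher's dependency from the whole RMContext to just the collector manager it actually uses, which keeps construction and test setup simpler. A generic sketch of that style of constructor injection (names are illustrative, not the YARN classes):

// Illustrative classes only; CollectorManagerSketch is not RMTimelineCollectorManager.
class CollectorManagerSketch {
  void register(String appId) { /* track a collector for the app */ }
}

class PublisherSketch {
  private final CollectorManagerSketch collectors;

  // Instead of taking a context object and calling a getter on it,
  // the collaborator is passed in directly.
  PublisherSketch(CollectorManagerSketch collectors) {
    this.collectors = collectors;
  }

  void onAppStarted(String appId) {
    collectors.register(appId);
  }
}
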
|
@ -52,7 +52,6 @@ public class FSLeafQueue extends FSQueue {
|
|||
private static final Log LOG = LogFactory.getLog(FSLeafQueue.class.getName());
|
||||
private static final List<FSQueue> EMPTY_LIST = Collections.emptyList();
|
||||
|
||||
private FairScheduler scheduler;
|
||||
private FSContext context;
|
||||
|
||||
// apps that are runnable
|
||||
|
@ -76,7 +75,6 @@ public class FSLeafQueue extends FSQueue {
|
|||
public FSLeafQueue(String name, FairScheduler scheduler,
|
||||
FSParentQueue parent) {
|
||||
super(name, scheduler, parent);
|
||||
this.scheduler = scheduler;
|
||||
this.context = scheduler.getContext();
|
||||
this.lastTimeAtMinShare = scheduler.getClock().getTime();
|
||||
activeUsersManager = new ActiveUsersManager(getMetrics());
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies;
|
|||
import java.util.Collection;
|
||||
import java.util.Comparator;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import org.apache.hadoop.classification.InterfaceAudience.Private;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Unstable;
|
||||
import org.apache.hadoop.yarn.api.records.Resource;
|
||||
|
@ -174,6 +175,7 @@ public class DominantResourceFairnessPolicy extends SchedulingPolicy {
|
|||
* by largest share. So if resource=<10 MB, 5 CPU>, and pool=<100 MB, 10 CPU>,
|
||||
* shares will be [.1, .5] and resourceOrder will be [CPU, MEMORY].
|
||||
*/
|
||||
@VisibleForTesting
|
||||
void calculateShares(Resource resource, Resource pool,
|
||||
ResourceWeights shares, ResourceType[] resourceOrder, ResourceWeights weights) {
|
||||
shares.setWeight(MEMORY, (float)resource.getMemorySize() /
|
||||
|
|
|
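The calculateShares hunk above works as its javadoc example describes: each resource's share is usage divided by the pool's capacity, and resources are then ordered by largest share. A self-contained sketch of that arithmetic with plain doubles:

// Reproduces the javadoc example: resource = <10 MB, 5 CPU>, pool = <100 MB, 10 CPU>
// gives shares [0.1, 0.5], so the dominant resource order is [CPU, MEMORY].
public class DominantShareSketch {
  public static void main(String[] args) {
    double usedMem = 10, usedCpu = 5;
    double poolMem = 100, poolCpu = 10;

    double memShare = usedMem / poolMem;   // 0.1
    double cpuShare = usedCpu / poolCpu;   // 0.5

    String dominant = cpuShare >= memShare ? "CPU" : "MEMORY";
    System.out.println("shares=[" + memShare + ", " + cpuShare
        + "] dominant=" + dominant);
  }
}
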
@@ -56,24 +56,28 @@ public class FairSharePolicy extends SchedulingPolicy {
  }

  /**
   * Compare Schedulables via weighted fair sharing. In addition, Schedulables
   * below their min share get priority over those whose min share is met.
   * Compare Schedulables mainly via fair share usage to meet fairness.
   * Specifically, it goes through following four steps.
   *
   * Schedulables without resource demand get lower priority than
   * ones who have demands.
   * 1. Compare demands. Schedulables without resource demand get lower priority
   * than ones who have demands.
   *
   * Schedulables below their min share are compared by how far below it they
   * are as a ratio. For example, if job A has 8 out of a min share of 10 tasks
   * and job B has 50 out of a min share of 100, then job B is scheduled next,
   * because B is at 50% of its min share and A is at 80% of its min share.
   * 2. Compare min share usage. Schedulables below their min share are compared
   * by how far below it they are as a ratio. For example, if job A has 8 out
   * of a min share of 10 tasks and job B has 50 out of a min share of 100,
   * then job B is scheduled next, because B is at 50% of its min share and A
   * is at 80% of its min share.
   *
   * Schedulables above their min share are compared by (runningTasks / weight).
   * 3. Compare fair share usage. Schedulables above their min share are
   * compared by fair share usage by checking (resource usage / weight).
   * If all weights are equal, slots are given to the job with the fewest tasks;
   * otherwise, jobs with more weight get proportionally more slots. If weight
   * equals to 0, we can't compare Schedulables by (resource usage/weight).
   * There are two situations: 1)All weights equal to 0, slots are given
   * to one with less resource usage. 2)Only one of weight equals to 0, slots
   * are given to the one with non-zero weight.
   *
   * 4. Break the tie by compare submit time and job name.
   */
  private static class FairShareComparator implements Comparator<Schedulable>,
      Serializable {
@ -82,37 +86,88 @@ public class FairSharePolicy extends SchedulingPolicy {
|
|||
|
||||
@Override
|
||||
public int compare(Schedulable s1, Schedulable s2) {
|
||||
int res = compareDemand(s1, s2);
|
||||
|
||||
// Pre-compute resource usages to avoid duplicate calculation
|
||||
Resource resourceUsage1 = s1.getResourceUsage();
|
||||
Resource resourceUsage2 = s2.getResourceUsage();
|
||||
|
||||
if (res == 0) {
|
||||
res = compareMinShareUsage(s1, s2, resourceUsage1, resourceUsage2);
|
||||
}
|
||||
|
||||
if (res == 0) {
|
||||
res = compareFairShareUsage(s1, s2, resourceUsage1, resourceUsage2);
|
||||
}
|
||||
|
||||
// Break the tie by submit time
|
||||
if (res == 0) {
|
||||
res = (int) Math.signum(s1.getStartTime() - s2.getStartTime());
|
||||
}
|
||||
|
||||
// Break the tie by job name
|
||||
if (res == 0) {
|
||||
res = s1.getName().compareTo(s2.getName());
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
private int compareDemand(Schedulable s1, Schedulable s2) {
|
||||
int res = 0;
|
||||
Resource demand1 = s1.getDemand();
|
||||
Resource demand2 = s2.getDemand();
|
||||
if (demand1.equals(Resources.none()) && Resources.greaterThan(
|
||||
RESOURCE_CALCULATOR, null, demand2, Resources.none())) {
|
||||
return 1;
|
||||
res = 1;
|
||||
} else if (demand2.equals(Resources.none()) && Resources.greaterThan(
|
||||
RESOURCE_CALCULATOR, null, demand1, Resources.none())) {
|
||||
return -1;
|
||||
res = -1;
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
double minShareRatio1, minShareRatio2;
|
||||
double useToWeightRatio1, useToWeightRatio2;
|
||||
double weight1, weight2;
|
||||
//Do not repeat the getResourceUsage calculation
|
||||
Resource resourceUsage1 = s1.getResourceUsage();
|
||||
Resource resourceUsage2 = s2.getResourceUsage();
|
||||
private int compareMinShareUsage(Schedulable s1, Schedulable s2,
|
||||
Resource resourceUsage1, Resource resourceUsage2) {
|
||||
int res;
|
||||
Resource minShare1 = Resources.min(RESOURCE_CALCULATOR, null,
|
||||
s1.getMinShare(), demand1);
|
||||
s1.getMinShare(), s1.getDemand());
|
||||
Resource minShare2 = Resources.min(RESOURCE_CALCULATOR, null,
|
||||
s2.getMinShare(), demand2);
|
||||
s2.getMinShare(), s2.getDemand());
|
||||
boolean s1Needy = Resources.lessThan(RESOURCE_CALCULATOR, null,
|
||||
resourceUsage1, minShare1);
|
||||
boolean s2Needy = Resources.lessThan(RESOURCE_CALCULATOR, null,
|
||||
resourceUsage2, minShare2);
|
||||
minShareRatio1 = (double) resourceUsage1.getMemorySize()
|
||||
/ Resources.max(RESOURCE_CALCULATOR, null, minShare1, ONE).getMemorySize();
|
||||
minShareRatio2 = (double) resourceUsage2.getMemorySize()
|
||||
/ Resources.max(RESOURCE_CALCULATOR, null, minShare2, ONE).getMemorySize();
|
||||
|
||||
weight1 = s1.getWeights().getWeight(ResourceType.MEMORY);
|
||||
weight2 = s2.getWeights().getWeight(ResourceType.MEMORY);
|
||||
if (s1Needy && !s2Needy) {
|
||||
res = -1;
|
||||
} else if (s2Needy && !s1Needy) {
|
||||
res = 1;
|
||||
} else if (s1Needy && s2Needy) {
|
||||
double minShareRatio1 = (double) resourceUsage1.getMemorySize() /
|
||||
Resources.max(RESOURCE_CALCULATOR, null, minShare1, ONE)
|
||||
.getMemorySize();
|
||||
double minShareRatio2 = (double) resourceUsage2.getMemorySize() /
|
||||
Resources.max(RESOURCE_CALCULATOR, null, minShare2, ONE)
|
||||
.getMemorySize();
|
||||
res = (int) Math.signum(minShareRatio1 - minShareRatio2);
|
||||
} else {
|
||||
res = 0;
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
/**
|
||||
* To simplify computation, use weights instead of fair shares to calculate
|
||||
* fair share usage.
|
||||
*/
|
||||
private int compareFairShareUsage(Schedulable s1, Schedulable s2,
|
||||
Resource resourceUsage1, Resource resourceUsage2) {
|
||||
double weight1 = s1.getWeights().getWeight(ResourceType.MEMORY);
|
||||
double weight2 = s2.getWeights().getWeight(ResourceType.MEMORY);
|
||||
double useToWeightRatio1;
|
||||
double useToWeightRatio2;
|
||||
if (weight1 > 0.0 && weight2 > 0.0) {
|
||||
useToWeightRatio1 = resourceUsage1.getMemorySize() / weight1;
|
||||
useToWeightRatio2 = resourceUsage2.getMemorySize() / weight2;
|
||||
|
@ -130,25 +185,7 @@ public class FairSharePolicy extends SchedulingPolicy {
|
|||
}
|
||||
}
|
||||
|
||||
int res = 0;
|
||||
if (s1Needy && !s2Needy)
|
||||
res = -1;
|
||||
else if (s2Needy && !s1Needy)
|
||||
res = 1;
|
||||
else if (s1Needy && s2Needy)
|
||||
res = (int) Math.signum(minShareRatio1 - minShareRatio2);
|
||||
else
|
||||
// Neither schedulable is needy
|
||||
res = (int) Math.signum(useToWeightRatio1 - useToWeightRatio2);
|
||||
if (res == 0) {
|
||||
// Apps are tied in fairness ratio. Break the tie by submit time and job
|
||||
// name to get a deterministic ordering, which is useful for unit tests.
|
||||
res = (int) Math.signum(s1.getStartTime() - s2.getStartTime());
|
||||
if (res == 0) {
|
||||
res = s1.getName().compareTo(s2.getName());
|
||||
}
|
||||
}
|
||||
return res;
|
||||
return (int) Math.signum(useToWeightRatio1 - useToWeightRatio2);
|
||||
}
|
||||
}
|
||||
|
||||
|
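The refactored comparator above splits the ordering into the four steps its new javadoc lists: demand, min share usage, usage-to-weight ratio, then submit time and name. A simplified, self-contained sketch of that ordering (App is a stand-in for Schedulable, and zero weights are handled more crudely than the real policy does):

import java.util.Comparator;

class App {
  double demand, usage, minShare, weight;
  long startTime;
  String name;
}

class FourStepComparatorSketch implements Comparator<App> {
  @Override
  public int compare(App a, App b) {
    // 1. Apps with no demand sort after apps that still want resources.
    int res = Boolean.compare(a.demand == 0, b.demand == 0);

    // 2. An app below its min share beats one that has met it; if both are
    //    below, the one further below (smaller usage/minShare ratio) wins.
    boolean aNeedy = a.usage < a.minShare;
    boolean bNeedy = b.usage < b.minShare;
    if (res == 0 && aNeedy != bNeedy) {
      res = aNeedy ? -1 : 1;
    } else if (res == 0 && aNeedy) {
      res = Double.compare(a.usage / a.minShare, b.usage / b.minShare);
    }

    // 3. Otherwise compare usage-to-weight ratios (fair share usage).
    if (res == 0) {
      res = Double.compare(a.usage / Math.max(a.weight, 1e-9),
          b.usage / Math.max(b.weight, 1e-9));
    }

    // 4. Deterministic tie-break: submit time, then name.
    if (res == 0) {
      res = Long.compare(a.startTime, b.startTime);
    }
    return res == 0 ? a.name.compareTo(b.name) : res;
  }
}
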
|
|
@ -24,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
|
|||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationId;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
|
||||
import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollector;
|
||||
import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext;
|
||||
|
@ -41,16 +41,16 @@ public class RMTimelineCollectorManager extends TimelineCollectorManager {
|
|||
private static final Log LOG =
|
||||
LogFactory.getLog(RMTimelineCollectorManager.class);
|
||||
|
||||
private RMContext rmContext;
|
||||
private ResourceManager rm;
|
||||
|
||||
public RMTimelineCollectorManager(RMContext rmContext) {
|
||||
public RMTimelineCollectorManager(ResourceManager resourceManager) {
|
||||
super(RMTimelineCollectorManager.class.getName());
|
||||
this.rmContext = rmContext;
|
||||
this.rm = resourceManager;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doPostPut(ApplicationId appId, TimelineCollector collector) {
|
||||
RMApp app = rmContext.getRMApps().get(appId);
|
||||
RMApp app = rm.getRMContext().getRMApps().get(appId);
|
||||
if (app == null) {
|
||||
throw new YarnRuntimeException(
|
||||
"Unable to get the timeline collector context info for a " +
|
||||
|
|
|
@ -102,8 +102,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEv
|
|||
import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
|
||||
|
||||
|
||||
import org.apache.hadoop.yarn.util.Records;
|
||||
import org.apache.hadoop.yarn.util.YarnVersionInfo;
|
||||
import org.apache.log4j.Level;
|
||||
|
@ -174,14 +172,6 @@ public class MockRM extends ResourceManager {
|
|||
disableDrainEventsImplicitly = false;
|
||||
}
|
||||
|
||||
public class MockRMMemoryStateStore extends MemoryRMStateStore {
|
||||
@SuppressWarnings("rawtypes")
|
||||
@Override
|
||||
protected EventHandler getRMStateStoreEventHandler() {
|
||||
return rmStateStoreEventHandler;
|
||||
}
|
||||
}
|
||||
|
||||
public class MockRMNullStateStore extends NullRMStateStore {
|
||||
@SuppressWarnings("rawtypes")
|
||||
@Override
|
||||
|
@ -1055,7 +1045,7 @@ public class MockRM extends ResourceManager {
|
|||
|
||||
@Override
|
||||
protected AdminService createAdminService() {
|
||||
return new AdminService(this, getRMContext()) {
|
||||
return new AdminService(this) {
|
||||
@Override
|
||||
protected void startServer() {
|
||||
// override to not start rpc handler
|
||||
|
@ -1294,4 +1284,8 @@ public class MockRM extends ResourceManager {
|
|||
((AsyncDispatcher) getRmDispatcher()).disableExitOnDispatchException();
|
||||
}
|
||||
}
|
||||
|
||||
public RMStateStore getRMStateStore() {
|
||||
return getRMContext().getStateStore();
|
||||
}
|
||||
}

@@ -0,0 +1,32 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.resourcemanager;

import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;

/**
 * Test helper for MemoryRMStateStore will make sure the event.
 */
public class MockRMMemoryStateStore extends MemoryRMStateStore {
  @SuppressWarnings("rawtypes")
  @Override
  protected EventHandler getRMStateStoreEventHandler() {
    return rmStateStoreEventHandler;
  }
}
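With this helper class and the new MockRM.getRMStateStore(), the restart tests in the rest of this diff follow one pattern: enable recovery through configuration, let the first MockRM create the store, and hand that store to the "restarted" MockRM. A sketch of that pattern as it appears in the updated tests (shown as a test-style fragment; it assumes the usual MockRM/YarnConfiguration imports of these test classes):

conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());

// First RM: MockRM wires up the in-memory store internally.
MockRM rm1 = new MockRM(conf);
rm1.start();
// ... register nodes, submit apps, drive them to a recoverable state ...

// "Restarted" RM: reuse rm1's store instead of building a MemoryRMStateStore by hand.
MockRM rm2 = new MockRM(conf, rm1.getRMStateStore());
rm2.start();
// ... assert that apps/attempts were recovered on rm2 ...
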
@ -153,7 +153,6 @@ public class TestApplicationCleanup {
|
|||
rm.stop();
|
||||
}
|
||||
|
||||
@SuppressWarnings("resource")
|
||||
@Test
|
||||
public void testContainerCleanup() throws Exception {
|
||||
|
||||
|
@ -291,11 +290,8 @@ public class TestApplicationCleanup {
|
|||
@Test (timeout = 60000)
|
||||
public void testAppCleanupWhenRMRestartedAfterAppFinished() throws Exception {
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
// start RM
|
||||
MockRM rm1 = new MockRM(conf, memStore);
|
||||
MockRM rm1 = new MockRM(conf);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
|
||||
|
@ -308,7 +304,7 @@ public class TestApplicationCleanup {
|
|||
rm1.waitForState(app0.getApplicationId(), RMAppState.FAILED);
|
||||
|
||||
// start new RM
|
||||
MockRM rm2 = new MockRM(conf, memStore);
|
||||
MockRM rm2 = new MockRM(conf, rm1.getRMStateStore());
|
||||
rm2.start();
|
||||
|
||||
// nm1 register to rm2, and do a heartbeat
|
||||
|
@ -327,11 +323,9 @@ public class TestApplicationCleanup {
|
|||
@Test(timeout = 60000)
|
||||
public void testAppCleanupWhenRMRestartedBeforeAppFinished() throws Exception {
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
// start RM
|
||||
MockRM rm1 = new MockRM(conf, memStore);
|
||||
MockRM rm1 = new MockRM(conf);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 1024, rm1.getResourceTrackerService());
|
||||
|
@ -357,7 +351,7 @@ public class TestApplicationCleanup {
|
|||
}
|
||||
|
||||
// start new RM
|
||||
MockRM rm2 = new MockRM(conf, memStore);
|
||||
MockRM rm2 = new MockRM(conf, rm1.getRMStateStore());
|
||||
rm2.start();
|
||||
|
||||
// nm1/nm2 register to rm2, and do a heartbeat
|
||||
|
@ -383,16 +377,12 @@ public class TestApplicationCleanup {
|
|||
rm2.stop();
|
||||
}
|
||||
|
||||
@SuppressWarnings("resource")
|
||||
@Test (timeout = 60000)
|
||||
public void testContainerCleanupWhenRMRestartedAppNotRegistered() throws
|
||||
Exception {
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
// start RM
|
||||
MockRM rm1 = new MockRM(conf, memStore);
|
||||
MockRM rm1 = new MockRM(conf);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
|
||||
|
@ -405,7 +395,7 @@ public class TestApplicationCleanup {
|
|||
rm1.waitForState(app0.getApplicationId(), RMAppState.RUNNING);
|
||||
|
||||
// start new RM
|
||||
MockRM rm2 = new MockRM(conf, memStore);
|
||||
MockRM rm2 = new MockRM(conf, rm1.getRMStateStore());
|
||||
rm2.start();
|
||||
|
||||
// nm1 register to rm2, and do a heartbeat
|
||||
|
@ -426,11 +416,9 @@ public class TestApplicationCleanup {
|
|||
@Test (timeout = 60000)
|
||||
public void testAppCleanupWhenNMReconnects() throws Exception {
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
// start RM
|
||||
MockRM rm1 = new MockRM(conf, memStore);
|
||||
MockRM rm1 = new MockRM(conf);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
|
||||
|
@ -466,11 +454,9 @@ public class TestApplicationCleanup {
|
|||
@Test(timeout = 60000)
|
||||
public void testProcessingNMContainerStatusesOnNMRestart() throws Exception {
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
// 1. Start the cluster-RM,NM,Submit app with 1024MB,Launch & register AM
|
||||
MockRM rm1 = new MockRM(conf, memStore);
|
||||
MockRM rm1 = new MockRM(conf);
|
||||
rm1.start();
|
||||
int nmMemory = 8192;
|
||||
int amMemory = 1024;
|
||||
|
|
|
@ -138,10 +138,8 @@ public class TestContainerResourceUsage {
|
|||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
|
||||
conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
|
||||
conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
MockRM rm0 = new MockRM(conf, memStore);
|
||||
conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
|
||||
MockRM rm0 = new MockRM(conf);
|
||||
rm0.start();
|
||||
MockNM nm =
|
||||
new MockNM("127.0.0.1:1234", 65536, rm0.getResourceTrackerService());
|
||||
|
@ -229,7 +227,7 @@ public class TestContainerResourceUsage {
|
|||
vcoreSeconds, metricsBefore.getVcoreSeconds());
|
||||
|
||||
// create new RM to represent RM restart. Load up the state store.
|
||||
MockRM rm1 = new MockRM(conf, memStore);
|
||||
MockRM rm1 = new MockRM(conf, rm0.getRMStateStore());
|
||||
rm1.start();
|
||||
RMApp app0After =
|
||||
rm1.getRMContext().getRMApps().get(app0.getApplicationId());
|
||||
|
|
|
@ -413,11 +413,9 @@ public class TestNodeBlacklistingOnAMFailures {
|
|||
}
|
||||
|
||||
private MockRM startRM(YarnConfiguration conf) {
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
MockRM rm = new MockRM(conf, memStore);
|
||||
|
||||
conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
|
||||
conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
|
||||
MockRM rm = new MockRM(conf);
|
||||
rm.start();
|
||||
return rm;
|
||||
}
|
||||
|
|
|
@ -122,13 +122,15 @@ public class TestRMEmbeddedElector extends ClientBaseWithFixes {
|
|||
throws IOException, InterruptedException {
|
||||
AdminService as = mock(AdminService.class);
|
||||
RMContext rc = mock(RMContext.class);
|
||||
ResourceManager rm = mock(ResourceManager.class);
|
||||
Configuration myConf = new Configuration(conf);
|
||||
|
||||
myConf.setInt(YarnConfiguration.RM_ZK_TIMEOUT_MS, 50);
|
||||
when(rm.getRMContext()).thenReturn(rc);
|
||||
when(rc.getRMAdminService()).thenReturn(as);
|
||||
|
||||
ActiveStandbyElectorBasedElectorService
|
||||
ees = new ActiveStandbyElectorBasedElectorService(rc);
|
||||
ActiveStandbyElectorBasedElectorService ees =
|
||||
new ActiveStandbyElectorBasedElectorService(rm);
|
||||
ees.init(myConf);
|
||||
|
||||
ees.enterNeutralMode();
|
||||
|
@ -290,7 +292,7 @@ public class TestRMEmbeddedElector extends ClientBaseWithFixes {
|
|||
|
||||
@Override
|
||||
protected EmbeddedElector createEmbeddedElector() {
|
||||
return new ActiveStandbyElectorBasedElectorService(getRMContext()) {
|
||||
return new ActiveStandbyElectorBasedElectorService(this) {
|
||||
@Override
|
||||
public void becomeActive() throws
|
||||
ServiceFailedException {
|
||||
|
|
|
@ -71,6 +71,7 @@ public class TestRMHA {
|
|||
private Log LOG = LogFactory.getLog(TestRMHA.class);
|
||||
private Configuration configuration;
|
||||
private MockRM rm = null;
|
||||
private MockNM nm = null;
|
||||
private RMApp app = null;
|
||||
private RMAppAttempt attempt = null;
|
||||
private static final String STATE_ERR =
|
||||
|
@ -135,7 +136,7 @@ public class TestRMHA {
|
|||
|
||||
try {
|
||||
rm.getNewAppId();
|
||||
rm.registerNode("127.0.0.1:1", 2048);
|
||||
nm = rm.registerNode("127.0.0.1:1", 2048);
|
||||
app = rm.submitApp(1024);
|
||||
attempt = app.getCurrentAppAttempt();
|
||||
rm.waitForState(attempt.getAppAttemptId(), RMAppAttemptState.SCHEDULED);
|
||||
|
@ -414,7 +415,7 @@ public class TestRMHA {
|
|||
configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
|
||||
Configuration conf = new YarnConfiguration(configuration);
|
||||
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore() {
|
||||
MemoryRMStateStore memStore = new MockRMMemoryStateStore() {
|
||||
int count = 0;
|
||||
|
||||
@Override
|
||||
|
@ -464,7 +465,7 @@ public class TestRMHA {
|
|||
configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
|
||||
Configuration conf = new YarnConfiguration(configuration);
|
||||
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore() {
|
||||
MemoryRMStateStore memStore = new MockRMMemoryStateStore() {
|
||||
@Override
|
||||
public void updateApplicationState(ApplicationStateData appState) {
|
||||
notifyStoreOperationFailed(new StoreFencedException());
|
||||
|
@ -529,12 +530,10 @@ public class TestRMHA {
|
|||
configuration.setBoolean(YarnConfiguration.AUTO_FAILOVER_ENABLED, false);
|
||||
configuration.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
|
||||
Configuration conf = new YarnConfiguration(configuration);
|
||||
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
|
||||
|
||||
// 1. start RM
|
||||
rm = new MockRM(conf, memStore);
|
||||
rm = new MockRM(conf);
|
||||
rm.init(conf);
|
||||
rm.start();
|
||||
|
||||
|
@ -551,9 +550,20 @@ public class TestRMHA {
|
|||
verifyClusterMetrics(1, 1, 1, 1, 2048, 1);
|
||||
assertEquals(1, rm.getRMContext().getRMNodes().size());
|
||||
assertEquals(1, rm.getRMContext().getRMApps().size());
|
||||
Assert.assertNotNull("Node not registered", nm);
|
||||
|
||||
rm.adminService.transitionToStandby(requestInfo);
|
||||
checkMonitorHealth();
|
||||
checkStandbyRMFunctionality();
|
||||
// race condition causes to register/node heartbeat node even after service
|
||||
// is stopping/stopped. New RMContext is being created on every transition
|
||||
// to standby, so metrics should be 0 which indicates new context reference
|
||||
// has taken.
|
||||
nm.registerNode();
|
||||
verifyClusterMetrics(0, 0, 0, 0, 0, 0);
|
||||
|
||||
// 3. Create new RM
|
||||
rm = new MockRM(conf, memStore) {
|
||||
rm = new MockRM(conf, rm.getRMStateStore()) {
|
||||
@Override
|
||||
protected ResourceTrackerService createResourceTrackerService() {
|
||||
return new ResourceTrackerService(this.rmContext,
|
||||
|
@ -592,7 +602,7 @@ public class TestRMHA {
|
|||
rm = new MockRM(configuration) {
|
||||
@Override
|
||||
protected AdminService createAdminService() {
|
||||
return new AdminService(this, getRMContext()) {
|
||||
return new AdminService(this) {
|
||||
int counter = 0;
|
||||
@Override
|
||||
protected void setConfig(Configuration conf) {
|
||||
|
|
|
@ -178,24 +178,23 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
return rm;
|
||||
}
|
||||
|
||||
@SuppressWarnings("rawtypes")
|
||||
private MockRM createMockRM(YarnConfiguration config) {
|
||||
MockRM rm = new MockRM(config);
|
||||
rms.add(rm);
|
||||
return rm;
|
||||
}
|
||||
|
||||
@Test (timeout=180000)
|
||||
public void testRMRestart() throws Exception {
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
|
||||
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
|
||||
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
RMState rmState = memStore.getState();
|
||||
// PHASE 1: create RM and get state
|
||||
MockRM rm1 = createMockRM(conf);
|
||||
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
|
||||
Map<ApplicationId, ApplicationStateData> rmAppState =
|
||||
rmState.getApplicationState();
|
||||
|
||||
|
||||
// PHASE 1: create state in an RM
|
||||
|
||||
// start RM
|
||||
MockRM rm1 = createMockRM(conf, memStore);
|
||||
|
||||
memStore.getState().getApplicationState();
|
||||
|
||||
// start like normal because state is empty
|
||||
rm1.start();
|
||||
|
||||
|
@ -451,14 +450,12 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
public void testRMRestartAppRunningAMFailed() throws Exception {
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
|
||||
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
RMState rmState = memStore.getState();
|
||||
Map<ApplicationId, ApplicationStateData> rmAppState =
|
||||
rmState.getApplicationState();
|
||||
|
||||
// start RM
|
||||
MockRM rm1 = createMockRM(conf, memStore);
|
||||
// Create RM
|
||||
MockRM rm1 = createMockRM(conf);
|
||||
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
|
||||
Map<ApplicationId, ApplicationStateData> rmAppState =
|
||||
memStore.getState().getApplicationState();
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
|
||||
|
@ -508,14 +505,13 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
// be started immediately.
|
||||
YarnConfiguration conf = new YarnConfiguration(this.conf);
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 40);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
RMState rmState = memStore.getState();
|
||||
|
||||
// create RM
|
||||
MockRM rm1 = createMockRM(conf);
|
||||
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
|
||||
Map<ApplicationId, ApplicationStateData> rmAppState =
|
||||
rmState.getApplicationState();
|
||||
|
||||
memStore.getState().getApplicationState();
|
||||
// start RM
|
||||
final MockRM rm1 = createMockRM(conf, memStore);
|
||||
rm1.start();
|
||||
AbstractYarnScheduler ys =
|
||||
(AbstractYarnScheduler)rm1.getResourceScheduler();
|
||||
|
@ -674,7 +670,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
@Test (timeout = 60000)
|
||||
public void testRMRestartWaitForPreviousSucceededAttempt() throws Exception {
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore() {
|
||||
MemoryRMStateStore memStore = new MockRMMemoryStateStore() {
|
||||
int count = 0;
|
||||
|
||||
@Override
|
||||
|
@ -727,14 +723,12 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
@Test (timeout = 60000)
|
||||
public void testRMRestartFailedApp() throws Exception {
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
RMState rmState = memStore.getState();
|
||||
// create RM
|
||||
MockRM rm1 = createMockRM(conf);
|
||||
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
|
||||
Map<ApplicationId, ApplicationStateData> rmAppState =
|
||||
rmState.getApplicationState();
|
||||
|
||||
memStore.getState().getApplicationState();
|
||||
// start RM
|
||||
MockRM rm1 = createMockRM(conf, memStore);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
|
||||
|
@ -775,14 +769,12 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
public void testRMRestartKilledApp() throws Exception{
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
|
||||
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
RMState rmState = memStore.getState();
|
||||
// create RM
|
||||
MockRM rm1 = createMockRM(conf);
|
||||
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
|
||||
Map<ApplicationId, ApplicationStateData> rmAppState =
|
||||
rmState.getApplicationState();
|
||||
|
||||
memStore.getState().getApplicationState();
|
||||
// start RM
|
||||
MockRM rm1 = createMockRM(conf, memStore);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
|
||||
|
@ -823,7 +815,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
|
||||
@Test (timeout = 60000)
|
||||
public void testRMRestartKilledAppWithNoAttempts() throws Exception {
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore() {
|
||||
MemoryRMStateStore memStore = new MockRMMemoryStateStore() {
|
||||
@Override
|
||||
public synchronized void storeApplicationAttemptStateInternal(
|
||||
ApplicationAttemptId attemptId,
|
||||
|
@ -865,14 +857,13 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
public void testRMRestartSucceededApp() throws Exception {
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
|
||||
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
RMState rmState = memStore.getState();
|
||||
// PHASE 1: create RM and get state
|
||||
MockRM rm1 = createMockRM(conf);
|
||||
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
|
||||
Map<ApplicationId, ApplicationStateData> rmAppState =
|
||||
rmState.getApplicationState();
|
||||
memStore.getState().getApplicationState();
|
||||
|
||||
// start RM
|
||||
MockRM rm1 = createMockRM(conf, memStore);
|
||||
// start like normal because state is empty
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
|
||||
|
@ -913,11 +904,8 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
@Test (timeout = 60000)
|
||||
public void testRMRestartGetApplicationList() throws Exception {
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
// start RM
|
||||
MockRM rm1 = new MockRM(conf, memStore) {
|
||||
MockRM rm1 = new MockRM(conf) {
|
||||
@Override
|
||||
protected SystemMetricsPublisher createSystemMetricsPublisher() {
|
||||
return spy(super.createSystemMetricsPublisher());
|
||||
|
@ -956,7 +944,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
.appCreated(any(RMApp.class), anyLong());
|
||||
// restart rm
|
||||
|
||||
MockRM rm2 = new MockRM(conf, memStore) {
|
||||
MockRM rm2 = new MockRM(conf, rm1.getRMStateStore()) {
|
||||
@Override
|
||||
protected RMAppManager createRMAppManager() {
|
||||
return spy(super.createRMAppManager());
|
||||
|
@ -1077,13 +1065,12 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
|
||||
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
|
||||
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
RMState rmState = memStore.getState();
|
||||
|
||||
// create RM
|
||||
MockRM rm1 = createMockRM(conf);
|
||||
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
|
||||
Map<ApplicationId, ApplicationStateData> rmAppState =
|
||||
rmState.getApplicationState();
|
||||
MockRM rm1 = createMockRM(conf, memStore);
|
||||
memStore.getState().getApplicationState();
|
||||
// start RM
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
|
||||
|
@ -1146,16 +1133,15 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
public void testRMRestartTimelineCollectorContext() throws Exception {
|
||||
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
|
||||
conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
RMState rmState = memStore.getState();
|
||||
Map<ApplicationId, ApplicationStateData> rmAppState =
|
||||
rmState.getApplicationState();
|
||||
|
||||
MockRM rm1 = null;
|
||||
MockRM rm2 = null;
|
||||
try {
|
||||
rm1 = createMockRM(conf, memStore);
|
||||
rm1 = createMockRM(conf);
|
||||
rm1.start();
|
||||
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
|
||||
Map<ApplicationId, ApplicationStateData> rmAppState =
|
||||
memStore.getState().getApplicationState();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
|
||||
nm1.registerNode();
|
||||
|
@ -1212,13 +1198,12 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
"kerberos");
|
||||
UserGroupInformation.setConfiguration(conf);
|
||||
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
RMState rmState = memStore.getState();
|
||||
|
||||
// create RM
|
||||
MockRM rm1 = new TestSecurityMockRM(conf);
|
||||
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
|
||||
Map<ApplicationId, ApplicationStateData> rmAppState =
|
||||
rmState.getApplicationState();
|
||||
MockRM rm1 = new TestSecurityMockRM(conf, memStore);
|
||||
memStore.getState().getApplicationState();
|
||||
// start RM
|
||||
rm1.start();
|
||||
|
||||
HashSet<Token<RMDelegationTokenIdentifier>> tokenSet =
|
||||
|
@ -1307,13 +1292,12 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
"kerberos");
|
||||
UserGroupInformation.setConfiguration(conf);
|
||||
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
RMState rmState = memStore.getState();
|
||||
|
||||
// create RM
|
||||
MockRM rm1 = new TestSecurityMockRM(conf);
|
||||
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
|
||||
Map<ApplicationId, ApplicationStateData> rmAppState =
|
||||
rmState.getApplicationState();
|
||||
MockRM rm1 = new TestSecurityMockRM(conf, memStore);
|
||||
memStore.getState().getApplicationState();
|
||||
// start RM
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("0.0.0.0:4321", 15120, rm1.getResourceTrackerService());
|
||||
|
@ -1388,8 +1372,10 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
"kerberos");
|
||||
conf.set(YarnConfiguration.RM_ADDRESS, "localhost:8032");
|
||||
UserGroupInformation.setConfiguration(conf);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
MockRM rm1 = new TestSecurityMockRM(conf);
|
||||
rm1.start();
|
||||
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
|
||||
RMState rmState = memStore.getState();
|
||||
|
||||
Map<ApplicationId, ApplicationStateData> rmAppState =
|
||||
|
@ -1399,10 +1385,6 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
Set<DelegationKey> rmDTMasterKeyState =
|
||||
rmState.getRMDTSecretManagerState().getMasterKeyState();
|
||||
|
||||
MockRM rm1 = new TestSecurityMockRM(conf, memStore);
|
||||
|
||||
rm1.start();
|
||||
|
||||
// create an empty credential
|
||||
Credentials ts = new Credentials();
|
||||
|
||||
|
@ -1537,10 +1519,8 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
"kerberos");
|
||||
conf.set(YarnConfiguration.RM_ADDRESS, "localhost:8032");
|
||||
UserGroupInformation.setConfiguration(conf);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
MockRM rm1 = new TestSecurityMockRM(conf, memStore);
|
||||
MockRM rm1 = new TestSecurityMockRM(conf);
|
||||
rm1.start();
|
||||
|
||||
GetDelegationTokenRequest request1 =
|
||||
|
@ -1553,7 +1533,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
ConverterUtils.convertFromYarn(response1.getRMDelegationToken(), rmAddr);
|
||||
|
||||
// start new RM
|
||||
MockRM rm2 = new TestSecurityMockRM(conf, memStore);
|
||||
MockRM rm2 = new TestSecurityMockRM(conf, rm1.getRMStateStore());
|
||||
rm2.start();
|
||||
|
||||
// submit an app with the old delegation token got from previous RM.
|
||||
|
@ -1631,14 +1611,13 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
|
||||
@Test (timeout = 60000)
|
||||
public void testFinishedAppRemovalAfterRMRestart() throws Exception {
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
conf.setInt(YarnConfiguration.RM_MAX_COMPLETED_APPLICATIONS, 1);
|
||||
memStore.init(conf);
|
||||
RMState rmState = memStore.getState();
|
||||
|
||||
// start RM
|
||||
MockRM rm1 = createMockRM(conf, memStore);
|
||||
MockRM rm1 = createMockRM(conf);
|
||||
rm1.start();
|
||||
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
|
||||
RMState rmState = memStore.getState();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
|
||||
nm1.registerNode();
|
||||
|
@ -1676,7 +1655,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
// This is to test RM does not get hang on shutdown.
|
||||
@Test (timeout = 10000)
|
||||
public void testRMShutdown() throws Exception {
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore() {
|
||||
MemoryRMStateStore memStore = new MockRMMemoryStateStore() {
|
||||
@Override
|
||||
public synchronized void checkVersion()
|
||||
throws Exception {
|
||||
|
@ -1742,10 +1721,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
|
||||
"kerberos");
|
||||
UserGroupInformation.setConfiguration(conf);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
MockRM rm1 = new TestSecurityMockRM(conf, memStore) {
|
||||
MockRM rm1 = new TestSecurityMockRM(conf) {
|
||||
class TestDelegationTokenRenewer extends DelegationTokenRenewer {
|
||||
public void addApplicationAsync(ApplicationId applicationId, Credentials ts,
|
||||
boolean shouldCancelAtEnd, String user, Configuration appConf) {
|
||||
|
@ -1758,6 +1734,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
}
|
||||
};
|
||||
rm1.start();
|
||||
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
|
||||
RMApp app1 = null;
|
||||
try {
|
||||
app1 = rm1.submitApp(200, "name", "user",
|
||||
|
@ -1781,7 +1758,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
|
||||
@Test (timeout = 20000)
|
||||
public void testAppRecoveredInOrderOnRMRestart() throws Exception {
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
MemoryRMStateStore memStore = new MockRMMemoryStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
for (int i = 10; i > 0; i--) {
|
||||
|
@ -1836,12 +1813,8 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
public void testQueueMetricsOnRMRestart() throws Exception {
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
|
||||
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
// PHASE 1: create state in an RM
|
||||
// start RM
|
||||
MockRM rm1 = createMockRM(conf, memStore);
|
||||
MockRM rm1 = createMockRM(conf);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
|
||||
|
@ -1879,7 +1852,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
|
||||
// PHASE 2: create new RM and start from old state
|
||||
// create new RM to represent restart and recover state
|
||||
MockRM rm2 = createMockRM(conf, memStore);
|
||||
MockRM rm2 = createMockRM(conf, rm1.getRMStateStore());
|
||||
QueueMetrics qm2 = rm2.getResourceScheduler().getRootQueueMetrics();
|
||||
resetQueueMetrics(qm2);
|
||||
assertQueueMetrics(qm2, 0, 0, 0, 0);
|
||||
|
@ -1960,7 +1933,6 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
|
||||
@Test (timeout = 60000)
|
||||
public void testDecomissionedNMsMetricsOnRMRestart() throws Exception {
|
||||
YarnConfiguration conf = new YarnConfiguration();
|
||||
conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,
|
||||
hostFile.getAbsolutePath());
|
||||
writeToHostsFile("");
|
||||
|
@ -2039,11 +2011,9 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
|
||||
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
|
||||
"kerberos");
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
// start RM
|
||||
MockRM rm1 = createMockRM(conf, memStore);
|
||||
MockRM rm1 = createMockRM(conf);
|
||||
rm1.start();
|
||||
final MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
|
||||
|
@ -2051,7 +2021,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
RMApp app0 = rm1.submitApp(200);
|
||||
final MockAM am0 = MockRM.launchAndRegisterAM(app0, rm1, nm1);
|
||||
|
||||
MockRM rm2 = new MockRM(conf, memStore) {
|
||||
MockRM rm2 = new MockRM(conf, rm1.getRMStateStore()) {
|
||||
@Override
|
||||
protected ResourceTrackerService createResourceTrackerService() {
|
||||
return new ResourceTrackerService(this.rmContext,
|
||||
|
@ -2158,6 +2128,10 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
super(conf, store);
|
||||
}
|
||||
|
||||
public TestSecurityMockRM(Configuration conf) {
|
||||
super(conf);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void init(Configuration conf) {
|
||||
// reset localServiceAddress.
|
||||
|
@ -2208,10 +2182,8 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
conf.set(YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR,
|
||||
nodeLabelFsStoreDirURI);
|
||||
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
|
||||
MockRM rm1 = new MockRM(conf, memStore) {
|
||||
MockRM rm1 = new MockRM(conf) {
|
||||
@Override
|
||||
protected RMNodeLabelsManager createNodeLabelManager() {
|
||||
RMNodeLabelsManager mgr = new RMNodeLabelsManager();
|
||||
|
@ -2261,7 +2233,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
Assert.assertEquals(1, nodeLabelManager.getNodeLabels().size());
|
||||
Assert.assertTrue(nodeLabels.get(n1).equals(toSet("y")));
|
||||
|
||||
MockRM rm2 = new MockRM(conf, memStore) {
|
||||
MockRM rm2 = new MockRM(conf, rm1.getRMStateStore()) {
|
||||
@Override
|
||||
protected RMNodeLabelsManager createNodeLabelManager() {
|
||||
RMNodeLabelsManager mgr = new RMNodeLabelsManager();
|
||||
|
@ -2290,14 +2262,12 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
int maxAttempt =
|
||||
conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
|
||||
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
RMState rmState = memStore.getState();
|
||||
// create RM
|
||||
MockRM rm1 = createMockRM(conf);
|
||||
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
|
||||
Map<ApplicationId, ApplicationStateData> rmAppState =
|
||||
rmState.getApplicationState();
|
||||
|
||||
memStore.getState().getApplicationState();
|
||||
// start RM
|
||||
MockRM rm1 = createMockRM(conf, memStore);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
|
||||
|
@ -2365,10 +2335,8 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
conf.set(YarnConfiguration.FS_NODE_LABELS_STORE_ROOT_DIR,
|
||||
nodeLabelFsStoreDirURI);
|
||||
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
|
||||
MockRM rm1 = new MockRM(conf, memStore) {
|
||||
MockRM rm1 = new MockRM(conf) {
|
||||
@Override
|
||||
protected RMNodeLabelsManager createNodeLabelManager() {
|
||||
RMNodeLabelsManager mgr = new RMNodeLabelsManager();
|
||||
|
@ -2396,7 +2364,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
nodeLabelManager.replaceLabelsOnNode(ImmutableMap.of(n1, toSet("x")));
|
||||
MockRM rm2 = null;
|
||||
for (int i = 0; i < 2; i++) {
|
||||
rm2 = new MockRM(conf, memStore) {
|
||||
rm2 = new MockRM(conf, rm1.getRMStateStore()) {
|
||||
@Override
|
||||
protected RMNodeLabelsManager createNodeLabelManager() {
|
||||
RMNodeLabelsManager mgr = new RMNodeLabelsManager();
|
||||
|
@ -2419,15 +2387,12 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
|
||||
@Test(timeout = 120000)
|
||||
public void testRMRestartAfterPreemption() throws Exception {
|
||||
Configuration conf = new Configuration();
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
|
||||
if (!getSchedulerType().equals(SchedulerType.CAPACITY)) {
|
||||
return;
|
||||
}
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
// start RM
|
||||
MockRM rm1 = new MockRM(conf, memStore);
|
||||
MockRM rm1 = new MockRM(conf);
|
||||
rm1.start();
|
||||
CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
|
||||
|
||||
|
@ -2466,7 +2431,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
MockRM rm2 = null;
|
||||
// start RM2
|
||||
try {
|
||||
rm2 = new MockRM(conf, memStore);
|
||||
rm2 = new MockRM(conf, rm1.getRMStateStore());
|
||||
rm2.start();
|
||||
Assert.assertTrue("RM start successfully", true);
|
||||
} catch (Exception e) {
|
||||
|
@ -2480,11 +2445,10 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
@Test(timeout = 60000)
|
||||
public void testRMRestartOnMissingAttempts() throws Exception {
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 5);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
// create RM
|
||||
MockRM rm1 = createMockRM(conf);
|
||||
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
|
||||
// start RM
|
||||
MockRM rm1 = createMockRM(conf, memStore);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
|
||||
|
@ -2540,13 +2504,10 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
|
|||
|
||||
@Test(timeout = 60000)
|
||||
public void testRMRestartAfterNodeLabelDisabled() throws Exception {
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, true);
|
||||
|
||||
MockRM rm1 = new MockRM(
|
||||
TestUtils.getConfigurationWithDefaultQueueLabels(conf), memStore) {
|
||||
TestUtils.getConfigurationWithDefaultQueueLabels(conf)) {
|
||||
@Override
|
||||
protected RMNodeLabelsManager createNodeLabelManager() {
|
||||
RMNodeLabelsManager mgr = new RMNodeLabelsManager();
|
||||
|
@@ -2580,7 +2541,8 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
// restart rm with node label disabled
conf.setBoolean(YarnConfiguration.NODE_LABELS_ENABLED, false);
MockRM rm2 = new MockRM(
TestUtils.getConfigurationWithDefaultQueueLabels(conf), memStore) {
TestUtils.getConfigurationWithDefaultQueueLabels(conf),
rm1.getRMStateStore()) {
@Override
protected RMNodeLabelsManager createNodeLabelManager() {
RMNodeLabelsManager mgr = new RMNodeLabelsManager();
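Across the TestRMRestart hunks above, the restarted RM is now constructed over the first RM's live state store instead of over a store the test created up front. A condensed sketch of that handoff, using only the MockRM, MockNM, and waitForState helpers that already appear in this diff; the test method name is made up and the body is trimmed to the handoff itself.

    // Condensed restart handoff in the new style (illustrative only).
    @Test(timeout = 60000)
    public void testRestartHandoffSketch() throws Exception {
      MockRM rm1 = new MockRM(conf);
      rm1.start();
      MockNM nm1 =
          new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
      nm1.registerNode();

      RMApp app = rm1.submitApp(200);
      MockRM.launchAndRegisterAM(app, rm1, nm1);

      // rm2 recovers from whatever rm1's built-in store holds at this point.
      MockRM rm2 = new MockRM(conf, rm1.getRMStateStore());
      rm2.start();
      nm1.setResourceTrackerService(rm2.getResourceTrackerService());
      rm2.waitForState(app.getApplicationId(), RMAppState.ACCEPTED);
    }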
@@ -148,9 +148,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
int containerMemory = 1024;
Resource containerResource = Resource.newInstance(containerMemory, 1);

MemoryRMStateStore memStore = new MemoryRMStateStore();
memStore.init(conf);
rm1 = new MockRM(conf, memStore);
rm1 = new MockRM(conf);
rm1.start();
MockNM nm1 =
new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());

@@ -162,7 +160,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
rm1.clearQueueMetrics(app1);

// Re-start RM
rm2 = new MockRM(conf, memStore);
rm2 = new MockRM(conf, rm1.getRMStateStore());
rm2.start();
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
// recover app
@ -296,9 +294,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
int containerMemory = 1024;
|
||||
Resource containerResource = Resource.newInstance(containerMemory, 1);
|
||||
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(schedulerConf);
|
||||
rm1 = new MockRM(schedulerConf, memStore);
|
||||
rm1 = new MockRM(schedulerConf);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
|
||||
|
@ -316,7 +312,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
rm1.clearQueueMetrics(app1);
|
||||
|
||||
// 3. Fail over (restart) RM.
|
||||
rm2 = new MockRM(schedulerConf, memStore);
|
||||
rm2 = new MockRM(schedulerConf, rm1.getRMStateStore());
|
||||
rm2.start();
|
||||
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
|
||||
// 4. Validate app is recovered post failover.
|
||||
|
@ -570,9 +566,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
public void testRMRestartWithRemovedQueue() throws Exception{
|
||||
conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
|
||||
conf.set(YarnConfiguration.YARN_ADMIN_ACL, "");
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
rm1 = new MockRM(conf, memStore);
|
||||
rm1 = new MockRM(conf);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
|
||||
|
@ -585,7 +579,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
csConf.setQueues(CapacitySchedulerConfiguration.ROOT, new String[]{QUEUE_DOESNT_EXIST});
|
||||
final String noQueue = CapacitySchedulerConfiguration.ROOT + "." + QUEUE_DOESNT_EXIST;
|
||||
csConf.setCapacity(noQueue, 100);
|
||||
rm2 = new MockRM(csConf,memStore);
|
||||
rm2 = new MockRM(csConf, rm1.getRMStateStore());
|
||||
|
||||
rm2.start();
|
||||
UserGroupInformation user2 = UserGroupInformation.createRemoteUser("user2");
|
||||
|
@ -622,9 +616,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
CapacitySchedulerConfiguration csConf =
|
||||
new CapacitySchedulerConfiguration(conf);
|
||||
setupQueueConfiguration(csConf);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(csConf);
|
||||
rm1 = new MockRM(csConf, memStore);
|
||||
rm1 = new MockRM(csConf);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
|
||||
|
@ -648,7 +640,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
csConf.set(PREFIX + "root.Default.QueueB.state", "STOPPED");
|
||||
|
||||
// Re-start RM
|
||||
rm2 = new MockRM(csConf, memStore);
|
||||
rm2 = new MockRM(csConf, rm1.getRMStateStore());
|
||||
rm2.start();
|
||||
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
|
||||
nm2.setResourceTrackerService(rm2.getResourceTrackerService());
|
||||
|
@ -783,9 +775,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
CapacitySchedulerConfiguration csConf =
|
||||
new CapacitySchedulerConfiguration(conf);
|
||||
setupQueueConfiguration(csConf);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(csConf);
|
||||
rm1 = new MockRM(csConf, memStore);
|
||||
rm1 = new MockRM(csConf);
|
||||
rm1.start();
|
||||
MockNM nm =
|
||||
new MockNM("127.1.1.1:4321", 8192, rm1.getResourceTrackerService());
|
||||
|
@ -798,7 +788,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
getYarnApplicationState(), YarnApplicationState.RUNNING);
|
||||
|
||||
// Take a copy of state store so that it can be reset to this state.
|
||||
RMState state = memStore.loadState();
|
||||
RMState state = rm1.getRMStateStore().loadState();
|
||||
|
||||
// Change scheduler config with child queues added to QueueB.
|
||||
csConf = new CapacitySchedulerConfiguration(conf);
|
||||
|
@ -806,7 +796,8 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
|
||||
String diags = "Application killed on recovery as it was submitted to " +
|
||||
"queue QueueB which is no longer a leaf queue after restart.";
|
||||
verifyAppRecoveryWithWrongQueueConfig(csConf, app, diags, memStore, state);
|
||||
verifyAppRecoveryWithWrongQueueConfig(csConf, app, diags,
|
||||
(MemoryRMStateStore) rm1.getRMStateStore(), state);
|
||||
}
|
||||
|
||||
//Test behavior of an app if queue is removed during recovery. Test case does
|
||||
|
@ -829,9 +820,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
CapacitySchedulerConfiguration csConf =
|
||||
new CapacitySchedulerConfiguration(conf);
|
||||
setupQueueConfiguration(csConf);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(csConf);
|
||||
rm1 = new MockRM(csConf, memStore);
|
||||
rm1 = new MockRM(csConf);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
|
||||
|
@ -860,7 +849,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
rm1.clearQueueMetrics(app2);
|
||||
|
||||
// Take a copy of state store so that it can be reset to this state.
|
||||
RMState state = memStore.loadState();
|
||||
RMState state = rm1.getRMStateStore().loadState();
|
||||
|
||||
// Set new configuration with QueueB removed.
|
||||
csConf = new CapacitySchedulerConfiguration(conf);
|
||||
|
@ -868,7 +857,8 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
|
||||
String diags = "Application killed on recovery as it was submitted to " +
|
||||
"queue QueueB which no longer exists after restart.";
|
||||
verifyAppRecoveryWithWrongQueueConfig(csConf, app2, diags, memStore, state);
|
||||
verifyAppRecoveryWithWrongQueueConfig(csConf, app2, diags,
|
||||
(MemoryRMStateStore) rm1.getRMStateStore(), state);
|
||||
}
|
||||
|
||||
private void checkParentQueue(ParentQueue parentQueue, int numContainers,
|
||||
|
@ -883,10 +873,8 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
// should not recover the containers that belong to the failed AM.
|
||||
@Test(timeout = 20000)
|
||||
public void testAMfailedBetweenRMRestart() throws Exception {
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
conf.setLong(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS, 0);
|
||||
memStore.init(conf);
|
||||
rm1 = new MockRM(conf, memStore);
|
||||
rm1 = new MockRM(conf);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
|
||||
|
@ -894,7 +882,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
RMApp app1 = rm1.submitApp(200);
|
||||
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
|
||||
|
||||
rm2 = new MockRM(conf, memStore);
|
||||
rm2 = new MockRM(conf, rm1.getRMStateStore());
|
||||
rm2.start();
|
||||
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
|
||||
|
||||
|
@ -937,9 +925,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
// recover containers for completed apps.
|
||||
@Test(timeout = 20000)
|
||||
public void testContainersNotRecoveredForCompletedApps() throws Exception {
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
rm1 = new MockRM(conf, memStore);
|
||||
rm1 = new MockRM(conf);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
|
||||
|
@ -948,7 +934,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
|
||||
MockRM.finishAMAndVerifyAppState(app1, rm1, nm1, am1);
|
||||
|
||||
rm2 = new MockRM(conf, memStore);
|
||||
rm2 = new MockRM(conf, rm1.getRMStateStore());
|
||||
rm2.start();
|
||||
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
|
||||
NMContainerStatus runningContainer =
|
||||
|
@ -975,11 +961,9 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
@Test (timeout = 600000)
|
||||
public void testAppReregisterOnRMWorkPreservingRestart() throws Exception {
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
// start RM
|
||||
rm1 = new MockRM(conf, memStore);
|
||||
rm1 = new MockRM(conf);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
|
||||
|
@ -993,7 +977,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
am0.registerAppAttempt();
|
||||
|
||||
// start new RM
|
||||
rm2 = new MockRM(conf, memStore);
|
||||
rm2 = new MockRM(conf, rm1.getRMStateStore());
|
||||
rm2.start();
|
||||
rm2.waitForState(app0.getApplicationId(), RMAppState.ACCEPTED);
|
||||
rm2.waitForState(am0.getApplicationAttemptId(), RMAppAttemptState.LAUNCHED);
|
||||
|
@ -1008,9 +992,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
|
||||
@Test (timeout = 30000)
|
||||
public void testAMContainerStatusWithRMRestart() throws Exception {
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
rm1 = new MockRM(conf, memStore);
|
||||
rm1 = new MockRM(conf);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
|
||||
|
@ -1025,7 +1007,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
attempt0.getMasterContainer().getId()).isAMContainer());
|
||||
|
||||
// Re-start RM
|
||||
rm2 = new MockRM(conf, memStore);
|
||||
rm2 = new MockRM(conf, rm1.getRMStateStore());
|
||||
rm2.start();
|
||||
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
|
||||
|
||||
|
@ -1044,9 +1026,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
@Test (timeout = 20000)
|
||||
public void testRecoverSchedulerAppAndAttemptSynchronously() throws Exception {
|
||||
// start RM
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
rm1 = new MockRM(conf, memStore);
|
||||
rm1 = new MockRM(conf);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
|
||||
|
@ -1056,7 +1036,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
RMApp app0 = rm1.submitApp(200);
|
||||
MockAM am0 = MockRM.launchAndRegisterAM(app0, rm1, nm1);
|
||||
|
||||
rm2 = new MockRM(conf, memStore);
|
||||
rm2 = new MockRM(conf, rm1.getRMStateStore());
|
||||
rm2.start();
|
||||
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
|
||||
// scheduler app/attempt is immediately available after RM is re-started.
|
||||
|
@ -1077,9 +1057,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
// container should not be recovered.
|
||||
@Test (timeout = 50000)
|
||||
public void testReleasedContainerNotRecovered() throws Exception {
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
rm1 = new MockRM(conf, memStore);
|
||||
rm1 = new MockRM(conf);
|
||||
MockNM nm1 = new MockNM("h1:1234", 15120, rm1.getResourceTrackerService());
|
||||
nm1.registerNode();
|
||||
rm1.start();
|
||||
|
@ -1089,7 +1067,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
|
||||
// Re-start RM
|
||||
conf.setInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 8000);
|
||||
rm2 = new MockRM(conf, memStore);
|
||||
rm2 = new MockRM(conf, rm1.getRMStateStore());
|
||||
rm2.start();
|
||||
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
|
||||
rm2.waitForState(app1.getApplicationId(), RMAppState.ACCEPTED);
|
||||
|
@ -1175,9 +1153,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
throws Exception {
|
||||
conf.setLong(
|
||||
YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS, 4000);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
rm1 = new MockRM(conf, memStore);
|
||||
rm1 = new MockRM(conf);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
|
||||
|
@ -1186,7 +1162,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
|
||||
|
||||
// Restart RM
|
||||
rm2 = new MockRM(conf, memStore);
|
||||
rm2 = new MockRM(conf, rm1.getRMStateStore());
|
||||
rm2.start();
|
||||
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
|
||||
nm1.registerNode();
|
||||
|
@ -1229,11 +1205,8 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
public void testRetriedFinishApplicationMasterRequest()
|
||||
throws Exception {
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
// start RM
|
||||
rm1 = new MockRM(conf, memStore);
|
||||
rm1 = new MockRM(conf);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
|
||||
|
@ -1253,7 +1226,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
|
||||
|
||||
// start new RM
|
||||
rm2 = new MockRM(conf, memStore);
|
||||
rm2 = new MockRM(conf, rm1.getRMStateStore());
|
||||
rm2.start();
|
||||
|
||||
am0.setAMRMProtocol(rm2.getApplicationMasterService(), rm2.getRMContext());
|
||||
|
@ -1266,9 +1239,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
"kerberos");
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
|
||||
UserGroupInformation.setConfiguration(conf);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
MockRM rm1 = new TestSecurityMockRM(conf, memStore);
|
||||
MockRM rm1 = new TestSecurityMockRM(conf);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
|
||||
|
@ -1276,7 +1247,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
RMApp app1 = rm1.submitApp(200);
|
||||
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
|
||||
|
||||
MockRM rm2 = new TestSecurityMockRM(conf, memStore) {
|
||||
MockRM rm2 = new TestSecurityMockRM(conf, rm1.getRMStateStore()) {
|
||||
protected DelegationTokenRenewer createDelegationTokenRenewer() {
|
||||
return new DelegationTokenRenewer() {
|
||||
@Override
|
||||
|
@ -1313,9 +1284,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
*/
|
||||
@Test (timeout = 30000)
|
||||
public void testAppFailToValidateResourceRequestOnRecovery() throws Exception{
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
rm1 = new MockRM(conf, memStore);
|
||||
rm1 = new MockRM(conf);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
|
||||
|
@ -1328,16 +1297,14 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 50);
|
||||
conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 100);
|
||||
|
||||
rm2 = new MockRM(conf, memStore);
|
||||
rm2 = new MockRM(conf, rm1.getRMStateStore());
|
||||
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
|
||||
rm2.start();
|
||||
}
|
||||
|
||||
@Test(timeout = 20000)
|
||||
public void testContainerCompleteMsgNotLostAfterAMFailedAndRMRestart() throws Exception {
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
rm1 = new MockRM(conf, memStore);
|
||||
rm1 = new MockRM(conf);
|
||||
rm1.start();
|
||||
|
||||
MockNM nm1 =
|
||||
|
@ -1370,7 +1337,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
MockAM am1 = MockRM.launchAndRegisterAM(app0, rm1, nm1);
|
||||
|
||||
// rm failover
|
||||
rm2 = new MockRM(conf, memStore);
|
||||
rm2 = new MockRM(conf, rm1.getRMStateStore());
|
||||
rm2.start();
|
||||
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
|
||||
|
||||
|
@ -1439,11 +1406,9 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
@Test(timeout = 600000)
|
||||
public void testUAMRecoveryOnRMWorkPreservingRestart() throws Exception {
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
// start RM
|
||||
rm1 = new MockRM(conf, memStore);
|
||||
rm1 = new MockRM(conf);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 15120, rm1.getResourceTrackerService());
|
||||
|
@ -1471,7 +1436,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
|
|||
Assert.assertFalse(conts.isEmpty());
|
||||
|
||||
// start new RM
|
||||
rm2 = new MockRM(conf, memStore);
|
||||
rm2 = new MockRM(conf, rm1.getRMStateStore());
|
||||
rm2.start();
|
||||
rm2.waitForState(app0.getApplicationId(), RMAppState.ACCEPTED);
|
||||
rm2.waitForState(am0.getApplicationAttemptId(), RMAppAttemptState.LAUNCHED);
|
||||
|
@@ -1521,7 +1486,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
recoveredApp.getFinalApplicationStatus());

// Restart RM once more to check UAM is not re-run
MockRM rm3 = new MockRM(conf, memStore);
MockRM rm3 = new MockRM(conf, rm1.getRMStateStore());
rm3.start();
recoveredApp = rm3.getRMContext().getRMApps().get(app0.getApplicationId());
Assert.assertEquals(RMAppState.FINISHED, recoveredApp.getState());
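The queue-reconfiguration cases in TestWorkPreservingRMRestart above share one shape: run an app on rm1, snapshot the store, change the CapacityScheduler layout, then let the file's own verifyAppRecoveryWithWrongQueueConfig helper restart an RM and assert the app is killed with a descriptive diagnostic. A rough sketch of that flow under the new wiring; this is a fragment of a test body, not a standalone program, it is simplified (the real tests submit the app to QueueB), and the diagnostic string is the one quoted in the hunks.

    // Fragment sketching the snapshot-and-verify flow (illustrative only).
    rm1 = new MockRM(csConf);
    rm1.start();
    MockNM nm1 =
        new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());
    nm1.registerNode();
    RMApp app = rm1.submitApp(200);
    MockRM.launchAndRegisterAM(app, rm1, nm1);

    // Take a copy of the store contents before the queue layout is changed.
    RMState state = rm1.getRMStateStore().loadState();

    // After editing csConf, recovery should reject the app with this message.
    String diags = "Application killed on recovery as it was submitted to " +
        "queue QueueB which no longer exists after restart.";
    verifyAppRecoveryWithWrongQueueConfig(csConf, app, diags,
        (MemoryRMStateStore) rm1.getRMStateStore(), state);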
@ -47,7 +47,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
|
|||
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
|
||||
|
@ -381,9 +380,7 @@ public class TestAMRestart {
|
|||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
|
||||
conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
|
||||
conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
MockRM rm1 = new MockRM(conf, memStore);
|
||||
MockRM rm1 = new MockRM(conf);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 8000, rm1.getResourceTrackerService());
|
||||
|
@ -405,7 +402,9 @@ public class TestAMRestart {
|
|||
Assert.assertTrue(! attempt1.shouldCountTowardsMaxAttemptRetry());
|
||||
rm1.waitForState(app1.getApplicationId(), RMAppState.ACCEPTED);
|
||||
ApplicationStateData appState =
|
||||
memStore.getState().getApplicationState().get(app1.getApplicationId());
|
||||
((MemoryRMStateStore) rm1.getRMStateStore()).getState()
|
||||
.getApplicationState().get(app1.getApplicationId());
|
||||
|
||||
// AM should be restarted even though max-am-attempt is 1.
|
||||
MockAM am2 =
|
||||
rm1.waitForNewAMToLaunchAndRegister(app1.getApplicationId(), 2, nm1);
|
||||
|
@ -508,9 +507,7 @@ public class TestAMRestart {
|
|||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
|
||||
conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
|
||||
conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
MockRM rm1 = new MockRM(conf, memStore);
|
||||
MockRM rm1 = new MockRM(conf);
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 8000, rm1.getResourceTrackerService());
|
||||
|
@ -548,10 +545,9 @@ public class TestAMRestart {
|
|||
|
||||
conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
MockRM rm1 = new MockRM(conf, memStore);
|
||||
MockRM rm1 = new MockRM(conf);
|
||||
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
|
||||
rm1.start();
|
||||
MockNM nm1 =
|
||||
new MockNM("127.0.0.1:1234", 8000, rm1.getResourceTrackerService());
|
||||
|
@ -630,10 +626,9 @@ public class TestAMRestart {
|
|||
conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
|
||||
// explicitly set max-am-retry count as 2.
|
||||
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
|
||||
MemoryRMStateStore memStore = new MemoryRMStateStore();
|
||||
memStore.init(conf);
|
||||
|
||||
MockRM rm1 = new MockRM(conf, memStore);
|
||||
MockRM rm1 = new MockRM(conf);
|
||||
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
|
||||
rm1.start();
|
||||
CapacityScheduler scheduler =
|
||||
(CapacityScheduler) rm1.getResourceScheduler();
|
||||
|
@@ -706,10 +701,8 @@ public class TestAMRestart {
conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
// explicitly set max-am-retry count as 2.
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
MemoryRMStateStore memStore = new MemoryRMStateStore();
memStore.init(conf);

MockRM rm1 = new MockRM(conf, memStore);
MockRM rm1 = new MockRM(conf);
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
rm1.start();
MockNM nm1 =
new MockNM("127.0.0.1:1234", 8000, rm1.getResourceTrackerService());
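In the TestAMRestart hunks, assertions that used to read from a hand-built store now drill into the store that MockRM created itself. A small sketch of that lookup, pieced together from the calls visible in the hunks; it is a fragment of a test body and relies on the imports already present in that file.

    // Fragment: inspect persisted application state through the RM's own store.
    MockRM rm1 = new MockRM(conf);
    rm1.start();
    MockNM nm1 =
        new MockNM("127.0.0.1:1234", 8000, rm1.getResourceTrackerService());
    nm1.registerNode();
    RMApp app1 = rm1.submitApp(200);

    // What a restarted RM would recover for this app, e.g. recorded attempts.
    ApplicationStateData appState =
        ((MemoryRMStateStore) rm1.getRMStateStore()).getState()
            .getApplicationState().get(app1.getApplicationId());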
@@ -56,6 +56,7 @@ import org.apache.hadoop.yarn.server.metrics.AppAttemptMetricsConstants;
import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
import org.apache.hadoop.yarn.server.metrics.ContainerMetricsConstants;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppMetrics;

@@ -98,10 +99,12 @@ public class TestSystemMetricsPublisherForV2 {
new Path(testRootDir.getAbsolutePath()), true);
}

ResourceManager rm = mock(ResourceManager.class);
RMContext rmContext = mock(RMContext.class);
rmAppsMapInContext = new ConcurrentHashMap<ApplicationId, RMApp>();
when(rmContext.getRMApps()).thenReturn(rmAppsMapInContext);
rmTimelineCollectorManager = new RMTimelineCollectorManager(rmContext);
when(rm.getRMContext()).thenReturn(rmContext);
rmTimelineCollectorManager = new RMTimelineCollectorManager(rm);
when(rmContext.getRMTimelineCollectorManager()).thenReturn(
rmTimelineCollectorManager);

@@ -113,7 +116,8 @@ public class TestSystemMetricsPublisherForV2 {

dispatcher.init(conf);
dispatcher.start();
metricsPublisher = new TimelineServiceV2Publisher(rmContext) {
metricsPublisher =
new TimelineServiceV2Publisher(rmTimelineCollectorManager) {
@Override
protected Dispatcher getDispatcher() {
return dispatcher;

@@ -162,7 +166,7 @@ public class TestSystemMetricsPublisherForV2 {
public void testSystemMetricPublisherInitialization() {
@SuppressWarnings("resource")
TimelineServiceV2Publisher publisher =
new TimelineServiceV2Publisher(mock(RMContext.class));
new TimelineServiceV2Publisher(mock(RMTimelineCollectorManager.class));
try {
Configuration conf = getTimelineV2Conf();
conf.setBoolean(YarnConfiguration.RM_PUBLISH_CONTAINER_EVENTS_ENABLED,

@@ -174,7 +178,8 @@ public class TestSystemMetricsPublisherForV2 {

publisher.stop();

publisher = new TimelineServiceV2Publisher(mock(RMContext.class));
publisher = new TimelineServiceV2Publisher(
mock(RMTimelineCollectorManager.class));
conf = getTimelineV2Conf();
publisher.init(conf);
assertTrue("Expected to have registered event handlers and set ready to "
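The hunks above change how the publisher under test is wired: TimelineServiceV2Publisher is now constructed from the RMTimelineCollectorManager, which in turn is built from a mocked ResourceManager whose RMContext is also a mock. A compact sketch of that setup as it looks after the change; it is a fragment of the test's setup code, and getTimelineV2Conf() is the file's own helper.

    // Fragment: publisher wiring after the change (illustrative only).
    ResourceManager rm = mock(ResourceManager.class);
    RMContext rmContext = mock(RMContext.class);
    when(rm.getRMContext()).thenReturn(rmContext);
    when(rmContext.getRMApps())
        .thenReturn(new ConcurrentHashMap<ApplicationId, RMApp>());

    RMTimelineCollectorManager collectorManager = new RMTimelineCollectorManager(rm);
    when(rmContext.getRMTimelineCollectorManager()).thenReturn(collectorManager);

    TimelineServiceV2Publisher publisher =
        new TimelineServiceV2Publisher(collectorManager);
    publisher.init(getTimelineV2Conf());
    publisher.start();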
@@ -164,9 +164,8 @@ public class TestApplicationLifetimeMonitor {
true);
conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());

MemoryRMStateStore memStore = new MemoryRMStateStore();
memStore.init(conf);
MockRM rm1 = new MockRM(conf, memStore);
MockRM rm1 = new MockRM(conf);
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
rm1.start();
MockNM nm1 =
new MockNM("127.0.0.1:1234", 8192, rm1.getResourceTrackerService());

@@ -235,8 +234,6 @@ public class TestApplicationLifetimeMonitor {
throws Exception {
MockRM rm1 = null;
try {
conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());

MemoryRMStateStore memStore = new MemoryRMStateStore() {
private int count = 0;
@@ -59,7 +59,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.MockRMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;

@@ -382,10 +381,8 @@ public class TestAbstractYarnScheduler extends ParameterizedSchedulerTestBase {
@Test(timeout = 10000)
public void testReleasedContainerIfAppAttemptisNull() throws Exception {
YarnConfiguration conf=getConf();
conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
MemoryRMStateStore memStore = new MemoryRMStateStore();
memStore.init(conf);
MockRM rm1 = new MockRM(conf, memStore);
conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
MockRM rm1 = new MockRM(conf);
try {
rm1.start();
MockNM nm1 =
@@ -21,8 +21,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.apache.hadoop.test.MockitoMaker.make;
import static org.apache.hadoop.test.MockitoMaker.stub;
import static org.junit.Assert.assertNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

@@ -226,8 +224,8 @@ public class TestQueueMetrics {

QueueMetrics parentMetrics =
QueueMetrics.forQueue(ms, parentQueueName, null, true, conf);
Queue parentQueue = make(stub(Queue.class).returning(parentMetrics).
from.getMetrics());
Queue parentQueue = mock(Queue.class);
when(parentQueue.getMetrics()).thenReturn(parentMetrics);
QueueMetrics metrics =
QueueMetrics.forQueue(ms, leafQueueName, parentQueue, true, conf);
MetricsSource parentQueueSource = queueSource(ms, parentQueueName);

@@ -272,8 +270,8 @@ public class TestQueueMetrics {

QueueMetrics parentMetrics =
QueueMetrics.forQueue(ms, parentQueueName, null, true, conf);
Queue parentQueue = make(stub(Queue.class).returning(parentMetrics).
from.getMetrics());
Queue parentQueue = mock(Queue.class);
when(parentQueue.getMetrics()).thenReturn(parentMetrics);
QueueMetrics metrics =
QueueMetrics.forQueue(ms, leafQueueName, parentQueue, true, conf);
MetricsSource parentQueueSource = queueSource(ms, parentQueueName);

@@ -359,8 +357,8 @@ public class TestQueueMetrics {

QueueMetrics p1Metrics =
QueueMetrics.forQueue(ms, p1, null, true, conf);
Queue parentQueue1 = make(stub(Queue.class).returning(p1Metrics).
from.getMetrics());
Queue parentQueue1 = mock(Queue.class);
when(parentQueue1.getMetrics()).thenReturn(p1Metrics);
QueueMetrics metrics =
QueueMetrics.forQueue(ms, leafQueueName, parentQueue1, true, conf);
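The TestQueueMetrics hunks replace the removed MockitoMaker make/stub idiom with plain Mockito. A one-to-one sketch of the translation, shown as a fragment; ms, conf, parentMetrics, and the queue names are the test's existing fixtures.

    // Old idiom (removed above, relied on org.apache.hadoop.test.MockitoMaker):
    //   Queue parentQueue = make(stub(Queue.class).returning(parentMetrics).
    //       from.getMetrics());

    // Plain Mockito equivalent used after this change:
    Queue parentQueue = mock(Queue.class);
    when(parentQueue.getMetrics()).thenReturn(parentMetrics);

    QueueMetrics metrics =
        QueueMetrics.forQueue(ms, leafQueueName, parentQueue, true, conf);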
@@ -26,7 +26,6 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;

@@ -42,8 +41,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
@@ -405,16 +402,11 @@ public class TestApplicationPriority {
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
conf.setInt(YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY, 10);

MemoryRMStateStore memStore = new MemoryRMStateStore();
memStore.init(conf);
RMState rmState = memStore.getState();
Map<ApplicationId, ApplicationStateData> rmAppState = rmState
.getApplicationState();

// PHASE 1: create state in an RM

// start RM
MockRM rm1 = new MockRM(conf, memStore);
MockRM rm1 = new MockRM(conf);
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
rm1.start();

MockNM nm1 = new MockNM("127.0.0.1:1234", 15120,

@@ -611,10 +603,8 @@ public class TestApplicationPriority {
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
conf.setInt(YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY, 10);

MemoryRMStateStore memStore = new MemoryRMStateStore();
memStore.init(conf);

MockRM rm1 = new MockRM(conf, memStore);
MockRM rm1 = new MockRM(conf);
MemoryRMStateStore memStore = (MemoryRMStateStore) rm1.getRMStateStore();
rm1.start();

MockNM nm1 =
Some files were not shown because too many files have changed in this diff.