Merge from trunk to branch.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1618700 13f79535-47bb-0310-9956-ffa450edef68
Andrew Wang 2014-08-18 18:41:31 +00:00
commit 0cc08f6da4
167 changed files with 9466 additions and 3473 deletions

View File

@@ -202,6 +202,10 @@ Trunk (Unreleased)
 HADOOP-10224. JavaKeyStoreProvider has to protect against corrupting
 underlying store. (asuresh via tucu)
+HADOOP-10770. KMS add delegation token support. (tucu)
+HADOOP-10698. KMS, add proxyuser support. (tucu)
 BUG FIXES
 HADOOP-9451. Fault single-layer config if node group topology is enabled.
@@ -427,6 +431,9 @@ Trunk (Unreleased)
 HADOOP-10862. Miscellaneous trivial corrections to KMS classes.
 (asuresh via tucu)
+HADOOP-10967. Improve DefaultCryptoExtension#generateEncryptedKey
+performance. (hitliuyi via tucu)
 OPTIMIZATIONS
 HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -502,8 +509,31 @@ Release 2.6.0 - UNRELEASED
 HADOOP-10835. Implement HTTP proxyuser support in HTTP authentication
 client/server libraries. (tucu)
+HADOOP-10820. Throw an exception in GenericOptionsParser when passed
+an empty Path. (Alex Holmes and Zhihai Xu via wang)
+HADOOP-10281. Create a scheduler, which assigns schedulables a priority
+level. (Chris Li via Arpit Agarwal)
+HADOOP-8944. Shell command fs -count should include human readable option
+(Jonathan Allen via aw)
+HADOOP-10231. Add some components in Native Libraries document (Akira
+AJISAKA via aw)
+HADOOP-10650. Add ability to specify a reverse ACL (black list) of users
+and groups. (Benoy Antony via Arpit Agarwal)
+HADOOP-10335. An ip whilelist based implementation to resolve Sasl
+properties per connection. (Benoy Antony via Arpit Agarwal)
+HADOOP-10975. org.apache.hadoop.util.DataChecksum should support calculating
+checksums in native code (James Thomas via Colin Patrick McCabe)
 OPTIMIZATIONS
+HADOOP-10838. Byte array native checksumming. (James Thomas via todd)
 BUG FIXES
 HADOOP-10781. Unportable getgrouplist() usage breaks FreeBSD (Dmitry
@@ -560,6 +590,31 @@ Release 2.6.0 - UNRELEASED
 HADOOP-10402. Configuration.getValByRegex does not substitute for
 variables. (Robert Kanter via kasha)
+HADOOP-10851. NetgroupCache does not remove group memberships. (Benoy
+Antony via Arpit Agarwal)
+HADOOP-10962. Flags for posix_fadvise are not valid in some architectures
+(David Villegas via Colin Patrick McCabe)
+HADOOP-10966. Hadoop Common native compilation broken in windows.
+(David Villegas via Arpit Agarwal)
+HADOOP-10843. TestGridmixRecord unit tests failure on PowerPC (Jinghui Wang
+via Colin Patrick McCabe)
+HADOOP-10121. Fix javadoc spelling for HadoopArchives#writeTopLevelDirs
+(Akira AJISAKA via aw)
+HADOOP-10964. Small fix for NetworkTopologyWithNodeGroup#sortByDistance.
+(Yi Liu via wang)
+HADOOP-10059. RPC authentication and authorization metrics overflow to
+negative values on busy clusters (Tsuyoshi OZAWA and Akira AJISAKA
+via jlowe)
+HADOOP-10973. Native Libraries Guide contains format error. (Peter Klavins
+via Arpit Agarwal)
 Release 2.5.0 - UNRELEASED
 INCOMPATIBLE CHANGES

View File

@@ -219,6 +219,13 @@ public class KeyProviderCryptoExtension extends
   private static class DefaultCryptoExtension implements CryptoExtension {
     private final KeyProvider keyProvider;
+    private static final ThreadLocal<SecureRandom> RANDOM =
+        new ThreadLocal<SecureRandom>() {
+          @Override
+          protected SecureRandom initialValue() {
+            return new SecureRandom();
+          }
+        };
     private DefaultCryptoExtension(KeyProvider keyProvider) {
       this.keyProvider = keyProvider;
@@ -233,10 +240,10 @@
           "No KeyVersion exists for key '%s' ", encryptionKeyName);
       // Generate random bytes for new key and IV
       Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
-      SecureRandom random = SecureRandom.getInstance("SHA1PRNG");
       final byte[] newKey = new byte[encryptionKey.getMaterial().length];
-      random.nextBytes(newKey);
+      RANDOM.get().nextBytes(newKey);
-      final byte[] iv = random.generateSeed(cipher.getBlockSize());
+      final byte[] iv = new byte[cipher.getBlockSize()];
+      RANDOM.get().nextBytes(iv);
       // Encryption key IV is derived from new key's IV
       final byte[] encryptionIV = EncryptedKeyVersion.deriveIV(iv);
       // Encrypt the new key
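The change above (HADOOP-10967) drops the per-call SecureRandom.getInstance("SHA1PRNG") in favour of one SecureRandom per thread, and fills the IV with nextBytes() instead of the much slower generateSeed(). A minimal standalone sketch of that pattern follows; the class and method names are illustrative, not Hadoop APIs.

import java.security.SecureRandom;

public class PerThreadRandom {
  // One SecureRandom per thread: avoids recreating the PRNG on every call and
  // avoids contention on a single shared instance.
  private static final ThreadLocal<SecureRandom> RANDOM =
      new ThreadLocal<SecureRandom>() {
        @Override
        protected SecureRandom initialValue() {
          return new SecureRandom();
        }
      };

  // nextBytes() is sufficient for key/IV material here; generateSeed() asks the
  // underlying seed source and is far more expensive.
  public static byte[] randomBytes(int len) {
    byte[] buf = new byte[len];
    RANDOM.get().nextBytes(buf);
    return buf;
  }

  public static void main(String[] args) {
    System.out.println(randomBytes(16).length); // prints 16
  }
}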

View File

@@ -20,6 +20,8 @@ package org.apache.hadoop.crypto.key;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
+import java.io.IOException;
 /**
  * A KeyProvider extension with the ability to add a renewer's Delegation
  * Tokens to the provided Credentials.
@@ -45,9 +47,10 @@ public class KeyProviderDelegationTokenExtension extends
      * @param renewer the user allowed to renew the delegation tokens
      * @param credentials cache in which to add new delegation tokens
      * @return list of new delegation tokens
+     * @throws IOException thrown if IOException if an IO error occurs.
      */
     public Token<?>[] addDelegationTokens(final String renewer,
-        Credentials credentials);
+        Credentials credentials) throws IOException;
   }
 /**
@@ -76,9 +79,10 @@ public class KeyProviderDelegationTokenExtension extends
    * @param renewer the user allowed to renew the delegation tokens
    * @param credentials cache in which to add new delegation tokens
    * @return list of new delegation tokens
+   * @throws IOException thrown if IOException if an IO error occurs.
    */
   public Token<?>[] addDelegationTokens(final String renewer,
-      Credentials credentials) {
+      Credentials credentials) throws IOException {
     return getExtension().addDelegationTokens(renewer, credentials);
   }

View File

@@ -22,15 +22,18 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
+import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.ProviderUtils;
-import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
-import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
 import org.apache.hadoop.security.ssl.SSLFactory;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
 import org.apache.http.client.utils.URIBuilder;
 import org.codehaus.jackson.map.ObjectMapper;
@@ -50,6 +53,7 @@ import java.net.URL;
 import java.net.URLEncoder;
 import java.security.GeneralSecurityException;
 import java.security.NoSuchAlgorithmException;
+import java.security.PrivilegedExceptionAction;
 import java.text.MessageFormat;
 import java.util.ArrayList;
 import java.util.Date;
@@ -69,7 +73,10 @@ import com.google.common.base.Preconditions;
  * KMS client <code>KeyProvider</code> implementation.
  */
 @InterfaceAudience.Private
-public class KMSClientProvider extends KeyProvider implements CryptoExtension {
+public class KMSClientProvider extends KeyProvider implements CryptoExtension,
+    KeyProviderDelegationTokenExtension.DelegationTokenExtension {
+  public static final String TOKEN_KIND = "kms-dt";
   public static final String SCHEME_NAME = "kms";
@@ -229,6 +236,8 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension {
   private String kmsUrl;
   private SSLFactory sslFactory;
   private ConnectionConfigurator configurator;
+  private DelegationTokenAuthenticatedURL.Token authToken;
+  private UserGroupInformation loginUgi;
   @Override
   public String toString() {
@@ -309,6 +318,8 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension {
             CommonConfigurationKeysPublic.
                 KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
         new EncryptedQueueRefiller());
+    authToken = new DelegationTokenAuthenticatedURL.Token();
+    loginUgi = UserGroupInformation.getCurrentUser();
   }
   private String createServiceURL(URL url) throws IOException {
@@ -325,12 +336,14 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension {
     try {
       StringBuilder sb = new StringBuilder();
       sb.append(kmsUrl);
-      sb.append(collection);
-      if (resource != null) {
-        sb.append("/").append(URLEncoder.encode(resource, UTF8));
-      }
-      if (subResource != null) {
-        sb.append("/").append(subResource);
+      if (collection != null) {
+        sb.append(collection);
+        if (resource != null) {
+          sb.append("/").append(URLEncoder.encode(resource, UTF8));
+          if (subResource != null) {
+            sb.append("/").append(subResource);
+          }
+        }
       }
       URIBuilder uriBuilder = new URIBuilder(sb.toString());
       if (parameters != null) {
@@ -365,14 +378,29 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension {
     return conn;
   }
-  private HttpURLConnection createConnection(URL url, String method)
+  private HttpURLConnection createConnection(final URL url, String method)
       throws IOException {
     HttpURLConnection conn;
     try {
-      AuthenticatedURL authUrl = new AuthenticatedURL(new PseudoAuthenticator(),
-          configurator);
-      conn = authUrl.openConnection(url, new AuthenticatedURL.Token());
-    } catch (AuthenticationException ex) {
+      // if current UGI is different from UGI at constructor time, behave as
+      // proxyuser
+      UserGroupInformation currentUgi = UserGroupInformation.getCurrentUser();
+      final String doAsUser =
+          (loginUgi.getShortUserName().equals(currentUgi.getShortUserName()))
+          ? null : currentUgi.getShortUserName();
+      // creating the HTTP connection using the current UGI at constructor time
+      conn = loginUgi.doAs(new PrivilegedExceptionAction<HttpURLConnection>() {
+        @Override
+        public HttpURLConnection run() throws Exception {
+          DelegationTokenAuthenticatedURL authUrl =
+              new DelegationTokenAuthenticatedURL(configurator);
+          return authUrl.openConnection(url, authToken, doAsUser);
+        }
+      });
+    } catch (IOException ex) {
+      throw ex;
+    } catch (Exception ex) {
       throw new IOException(ex);
     }
     conn.setUseCaches(false);
@@ -403,20 +431,27 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension {
     if (status != expected) {
       InputStream es = null;
       try {
-        es = conn.getErrorStream();
-        ObjectMapper mapper = new ObjectMapper();
-        Map json = mapper.readValue(es, Map.class);
-        String exClass = (String) json.get(
-            KMSRESTConstants.ERROR_EXCEPTION_JSON);
-        String exMsg = (String)
-            json.get(KMSRESTConstants.ERROR_MESSAGE_JSON);
         Exception toThrow;
-        try {
-          ClassLoader cl = KMSClientProvider.class.getClassLoader();
-          Class klass = cl.loadClass(exClass);
-          Constructor constr = klass.getConstructor(String.class);
-          toThrow = (Exception) constr.newInstance(exMsg);
-        } catch (Exception ex) {
+        String contentType = conn.getHeaderField(CONTENT_TYPE);
+        if (contentType != null &&
+            contentType.toLowerCase().startsWith(APPLICATION_JSON_MIME)) {
+          es = conn.getErrorStream();
+          ObjectMapper mapper = new ObjectMapper();
+          Map json = mapper.readValue(es, Map.class);
+          String exClass = (String) json.get(
+              KMSRESTConstants.ERROR_EXCEPTION_JSON);
+          String exMsg = (String)
+              json.get(KMSRESTConstants.ERROR_MESSAGE_JSON);
+          try {
+            ClassLoader cl = KMSClientProvider.class.getClassLoader();
+            Class klass = cl.loadClass(exClass);
+            Constructor constr = klass.getConstructor(String.class);
+            toThrow = (Exception) constr.newInstance(exMsg);
+          } catch (Exception ex) {
+            toThrow = new IOException(MessageFormat.format(
+                "HTTP status [{0}], {1}", status, conn.getResponseMessage()));
+          }
+        } else {
           toThrow = new IOException(MessageFormat.format(
               "HTTP status [{0}], {1}", status, conn.getResponseMessage()));
         }
@@ -729,4 +764,25 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension {
     }
   }
+  @Override
+  public Token<?>[] addDelegationTokens(String renewer,
+      Credentials credentials) throws IOException {
+    Token<?>[] tokens;
+    URL url = createURL(null, null, null, null);
+    DelegationTokenAuthenticatedURL authUrl =
+        new DelegationTokenAuthenticatedURL(configurator);
+    try {
+      Token<?> token = authUrl.getDelegationToken(url, authToken, renewer);
+      if (token != null) {
+        credentials.addToken(token.getService(), token);
+        tokens = new Token<?>[] { token };
+      } else {
+        throw new IOException("Got NULL as delegation token");
+      }
+    } catch (AuthenticationException ex) {
+      throw new IOException(ex);
+    }
+    return tokens;
+  }
 }
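For context on the proxy-user behaviour added above (HADOOP-10698): the provider keeps the UGI captured at construction time as loginUgi, opens connections as that user, and passes a doAs parameter whenever the caller's UGI differs. A hedged usage sketch; the user name, key name and KMS URI below are illustrative, not taken from this commit.

import java.net.URI;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.security.UserGroupInformation;

public class KmsProxyUserSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Created under the login user; this UGI becomes the provider's loginUgi.
    final KeyProvider provider =
        KeyProviderFactory.get(new URI("kms://http@kms-host:16000/kms"), conf);

    // Requests made inside alice's doAs() are sent over the login user's
    // connection with doAs=alice, so the KMS authorizes her as the effective user.
    UserGroupInformation alice = UserGroupInformation.createProxyUser(
        "alice", UserGroupInformation.getCurrentUser());
    KeyProvider.Metadata md = alice.doAs(
        new PrivilegedExceptionAction<KeyProvider.Metadata>() {
          @Override
          public KeyProvider.Metadata run() throws Exception {
            return provider.getMetadata("key1");
          }
        });
    System.out.println(md);
  }
}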

View File

@@ -134,6 +134,9 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
       HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL =
       "security.service.authorization.default.acl";
   public static final String
+      HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_BLOCKED_ACL =
+      "security.service.authorization.default.acl.blocked";
+  public static final String
       HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_POLICY =
       "security.refresh.policy.protocol.acl";
   public static final String

View File

@@ -24,6 +24,7 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.util.StringUtils;
 /** Store the summary of a content (a directory or a file). */
 @InterfaceAudience.Public
@@ -102,7 +103,7 @@ public class ContentSummary implements Writable{
    * <----12----> <----12----> <-------18------->
    * DIR_COUNT FILE_COUNT CONTENT_SIZE FILE_NAME
    */
-  private static final String STRING_FORMAT = "%12d %12d %18d ";
+  private static final String STRING_FORMAT = "%12s %12s %18s ";
   /**
    * Output format:
    * <----12----> <----15----> <----15----> <----15----> <----12----> <----12----> <-------18------->
@@ -117,7 +118,7 @@ public class ContentSummary implements Writable{
   private static final String QUOTA_HEADER = String.format(
       QUOTA_STRING_FORMAT + SPACE_QUOTA_STRING_FORMAT,
-      "quota", "remaining quota", "space quota", "reamaining quota") +
+      "name quota", "rem name quota", "space quota", "rem space quota") +
       HEADER;
   /** Return the header of the output.
@@ -139,11 +140,25 @@ public class ContentSummary implements Writable{
   /** Return the string representation of the object in the output format.
    * if qOption is false, output directory count, file count, and content size;
    * if qOption is true, output quota and remaining quota as well.
    *
    * @param qOption a flag indicating if quota needs to be printed or not
    * @return the string representation of the object
    */
   public String toString(boolean qOption) {
+    return toString(qOption, false);
+  }
+
+  /** Return the string representation of the object in the output format.
+   * if qOption is false, output directory count, file count, and content size;
+   * if qOption is true, output quota and remaining quota as well.
+   * if hOption is false file sizes are returned in bytes
+   * if hOption is true file sizes are returned in human readable
+   *
+   * @param qOption a flag indicating if quota needs to be printed or not
+   * @param hOption a flag indicating if human readable output if to be used
+   * @return the string representation of the object
+   */
+  public String toString(boolean qOption, boolean hOption) {
     String prefix = "";
     if (qOption) {
       String quotaStr = "none";
@@ -152,19 +167,32 @@
       String spaceQuotaRem = "inf";
       if (quota>0) {
-        quotaStr = Long.toString(quota);
-        quotaRem = Long.toString(quota-(directoryCount+fileCount));
+        quotaStr = formatSize(quota, hOption);
+        quotaRem = formatSize(quota-(directoryCount+fileCount), hOption);
       }
       if (spaceQuota>0) {
-        spaceQuotaStr = Long.toString(spaceQuota);
-        spaceQuotaRem = Long.toString(spaceQuota - spaceConsumed);
+        spaceQuotaStr = formatSize(spaceQuota, hOption);
+        spaceQuotaRem = formatSize(spaceQuota - spaceConsumed, hOption);
       }
       prefix = String.format(QUOTA_STRING_FORMAT + SPACE_QUOTA_STRING_FORMAT,
           quotaStr, quotaRem, spaceQuotaStr, spaceQuotaRem);
     }
-    return prefix + String.format(STRING_FORMAT, directoryCount,
-        fileCount, length);
+    return prefix + String.format(STRING_FORMAT,
+        formatSize(directoryCount, hOption),
+        formatSize(fileCount, hOption),
+        formatSize(length, hOption));
+  }
+
+  /**
+   * Formats a size to be human readable or in bytes
+   * @param size value to be formatted
+   * @param humanReadable flag indicating human readable or not
+   * @return String representation of the size
+   */
+  private String formatSize(long size, boolean humanReadable) {
+    return humanReadable
+      ? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
+      : String.valueOf(size);
   }
 }
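The formatSize() helper above switches between a raw byte count and StringUtils.TraditionalBinaryPrefix output depending on the -h flag. A small illustration under that assumption; the class name is made up for the example, and the exact human-readable string depends on long2String's rounding.

import org.apache.hadoop.util.StringUtils;

public class SizeFormatDemo {
  static String format(long size, boolean humanReadable) {
    return humanReadable
        ? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
        : String.valueOf(size);
  }

  public static void main(String[] args) {
    long size = 3L * 1024 * 1024 * 1024;      // 3 GiB in bytes
    System.out.println(format(size, false));  // 3221225472
    System.out.println(format(size, true));   // expected to print something like "3 G"
  }
}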

View File

@@ -42,16 +42,22 @@ public class Count extends FsCommand {
     factory.addClass(Count.class, "-count");
   }
+  private static final String OPTION_QUOTA = "q";
+  private static final String OPTION_HUMAN = "h";
   public static final String NAME = "count";
-  public static final String USAGE = "[-q] <path> ...";
+  public static final String USAGE =
+      "[-" + OPTION_QUOTA + "] [-" + OPTION_HUMAN + "] <path> ...";
   public static final String DESCRIPTION =
       "Count the number of directories, files and bytes under the paths\n" +
       "that match the specified file pattern. The output columns are:\n" +
       "DIR_COUNT FILE_COUNT CONTENT_SIZE FILE_NAME or\n" +
       "QUOTA REMAINING_QUOTA SPACE_QUOTA REMAINING_SPACE_QUOTA \n" +
-      " DIR_COUNT FILE_COUNT CONTENT_SIZE FILE_NAME";
+      " DIR_COUNT FILE_COUNT CONTENT_SIZE FILE_NAME\n" +
+      "The -h option shows file sizes in human readable format.";
   private boolean showQuotas;
+  private boolean humanReadable;
   /** Constructor */
   public Count() {}
@@ -70,17 +76,37 @@
   @Override
   protected void processOptions(LinkedList<String> args) {
-    CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, "q");
+    CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE,
+        OPTION_QUOTA, OPTION_HUMAN);
     cf.parse(args);
     if (args.isEmpty()) { // default path is the current working directory
       args.add(".");
     }
-    showQuotas = cf.getOpt("q");
+    showQuotas = cf.getOpt(OPTION_QUOTA);
+    humanReadable = cf.getOpt(OPTION_HUMAN);
   }
   @Override
   protected void processPath(PathData src) throws IOException {
     ContentSummary summary = src.fs.getContentSummary(src.path);
-    out.println(summary.toString(showQuotas) + src);
+    out.println(summary.toString(showQuotas, isHumanReadable()) + src);
+  }
+
+  /**
+   * Should quotas get shown as part of the report?
+   * @return if quotas should be shown then true otherwise false
+   */
+  @InterfaceAudience.Private
+  boolean isShowQuotas() {
+    return showQuotas;
+  }
+
+  /**
+   * Should sizes be shown in human readable format rather than bytes?
+   * @return true if human readable format
+   */
+  @InterfaceAudience.Private
+  boolean isHumanReadable() {
+    return humanReadable;
   }
 }
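The new -h flag can be exercised programmatically through FsShell as well as from the command line. A hedged sketch, with an illustrative path, equivalent to running "hadoop fs -count -q -h /user/alice":

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class CountHumanReadable {
  public static void main(String[] args) throws Exception {
    // Runs the shell command "-count -q -h /user/alice" and prints the report
    // with quota columns and human-readable sizes.
    int rc = ToolRunner.run(new FsShell(new Configuration()),
        new String[] {"-count", "-q", "-h", "/user/alice"});
    System.exit(rc);
  }
}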

View File

@@ -0,0 +1,522 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import java.lang.ref.WeakReference;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.metrics2.util.MBeans;
import org.codehaus.jackson.map.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
/**
* The decay RPC scheduler counts incoming requests in a map, then
* decays the counts at a fixed time interval. The scheduler is optimized
* for large periods (on the order of seconds), as it offloads work to the
* decay sweep.
*/
public class DecayRpcScheduler implements RpcScheduler, DecayRpcSchedulerMXBean {
/**
* Period controls how many milliseconds between each decay sweep.
*/
public static final String IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY =
"faircallqueue.decay-scheduler.period-ms";
public static final long IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_DEFAULT =
5000L;
/**
* Decay factor controls how much each count is suppressed by on each sweep.
* Valid numbers are > 0 and < 1. Decay factor works in tandem with period
* to control how long the scheduler remembers an identity.
*/
public static final String IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY =
"faircallqueue.decay-scheduler.decay-factor";
public static final double IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_DEFAULT =
0.5;
/**
* Thresholds are specified as integer percentages, and specify which usage
* range each queue will be allocated to. For instance, specifying the list
* 10, 40, 80
* implies 4 queues, with
* - q3 from 80% up
* - q2 from 40 up to 80
* - q1 from 10 up to 40
* - q0 otherwise.
*/
public static final String IPC_CALLQUEUE_DECAYSCHEDULER_THRESHOLDS_KEY =
"faircallqueue.decay-scheduler.thresholds";
// Specifies the identity to use when the IdentityProvider cannot handle
// a schedulable.
public static final String DECAYSCHEDULER_UNKNOWN_IDENTITY =
"IdentityProvider.Unknown";
public static final Log LOG = LogFactory.getLog(DecayRpcScheduler.class);
// Track the number of calls for each schedulable identity
private final ConcurrentHashMap<Object, AtomicLong> callCounts =
new ConcurrentHashMap<Object, AtomicLong>();
// Should be the sum of all AtomicLongs in callCounts
private final AtomicLong totalCalls = new AtomicLong();
// Pre-computed scheduling decisions during the decay sweep are
// atomically swapped in as a read-only map
private final AtomicReference<Map<Object, Integer>> scheduleCacheRef =
new AtomicReference<Map<Object, Integer>>();
// Tune the behavior of the scheduler
private final long decayPeriodMillis; // How long between each tick
private final double decayFactor; // nextCount = currentCount / decayFactor
private final int numQueues; // affects scheduling decisions, from 0 to numQueues - 1
private final double[] thresholds;
private final IdentityProvider identityProvider;
/**
* This TimerTask will call decayCurrentCounts until
* the scheduler has been garbage collected.
*/
public static class DecayTask extends TimerTask {
private WeakReference<DecayRpcScheduler> schedulerRef;
private Timer timer;
public DecayTask(DecayRpcScheduler scheduler, Timer timer) {
this.schedulerRef = new WeakReference<DecayRpcScheduler>(scheduler);
this.timer = timer;
}
@Override
public void run() {
DecayRpcScheduler sched = schedulerRef.get();
if (sched != null) {
sched.decayCurrentCounts();
} else {
// Our scheduler was garbage collected since it is no longer in use,
// so we should terminate the timer as well
timer.cancel();
timer.purge();
}
}
}
/**
* Create a decay scheduler.
* @param numQueues number of queues to schedule for
* @param ns config prefix, so that we can configure multiple schedulers
* in a single instance.
* @param conf configuration to use.
*/
public DecayRpcScheduler(int numQueues, String ns, Configuration conf) {
if (numQueues < 1) {
throw new IllegalArgumentException("number of queues must be > 0");
}
this.numQueues = numQueues;
this.decayFactor = parseDecayFactor(ns, conf);
this.decayPeriodMillis = parseDecayPeriodMillis(ns, conf);
this.identityProvider = this.parseIdentityProvider(ns, conf);
this.thresholds = parseThresholds(ns, conf, numQueues);
// Setup delay timer
Timer timer = new Timer();
DecayTask task = new DecayTask(this, timer);
timer.scheduleAtFixedRate(task, 0, this.decayPeriodMillis);
MetricsProxy prox = MetricsProxy.getInstance(ns);
prox.setDelegate(this);
}
// Load configs
private IdentityProvider parseIdentityProvider(String ns, Configuration conf) {
List<IdentityProvider> providers = conf.getInstances(
ns + "." + CommonConfigurationKeys.IPC_CALLQUEUE_IDENTITY_PROVIDER_KEY,
IdentityProvider.class);
if (providers.size() < 1) {
LOG.info("IdentityProvider not specified, " +
"defaulting to UserIdentityProvider");
return new UserIdentityProvider();
}
return providers.get(0); // use the first
}
private static double parseDecayFactor(String ns, Configuration conf) {
double factor = conf.getDouble(ns + "." +
IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY,
IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_DEFAULT
);
if (factor <= 0 || factor >= 1) {
throw new IllegalArgumentException("Decay Factor " +
"must be between 0 and 1");
}
return factor;
}
private static long parseDecayPeriodMillis(String ns, Configuration conf) {
long period = conf.getLong(ns + "." +
IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY,
IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_DEFAULT
);
if (period <= 0) {
throw new IllegalArgumentException("Period millis must be >= 0");
}
return period;
}
private static double[] parseThresholds(String ns, Configuration conf,
int numQueues) {
int[] percentages = conf.getInts(ns + "." +
IPC_CALLQUEUE_DECAYSCHEDULER_THRESHOLDS_KEY);
if (percentages.length == 0) {
return getDefaultThresholds(numQueues);
} else if (percentages.length != numQueues-1) {
throw new IllegalArgumentException("Number of thresholds should be " +
(numQueues-1) + ". Was: " + percentages.length);
}
// Convert integer percentages to decimals
double[] decimals = new double[percentages.length];
for (int i = 0; i < percentages.length; i++) {
decimals[i] = percentages[i] / 100.0;
}
return decimals;
}
/**
* Generate default thresholds if user did not specify. Strategy is
* to halve each time, since queue usage tends to be exponential.
* So if numQueues is 4, we would generate: double[]{0.125, 0.25, 0.5}
* which specifies the boundaries between each queue's usage.
* @param numQueues number of queues to compute for
* @return array of boundaries of length numQueues - 1
*/
private static double[] getDefaultThresholds(int numQueues) {
double[] ret = new double[numQueues - 1];
double div = Math.pow(2, numQueues - 1);
for (int i = 0; i < ret.length; i++) {
ret[i] = Math.pow(2, i)/div;
}
return ret;
}
/**
* Decay the stored counts for each user and clean as necessary.
* This method should be called periodically in order to keep
* counts current.
*/
private void decayCurrentCounts() {
long total = 0;
Iterator<Map.Entry<Object, AtomicLong>> it =
callCounts.entrySet().iterator();
while (it.hasNext()) {
Map.Entry<Object, AtomicLong> entry = it.next();
AtomicLong count = entry.getValue();
// Compute the next value by reducing it by the decayFactor
long currentValue = count.get();
long nextValue = (long)(currentValue * decayFactor);
total += nextValue;
count.set(nextValue);
if (nextValue == 0) {
// We will clean up unused keys here. An interesting optimization might
// be to have an upper bound on keyspace in callCounts and only
// clean once we pass it.
it.remove();
}
}
// Update the total so that we remain in sync
totalCalls.set(total);
// Now refresh the cache of scheduling decisions
recomputeScheduleCache();
}
/**
* Update the scheduleCache to match current conditions in callCounts.
*/
private void recomputeScheduleCache() {
Map<Object, Integer> nextCache = new HashMap<Object, Integer>();
for (Map.Entry<Object, AtomicLong> entry : callCounts.entrySet()) {
Object id = entry.getKey();
AtomicLong value = entry.getValue();
long snapshot = value.get();
int computedLevel = computePriorityLevel(snapshot);
nextCache.put(id, computedLevel);
}
// Swap in to activate
scheduleCacheRef.set(Collections.unmodifiableMap(nextCache));
}
/**
* Get the number of occurrences and increment atomically.
* @param identity the identity of the user to increment
* @return the value before incrementation
*/
private long getAndIncrement(Object identity) throws InterruptedException {
// We will increment the count, or create it if no such count exists
AtomicLong count = this.callCounts.get(identity);
if (count == null) {
// Create the count since no such count exists.
count = new AtomicLong(0);
// Put it in, or get the AtomicInteger that was put in by another thread
AtomicLong otherCount = callCounts.putIfAbsent(identity, count);
if (otherCount != null) {
count = otherCount;
}
}
// Update the total
totalCalls.getAndIncrement();
// At this point value is guaranteed to be not null. It may however have
// been clobbered from callCounts. Nonetheless, we return what
// we have.
return count.getAndIncrement();
}
/**
* Given the number of occurrences, compute a scheduling decision.
* @param occurrences how many occurrences
* @return scheduling decision from 0 to numQueues - 1
*/
private int computePriorityLevel(long occurrences) {
long totalCallSnapshot = totalCalls.get();
double proportion = 0;
if (totalCallSnapshot > 0) {
proportion = (double) occurrences / totalCallSnapshot;
}
// Start with low priority queues, since they will be most common
for(int i = (numQueues - 1); i > 0; i--) {
if (proportion >= this.thresholds[i - 1]) {
return i; // We've found our queue number
}
}
// If we get this far, we're at queue 0
return 0;
}
/**
* Returns the priority level for a given identity by first trying the cache,
* then computing it.
* @param identity an object responding to toString and hashCode
* @return integer scheduling decision from 0 to numQueues - 1
*/
private int cachedOrComputedPriorityLevel(Object identity) {
try {
long occurrences = this.getAndIncrement(identity);
// Try the cache
Map<Object, Integer> scheduleCache = scheduleCacheRef.get();
if (scheduleCache != null) {
Integer priority = scheduleCache.get(identity);
if (priority != null) {
return priority;
}
}
// Cache was no good, compute it
return computePriorityLevel(occurrences);
} catch (InterruptedException ie) {
LOG.warn("Caught InterruptedException, returning low priority queue");
return numQueues - 1;
}
}
/**
* Compute the appropriate priority for a schedulable based on past requests.
* @param obj the schedulable obj to query and remember
* @return the queue index which we recommend scheduling in
*/
@Override
public int getPriorityLevel(Schedulable obj) {
// First get the identity
String identity = this.identityProvider.makeIdentity(obj);
if (identity == null) {
// Identity provider did not handle this
identity = DECAYSCHEDULER_UNKNOWN_IDENTITY;
}
return cachedOrComputedPriorityLevel(identity);
}
// For testing
@VisibleForTesting
public double getDecayFactor() { return decayFactor; }
@VisibleForTesting
public long getDecayPeriodMillis() { return decayPeriodMillis; }
@VisibleForTesting
public double[] getThresholds() { return thresholds; }
@VisibleForTesting
public void forceDecay() { decayCurrentCounts(); }
@VisibleForTesting
public Map<Object, Long> getCallCountSnapshot() {
HashMap<Object, Long> snapshot = new HashMap<Object, Long>();
for (Map.Entry<Object, AtomicLong> entry : callCounts.entrySet()) {
snapshot.put(entry.getKey(), entry.getValue().get());
}
return Collections.unmodifiableMap(snapshot);
}
@VisibleForTesting
public long getTotalCallSnapshot() {
return totalCalls.get();
}
/**
* MetricsProxy is a singleton because we may init multiple schedulers and we
* want to clean up resources when a new scheduler replaces the old one.
*/
private static final class MetricsProxy implements DecayRpcSchedulerMXBean {
// One singleton per namespace
private static final HashMap<String, MetricsProxy> INSTANCES =
new HashMap<String, MetricsProxy>();
// Weakref for delegate, so we don't retain it forever if it can be GC'd
private WeakReference<DecayRpcScheduler> delegate;
private MetricsProxy(String namespace) {
MBeans.register(namespace, "DecayRpcScheduler", this);
}
public static synchronized MetricsProxy getInstance(String namespace) {
MetricsProxy mp = INSTANCES.get(namespace);
if (mp == null) {
// We must create one
mp = new MetricsProxy(namespace);
INSTANCES.put(namespace, mp);
}
return mp;
}
public void setDelegate(DecayRpcScheduler obj) {
this.delegate = new WeakReference<DecayRpcScheduler>(obj);
}
@Override
public String getSchedulingDecisionSummary() {
DecayRpcScheduler scheduler = delegate.get();
if (scheduler == null) {
return "No Active Scheduler";
} else {
return scheduler.getSchedulingDecisionSummary();
}
}
@Override
public String getCallVolumeSummary() {
DecayRpcScheduler scheduler = delegate.get();
if (scheduler == null) {
return "No Active Scheduler";
} else {
return scheduler.getCallVolumeSummary();
}
}
@Override
public int getUniqueIdentityCount() {
DecayRpcScheduler scheduler = delegate.get();
if (scheduler == null) {
return -1;
} else {
return scheduler.getUniqueIdentityCount();
}
}
@Override
public long getTotalCallVolume() {
DecayRpcScheduler scheduler = delegate.get();
if (scheduler == null) {
return -1;
} else {
return scheduler.getTotalCallVolume();
}
}
}
public int getUniqueIdentityCount() {
return callCounts.size();
}
public long getTotalCallVolume() {
return totalCalls.get();
}
public String getSchedulingDecisionSummary() {
Map<Object, Integer> decisions = scheduleCacheRef.get();
if (decisions == null) {
return "{}";
} else {
try {
ObjectMapper om = new ObjectMapper();
return om.writeValueAsString(decisions);
} catch (Exception e) {
return "Error: " + e.getMessage();
}
}
}
public String getCallVolumeSummary() {
try {
ObjectMapper om = new ObjectMapper();
return om.writeValueAsString(callCounts);
} catch (Exception e) {
return "Error: " + e.getMessage();
}
}
}
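To make the threshold behaviour above concrete: with four queues the default thresholds are {0.125, 0.25, 0.5}, and a caller's share of the decayed call total is matched against the highest threshold it meets. A standalone sketch that mirrors (but does not reuse) getDefaultThresholds() and computePriorityLevel(); the class name is hypothetical.

public class DecayMath {
  static double[] defaultThresholds(int numQueues) {
    double[] ret = new double[numQueues - 1];
    double div = Math.pow(2, numQueues - 1);
    for (int i = 0; i < ret.length; i++) {
      ret[i] = Math.pow(2, i) / div;          // numQueues=4 -> {0.125, 0.25, 0.5}
    }
    return ret;
  }

  static int priorityLevel(double proportion, double[] thresholds, int numQueues) {
    // Heavy users fall into higher-numbered (lower-priority) queues.
    for (int i = numQueues - 1; i > 0; i--) {
      if (proportion >= thresholds[i - 1]) {
        return i;
      }
    }
    return 0;
  }

  public static void main(String[] args) {
    double[] t = defaultThresholds(4);
    System.out.println(priorityLevel(0.60, t, 4)); // 3: over 50% of recent calls
    System.out.println(priorityLevel(0.05, t, 4)); // 0: under the 12.5% boundary
  }
}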

View File

@@ -0,0 +1,30 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
/**
* Provides metrics for Decay scheduler.
*/
public interface DecayRpcSchedulerMXBean {
// Get an overview of the requests in history.
String getSchedulingDecisionSummary();
String getCallVolumeSummary();
int getUniqueIdentityCount();
long getTotalCallVolume();
}

View File

@@ -16,16 +16,14 @@
  * limitations under the License.
  */
-package org.apache.hadoop.lib.service;
+package org.apache.hadoop.ipc;
-import org.apache.hadoop.classification.InterfaceAudience;
-import java.io.IOException;
-import java.security.AccessControlException;
-@InterfaceAudience.Private
-public interface ProxyUser {
-  public void validate(String proxyUser, String proxyHost, String doAsUser) throws IOException, AccessControlException;
+/**
+ * Implement this interface to be used for RPC scheduling in the fair call queues.
+ */
+public interface RpcScheduler {
+  /**
+   * Returns priority level greater than zero as a hint for scheduling.
+   */
+  int getPriorityLevel(Schedulable obj);
 }

View File

@@ -88,13 +88,13 @@ public class RpcMetrics {
   @Metric("Processsing time") MutableRate rpcProcessingTime;
   MutableQuantiles[] rpcProcessingTimeMillisQuantiles;
   @Metric("Number of authentication failures")
-  MutableCounterInt rpcAuthenticationFailures;
+  MutableCounterLong rpcAuthenticationFailures;
   @Metric("Number of authentication successes")
-  MutableCounterInt rpcAuthenticationSuccesses;
+  MutableCounterLong rpcAuthenticationSuccesses;
   @Metric("Number of authorization failures")
-  MutableCounterInt rpcAuthorizationFailures;
+  MutableCounterLong rpcAuthorizationFailures;
   @Metric("Number of authorization sucesses")
-  MutableCounterInt rpcAuthorizationSuccesses;
+  MutableCounterLong rpcAuthorizationSuccesses;
   @Metric("Number of open connections") public int numOpenConnections() {
     return server.getNumOpenConnections();

View File

@@ -293,7 +293,7 @@ public class NetworkTopologyWithNodeGroup extends NetworkTopology {
         return;
       }
     }
-    super.sortByDistance(reader, nodes, nodes.length, seed,
+    super.sortByDistance(reader, nodes, activeLen, seed,
         randomizeBlockLocationsPerBlock);
   }

View File

@@ -27,12 +27,9 @@ import java.util.concurrent.ConcurrentHashMap;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 /**
  * Class that caches the netgroups and inverts group-to-user map
- * to user-to-group map, primarily intented for use with
+ * to user-to-group map, primarily intended for use with
  * netgroups (as returned by getent netgrgoup) which only returns
  * group to user mapping.
  */
@@ -69,9 +66,7 @@ public class NetgroupCache {
       }
     }
     if(userToNetgroupsMap.containsKey(user)) {
-      for(String netgroup : userToNetgroupsMap.get(user)) {
-        groups.add(netgroup);
-      }
+      groups.addAll(userToNetgroupsMap.get(user));
     }
   }
@@ -99,6 +94,7 @@
    */
   public static void clear() {
     netgroupToUsersMap.clear();
+    userToNetgroupsMap.clear();
   }
   /**
@@ -108,12 +104,7 @@
    * @param users list of users for a given group
    */
   public static void add(String group, List<String> users) {
-    if(!isCached(group)) {
-      netgroupToUsersMap.put(group, new HashSet<String>());
-      for(String user: users) {
-        netgroupToUsersMap.get(group).add(user);
-      }
-    }
+    netgroupToUsersMap.put(group, new HashSet<String>(users));
     netgroupToUsersMapUpdated = true; // at the end to avoid race
   }
 }

View File

@@ -0,0 +1,149 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Map;
import java.util.TreeMap;
import javax.security.sasl.Sasl;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SaslPropertiesResolver;
import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
import org.apache.hadoop.util.CombinedIPWhiteList;
import org.apache.hadoop.util.StringUtils;
/**
* An implementation of the SaslPropertiesResolver.
* Uses a white list of IPs.
* If the connection's IP address is in the list of IP addresses, the salProperties
* will be unchanged.
* If the connection's IP is not in the list of IP addresses, then QOP for the
* connection will be restricted to "hadoop.rpc.protection.non-whitelist"
*
* Uses 3 IPList implementations together to form an aggregate whitelist.
* 1. ConstantIPList - to check against a set of hardcoded IPs
* 2. Fixed IP List - to check against a list of IP addresses which are specified externally, but
* will not change over runtime.
* 3. Variable IP List - to check against a list of IP addresses which are specified externally and
* could change during runtime.
* A connection IP address will checked against these 3 IP Lists in the order specified above.
* Once a match is found , the IP address is determined to be in whitelist.
*
* The behavior can be configured using a bunch of configuration parameters.
*
*/
public class WhitelistBasedResolver extends SaslPropertiesResolver {
public static final Log LOG = LogFactory.getLog(WhitelistBasedResolver.class);
private static final String FIXEDWHITELIST_DEFAULT_LOCATION = "/etc/hadoop/fixedwhitelist";
private static final String VARIABLEWHITELIST_DEFAULT_LOCATION = "/etc/hadoop/whitelist";
/**
* Path to the file to containing subnets and ip addresses to form fixed whitelist.
*/
public static final String HADOOP_SECURITY_SASL_FIXEDWHITELIST_FILE =
"hadoop.security.sasl.fixedwhitelist.file";
/**
* Enables/Disables variable whitelist
*/
public static final String HADOOP_SECURITY_SASL_VARIABLEWHITELIST_ENABLE =
"hadoop.security.sasl.variablewhitelist.enable";
/**
* Path to the file to containing subnets and ip addresses to form variable whitelist.
*/
public static final String HADOOP_SECURITY_SASL_VARIABLEWHITELIST_FILE =
"hadoop.security.sasl.variablewhitelist.file";
/**
* time in seconds by which the variable whitelist file is checked for updates
*/
public static final String HADOOP_SECURITY_SASL_VARIABLEWHITELIST_CACHE_SECS =
"hadoop.security.sasl.variablewhitelist.cache.secs";
/**
* comma separated list containing alternate hadoop.rpc.protection values for
* clients which are not in whitelist
*/
public static final String HADOOP_RPC_PROTECTION_NON_WHITELIST =
"hadoop.rpc.protection.non-whitelist";
private CombinedIPWhiteList whiteList;
private Map<String, String> saslProps;
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
String fixedFile = conf.get(HADOOP_SECURITY_SASL_FIXEDWHITELIST_FILE,
FIXEDWHITELIST_DEFAULT_LOCATION);
String variableFile = null;
long expiryTime = 0;
if (conf.getBoolean(HADOOP_SECURITY_SASL_VARIABLEWHITELIST_ENABLE, false)) {
variableFile = conf.get(HADOOP_SECURITY_SASL_VARIABLEWHITELIST_FILE,
VARIABLEWHITELIST_DEFAULT_LOCATION);
expiryTime =
conf.getLong(HADOOP_SECURITY_SASL_VARIABLEWHITELIST_CACHE_SECS,3600) * 1000;
}
whiteList = new CombinedIPWhiteList(fixedFile,variableFile,expiryTime);
this.saslProps = getSaslProperties(conf);
}
/**
* Identify the Sasl Properties to be used for a connection with a client.
* @param clientAddress client's address
* @return the sasl properties to be used for the connection.
*/
@Override
public Map<String, String> getServerProperties(InetAddress clientAddress) {
if (clientAddress == null) {
return saslProps;
}
return whiteList.isIn(clientAddress.getHostAddress())?getDefaultProperties():saslProps;
}
public Map<String, String> getServerProperties(String clientAddress) throws UnknownHostException {
if (clientAddress == null) {
return saslProps;
}
return getServerProperties(InetAddress.getByName(clientAddress));
}
static Map<String, String> getSaslProperties(Configuration conf) {
Map<String, String> saslProps =new TreeMap<String, String>();
String[] qop = conf.getStrings(HADOOP_RPC_PROTECTION_NON_WHITELIST,
QualityOfProtection.PRIVACY.toString());
for (int i=0; i < qop.length; i++) {
qop[i] = QualityOfProtection.valueOf(qop[i].toUpperCase()).getSaslQop();
}
saslProps.put(Sasl.QOP, StringUtils.join(",", qop));
saslProps.put(Sasl.SERVER_AUTH, "true");
return saslProps;
}
}
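A hedged configuration sketch for the resolver above. The whitelist path and client address are illustrative; a missing whitelist file is assumed to behave like an empty list, so non-whitelisted callers fall back to the restricted QOP.

import java.net.InetAddress;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.WhitelistBasedResolver;

public class ResolverSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Fixed whitelist file, one IP or subnet per line (path is illustrative).
    conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_FIXEDWHITELIST_FILE,
        "/etc/hadoop/fixedwhitelist");
    // QOP forced onto clients that are not whitelisted ("privacy" -> SASL "auth-conf").
    conf.set(WhitelistBasedResolver.HADOOP_RPC_PROTECTION_NON_WHITELIST, "privacy");

    WhitelistBasedResolver resolver = new WhitelistBasedResolver();
    resolver.setConf(conf);

    // Whitelisted addresses get the cluster's default SASL properties;
    // everything else gets the restricted properties derived above.
    Map<String, String> props =
        resolver.getServerProperties(InetAddress.getByName("192.168.1.10"));
    System.out.println(props);
  }
}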

View File

@@ -43,10 +43,14 @@ import com.google.common.annotations.VisibleForTesting;
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 public class ServiceAuthorizationManager {
+  static final String BLOCKED = ".blocked";
   private static final String HADOOP_POLICY_FILE = "hadoop-policy.xml";
-  private volatile Map<Class<?>, AccessControlList> protocolToAcl =
-      new IdentityHashMap<Class<?>, AccessControlList>();
+  // For each class, first ACL in the array specifies the allowed entries
+  // and second ACL specifies blocked entries.
+  private volatile Map<Class<?>, AccessControlList[]> protocolToAcls =
+      new IdentityHashMap<Class<?>, AccessControlList[]>();
   /**
    * Configuration key for controlling service-level authorization for Hadoop.
@@ -80,8 +84,8 @@ public class ServiceAuthorizationManager {
       Configuration conf,
       InetAddress addr
       ) throws AuthorizationException {
-    AccessControlList acl = protocolToAcl.get(protocol);
-    if (acl == null) {
+    AccessControlList[] acls = protocolToAcls.get(protocol);
+    if (acls == null) {
       throw new AuthorizationException("Protocol " + protocol +
           " is not known.");
     }
@@ -104,7 +108,7 @@
       }
     }
     if((clientPrincipal != null && !clientPrincipal.equals(user.getUserName())) ||
-        !acl.isUserAllowed(user)) {
+        acls.length != 2 || !acls[0].isUserAllowed(user) || acls[1].isUserAllowed(user)) {
       AUDITLOG.warn(AUTHZ_FAILED_FOR + user + " for protocol=" + protocol
           + ", expected client Kerberos principal is " + clientPrincipal);
       throw new AuthorizationException("User " + user +
@@ -129,13 +133,16 @@
   @Private
   public void refreshWithLoadedConfiguration(Configuration conf,
       PolicyProvider provider) {
-    final Map<Class<?>, AccessControlList> newAcls =
-        new IdentityHashMap<Class<?>, AccessControlList>();
+    final Map<Class<?>, AccessControlList[]> newAcls =
+        new IdentityHashMap<Class<?>, AccessControlList[]>();
     String defaultAcl = conf.get(
         CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL,
         AccessControlList.WILDCARD_ACL_VALUE);
+    String defaultBlockedAcl = conf.get(
+        CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_BLOCKED_ACL, "");
     // Parse the config file
     Service[] services = provider.getServices();
     if (services != null) {
@@ -145,21 +152,30 @@
               conf.get(service.getServiceKey(),
                   defaultAcl)
               );
-        newAcls.put(service.getProtocol(), acl);
+        AccessControlList blockedAcl =
+            new AccessControlList(
+                conf.get(service.getServiceKey() + BLOCKED,
+                    defaultBlockedAcl));
+        newAcls.put(service.getProtocol(), new AccessControlList[] {acl, blockedAcl});
       }
     }
     // Flip to the newly parsed permissions
-    protocolToAcl = newAcls;
+    protocolToAcls = newAcls;
   }
   @VisibleForTesting
   public Set<Class<?>> getProtocolsWithAcls() {
-    return protocolToAcl.keySet();
+    return protocolToAcls.keySet();
   }
   @VisibleForTesting
   public AccessControlList getProtocolsAcls(Class<?> className) {
-    return protocolToAcl.get(className);
+    return protocolToAcls.get(className)[0];
+  }
+  @VisibleForTesting
+  public AccessControlList getProtocolsBlockedAcls(Class<?> className) {
+    return protocolToAcls.get(className)[1];
   }
 }
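The refresh logic above pairs every protocol with two ACLs: index 0 read from the service key (allowed entries) and index 1 read from the service key plus ".blocked". Authorization requires being in the first and absent from the second. A minimal sketch of that check with a hypothetical helper class, not the Hadoop class itself.

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;

public class AclPairCheck {
  // acls[0] holds allowed entries, acls[1] holds blocked entries.
  static boolean isAuthorized(AccessControlList[] acls, UserGroupInformation user) {
    return acls != null && acls.length == 2
        && acls[0].isUserAllowed(user)
        && !acls[1].isUserAllowed(user);
  }

  public static void main(String[] args) {
    AccessControlList[] acls = new AccessControlList[] {
        new AccessControlList("alice,bob"),   // allowed users
        new AccessControlList("bob") };       // blocked users
    UserGroupInformation alice = UserGroupInformation.createRemoteUser("alice");
    UserGroupInformation bob = UserGroupInformation.createRemoteUser("bob");
    System.out.println(isAuthorized(acls, alice)); // true
    System.out.println(isAuthorized(acls, bob));   // false: blocked list wins
  }
}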

View File

@@ -75,7 +75,7 @@ public abstract class DelegationTokenAuthenticationHandler
   public static final String PREFIX = "delegation-token.";
-  public static final String TOKEN_KIND = PREFIX + "token-kind.sec";
+  public static final String TOKEN_KIND = PREFIX + "token-kind";
   public static final String UPDATE_INTERVAL = PREFIX + "update-interval.sec";
   public static final long UPDATE_INTERVAL_DEFAULT = 24 * 60 * 60;

View File

@@ -0,0 +1,76 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
/**
* CacheableIPList loads a list of subnets from a file.
* The list is cached and the cache can be refreshed by specifying cache timeout.
* A negative value of cache timeout disables any caching.
*
* Thread safe.
*/
public class CacheableIPList implements IPList {
private final long cacheTimeout;
private volatile long cacheExpiryTimeStamp;
private volatile FileBasedIPList ipList;
public CacheableIPList(FileBasedIPList ipList, long cacheTimeout) {
this.cacheTimeout = cacheTimeout;
this.ipList = ipList;
updateCacheExpiryTime();
}
/**
* Reloads the ip list
*/
private void reset() {
ipList = ipList.reload();
updateCacheExpiryTime();
}
private void updateCacheExpiryTime() {
if (cacheTimeout < 0) {
cacheExpiryTimeStamp = -1; // no automatic cache expiry.
}else {
cacheExpiryTimeStamp = System.currentTimeMillis() + cacheTimeout;
}
}
/**
* Refreshes the ip list
*/
public void refresh () {
cacheExpiryTimeStamp = 0;
}
@Override
public boolean isIn(String ipAddress) {
// Has the cache expired? Uses double-checked locking on the volatile expiry timestamp.
if (cacheExpiryTimeStamp >= 0 && cacheExpiryTimeStamp < System.currentTimeMillis()) {
synchronized(this) {
//check if cache expired again
if (cacheExpiryTimeStamp < System.currentTimeMillis()) {
reset();
}
}
}
return ipList.isIn(ipAddress);
}
}
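A short usage sketch for the class above; the whitelist path is hypothetical, and the timeout is the same millisecond value that updateCacheExpiryTime() adds to the current time:

import org.apache.hadoop.util.CacheableIPList;
import org.apache.hadoop.util.FileBasedIPList;

public class CacheableIPListExample {
  public static void main(String[] args) {
    // Hypothetical file with one ip address or CIDR subnet per line.
    FileBasedIPList base = new FileBasedIPList("/etc/hadoop/allowed-ips.txt");
    // Re-read the file at most once every 60 seconds.
    CacheableIPList cached = new CacheableIPList(base, 60 * 1000L);
    System.out.println(cached.isIn("10.1.2.3"));
    cached.refresh(); // forces a reload on the next isIn() call
  }
}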

@ -0,0 +1,60 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
public class CombinedIPWhiteList implements IPList {
public static final Log LOG = LogFactory.getLog(CombinedIPWhiteList.class);
private static final String LOCALHOST_IP = "127.0.0.1";
private final IPList[] networkLists;
public CombinedIPWhiteList(String fixedWhiteListFile,
String variableWhiteListFile, long cacheExpiryInSeconds) {
IPList fixedNetworkList = new FileBasedIPList(fixedWhiteListFile);
if (variableWhiteListFile != null){
IPList variableNetworkList = new CacheableIPList(
new FileBasedIPList(variableWhiteListFile),cacheExpiryInSeconds);
networkLists = new IPList[] {fixedNetworkList, variableNetworkList};
}
else {
networkLists = new IPList[] {fixedNetworkList};
}
}
@Override
public boolean isIn(String ipAddress) {
if (ipAddress == null) {
throw new IllegalArgumentException("ipAddress is null");
}
if (LOCALHOST_IP.equals(ipAddress)) {
return true;
}
for (IPList networkList:networkLists) {
if (networkList.isIn(ipAddress)) {
return true;
}
}
return false;
}
}
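A usage sketch with hypothetical file names; the expiry argument is forwarded unchanged to the CacheableIPList shown earlier, which adds it to System.currentTimeMillis():

import org.apache.hadoop.util.CombinedIPWhiteList;

public class CombinedIPWhiteListExample {
  public static void main(String[] args) {
    CombinedIPWhiteList whiteList = new CombinedIPWhiteList(
        "/etc/hadoop/fixedwhitelist",   // always consulted
        "/etc/hadoop/whitelist",        // re-read at most once a minute
        60 * 1000L);
    System.out.println(whiteList.isIn("127.0.0.1")); // localhost is always accepted
    System.out.println(whiteList.isIn("10.222.0.12"));
  }
}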

@ -339,6 +339,12 @@ public class DataChecksum implements Checksum {
byte[] data, int dataOff, int dataLen, byte[] data, int dataOff, int dataLen,
byte[] checksums, int checksumsOff, String fileName, byte[] checksums, int checksumsOff, String fileName,
long basePos) throws ChecksumException { long basePos) throws ChecksumException {
if (NativeCrc32.isAvailable()) {
NativeCrc32.verifyChunkedSumsByteArray(bytesPerChecksum, type.id,
checksums, checksumsOff, data, dataOff, dataLen, fileName, basePos);
return;
}
int remaining = dataLen; int remaining = dataLen;
int dataPos = 0; int dataPos = 0;
@ -384,6 +390,12 @@ public class DataChecksum implements Checksum {
checksums.array(), checksums.arrayOffset() + checksums.position()); checksums.array(), checksums.arrayOffset() + checksums.position());
return; return;
} }
if (NativeCrc32.isAvailable()) {
NativeCrc32.calculateChunkedSums(bytesPerChecksum, type.id,
checksums, data);
return;
}
data.mark(); data.mark();
checksums.mark(); checksums.mark();
@ -406,10 +418,16 @@ public class DataChecksum implements Checksum {
* Implementation of chunked calculation specifically on byte arrays. This * Implementation of chunked calculation specifically on byte arrays. This
* is to avoid the copy when dealing with ByteBuffers that have array backing. * is to avoid the copy when dealing with ByteBuffers that have array backing.
*/ */
private void calculateChunkedSums( public void calculateChunkedSums(
byte[] data, int dataOffset, int dataLength, byte[] data, int dataOffset, int dataLength,
byte[] sums, int sumsOffset) { byte[] sums, int sumsOffset) {
if (NativeCrc32.isAvailable()) {
NativeCrc32.calculateChunkedSumsByteArray(bytesPerChecksum, type.id,
sums, sumsOffset, data, dataOffset, dataLength);
return;
}
int remaining = dataLength; int remaining = dataLength;
while (remaining > 0) { while (remaining > 0) {
int n = Math.min(remaining, bytesPerChecksum); int n = Math.min(remaining, bytesPerChecksum);
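A minimal sketch of driving the chunked-sum path above through the public DataChecksum API; when NativeCrc32 is available the new native byte-array and ByteBuffer paths are taken, otherwise the existing Java loop runs:

import java.nio.ByteBuffer;
import org.apache.hadoop.util.DataChecksum;

public class ChunkedCrcExample {
  public static void main(String[] args) throws Exception {
    int bytesPerChecksum = 512;
    DataChecksum sum = DataChecksum.newDataChecksum(
        DataChecksum.Type.CRC32C, bytesPerChecksum);

    byte[] data = new byte[4096];
    int chunks = (data.length + bytesPerChecksum - 1) / bytesPerChecksum;
    ByteBuffer dataBuf = ByteBuffer.wrap(data);
    ByteBuffer sumsBuf = ByteBuffer.allocate(chunks * sum.getChecksumSize());

    sum.calculateChunkedSums(dataBuf, sumsBuf);  // fills sumsBuf with one CRC per chunk

    dataBuf.rewind();
    sumsBuf.rewind();
    // Throws ChecksumException naming "example-file" and the bad offset on mismatch.
    sum.verifyChunkedSums(dataBuf, sumsBuf, "example-file", 0);
  }
}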

@ -0,0 +1,102 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
/**
* FileBasedIPList loads a list of ip addresses and subnets in CIDR format from a file.
*
* Given an ip address, the isIn method returns true if the address matches one of the entries.
*
* Thread safe.
*/
public class FileBasedIPList implements IPList {
private static final Log LOG = LogFactory.getLog(FileBasedIPList.class);
private final String fileName;
private final MachineList addressList;
public FileBasedIPList(String fileName) {
this.fileName = fileName;
String[] lines = readLines(fileName);
if (lines != null) {
addressList = new MachineList(new HashSet<String>(Arrays.asList(lines)));
} else {
addressList = null;
}
}
public FileBasedIPList reload() {
return new FileBasedIPList(fileName);
}
@Override
public boolean isIn(String ipAddress) {
if (ipAddress == null || addressList == null) {
return false;
}
return addressList.includes(ipAddress);
}
/**
* Reads the lines in a file.
* @param fileName path of the file to read
* @return lines in a String array; null if the file does not exist, the
*         file name is null, or the file cannot be read
*/
private static String[] readLines(String fileName) {
try {
if (fileName != null) {
File file = new File (fileName);
if (file.exists()) {
FileReader fileReader = new FileReader(file);
BufferedReader bufferedReader = new BufferedReader(fileReader);
List<String> lines = new ArrayList<String>();
String line = null;
while ((line = bufferedReader.readLine()) != null) {
lines.add(line);
}
bufferedReader.close();
LOG.debug("Loaded IP list of size = " + lines.size() +" from file = " + fileName);
return(lines.toArray(new String[lines.size()]));
}
else {
LOG.debug("Missing ip list file : "+ fileName);
}
}
}
catch (Throwable t) {
LOG.error(t);
}
return null;
}
}
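A brief sketch of the load-and-query behaviour described in the javadoc above, with a hypothetical file name; a missing or unreadable file simply yields a list for which isIn() always returns false:

import org.apache.hadoop.util.FileBasedIPList;
import org.apache.hadoop.util.IPList;

public class FileBasedIPListExample {
  public static void main(String[] args) {
    // Hypothetical file: one ip address or CIDR subnet per line.
    IPList list = new FileBasedIPList("/etc/hadoop/allowed-ips.txt");
    System.out.println(list.isIn("10.222.0.12"));
    // Missing file: readLines() returns null, so every lookup is false.
    System.out.println(new FileBasedIPList("/does/not/exist").isIn("10.222.0.12"));
  }
}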

@ -378,9 +378,15 @@ public class GenericOptionsParser {
if (files == null) if (files == null)
return null; return null;
String[] fileArr = files.split(","); String[] fileArr = files.split(",");
if (fileArr.length == 0) {
throw new IllegalArgumentException("File name can't be empty string");
}
String[] finalArr = new String[fileArr.length]; String[] finalArr = new String[fileArr.length];
for (int i =0; i < fileArr.length; i++) { for (int i =0; i < fileArr.length; i++) {
String tmp = fileArr[i]; String tmp = fileArr[i];
if (tmp.isEmpty()) {
throw new IllegalArgumentException("File name can't be empty string");
}
String finalPath; String finalPath;
URI pathURI; URI pathURI;
try { try {
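A sketch of the new fail-fast behaviour, assuming the IllegalArgumentException thrown above propagates out of the parser; the file name is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.GenericOptionsParser;

public class EmptyPathExample {
  public static void main(String[] args) throws Exception {
    // ",alpha.txt" splits into an empty first element, which is now rejected
    // up front instead of producing an obscure failure later on.
    try {
      new GenericOptionsParser(new Configuration(),
          new String[] {"-files", ",alpha.txt"});
    } catch (IllegalArgumentException expected) {
      System.out.println(expected.getMessage()); // File name can't be empty string
    }
  }
}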

@ -0,0 +1,33 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@InterfaceStability.Unstable
@InterfaceAudience.Public
public interface IPList {
/**
* returns true if the ipAddress is in the IPList.
* @param ipAddress
* @return boolean value indicating whether the ipAddress is in the IPList
*/
public abstract boolean isIn(String ipAddress);
}

@ -37,7 +37,7 @@ import com.google.common.net.InetAddresses;
/** /**
* Container class which holds a list of ip/host addresses and * Container class which holds a list of ip/host addresses and
* answers membership queries. * answers membership queries.
* . *
* Accepts list of ip addresses, ip addreses in CIDR format and/or * Accepts list of ip addresses, ip addreses in CIDR format and/or
* host addresses. * host addresses.
*/ */
@ -71,8 +71,15 @@ public class MachineList {
* @param hostEntries comma separated ip/cidr/host addresses * @param hostEntries comma separated ip/cidr/host addresses
*/ */
public MachineList(String hostEntries) { public MachineList(String hostEntries) {
this(StringUtils.getTrimmedStringCollection(hostEntries), this(StringUtils.getTrimmedStringCollection(hostEntries));
InetAddressFactory.S_INSTANCE); }
/**
*
* @param hostEntries collection of separated ip/cidr/host addresses
*/
public MachineList(Collection<String> hostEntries) {
this(hostEntries, InetAddressFactory.S_INSTANCE);
} }
/** /**
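A usage sketch for the new collection-based constructor alongside the existing comma-separated form; the addresses and host name are illustrative:

import java.util.Arrays;
import org.apache.hadoop.util.MachineList;

public class MachineListExample {
  public static void main(String[] args) {
    MachineList fromString = new MachineList("10.222.0.0/16,host1.example.com");
    MachineList fromCollection =
        new MachineList(Arrays.asList("10.222.0.0/16", "host1.example.com"));
    System.out.println(fromString.includes("10.222.0.12"));     // true, inside the CIDR range
    System.out.println(fromCollection.includes("10.221.0.12")); // false
  }
}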

@ -54,17 +54,50 @@ class NativeCrc32 {
public static void verifyChunkedSums(int bytesPerSum, int checksumType, public static void verifyChunkedSums(int bytesPerSum, int checksumType,
ByteBuffer sums, ByteBuffer data, String fileName, long basePos) ByteBuffer sums, ByteBuffer data, String fileName, long basePos)
throws ChecksumException { throws ChecksumException {
nativeVerifyChunkedSums(bytesPerSum, checksumType, nativeComputeChunkedSums(bytesPerSum, checksumType,
sums, sums.position(), sums, sums.position(),
data, data.position(), data.remaining(), data, data.position(), data.remaining(),
fileName, basePos); fileName, basePos, true);
}
public static void verifyChunkedSumsByteArray(int bytesPerSum,
int checksumType, byte[] sums, int sumsOffset, byte[] data,
int dataOffset, int dataLength, String fileName, long basePos)
throws ChecksumException {
nativeComputeChunkedSumsByteArray(bytesPerSum, checksumType,
sums, sumsOffset,
data, dataOffset, dataLength,
fileName, basePos, true);
}
public static void calculateChunkedSums(int bytesPerSum, int checksumType,
ByteBuffer sums, ByteBuffer data) {
nativeComputeChunkedSums(bytesPerSum, checksumType,
sums, sums.position(),
data, data.position(), data.remaining(),
"", 0, false);
}
public static void calculateChunkedSumsByteArray(int bytesPerSum,
int checksumType, byte[] sums, int sumsOffset, byte[] data,
int dataOffset, int dataLength) {
nativeComputeChunkedSumsByteArray(bytesPerSum, checksumType,
sums, sumsOffset,
data, dataOffset, dataLength,
"", 0, false);
} }
private static native void nativeVerifyChunkedSums( private static native void nativeComputeChunkedSums(
int bytesPerSum, int checksumType, int bytesPerSum, int checksumType,
ByteBuffer sums, int sumsOffset, ByteBuffer sums, int sumsOffset,
ByteBuffer data, int dataOffset, int dataLength, ByteBuffer data, int dataOffset, int dataLength,
String fileName, long basePos); String fileName, long basePos, boolean verify);
private static native void nativeComputeChunkedSumsByteArray(
int bytesPerSum, int checksumType,
byte[] sums, int sumsOffset,
byte[] data, int dataOffset, int dataLength,
String fileName, long basePos, boolean verify);
// Copy the constants over from DataChecksum so that javah will pick them up // Copy the constants over from DataChecksum so that javah will pick them up
// and make them available in the native code header. // and make them available in the native code header.

@ -171,6 +171,39 @@ static void nioe_deinit(JNIEnv *env) {
nioe_ctor = NULL; nioe_ctor = NULL;
} }
/*
* Compatibility mapping for fadvise flags. Return the proper value from fcntl.h.
* If the value is not known, return the argument unchanged.
*/
static int map_fadvise_flag(jint flag) {
#ifdef HAVE_POSIX_FADVISE
switch(flag) {
case org_apache_hadoop_io_nativeio_NativeIO_POSIX_POSIX_FADV_NORMAL:
return POSIX_FADV_NORMAL;
break;
case org_apache_hadoop_io_nativeio_NativeIO_POSIX_POSIX_FADV_RANDOM:
return POSIX_FADV_RANDOM;
break;
case org_apache_hadoop_io_nativeio_NativeIO_POSIX_POSIX_FADV_SEQUENTIAL:
return POSIX_FADV_SEQUENTIAL;
break;
case org_apache_hadoop_io_nativeio_NativeIO_POSIX_POSIX_FADV_WILLNEED:
return POSIX_FADV_WILLNEED;
break;
case org_apache_hadoop_io_nativeio_NativeIO_POSIX_POSIX_FADV_DONTNEED:
return POSIX_FADV_DONTNEED;
break;
case org_apache_hadoop_io_nativeio_NativeIO_POSIX_POSIX_FADV_NOREUSE:
return POSIX_FADV_NOREUSE;
break;
default:
return flag;
}
#else
return flag;
#endif
}
/* /*
* private static native void initNative(); * private static native void initNative();
* *
@ -303,7 +336,7 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_posix_1fadvise(
PASS_EXCEPTIONS(env); PASS_EXCEPTIONS(env);
int err = 0; int err = 0;
if ((err = posix_fadvise(fd, (off_t)offset, (off_t)len, flags))) { if ((err = posix_fadvise(fd, (off_t)offset, (off_t)len, map_fadvise_flag(flags)))) {
#ifdef __FreeBSD__ #ifdef __FreeBSD__
throw_ioe(env, errno); throw_ioe(env, errno);
#else #else

@ -34,6 +34,10 @@
#include "bulk_crc32.h" #include "bulk_crc32.h"
#define MBYTE 1048576
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
static void throw_checksum_exception(JNIEnv *env, static void throw_checksum_exception(JNIEnv *env,
uint32_t got_crc, uint32_t expected_crc, uint32_t got_crc, uint32_t expected_crc,
jstring j_filename, jlong pos) { jstring j_filename, jlong pos) {
@ -113,12 +117,12 @@ static int convert_java_crc_type(JNIEnv *env, jint crc_type) {
} }
} }
JNIEXPORT void JNICALL Java_org_apache_hadoop_util_NativeCrc32_nativeVerifyChunkedSums JNIEXPORT void JNICALL Java_org_apache_hadoop_util_NativeCrc32_nativeComputeChunkedSums
(JNIEnv *env, jclass clazz, (JNIEnv *env, jclass clazz,
jint bytes_per_checksum, jint j_crc_type, jint bytes_per_checksum, jint j_crc_type,
jobject j_sums, jint sums_offset, jobject j_sums, jint sums_offset,
jobject j_data, jint data_offset, jint data_len, jobject j_data, jint data_offset, jint data_len,
jstring j_filename, jlong base_pos) jstring j_filename, jlong base_pos, jboolean verify)
{ {
uint8_t *sums_addr; uint8_t *sums_addr;
uint8_t *data_addr; uint8_t *data_addr;
@ -162,21 +166,99 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_util_NativeCrc32_nativeVerifyChunk
if (crc_type == -1) return; // exception already thrown if (crc_type == -1) return; // exception already thrown
// Setup complete. Actually verify checksums. // Setup complete. Actually verify checksums.
ret = bulk_verify_crc(data, data_len, sums, crc_type, ret = bulk_crc(data, data_len, sums, crc_type,
bytes_per_checksum, &error_data); bytes_per_checksum, verify ? &error_data : NULL);
if (likely(ret == CHECKSUMS_VALID)) { if (likely(verify && ret == CHECKSUMS_VALID || !verify && ret == 0)) {
return; return;
} else if (unlikely(ret == INVALID_CHECKSUM_DETECTED)) { } else if (unlikely(verify && ret == INVALID_CHECKSUM_DETECTED)) {
long pos = base_pos + (error_data.bad_data - data); long pos = base_pos + (error_data.bad_data - data);
throw_checksum_exception( throw_checksum_exception(
env, error_data.got_crc, error_data.expected_crc, env, error_data.got_crc, error_data.expected_crc,
j_filename, pos); j_filename, pos);
} else { } else {
THROW(env, "java/lang/AssertionError", THROW(env, "java/lang/AssertionError",
"Bad response code from native bulk_verify_crc"); "Bad response code from native bulk_crc");
} }
} }
JNIEXPORT void JNICALL Java_org_apache_hadoop_util_NativeCrc32_nativeComputeChunkedSumsByteArray
(JNIEnv *env, jclass clazz,
jint bytes_per_checksum, jint j_crc_type,
jarray j_sums, jint sums_offset,
jarray j_data, jint data_offset, jint data_len,
jstring j_filename, jlong base_pos, jboolean verify)
{
uint8_t *sums_addr;
uint8_t *data_addr;
uint32_t *sums;
uint8_t *data;
int crc_type;
crc32_error_t error_data;
int ret;
int numChecksumsPerIter;
int checksumNum;
if (unlikely(!j_sums || !j_data)) {
THROW(env, "java/lang/NullPointerException",
"input byte arrays must not be null");
return;
}
if (unlikely(sums_offset < 0 || data_offset < 0 || data_len < 0)) {
THROW(env, "java/lang/IllegalArgumentException",
"bad offsets or lengths");
return;
}
if (unlikely(bytes_per_checksum <= 0)) {
THROW(env, "java/lang/IllegalArgumentException",
"invalid bytes_per_checksum");
return;
}
// Convert to correct internal C constant for CRC type
crc_type = convert_java_crc_type(env, j_crc_type);
if (crc_type == -1) return; // exception already thrown
numChecksumsPerIter = MAX(1, MBYTE / bytes_per_checksum);
checksumNum = 0;
while (checksumNum * bytes_per_checksum < data_len) {
// Convert byte arrays to C pointers
sums_addr = (*env)->GetPrimitiveArrayCritical(env, j_sums, NULL);
data_addr = (*env)->GetPrimitiveArrayCritical(env, j_data, NULL);
if (unlikely(!sums_addr || !data_addr)) {
if (data_addr) (*env)->ReleasePrimitiveArrayCritical(env, j_data, data_addr, 0);
if (sums_addr) (*env)->ReleasePrimitiveArrayCritical(env, j_sums, sums_addr, 0);
THROW(env, "java/lang/OutOfMemoryError",
"not enough memory for byte arrays in JNI code");
return;
}
sums = (uint32_t *)(sums_addr + sums_offset) + checksumNum;
data = data_addr + data_offset + checksumNum * bytes_per_checksum;
// Setup complete. Actually verify checksums.
ret = bulk_crc(data, MIN(numChecksumsPerIter * bytes_per_checksum,
data_len - checksumNum * bytes_per_checksum),
sums, crc_type, bytes_per_checksum, verify ? &error_data : NULL);
(*env)->ReleasePrimitiveArrayCritical(env, j_data, data_addr, 0);
(*env)->ReleasePrimitiveArrayCritical(env, j_sums, sums_addr, 0);
if (unlikely(verify && ret == INVALID_CHECKSUM_DETECTED)) {
long pos = base_pos + (error_data.bad_data - data) + checksumNum *
bytes_per_checksum;
throw_checksum_exception(
env, error_data.got_crc, error_data.expected_crc,
j_filename, pos);
return;
} else if (unlikely(verify && ret != CHECKSUMS_VALID || !verify && ret != 0)) {
THROW(env, "java/lang/AssertionError",
"Bad response code from native bulk_crc");
return;
}
checksumNum += numChecksumsPerIter;
}
}
/** /**
* vim: sw=2: ts=2: et: * vim: sw=2: ts=2: et:
*/ */

@ -55,40 +55,23 @@ static void pipelined_crc32c(uint32_t *crc1, uint32_t *crc2, uint32_t *crc3, con
static int cached_cpu_supports_crc32; // initialized by constructor below static int cached_cpu_supports_crc32; // initialized by constructor below
static uint32_t crc32c_hardware(uint32_t crc, const uint8_t* data, size_t length); static uint32_t crc32c_hardware(uint32_t crc, const uint8_t* data, size_t length);
int bulk_calculate_crc(const uint8_t *data, size_t data_len, static inline int store_or_verify(uint32_t *sums, uint32_t crc,
uint32_t *sums, int checksum_type, int is_verify) {
int bytes_per_checksum) { if (!is_verify) {
uint32_t crc; *sums = crc;
crc_update_func_t crc_update_func; return 1;
} else {
switch (checksum_type) { return crc == *sums;
case CRC32_ZLIB_POLYNOMIAL:
crc_update_func = crc32_zlib_sb8;
break;
case CRC32C_POLYNOMIAL:
crc_update_func = crc32c_sb8;
break;
default:
return -EINVAL;
break;
} }
while (likely(data_len > 0)) {
int len = likely(data_len >= bytes_per_checksum) ? bytes_per_checksum : data_len;
crc = CRC_INITIAL_VAL;
crc = crc_update_func(crc, data, len);
*sums = ntohl(crc_val(crc));
data += len;
data_len -= len;
sums++;
}
return 0;
} }
int bulk_verify_crc(const uint8_t *data, size_t data_len, int bulk_crc(const uint8_t *data, size_t data_len,
const uint32_t *sums, int checksum_type, uint32_t *sums, int checksum_type,
int bytes_per_checksum, int bytes_per_checksum,
crc32_error_t *error_info) { crc32_error_t *error_info) {
int is_verify = error_info != NULL;
#ifdef USE_PIPELINED #ifdef USE_PIPELINED
uint32_t crc1, crc2, crc3; uint32_t crc1, crc2, crc3;
int n_blocks = data_len / bytes_per_checksum; int n_blocks = data_len / bytes_per_checksum;
@ -112,7 +95,7 @@ int bulk_verify_crc(const uint8_t *data, size_t data_len,
} }
break; break;
default: default:
return INVALID_CHECKSUM_TYPE; return is_verify ? INVALID_CHECKSUM_TYPE : -EINVAL;
} }
#ifdef USE_PIPELINED #ifdef USE_PIPELINED
@ -122,16 +105,15 @@ int bulk_verify_crc(const uint8_t *data, size_t data_len,
crc1 = crc2 = crc3 = CRC_INITIAL_VAL; crc1 = crc2 = crc3 = CRC_INITIAL_VAL;
pipelined_crc32c(&crc1, &crc2, &crc3, data, bytes_per_checksum, 3); pipelined_crc32c(&crc1, &crc2, &crc3, data, bytes_per_checksum, 3);
crc = ntohl(crc_val(crc1)); if (unlikely(!store_or_verify(sums, (crc = ntohl(crc_val(crc1))), is_verify)))
if ((crc = ntohl(crc_val(crc1))) != *sums)
goto return_crc_error; goto return_crc_error;
sums++; sums++;
data += bytes_per_checksum; data += bytes_per_checksum;
if ((crc = ntohl(crc_val(crc2))) != *sums) if (unlikely(!store_or_verify(sums, (crc = ntohl(crc_val(crc2))), is_verify)))
goto return_crc_error; goto return_crc_error;
sums++; sums++;
data += bytes_per_checksum; data += bytes_per_checksum;
if ((crc = ntohl(crc_val(crc3))) != *sums) if (unlikely(!store_or_verify(sums, (crc = ntohl(crc_val(crc3))), is_verify)))
goto return_crc_error; goto return_crc_error;
sums++; sums++;
data += bytes_per_checksum; data += bytes_per_checksum;
@ -143,12 +125,12 @@ int bulk_verify_crc(const uint8_t *data, size_t data_len,
crc1 = crc2 = crc3 = CRC_INITIAL_VAL; crc1 = crc2 = crc3 = CRC_INITIAL_VAL;
pipelined_crc32c(&crc1, &crc2, &crc3, data, bytes_per_checksum, n_blocks); pipelined_crc32c(&crc1, &crc2, &crc3, data, bytes_per_checksum, n_blocks);
if ((crc = ntohl(crc_val(crc1))) != *sums) if (unlikely(!store_or_verify(sums, (crc = ntohl(crc_val(crc1))), is_verify)))
goto return_crc_error; goto return_crc_error;
data += bytes_per_checksum; data += bytes_per_checksum;
sums++; sums++;
if (n_blocks == 2) { if (n_blocks == 2) {
if ((crc = ntohl(crc_val(crc2))) != *sums) if (unlikely(!store_or_verify(sums, (crc = ntohl(crc_val(crc2))), is_verify)))
goto return_crc_error; goto return_crc_error;
sums++; sums++;
data += bytes_per_checksum; data += bytes_per_checksum;
@ -160,10 +142,10 @@ int bulk_verify_crc(const uint8_t *data, size_t data_len,
crc1 = crc2 = crc3 = CRC_INITIAL_VAL; crc1 = crc2 = crc3 = CRC_INITIAL_VAL;
pipelined_crc32c(&crc1, &crc2, &crc3, data, remainder, 1); pipelined_crc32c(&crc1, &crc2, &crc3, data, remainder, 1);
if ((crc = ntohl(crc_val(crc1))) != *sums) if (unlikely(!store_or_verify(sums, (crc = ntohl(crc_val(crc1))), is_verify)))
goto return_crc_error; goto return_crc_error;
} }
return CHECKSUMS_VALID; return is_verify ? CHECKSUMS_VALID : 0;
} }
#endif #endif
@ -172,14 +154,14 @@ int bulk_verify_crc(const uint8_t *data, size_t data_len,
crc = CRC_INITIAL_VAL; crc = CRC_INITIAL_VAL;
crc = crc_update_func(crc, data, len); crc = crc_update_func(crc, data, len);
crc = ntohl(crc_val(crc)); crc = ntohl(crc_val(crc));
if (unlikely(crc != *sums)) { if (unlikely(!store_or_verify(sums, crc, is_verify))) {
goto return_crc_error; goto return_crc_error;
} }
data += len; data += len;
data_len -= len; data_len -= len;
sums++; sums++;
} }
return CHECKSUMS_VALID; return is_verify ? CHECKSUMS_VALID : 0;
return_crc_error: return_crc_error:
if (error_info != NULL) { if (error_info != NULL) {

@ -42,49 +42,32 @@ typedef struct crc32_error {
/** /**
* Verify a buffer of data which is checksummed in chunks * Either calculates checksums for or verifies a buffer of data.
* of bytes_per_checksum bytes. The checksums are each 32 bits * Checksums performed in chunks of bytes_per_checksum bytes. The checksums
* and are stored in sequential indexes of the 'sums' array. * are each 32 bits and are stored in sequential indexes of the 'sums' array.
* Verification is done (sums is assumed to already contain the checksums)
* if error_info is non-null; otherwise calculation is done and checksums
* are stored into sums.
* *
* @param data The data to checksum * @param data The data to checksum
* @param dataLen Length of the data buffer * @param dataLen Length of the data buffer
* @param sums (out param) buffer to write checksums into. * @param sums (out param) buffer to write checksums into or
* It must contain at least dataLen * 4 bytes. * where checksums are already stored.
* It must contain at least
* ((dataLen - 1) / bytes_per_checksum + 1) * 4 bytes.
* @param checksum_type One of the CRC32 algorithm constants defined * @param checksum_type One of the CRC32 algorithm constants defined
* above * above
* @param bytes_per_checksum How many bytes of data to process per checksum. * @param bytes_per_checksum How many bytes of data to process per checksum.
* @param error_info If non-NULL, will be filled in if an error * @param error_info If non-NULL, verification will be performed and
* is detected * it will be filled in if an error
* is detected. Otherwise calculation is performed.
* *
* @return 0 for success, non-zero for an error, result codes * @return 0 for success, non-zero for an error, result codes
* for which are defined above * for verification are defined above
*/ */
extern int bulk_verify_crc(const uint8_t *data, size_t data_len, extern int bulk_crc(const uint8_t *data, size_t data_len,
const uint32_t *sums, int checksum_type, uint32_t *sums, int checksum_type,
int bytes_per_checksum, int bytes_per_checksum,
crc32_error_t *error_info); crc32_error_t *error_info);
/**
* Calculate checksums for some data.
*
* The checksums are each 32 bits and are stored in sequential indexes of the
* 'sums' array.
*
* This function is not (yet) optimized. It is provided for testing purposes
* only.
*
* @param data The data to checksum
* @param dataLen Length of the data buffer
* @param sums (out param) buffer to write checksums into.
* It must contain at least dataLen * 4 bytes.
* @param checksum_type One of the CRC32 algorithm constants defined
* above
* @param bytesPerChecksum How many bytes of data to process per checksum.
*
* @return 0 for success, non-zero for an error
*/
int bulk_calculate_crc(const uint8_t *data, size_t data_len,
uint32_t *sums, int checksum_type,
int bytes_per_checksum);
#endif #endif

@ -48,9 +48,9 @@ static int testBulkVerifyCrc(int dataLen, int crcType, int bytesPerChecksum)
sums = calloc(sizeof(uint32_t), sums = calloc(sizeof(uint32_t),
(dataLen + bytesPerChecksum - 1) / bytesPerChecksum); (dataLen + bytesPerChecksum - 1) / bytesPerChecksum);
EXPECT_ZERO(bulk_calculate_crc(data, dataLen, sums, crcType, EXPECT_ZERO(bulk_crc(data, dataLen, sums, crcType,
bytesPerChecksum)); bytesPerChecksum, NULL));
EXPECT_ZERO(bulk_verify_crc(data, dataLen, sums, crcType, EXPECT_ZERO(bulk_crc(data, dataLen, sums, crcType,
bytesPerChecksum, &errorData)); bytesPerChecksum, &errorData));
free(data); free(data);
free(sums); free(sums);

@ -81,36 +81,15 @@ User Commands
* <<<archive>>> * <<<archive>>>
Creates a hadoop archive. More information can be found at Hadoop Creates a hadoop archive. More information can be found at
Archives. {{{../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/HadoopArchives.html}
Hadoop Archives Guide}}.
Usage: <<<hadoop archive -archiveName NAME <src>* <dest> >>>
*-------------------+-------------------------------------------------------+
||COMMAND_OPTION || Description
*-------------------+-------------------------------------------------------+
| -archiveName NAME | Name of the archive to be created.
*-------------------+-------------------------------------------------------+
| src | Filesystem pathnames which work as usual with regular
| expressions.
*-------------------+-------------------------------------------------------+
| dest | Destination directory which would contain the archive.
*-------------------+-------------------------------------------------------+
* <<<distcp>>> * <<<distcp>>>
Copy file or directories recursively. More information can be found at Copy file or directories recursively. More information can be found at
Hadoop DistCp Guide. {{{../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/DistCp.html}
Hadoop DistCp Guide}}.
Usage: <<<hadoop distcp <srcurl> <desturl> >>>
*-------------------+--------------------------------------------+
||COMMAND_OPTION || Description
*-------------------+--------------------------------------------+
| srcurl | Source Url
*-------------------+--------------------------------------------+
| desturl | Destination Url
*-------------------+--------------------------------------------+
* <<<fs>>> * <<<fs>>>
@ -142,103 +121,21 @@ User Commands
* <<<job>>> * <<<job>>>
Command to interact with Map Reduce Jobs. Deprecated. Use
{{{../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapredCommands.html#job}
Usage: <<<hadoop job [GENERIC_OPTIONS] [-submit <job-file>] | [-status <job-id>] | [-counter <job-id> <group-name> <counter-name>] | [-kill <job-id>] | [-events <job-id> <from-event-#> <#-of-events>] | [-history [all] <jobOutputDir>] | [-list [all]] | [-kill-task <task-id>] | [-fail-task <task-id>] | [-set-priority <job-id> <priority>]>>> <<<mapred job>>>}} instead.
*------------------------------+---------------------------------------------+
|| COMMAND_OPTION || Description
*------------------------------+---------------------------------------------+
| -submit <job-file> | Submits the job.
*------------------------------+---------------------------------------------+
| -status <job-id> | Prints the map and reduce completion
| percentage and all job counters.
*------------------------------+---------------------------------------------+
| -counter <job-id> <group-name> <counter-name> | Prints the counter value.
*------------------------------+---------------------------------------------+
| -kill <job-id> | Kills the job.
*------------------------------+---------------------------------------------+
| -events <job-id> <from-event-#> <#-of-events> | Prints the events' details
| received by jobtracker for the given range.
*------------------------------+---------------------------------------------+
| -history [all]<jobOutputDir> | Prints job details, failed and killed tip
| details. More details about the job such as
| successful tasks and task attempts made for
| each task can be viewed by specifying the [all]
| option.
*------------------------------+---------------------------------------------+
| -list [all] | Displays jobs which are yet to complete.
| <<<-list all>>> displays all jobs.
*------------------------------+---------------------------------------------+
| -kill-task <task-id> | Kills the task. Killed tasks are NOT counted
| against failed attempts.
*------------------------------+---------------------------------------------+
| -fail-task <task-id> | Fails the task. Failed tasks are counted
| against failed attempts.
*------------------------------+---------------------------------------------+
| -set-priority <job-id> <priority> | Changes the priority of the job. Allowed
| priority values are VERY_HIGH, HIGH, NORMAL,
| LOW, VERY_LOW
*------------------------------+---------------------------------------------+
* <<<pipes>>> * <<<pipes>>>
Runs a pipes job. Deprecated. Use
{{{../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapredCommands.html#pipes}
Usage: <<<hadoop pipes [-conf <path>] [-jobconf <key=value>, <key=value>, <<<mapred pipes>>>}} instead.
...] [-input <path>] [-output <path>] [-jar <jar file>] [-inputformat
<class>] [-map <class>] [-partitioner <class>] [-reduce <class>] [-writer
<class>] [-program <executable>] [-reduces <num>]>>>
*----------------------------------------+------------------------------------+
|| COMMAND_OPTION || Description
*----------------------------------------+------------------------------------+
| -conf <path> | Configuration for job
*----------------------------------------+------------------------------------+
| -jobconf <key=value>, <key=value>, ... | Add/override configuration for job
*----------------------------------------+------------------------------------+
| -input <path> | Input directory
*----------------------------------------+------------------------------------+
| -output <path> | Output directory
*----------------------------------------+------------------------------------+
| -jar <jar file> | Jar filename
*----------------------------------------+------------------------------------+
| -inputformat <class> | InputFormat class
*----------------------------------------+------------------------------------+
| -map <class> | Java Map class
*----------------------------------------+------------------------------------+
| -partitioner <class> | Java Partitioner
*----------------------------------------+------------------------------------+
| -reduce <class> | Java Reduce class
*----------------------------------------+------------------------------------+
| -writer <class> | Java RecordWriter
*----------------------------------------+------------------------------------+
| -program <executable> | Executable URI
*----------------------------------------+------------------------------------+
| -reduces <num> | Number of reduces
*----------------------------------------+------------------------------------+
* <<<queue>>> * <<<queue>>>
command to interact and view Job Queue information Deprecated. Use
{{{../../hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapredCommands.html#queue}
Usage: <<<hadoop queue [-list] | [-info <job-queue-name> [-showJobs]] | [-showacls]>>> <<<mapred queue>>>}} instead.
*-----------------+-----------------------------------------------------------+
|| COMMAND_OPTION || Description
*-----------------+-----------------------------------------------------------+
| -list | Gets list of Job Queues configured in the system.
| Along with scheduling information associated with the job queues.
*-----------------+-----------------------------------------------------------+
| -info <job-queue-name> [-showJobs] | Displays the job queue information and
| associated scheduling information of particular job queue.
| If <<<-showJobs>>> options is present a list of jobs
| submitted to the particular job queue is displayed.
*-----------------+-----------------------------------------------------------+
| -showacls | Displays the queue name and associated queue operations
| allowed for the current user. The list consists of only
| those queues to which the user has access.
*-----------------+-----------------------------------------------------------+
* <<<version>>> * <<<version>>>
@ -314,35 +211,6 @@ Administration Commands
Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#dfsadmin} Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#dfsadmin}
<<<hdfs dfsadmin>>>}} instead. <<<hdfs dfsadmin>>>}} instead.
* <<<mradmin>>>
Runs MR admin client
Usage: <<<hadoop mradmin [ GENERIC_OPTIONS ] [-refreshQueueAcls]>>>
*-------------------+-----------------------------------------------------------+
|| COMMAND_OPTION || Description
*-------------------+-----------------------------------------------------------+
| -refreshQueueAcls | Refresh the queue acls used by hadoop, to check access
| during submissions and administration of the job by the
| user. The properties present in mapred-queue-acls.xml is
| reloaded by the queue manager.
*-------------------+-----------------------------------------------------------+
* <<<jobtracker>>>
Runs the MapReduce job Tracker node.
Usage: <<<hadoop jobtracker [-dumpConfiguration]>>>
*--------------------+-----------------------------------------------------------+
|| COMMAND_OPTION || Description
*--------------------+-----------------------------------------------------------+
| -dumpConfiguration | Dumps the configuration used by the JobTracker alongwith
| queue configuration in JSON format into Standard output
| used by the jobtracker and exits.
*--------------------+-----------------------------------------------------------+
* <<<namenode>>> * <<<namenode>>>
Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#namenode} Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#namenode}
@ -352,9 +220,3 @@ Administration Commands
Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#secondarynamenode} Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#secondarynamenode}
<<<hdfs secondarynamenode>>>}} instead. <<<hdfs secondarynamenode>>>}} instead.
* <<<tasktracker>>>
Runs a MapReduce task Tracker node.
Usage: <<<hadoop tasktracker>>>

@ -138,7 +138,7 @@ copyToLocal
count count
Usage: <<<hdfs dfs -count [-q] <paths> >>> Usage: <<<hdfs dfs -count [-q] [-h] <paths> >>>
Count the number of directories, files and bytes under the paths that match Count the number of directories, files and bytes under the paths that match
the specified file pattern. The output columns with -count are: DIR_COUNT, the specified file pattern. The output columns with -count are: DIR_COUNT,
@ -147,12 +147,16 @@ count
The output columns with -count -q are: QUOTA, REMAINING_QUATA, SPACE_QUOTA, The output columns with -count -q are: QUOTA, REMAINING_QUATA, SPACE_QUOTA,
REMAINING_SPACE_QUOTA, DIR_COUNT, FILE_COUNT, CONTENT_SIZE, FILE_NAME REMAINING_SPACE_QUOTA, DIR_COUNT, FILE_COUNT, CONTENT_SIZE, FILE_NAME
The -h option shows sizes in a human-readable format.
Example: Example:
* <<<hdfs dfs -count hdfs://nn1.example.com/file1 hdfs://nn2.example.com/file2>>> * <<<hdfs dfs -count hdfs://nn1.example.com/file1 hdfs://nn2.example.com/file2>>>
* <<<hdfs dfs -count -q hdfs://nn1.example.com/file1>>> * <<<hdfs dfs -count -q hdfs://nn1.example.com/file1>>>
* <<<hdfs dfs -count -q -h hdfs://nn1.example.com/file1>>>
Exit Code: Exit Code:
Returns 0 on success and -1 on error. Returns 0 on success and -1 on error.

@ -30,6 +30,8 @@ Native Libraries Guide
compression" could refer to all *.so's you need to compile that are compression" could refer to all *.so's you need to compile that are
specifically related to compression. Currently, however, this document specifically related to compression. Currently, however, this document
only addresses the native hadoop library (<<<libhadoop.so>>>). only addresses the native hadoop library (<<<libhadoop.so>>>).
The documentation for the libhdfs library (<<<libhdfs.so>>>) is
{{{../hadoop-hdfs/LibHdfs.html}here}}.
* Native Hadoop Library * Native Hadoop Library
@ -54,24 +56,28 @@ Native Libraries Guide
[[4]] Install the compression codec development packages (>zlib-1.2, [[4]] Install the compression codec development packages (>zlib-1.2,
>gzip-1.2): >gzip-1.2):
+ If you download the library, install one or more development
* If you download the library, install one or more development
packages - whichever compression codecs you want to use with packages - whichever compression codecs you want to use with
your deployment. your deployment.
+ If you build the library, it is mandatory to install both
* If you build the library, it is mandatory to install both
development packages. development packages.
[[5]] Check the runtime log files. [[5]] Check the runtime log files.
* Components * Components
The native hadoop library includes two components, the zlib and gzip The native hadoop library includes various components:
compression codecs:
* zlib * Compression Codecs (bzip2, lz4, snappy, zlib)
* gzip * Native IO utilities for {{{../hadoop-hdfs/ShortCircuitLocalReads.html}
HDFS Short-Circuit Local Reads}} and
{{{../hadoop-hdfs/CentralizedCacheManagement.html}Centralized Cache
Management in HDFS}}
The native hadoop library is imperative for gzip to work. * CRC32 checksum implementation
* Supported Platforms * Supported Platforms

@ -110,6 +110,27 @@ security.ha.service.protocol.acl | ACL for HAService protocol used by HAAdm
<<<security.service.authorization.default.acl>>> is applied. If <<<security.service.authorization.default.acl>>> is applied. If
<<<security.service.authorization.default.acl>>> is not defined, <<<*>>> is applied. <<<security.service.authorization.default.acl>>> is not defined, <<<*>>> is applied.
** Blocked Access Control Lists
In some cases it is necessary to specify a blocked access control list for a service. This lists
the users and groups who are not authorized to access the service. The format of the blocked
access control list is the same as that of the access control list. The blocked access
control list can be specified via <<<${HADOOP_CONF_DIR}/hadoop-policy.xml>>>. The property name
is derived by suffixing the access control list property name with ".blocked".
Example: The property name of the blocked access control list for <<<security.client.protocol.acl>>>
will be <<<security.client.protocol.acl.blocked>>>
For a service it is possible to specify both an access control list and a blocked access control
list. A user is authorized to access the service if the user is in the access control list and not
in the blocked access control list.
If a blocked access control list is not defined for a service, the value of
<<<security.service.authorization.default.acl.blocked>>> is applied. If
<<<security.service.authorization.default.acl.blocked>>> is not defined, an
empty blocked access control list is applied.
** Refreshing Service Level Authorization Configuration ** Refreshing Service Level Authorization Configuration
The service-level authorization configuration for the NameNode and The service-level authorization configuration for the NameNode and

@ -0,0 +1,248 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import org.junit.Test;
import org.mockito.InOrder;
public class TestContentSummary {
// check the empty constructor correctly initialises the object
@Test
public void testConstructorEmpty() {
ContentSummary contentSummary = new ContentSummary();
assertEquals("getLength", 0, contentSummary.getLength());
assertEquals("getFileCount", 0, contentSummary.getFileCount());
assertEquals("getDirectoryCount", 0, contentSummary.getDirectoryCount());
assertEquals("getQuota", 0, contentSummary.getQuota());
assertEquals("getSpaceConsumed", 0, contentSummary.getSpaceConsumed());
assertEquals("getSpaceQuota", 0, contentSummary.getSpaceQuota());
}
// check the full constructor with quota information
@Test
public void testConstructorWithQuota() {
long length = 11111;
long fileCount = 22222;
long directoryCount = 33333;
long quota = 44444;
long spaceConsumed = 55555;
long spaceQuota = 66666;
ContentSummary contentSummary = new ContentSummary(length, fileCount,
directoryCount, quota, spaceConsumed, spaceQuota);
assertEquals("getLength", length, contentSummary.getLength());
assertEquals("getFileCount", fileCount, contentSummary.getFileCount());
assertEquals("getDirectoryCount", directoryCount,
contentSummary.getDirectoryCount());
assertEquals("getQuota", quota, contentSummary.getQuota());
assertEquals("getSpaceConsumed", spaceConsumed,
contentSummary.getSpaceConsumed());
assertEquals("getSpaceQuota", spaceQuota, contentSummary.getSpaceQuota());
}
// check the constructor with quota information
@Test
public void testConstructorNoQuota() {
long length = 11111;
long fileCount = 22222;
long directoryCount = 33333;
ContentSummary contentSummary = new ContentSummary(length, fileCount,
directoryCount);
assertEquals("getLength", length, contentSummary.getLength());
assertEquals("getFileCount", fileCount, contentSummary.getFileCount());
assertEquals("getDirectoryCount", directoryCount,
contentSummary.getDirectoryCount());
assertEquals("getQuota", -1, contentSummary.getQuota());
assertEquals("getSpaceConsumed", length, contentSummary.getSpaceConsumed());
assertEquals("getSpaceQuota", -1, contentSummary.getSpaceQuota());
}
// check the write method
@Test
public void testWrite() throws IOException {
long length = 11111;
long fileCount = 22222;
long directoryCount = 33333;
long quota = 44444;
long spaceConsumed = 55555;
long spaceQuota = 66666;
ContentSummary contentSummary = new ContentSummary(length, fileCount,
directoryCount, quota, spaceConsumed, spaceQuota);
DataOutput out = mock(DataOutput.class);
InOrder inOrder = inOrder(out);
contentSummary.write(out);
inOrder.verify(out).writeLong(length);
inOrder.verify(out).writeLong(fileCount);
inOrder.verify(out).writeLong(directoryCount);
inOrder.verify(out).writeLong(quota);
inOrder.verify(out).writeLong(spaceConsumed);
inOrder.verify(out).writeLong(spaceQuota);
}
// check the readFields method
@Test
public void testReadFields() throws IOException {
long length = 11111;
long fileCount = 22222;
long directoryCount = 33333;
long quota = 44444;
long spaceConsumed = 55555;
long spaceQuota = 66666;
ContentSummary contentSummary = new ContentSummary();
DataInput in = mock(DataInput.class);
when(in.readLong()).thenReturn(length).thenReturn(fileCount)
.thenReturn(directoryCount).thenReturn(quota).thenReturn(spaceConsumed)
.thenReturn(spaceQuota);
contentSummary.readFields(in);
assertEquals("getLength", length, contentSummary.getLength());
assertEquals("getFileCount", fileCount, contentSummary.getFileCount());
assertEquals("getDirectoryCount", directoryCount,
contentSummary.getDirectoryCount());
assertEquals("getQuota", quota, contentSummary.getQuota());
assertEquals("getSpaceConsumed", spaceConsumed,
contentSummary.getSpaceConsumed());
assertEquals("getSpaceQuota", spaceQuota, contentSummary.getSpaceQuota());
}
// check the header with quotas
@Test
public void testGetHeaderWithQuota() {
String header = " name quota rem name quota space quota "
+ "rem space quota directories files bytes ";
assertEquals(header, ContentSummary.getHeader(true));
}
// check the header without quotas
@Test
public void testGetHeaderNoQuota() {
String header = " directories files bytes ";
assertEquals(header, ContentSummary.getHeader(false));
}
// check the toString method with quotas
@Test
public void testToStringWithQuota() {
long length = 11111;
long fileCount = 22222;
long directoryCount = 33333;
long quota = 44444;
long spaceConsumed = 55555;
long spaceQuota = 66665;
ContentSummary contentSummary = new ContentSummary(length, fileCount,
directoryCount, quota, spaceConsumed, spaceQuota);
String expected = " 44444 -11111 66665 11110"
+ " 33333 22222 11111 ";
assertEquals(expected, contentSummary.toString(true));
}
// check the toString method with quotas
@Test
public void testToStringNoQuota() {
long length = 11111;
long fileCount = 22222;
long directoryCount = 33333;
ContentSummary contentSummary = new ContentSummary(length, fileCount,
directoryCount);
String expected = " none inf none"
+ " inf 33333 22222 11111 ";
assertEquals(expected, contentSummary.toString(true));
}
// check the toString method with quotas
@Test
public void testToStringNoShowQuota() {
long length = 11111;
long fileCount = 22222;
long directoryCount = 33333;
long quota = 44444;
long spaceConsumed = 55555;
long spaceQuota = 66665;
ContentSummary contentSummary = new ContentSummary(length, fileCount,
directoryCount, quota, spaceConsumed, spaceQuota);
String expected = " 33333 22222 11111 ";
assertEquals(expected, contentSummary.toString(false));
}
// check the toString method (defaults to with quotas)
@Test
public void testToString() {
long length = 11111;
long fileCount = 22222;
long directoryCount = 33333;
long quota = 44444;
long spaceConsumed = 55555;
long spaceQuota = 66665;
ContentSummary contentSummary = new ContentSummary(length, fileCount,
directoryCount, quota, spaceConsumed, spaceQuota);
String expected = " 44444 -11111 66665"
+ " 11110 33333 22222 11111 ";
assertEquals(expected, contentSummary.toString());
}
// check the toString method with quotas
@Test
public void testToStringHumanWithQuota() {
long length = Long.MAX_VALUE;
long fileCount = 222222222;
long directoryCount = 33333;
long quota = 222256578;
long spaceConsumed = 1073741825;
long spaceQuota = 1;
ContentSummary contentSummary = new ContentSummary(length, fileCount,
directoryCount, quota, spaceConsumed, spaceQuota);
String expected = " 212.0 M 1023 1 "
+ " -1 G 32.6 K 211.9 M 8.0 E ";
assertEquals(expected, contentSummary.toString(true, true));
}
// check the toString method with quotas
@Test
public void testToStringHumanNoShowQuota() {
long length = Long.MAX_VALUE;
long fileCount = 222222222;
long directoryCount = 33333;
long quota = 222256578;
long spaceConsumed = 55555;
long spaceQuota = Long.MAX_VALUE;
ContentSummary contentSummary = new ContentSummary(length, fileCount,
directoryCount, quota, spaceConsumed, spaceQuota);
String expected = " 32.6 K 211.9 M 8.0 E ";
assertEquals(expected, contentSummary.toString(false, true));
}
}

@ -0,0 +1,270 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.shell;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import java.io.PrintStream;
import java.io.IOException;
import java.net.URI;
import java.util.LinkedList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.Test;
import org.junit.Before;
import org.junit.BeforeClass;
/**
* JUnit test class for {@link org.apache.hadoop.fs.shell.Count}
*
*/
public class TestCount {
private static final String WITH_QUOTAS = "Content summary with quotas";
private static final String NO_QUOTAS = "Content summary without quotas";
private static final String HUMAN = "human: ";
private static final String BYTES = "bytes: ";
private static Configuration conf;
private static FileSystem mockFs;
private static FileStatus fileStat;
private static ContentSummary mockCs;
@BeforeClass
public static void setup() {
conf = new Configuration();
conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
mockFs = mock(FileSystem.class);
fileStat = mock(FileStatus.class);
mockCs = mock(ContentSummary.class);
when(fileStat.isFile()).thenReturn(true);
}
@Before
public void resetMock() {
reset(mockFs);
}
@Test
public void processOptionsHumanReadable() {
LinkedList<String> options = new LinkedList<String>();
options.add("-h");
options.add("dummy");
Count count = new Count();
count.processOptions(options);
assertFalse(count.isShowQuotas());
assertTrue(count.isHumanReadable());
}
@Test
public void processOptionsAll() {
LinkedList<String> options = new LinkedList<String>();
options.add("-q");
options.add("-h");
options.add("dummy");
Count count = new Count();
count.processOptions(options);
assertTrue(count.isShowQuotas());
assertTrue(count.isHumanReadable());
}
// check quotas are reported correctly
@Test
public void processPathShowQuotas() throws Exception {
Path path = new Path("mockfs:/test");
when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
PathData pathData = new PathData(path.toString(), conf);
PrintStream out = mock(PrintStream.class);
Count count = new Count();
count.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-q");
options.add("dummy");
count.processOptions(options);
count.processPath(pathData);
verify(out).println(BYTES + WITH_QUOTAS + path.toString());
verifyNoMoreInteractions(out);
}
// check counts without quotas are reported correctly
@Test
public void processPathNoQuotas() throws Exception {
Path path = new Path("mockfs:/test");
when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
PathData pathData = new PathData(path.toString(), conf);
PrintStream out = mock(PrintStream.class);
Count count = new Count();
count.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("dummy");
count.processOptions(options);
count.processPath(pathData);
verify(out).println(BYTES + NO_QUOTAS + path.toString());
verifyNoMoreInteractions(out);
}
@Test
public void processPathShowQuotasHuman() throws Exception {
Path path = new Path("mockfs:/test");
when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
PathData pathData = new PathData(path.toString(), conf);
PrintStream out = mock(PrintStream.class);
Count count = new Count();
count.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-q");
options.add("-h");
options.add("dummy");
count.processOptions(options);
count.processPath(pathData);
verify(out).println(HUMAN + WITH_QUOTAS + path.toString());
}
@Test
public void processPathNoQuotasHuman() throws Exception {
Path path = new Path("mockfs:/test");
when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
PathData pathData = new PathData(path.toString(), conf);
PrintStream out = mock(PrintStream.class);
Count count = new Count();
count.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-h");
options.add("dummy");
count.processOptions(options);
count.processPath(pathData);
verify(out).println(HUMAN + NO_QUOTAS + path.toString());
}
@Test
public void getCommandName() {
Count count = new Count();
String actual = count.getCommandName();
String expected = "count";
assertEquals("Count.getCommandName", expected, actual);
}
@Test
public void isDeprecated() {
Count count = new Count();
boolean actual = count.isDeprecated();
boolean expected = false;
assertEquals("Count.isDeprecated", expected, actual);
}
@Test
public void getReplacementCommand() {
Count count = new Count();
String actual = count.getReplacementCommand();
String expected = null;
assertEquals("Count.getReplacementCommand", expected, actual);
}
@Test
public void getName() {
Count count = new Count();
String actual = count.getName();
String expected = "count";
assertEquals("Count.getName", expected, actual);
}
@Test
public void getUsage() {
Count count = new Count();
String actual = count.getUsage();
String expected = "-count [-q] [-h] <path> ...";
assertEquals("Count.getUsage", expected, actual);
}
// mock content system
static class MockContentSummary extends ContentSummary {
public MockContentSummary() {}
@Override
public String toString(boolean qOption, boolean hOption) {
if (qOption) {
if (hOption) {
return(HUMAN + WITH_QUOTAS);
} else {
return(BYTES + WITH_QUOTAS);
}
} else {
if (hOption) {
return(HUMAN + NO_QUOTAS);
} else {
return(BYTES + NO_QUOTAS);
}
}
}
}
// mock file system for use in testing
static class MockFileSystem extends FilterFileSystem {
Configuration conf;
MockFileSystem() {
super(mockFs);
}
@Override
public void initialize(URI uri, Configuration conf) {
this.conf = conf;
}
@Override
public Path makeQualified(Path path) {
return path;
}
@Override
public ContentSummary getContentSummary(Path f) throws IOException {
return new MockContentSummary();
}
@Override
public Configuration getConf() {
return conf;
}
}
}
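
A minimal sketch of driving the same command end to end through FsShell (the sketch class name and the "/user" path are illustrative; the options mirror the usage string verified above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class CountHumanReadableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Equivalent to `hadoop fs -count -q -h /user`: quota columns plus human readable sizes.
    int rc = ToolRunner.run(conf, new FsShell(), new String[] {"-count", "-q", "-h", "/user"});
    System.exit(rc);
  }
}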

View File

@ -0,0 +1,225 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.Arrays;
import org.junit.Test;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
public class TestDecayRpcScheduler {
private Schedulable mockCall(String id) {
Schedulable mockCall = mock(Schedulable.class);
UserGroupInformation ugi = mock(UserGroupInformation.class);
when(ugi.getUserName()).thenReturn(id);
when(mockCall.getUserGroupInformation()).thenReturn(ugi);
return mockCall;
}
private DecayRpcScheduler scheduler;
@Test(expected=IllegalArgumentException.class)
public void testNegativeScheduler() {
scheduler = new DecayRpcScheduler(-1, "", new Configuration());
}
@Test(expected=IllegalArgumentException.class)
public void testZeroScheduler() {
scheduler = new DecayRpcScheduler(0, "", new Configuration());
}
@Test
public void testParsePeriod() {
// By default
scheduler = new DecayRpcScheduler(1, "", new Configuration());
assertEquals(DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_DEFAULT,
scheduler.getDecayPeriodMillis());
// Custom
Configuration conf = new Configuration();
conf.setLong("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY,
1058);
scheduler = new DecayRpcScheduler(1, "ns", conf);
assertEquals(1058L, scheduler.getDecayPeriodMillis());
}
@Test
public void testParseFactor() {
// Default
scheduler = new DecayRpcScheduler(1, "", new Configuration());
assertEquals(DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_DEFAULT,
scheduler.getDecayFactor(), 0.00001);
// Custom
Configuration conf = new Configuration();
conf.set("prefix." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY,
"0.125");
scheduler = new DecayRpcScheduler(1, "prefix", conf);
assertEquals(0.125, scheduler.getDecayFactor(), 0.00001);
}
public void assertEqualDecimalArrays(double[] a, double[] b) {
assertEquals(a.length, b.length);
for(int i = 0; i < a.length; i++) {
assertEquals(a[i], b[i], 0.00001);
}
}
@Test
public void testParseThresholds() {
// Defaults vary by number of queues
Configuration conf = new Configuration();
scheduler = new DecayRpcScheduler(1, "", conf);
assertEqualDecimalArrays(new double[]{}, scheduler.getThresholds());
scheduler = new DecayRpcScheduler(2, "", conf);
assertEqualDecimalArrays(new double[]{0.5}, scheduler.getThresholds());
scheduler = new DecayRpcScheduler(3, "", conf);
assertEqualDecimalArrays(new double[]{0.25, 0.5}, scheduler.getThresholds());
scheduler = new DecayRpcScheduler(4, "", conf);
assertEqualDecimalArrays(new double[]{0.125, 0.25, 0.5}, scheduler.getThresholds());
// Custom
conf = new Configuration();
conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_THRESHOLDS_KEY,
"1, 10, 20, 50, 85");
scheduler = new DecayRpcScheduler(6, "ns", conf);
assertEqualDecimalArrays(new double[]{0.01, 0.1, 0.2, 0.5, 0.85}, scheduler.getThresholds());
}
@Test
public void testAccumulate() {
Configuration conf = new Configuration();
conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY, "99999999"); // Never flush
scheduler = new DecayRpcScheduler(1, "ns", conf);
assertEquals(0, scheduler.getCallCountSnapshot().size()); // empty first
scheduler.getPriorityLevel(mockCall("A"));
assertEquals(1, scheduler.getCallCountSnapshot().get("A").longValue());
// taking the snapshot a second time must not perturb the counts
assertEquals(1, scheduler.getCallCountSnapshot().get("A").longValue());
scheduler.getPriorityLevel(mockCall("A"));
scheduler.getPriorityLevel(mockCall("B"));
scheduler.getPriorityLevel(mockCall("A"));
assertEquals(3, scheduler.getCallCountSnapshot().get("A").longValue());
assertEquals(1, scheduler.getCallCountSnapshot().get("B").longValue());
}
@Test
public void testDecay() {
Configuration conf = new Configuration();
conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY, "999999999"); // Never flush
conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY, "0.5");
scheduler = new DecayRpcScheduler(1, "ns", conf);
assertEquals(0, scheduler.getTotalCallSnapshot());
for (int i = 0; i < 4; i++) {
scheduler.getPriorityLevel(mockCall("A"));
}
for (int i = 0; i < 8; i++) {
scheduler.getPriorityLevel(mockCall("B"));
}
assertEquals(12, scheduler.getTotalCallSnapshot());
assertEquals(4, scheduler.getCallCountSnapshot().get("A").longValue());
assertEquals(8, scheduler.getCallCountSnapshot().get("B").longValue());
scheduler.forceDecay();
assertEquals(6, scheduler.getTotalCallSnapshot());
assertEquals(2, scheduler.getCallCountSnapshot().get("A").longValue());
assertEquals(4, scheduler.getCallCountSnapshot().get("B").longValue());
scheduler.forceDecay();
assertEquals(3, scheduler.getTotalCallSnapshot());
assertEquals(1, scheduler.getCallCountSnapshot().get("A").longValue());
assertEquals(2, scheduler.getCallCountSnapshot().get("B").longValue());
scheduler.forceDecay();
assertEquals(1, scheduler.getTotalCallSnapshot());
assertEquals(null, scheduler.getCallCountSnapshot().get("A"));
assertEquals(1, scheduler.getCallCountSnapshot().get("B").longValue());
scheduler.forceDecay();
assertEquals(0, scheduler.getTotalCallSnapshot());
assertEquals(null, scheduler.getCallCountSnapshot().get("A"));
assertEquals(null, scheduler.getCallCountSnapshot().get("B"));
}
@Test
public void testPriority() {
Configuration conf = new Configuration();
conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY, "99999999"); // Never flush
conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_THRESHOLDS_KEY,
"25, 50, 75");
scheduler = new DecayRpcScheduler(4, "ns", conf);
assertEquals(0, scheduler.getPriorityLevel(mockCall("A")));
assertEquals(2, scheduler.getPriorityLevel(mockCall("A")));
assertEquals(0, scheduler.getPriorityLevel(mockCall("B")));
assertEquals(1, scheduler.getPriorityLevel(mockCall("B")));
assertEquals(0, scheduler.getPriorityLevel(mockCall("C")));
assertEquals(0, scheduler.getPriorityLevel(mockCall("C")));
assertEquals(1, scheduler.getPriorityLevel(mockCall("A")));
assertEquals(1, scheduler.getPriorityLevel(mockCall("A")));
assertEquals(1, scheduler.getPriorityLevel(mockCall("A")));
assertEquals(2, scheduler.getPriorityLevel(mockCall("A")));
}
@Test(timeout=2000)
public void testPeriodic() throws InterruptedException {
Configuration conf = new Configuration();
conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY, "10");
conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY, "0.5");
scheduler = new DecayRpcScheduler(1, "ns", conf);
assertEquals(10, scheduler.getDecayPeriodMillis());
assertEquals(0, scheduler.getTotalCallSnapshot());
for (int i = 0; i < 64; i++) {
scheduler.getPriorityLevel(mockCall("A"));
}
// It should eventually decay to zero
while (scheduler.getTotalCallSnapshot() > 0) {
Thread.sleep(10);
}
}
}
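
A minimal configuration sketch for the scheduler, restricted to the constructor and keys exercised above (the "ns" namespace, the chosen values, and the sketch class name are illustrative):

package org.apache.hadoop.ipc;

import org.apache.hadoop.conf.Configuration;

public class DecaySchedulerConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_PERIOD_KEY, "5000");   // decay every 5s
    conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_FACTOR_KEY, "0.5");    // halve counts each period
    conf.set("ns." + DecayRpcScheduler.IPC_CALLQUEUE_DECAYSCHEDULER_THRESHOLDS_KEY, "25, 50, 75");
    // Four priority levels; callers whose share of recent traffic crosses a threshold
    // are mapped to the lower-priority queues by getPriorityLevel(Schedulable).
    DecayRpcScheduler scheduler = new DecayRpcScheduler(4, "ns", conf);
    System.out.println("decay period = " + scheduler.getDecayPeriodMillis() + " ms");
  }
}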

View File

@ -583,14 +583,14 @@ public class TestRPC {
} }
MetricsRecordBuilder rb = getMetrics(server.rpcMetrics.name()); MetricsRecordBuilder rb = getMetrics(server.rpcMetrics.name());
if (expectFailure) { if (expectFailure) {
assertCounter("RpcAuthorizationFailures", 1, rb); assertCounter("RpcAuthorizationFailures", 1L, rb);
} else { } else {
assertCounter("RpcAuthorizationSuccesses", 1, rb); assertCounter("RpcAuthorizationSuccesses", 1L, rb);
} }
//since we don't have authentication turned ON, we should see //since we don't have authentication turned ON, we should see
// 0 for the authentication successes and 0 for failure // 0 for the authentication successes and 0 for failure
assertCounter("RpcAuthenticationFailures", 0, rb); assertCounter("RpcAuthenticationFailures", 0L, rb);
assertCounter("RpcAuthenticationSuccesses", 0, rb); assertCounter("RpcAuthenticationSuccesses", 0L, rb);
} }
} }

View File

@ -0,0 +1,127 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with this
* work for additional information regarding copyright ownership. The ASF
* licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.apache.hadoop.security;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import java.util.List;
import org.junit.After;
import org.junit.Test;
public class TestNetgroupCache {
private static final String USER1 = "user1";
private static final String USER2 = "user2";
private static final String USER3 = "user3";
private static final String GROUP1 = "group1";
private static final String GROUP2 = "group2";
@After
public void teardown() {
NetgroupCache.clear();
}
/**
* Cache two groups with a set of users.
* Test membership correctness.
*/
@Test
public void testMembership() {
List<String> users = new ArrayList<String>();
users.add(USER1);
users.add(USER2);
NetgroupCache.add(GROUP1, users);
users = new ArrayList<String>();
users.add(USER1);
users.add(USER3);
NetgroupCache.add(GROUP2, users);
verifyGroupMembership(USER1, 2, GROUP1);
verifyGroupMembership(USER1, 2, GROUP2);
verifyGroupMembership(USER2, 1, GROUP1);
verifyGroupMembership(USER3, 1, GROUP2);
}
/**
* Cache a group with a set of users.
* Test membership correctness.
* Clear cache, remove a user from the group and cache the group
* Test membership correctness.
*/
@Test
public void testUserRemoval() {
List<String> users = new ArrayList<String>();
users.add(USER1);
users.add(USER2);
NetgroupCache.add(GROUP1, users);
verifyGroupMembership(USER1, 1, GROUP1);
verifyGroupMembership(USER2, 1, GROUP1);
users.remove(USER2);
NetgroupCache.clear();
NetgroupCache.add(GROUP1, users);
verifyGroupMembership(USER1, 1, GROUP1);
verifyGroupMembership(USER2, 0, null);
}
/**
* Cache two groups with a set of users.
* Test membership correctness.
* Clear cache, cache only one group.
* Test membership correctness.
*/
@Test
public void testGroupRemoval() {
List<String> users = new ArrayList<String>();
users.add(USER1);
users.add(USER2);
NetgroupCache.add(GROUP1, users);
users = new ArrayList<String>();
users.add(USER1);
users.add(USER3);
NetgroupCache.add(GROUP2, users);
verifyGroupMembership(USER1, 2, GROUP1);
verifyGroupMembership(USER1, 2, GROUP2);
verifyGroupMembership(USER2, 1, GROUP1);
verifyGroupMembership(USER3, 1, GROUP2);
NetgroupCache.clear();
users = new ArrayList<String>();
users.add(USER1);
users.add(USER2);
NetgroupCache.add(GROUP1, users);
verifyGroupMembership(USER1, 1, GROUP1);
verifyGroupMembership(USER2, 1, GROUP1);
verifyGroupMembership(USER3, 0, null);
}
private void verifyGroupMembership(String user, int size, String group) {
List<String> groups = new ArrayList<String>();
NetgroupCache.getNetgroups(user, groups);
assertEquals(size, groups.size());
if (size > 0) {
boolean present = false;
for (String groupEntry:groups) {
if (groupEntry.equals(group)) {
present = true;
break;
}
}
assertTrue(present);
}
}
}
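
A minimal sketch of the cache calls exercised above (group and user names, and the sketch class name, are illustrative):

package org.apache.hadoop.security;

import java.util.ArrayList;
import java.util.List;

public class NetgroupCacheSketch {
  public static void main(String[] args) {
    List<String> users = new ArrayList<String>();
    users.add("user1");
    users.add("user2");
    NetgroupCache.add("group1", users);           // cache the netgroup's members

    List<String> groups = new ArrayList<String>();
    NetgroupCache.getNetgroups("user1", groups);  // groups now contains "group1"
    System.out.println(groups);

    NetgroupCache.clear();                        // drop all cached memberships before re-caching
  }
}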

View File

@ -0,0 +1,163 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security;
import java.io.IOException;
import java.net.InetAddress;
import java.util.Map;
import junit.framework.TestCase;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.WhitelistBasedResolver;
import org.apache.hadoop.util.TestFileBasedIPList;
public class TestWhitelistBasedResolver extends TestCase {
public static final Map<String, String> SASL_PRIVACY_PROPS =
WhitelistBasedResolver.getSaslProperties(new Configuration());
public void testFixedVariableAndLocalWhiteList() throws IOException {
String[] fixedIps = {"10.119.103.112", "10.221.102.0/23"};
TestFileBasedIPList.createFileWithEntries ("fixedwhitelist.txt", fixedIps);
String[] variableIps = {"10.222.0.0/16", "10.113.221.221"};
TestFileBasedIPList.createFileWithEntries ("variablewhitelist.txt", variableIps);
Configuration conf = new Configuration();
conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_FIXEDWHITELIST_FILE ,
"fixedwhitelist.txt");
conf.setBoolean(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_ENABLE,
true);
conf.setLong(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_CACHE_SECS,
1);
conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_FILE ,
"variablewhitelist.txt");
WhitelistBasedResolver wqr = new WhitelistBasedResolver ();
wqr.setConf(conf);
assertEquals (wqr.getDefaultProperties(),
wqr.getServerProperties(InetAddress.getByName("10.119.103.112")));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.119.103.113"));
assertEquals (wqr.getDefaultProperties(), wqr.getServerProperties("10.221.103.121"));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.221.104.0"));
assertEquals (wqr.getDefaultProperties(), wqr.getServerProperties("10.222.103.121"));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.223.104.0"));
assertEquals (wqr.getDefaultProperties(), wqr.getServerProperties("10.113.221.221"));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.113.221.222"));
assertEquals (wqr.getDefaultProperties(), wqr.getServerProperties("127.0.0.1"));
TestFileBasedIPList.removeFile("fixedwhitelist.txt");
TestFileBasedIPList.removeFile("variablewhitelist.txt");
}
/**
* Add a bunch of subnets and IPs to the whitelist
* Check for inclusion in whitelist
* Check for exclusion from whitelist
*/
public void testFixedAndLocalWhiteList() throws IOException {
String[] fixedIps = {"10.119.103.112", "10.221.102.0/23"};
TestFileBasedIPList.createFileWithEntries ("fixedwhitelist.txt", fixedIps);
String[] variableIps = {"10.222.0.0/16", "10.113.221.221"};
TestFileBasedIPList.createFileWithEntries ("variablewhitelist.txt", variableIps);
Configuration conf = new Configuration();
conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_FIXEDWHITELIST_FILE ,
"fixedwhitelist.txt");
conf.setBoolean(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_ENABLE,
false);
conf.setLong(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_CACHE_SECS,
100);
conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_FILE ,
"variablewhitelist.txt");
WhitelistBasedResolver wqr = new WhitelistBasedResolver();
wqr.setConf(conf);
assertEquals (wqr.getDefaultProperties(),
wqr.getServerProperties(InetAddress.getByName("10.119.103.112")));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.119.103.113"));
assertEquals (wqr.getDefaultProperties(), wqr.getServerProperties("10.221.103.121"));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.221.104.0"));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.222.103.121"));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.223.104.0"));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.113.221.221"));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.113.221.222"));
assertEquals (wqr.getDefaultProperties(), wqr.getServerProperties("127.0.0.1"));
TestFileBasedIPList.removeFile("fixedwhitelist.txt");
TestFileBasedIPList.removeFile("variablewhitelist.txt");
}
/**
* Add a bunch of subnets and IPs to the whitelist
* Check for inclusion in whitelist with a null value
*/
public void testNullIPAddress() throws IOException {
String[] fixedIps = {"10.119.103.112", "10.221.102.0/23"};
TestFileBasedIPList.createFileWithEntries ("fixedwhitelist.txt", fixedIps);
String[] variableIps = {"10.222.0.0/16", "10.113.221.221"};
TestFileBasedIPList.createFileWithEntries ("variablewhitelist.txt", variableIps);
Configuration conf = new Configuration();
conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_FIXEDWHITELIST_FILE ,
"fixedwhitelist.txt");
conf.setBoolean(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_ENABLE,
true);
conf.setLong(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_CACHE_SECS,
100);
conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_FILE ,
"variablewhitelist.txt");
WhitelistBasedResolver wqr = new WhitelistBasedResolver();
wqr.setConf(conf);
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties((InetAddress)null));
assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties((String)null));
TestFileBasedIPList.removeFile("fixedwhitelist.txt");
TestFileBasedIPList.removeFile("variablewhitelist.txt");
}
}
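
A minimal sketch of wiring up the resolver with the same keys the tests use (the file names, cache interval, probe address, and sketch class name are illustrative):

package org.apache.hadoop.security;

import java.net.InetAddress;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;

public class WhitelistResolverSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_FIXEDWHITELIST_FILE, "fixedwhitelist.txt");
    conf.setBoolean(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_ENABLE, true);
    conf.setLong(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_CACHE_SECS, 60);
    conf.set(WhitelistBasedResolver.HADOOP_SECURITY_SASL_VARIABLEWHITELIST_FILE, "variablewhitelist.txt");

    WhitelistBasedResolver resolver = new WhitelistBasedResolver();
    resolver.setConf(conf);

    // Whitelisted peers get the default (relaxed) SASL properties;
    // everything else falls back to privacy (auth-conf).
    Map<String, String> props =
        resolver.getServerProperties(InetAddress.getByName("10.119.103.112"));
    System.out.println(props);
  }
}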

View File

@ -18,16 +18,22 @@
package org.apache.hadoop.security.authorize; package org.apache.hadoop.security.authorize;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import java.net.InetAddress;
import java.net.UnknownHostException;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.ipc.TestRPC.TestProtocol; import org.apache.hadoop.ipc.TestRPC.TestProtocol;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Test; import org.junit.Test;
public class TestServiceAuthorization { public class TestServiceAuthorization {
private static final String ACL_CONFIG = "test.protocol.acl"; private static final String ACL_CONFIG = "test.protocol.acl";
private static final String ACL_CONFIG1 = "test.protocol1.acl"; private static final String ACL_CONFIG1 = "test.protocol1.acl";
private static final String ADDRESS = "0.0.0.0";
public interface TestProtocol1 extends TestProtocol {}; public interface TestProtocol1 extends TestProtocol {};
@ -64,4 +70,115 @@ public class TestServiceAuthorization {
acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol1.class); acl = serviceAuthorizationManager.getProtocolsAcls(TestProtocol1.class);
assertEquals("user2 group2", acl.getAclString()); assertEquals("user2 group2", acl.getAclString());
} }
@Test
public void testBlockedAcl() throws UnknownHostException {
UserGroupInformation drwho =
UserGroupInformation.createUserForTesting("drwho@EXAMPLE.COM",
new String[] { "group1", "group2" });
ServiceAuthorizationManager serviceAuthorizationManager =
new ServiceAuthorizationManager();
Configuration conf = new Configuration ();
//test without setting a blocked acl
conf.set(ACL_CONFIG, "user1 group1");
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol.class, conf,
InetAddress.getByName(ADDRESS));
} catch (AuthorizationException e) {
fail();
}
//now set a blocked acl with another user and another group
conf.set(ACL_CONFIG + ServiceAuthorizationManager.BLOCKED, "drwho2 group3");
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol.class, conf,
InetAddress.getByName(ADDRESS));
} catch (AuthorizationException e) {
fail();
}
//now set a blocked acl with the user and another group
conf.set(ACL_CONFIG + ServiceAuthorizationManager.BLOCKED, "drwho group3");
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol.class, conf,
InetAddress.getByName(ADDRESS));
fail();
} catch (AuthorizationException e) {
//expects Exception
}
//reset the blocked acl to another user and group; access should be restored
conf.set(ACL_CONFIG + ServiceAuthorizationManager.BLOCKED, "drwho2 group3");
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol.class, conf,
InetAddress.getByName(ADDRESS));
} catch (AuthorizationException e) {
fail();
}
//now set a blocked acl with another user and group that the user belongs to
conf.set(ACL_CONFIG + ServiceAuthorizationManager.BLOCKED, "drwho2 group2");
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol.class, conf,
InetAddress.getByName(ADDRESS));
fail();
} catch (AuthorizationException e) {
//expects Exception
}
//reset blocked acl so that there is no blocked ACL
conf.set(ACL_CONFIG + ServiceAuthorizationManager.BLOCKED, "");
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol.class, conf,
InetAddress.getByName(ADDRESS));
} catch (AuthorizationException e) {
fail();
}
}
@Test
public void testDefaultBlockedAcl() throws UnknownHostException {
UserGroupInformation drwho =
UserGroupInformation.createUserForTesting("drwho@EXAMPLE.COM",
new String[] { "group1", "group2" });
ServiceAuthorizationManager serviceAuthorizationManager =
new ServiceAuthorizationManager();
Configuration conf = new Configuration ();
//test without setting a default blocked acl
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol1.class, conf,
InetAddress.getByName(ADDRESS));
} catch (AuthorizationException e) {
fail();
}
//set a restrictive default blocked acl and a non-restricting blocked acl for TestProtocol
conf.set(
CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_BLOCKED_ACL,
"user2 group2");
conf.set(ACL_CONFIG + ServiceAuthorizationManager.BLOCKED, "user2");
serviceAuthorizationManager.refresh(conf, new TestPolicyProvider());
//drwho is authorized to access TestProtocol
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol.class, conf,
InetAddress.getByName(ADDRESS));
} catch (AuthorizationException e) {
fail();
}
//drwho is not authorized to access TestProtocol1 because it uses the default blocked acl.
try {
serviceAuthorizationManager.authorize(drwho, TestProtocol1.class, conf,
InetAddress.getByName(ADDRESS));
fail();
} catch (AuthorizationException e) {
//expects Exception
}
}
} }
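
A minimal configuration sketch for the new blocked ACLs, using only the keys exercised above ("test.protocol.acl", the user and group names, and the sketch class name are illustrative; real deployments use the ACL keys published by their PolicyProvider):

package org.apache.hadoop.security.authorize;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

public class BlockedAclConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Allow list for one protocol.
    conf.set("test.protocol.acl", "user1 group1");
    // Deny list for the same protocol; a match here wins even if the allow list admits the user.
    conf.set("test.protocol.acl" + ServiceAuthorizationManager.BLOCKED, "baduser badgroup");
    // Fallback deny list for protocols that do not define their own blocked ACL.
    conf.set(
        CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_BLOCKED_ACL,
        "contractors");
    // ServiceAuthorizationManager.refresh(conf, policyProvider) picks these values up.
  }
}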

View File

@ -0,0 +1,188 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.IOException;
import org.apache.hadoop.util.CacheableIPList;
import org.apache.hadoop.util.FileBasedIPList;
import junit.framework.TestCase;
public class TestCacheableIPList extends TestCase {
/**
* Add a bunch of subnets and IPs to the file
* Set up a low cache refresh interval
* Test for inclusion and check for exclusion
* Add more subnets and IPs
* Wait for the cache timeout
* Test for inclusion and check for exclusion again
*/
public void testAddWithSleepForCacheTimeout() throws IOException, InterruptedException {
String[] ips = {"10.119.103.112", "10.221.102.0/23", "10.113.221.221"};
TestFileBasedIPList.createFileWithEntries ("ips.txt", ips);
CacheableIPList cipl = new CacheableIPList(
new FileBasedIPList("ips.txt"),100);
assertFalse("10.113.221.222 is in the list",
cipl.isIn("10.113.221.222"));
assertFalse ("10.222.103.121 is in the list",
cipl.isIn("10.222.103.121"));
TestFileBasedIPList.removeFile("ips.txt");
String[]ips2 = {"10.119.103.112", "10.221.102.0/23",
"10.222.0.0/16", "10.113.221.221", "10.113.221.222"};
TestFileBasedIPList.createFileWithEntries ("ips.txt", ips2);
Thread.sleep(101);
assertTrue("10.113.221.222 is not in the list",
cipl.isIn("10.113.221.222"));
assertTrue ("10.222.103.121 is not in the list",
cipl.isIn("10.222.103.121"));
TestFileBasedIPList.removeFile("ips.txt");
}
/**
* Add a bunch of subnets and IPs to the file
* Set up a low cache refresh interval
* Test for inclusion and check for exclusion
* Remove some subnets and IPs
* Wait for the cache timeout
* Test for inclusion and check for exclusion again
*/
public void testRemovalWithSleepForCacheTimeout() throws IOException, InterruptedException {
String[] ips = {"10.119.103.112", "10.221.102.0/23",
"10.222.0.0/16", "10.113.221.221", "10.113.221.222"};
TestFileBasedIPList.createFileWithEntries ("ips.txt", ips);
CacheableIPList cipl = new CacheableIPList(
new FileBasedIPList("ips.txt"),100);
assertTrue("10.113.221.222 is not in the list",
cipl.isIn("10.113.221.222"));
assertTrue ("10.222.103.121 is not in the list",
cipl.isIn("10.222.103.121"));
TestFileBasedIPList.removeFile("ips.txt");
String[]ips2 = {"10.119.103.112", "10.221.102.0/23", "10.113.221.221"};
TestFileBasedIPList.createFileWithEntries ("ips.txt", ips2);
Thread.sleep(1005);
assertFalse("10.113.221.222 is in the list",
cipl.isIn("10.113.221.222"));
assertFalse ("10.222.103.121 is in the list",
cipl.isIn("10.222.103.121"));
TestFileBasedIPList.removeFile("ips.txt");
}
/**
* Add a bunch of subnets and IPs to the file
* Set up a low cache refresh interval
* Test for inclusion and check for exclusion
* Add more subnets and IPs
* Do a refresh
* Test for inclusion and check for exclusion again
*/
public void testAddWithRefresh() throws IOException, InterruptedException {
String[] ips = {"10.119.103.112", "10.221.102.0/23", "10.113.221.221"};
TestFileBasedIPList.createFileWithEntries ("ips.txt", ips);
CacheableIPList cipl = new CacheableIPList(
new FileBasedIPList("ips.txt"),100);
assertFalse("10.113.221.222 is in the list",
cipl.isIn("10.113.221.222"));
assertFalse ("10.222.103.121 is in the list",
cipl.isIn("10.222.103.121"));
TestFileBasedIPList.removeFile("ips.txt");
String[]ips2 = {"10.119.103.112", "10.221.102.0/23",
"10.222.0.0/16", "10.113.221.221", "10.113.221.222"};
TestFileBasedIPList.createFileWithEntries ("ips.txt", ips2);
cipl.refresh();
assertTrue("10.113.221.222 is not in the list",
cipl.isIn("10.113.221.222"));
assertTrue ("10.222.103.121 is not in the list",
cipl.isIn("10.222.103.121"));
TestFileBasedIPList.removeFile("ips.txt");
}
/**
* Add a bunch of subnets and IPs to the file
* Set up a low cache refresh interval
* Test for inclusion and check for exclusion
* Remove some subnets and IPs
* Do a refresh
* Test for inclusion and check for exclusion again
*/
public void testRemovalWithRefresh() throws IOException, InterruptedException {
String[] ips = {"10.119.103.112", "10.221.102.0/23",
"10.222.0.0/16", "10.113.221.221", "10.113.221.222"};
TestFileBasedIPList.createFileWithEntries ("ips.txt", ips);
CacheableIPList cipl = new CacheableIPList(
new FileBasedIPList("ips.txt"),100);
assertTrue("10.113.221.222 is not in the list",
cipl.isIn("10.113.221.222"));
assertTrue ("10.222.103.121 is not in the list",
cipl.isIn("10.222.103.121"));
TestFileBasedIPList.removeFile("ips.txt");
String[]ips2 = {"10.119.103.112", "10.221.102.0/23", "10.113.221.221"};
TestFileBasedIPList.createFileWithEntries ("ips.txt", ips2);
cipl.refresh();
assertFalse("10.113.221.222 is in the list",
cipl.isIn("10.113.221.222"));
assertFalse ("10.222.103.121 is in the list",
cipl.isIn("10.222.103.121"));
TestFileBasedIPList.removeFile("ips.txt");
}
}
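
A minimal sketch of the cacheable list built on top of a file-backed one, as exercised above (the file name, refresh interval, and sketch class name are illustrative):

package org.apache.hadoop.util;

public class CacheableIPListSketch {
  public static void main(String[] args) {
    // ips.txt holds one IP or CIDR subnet (e.g. 10.221.102.0/23) per line.
    CacheableIPList acl = new CacheableIPList(new FileBasedIPList("ips.txt"), 60 * 1000);
    System.out.println(acl.isIn("10.221.102.1")); // the file is re-read once the cache window expires
    acl.refresh();                                // or force an immediate reload
  }
}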

View File

@ -19,6 +19,9 @@ package org.apache.hadoop.util;
import java.nio.ByteBuffer; import java.nio.ByteBuffer;
import java.util.Random; import java.util.Random;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Stopwatch;
import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.ChecksumException;
import org.junit.Test; import org.junit.Test;
@ -53,68 +56,113 @@ public class TestDataChecksum {
} }
} }
} }
private void doBulkTest(DataChecksum checksum, int dataLength, private static class Harness {
boolean useDirect) throws Exception { final DataChecksum checksum;
System.err.println("Testing bulk checksums of length " + final int dataLength, sumsLength, numSums;
dataLength + " with " + ByteBuffer dataBuf, checksumBuf;
(useDirect ? "direct" : "array-backed") + " buffers");
int numSums = (dataLength - 1)/checksum.getBytesPerChecksum() + 1; Harness(DataChecksum checksum, int dataLength, boolean useDirect) {
int sumsLength = numSums * checksum.getChecksumSize(); this.checksum = checksum;
this.dataLength = dataLength;
byte data[] = new byte[dataLength +
DATA_OFFSET_IN_BUFFER + numSums = (dataLength - 1)/checksum.getBytesPerChecksum() + 1;
DATA_TRAILER_IN_BUFFER]; sumsLength = numSums * checksum.getChecksumSize();
new Random().nextBytes(data);
ByteBuffer dataBuf = ByteBuffer.wrap( byte data[] = new byte[dataLength +
DATA_OFFSET_IN_BUFFER +
DATA_TRAILER_IN_BUFFER];
new Random().nextBytes(data);
dataBuf = ByteBuffer.wrap(
data, DATA_OFFSET_IN_BUFFER, dataLength); data, DATA_OFFSET_IN_BUFFER, dataLength);
byte checksums[] = new byte[SUMS_OFFSET_IN_BUFFER + sumsLength]; byte checksums[] = new byte[SUMS_OFFSET_IN_BUFFER + sumsLength];
ByteBuffer checksumBuf = ByteBuffer.wrap( checksumBuf = ByteBuffer.wrap(
checksums, SUMS_OFFSET_IN_BUFFER, sumsLength); checksums, SUMS_OFFSET_IN_BUFFER, sumsLength);
// Swap out for direct buffers if requested.
if (useDirect) {
dataBuf = directify(dataBuf);
checksumBuf = directify(checksumBuf);
}
// calculate real checksum, make sure it passes
checksum.calculateChunkedSums(dataBuf, checksumBuf);
checksum.verifyChunkedSums(dataBuf, checksumBuf, "fake file", 0);
// Change a byte in the header and in the trailer, make sure // Swap out for direct buffers if requested.
// it doesn't affect checksum result if (useDirect) {
corruptBufferOffset(checksumBuf, 0); dataBuf = directify(dataBuf);
checksum.verifyChunkedSums(dataBuf, checksumBuf, "fake file", 0); checksumBuf = directify(checksumBuf);
corruptBufferOffset(dataBuf, 0); }
dataBuf.limit(dataBuf.limit() + 1);
corruptBufferOffset(dataBuf, dataLength + DATA_OFFSET_IN_BUFFER);
dataBuf.limit(dataBuf.limit() - 1);
checksum.verifyChunkedSums(dataBuf, checksumBuf, "fake file", 0);
// Make sure bad checksums fail - error at beginning of array
corruptBufferOffset(checksumBuf, SUMS_OFFSET_IN_BUFFER);
try {
checksum.verifyChunkedSums(dataBuf, checksumBuf, "fake file", 0);
fail("Did not throw on bad checksums");
} catch (ChecksumException ce) {
assertEquals(0, ce.getPos());
} }
// Make sure bad checksums fail - error at end of array void testCorrectness() throws ChecksumException {
uncorruptBufferOffset(checksumBuf, SUMS_OFFSET_IN_BUFFER); // calculate real checksum, make sure it passes
corruptBufferOffset(checksumBuf, SUMS_OFFSET_IN_BUFFER + sumsLength - 1); checksum.calculateChunkedSums(dataBuf, checksumBuf);
try {
checksum.verifyChunkedSums(dataBuf, checksumBuf, "fake file", 0); checksum.verifyChunkedSums(dataBuf, checksumBuf, "fake file", 0);
fail("Did not throw on bad checksums");
} catch (ChecksumException ce) { // Change a byte in the header and in the trailer, make sure
int expectedPos = checksum.getBytesPerChecksum() * (numSums - 1); // it doesn't affect checksum result
assertEquals(expectedPos, ce.getPos()); corruptBufferOffset(checksumBuf, 0);
assertTrue(ce.getMessage().contains("fake file")); checksum.verifyChunkedSums(dataBuf, checksumBuf, "fake file", 0);
corruptBufferOffset(dataBuf, 0);
dataBuf.limit(dataBuf.limit() + 1);
corruptBufferOffset(dataBuf, dataLength + DATA_OFFSET_IN_BUFFER);
dataBuf.limit(dataBuf.limit() - 1);
checksum.verifyChunkedSums(dataBuf, checksumBuf, "fake file", 0);
// Make sure bad checksums fail - error at beginning of array
corruptBufferOffset(checksumBuf, SUMS_OFFSET_IN_BUFFER);
try {
checksum.verifyChunkedSums(dataBuf, checksumBuf, "fake file", 0);
fail("Did not throw on bad checksums");
} catch (ChecksumException ce) {
assertEquals(0, ce.getPos());
}
// Make sure bad checksums fail - error at end of array
uncorruptBufferOffset(checksumBuf, SUMS_OFFSET_IN_BUFFER);
corruptBufferOffset(checksumBuf, SUMS_OFFSET_IN_BUFFER + sumsLength - 1);
try {
checksum.verifyChunkedSums(dataBuf, checksumBuf, "fake file", 0);
fail("Did not throw on bad checksums");
} catch (ChecksumException ce) {
int expectedPos = checksum.getBytesPerChecksum() * (numSums - 1);
assertEquals(expectedPos, ce.getPos());
assertTrue(ce.getMessage().contains("fake file"));
}
} }
} }
private void doBulkTest(DataChecksum checksum, int dataLength,
boolean useDirect) throws Exception {
System.err.println("Testing bulk checksums of length " +
dataLength + " with " +
(useDirect ? "direct" : "array-backed") + " buffers");
new Harness(checksum, dataLength, useDirect).testCorrectness();
}
/**
* Simple performance test for the "common case" checksum usage in HDFS:
* computing and verifying CRC32C with 512 byte chunking on native
* buffers.
*/
@Test
public void commonUsagePerfTest() throws Exception {
final int NUM_RUNS = 5;
final DataChecksum checksum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);
final int dataLength = 512 * 1024 * 1024;
Harness h = new Harness(checksum, dataLength, true);
for (int i = 0; i < NUM_RUNS; i++) {
Stopwatch s = new Stopwatch().start();
// calculate real checksum, make sure it passes
checksum.calculateChunkedSums(h.dataBuf, h.checksumBuf);
s.stop();
System.err.println("Calculate run #" + i + ": " +
s.elapsedTime(TimeUnit.MICROSECONDS) + "us");
s = new Stopwatch().start();
// calculate real checksum, make sure it passes
checksum.verifyChunkedSums(h.dataBuf, h.checksumBuf, "fake file", 0);
s.stop();
System.err.println("Verify run #" + i + ": " +
s.elapsedTime(TimeUnit.MICROSECONDS) + "us");
}
}
@Test @Test
public void testEquality() { public void testEquality() {
assertEquals( assertEquals(

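A minimal sketch of the chunked-checksum calls used above, with the common CRC32C/512-byte configuration (the buffer sizes, the "sketch" file label, and the sketch class name are illustrative):

package org.apache.hadoop.util;

import java.nio.ByteBuffer;
import java.util.Random;

public class ChunkedChecksumSketch {
  public static void main(String[] args) throws Exception {
    DataChecksum checksum = DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);

    byte[] data = new byte[4096];
    new Random().nextBytes(data);
    ByteBuffer dataBuf = ByteBuffer.wrap(data);

    int numChunks = (data.length + 511) / 512;
    ByteBuffer sums = ByteBuffer.allocate(numChunks * checksum.getChecksumSize());

    checksum.calculateChunkedSums(dataBuf, sums);
    // Throws ChecksumException, reporting the offending position, if any chunk mismatches.
    checksum.verifyChunkedSums(dataBuf, sums, "sketch", 0);
    System.out.println("checksums verified");
  }
}
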
View File

@ -0,0 +1,215 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.util.FileBasedIPList;
import org.apache.hadoop.util.IPList;
import org.junit.After;
import org.junit.Test;
import junit.framework.TestCase;
public class TestFileBasedIPList extends TestCase {
@After
public void tearDown() {
removeFile("ips.txt");
}
/**
* Add a bunch of IPs to the file
* Check for inclusion
* Check for exclusion
*/
@Test
public void testSubnetsAndIPs() throws IOException {
String[] ips = {"10.119.103.112", "10.221.102.0/23"};
createFileWithEntries ("ips.txt", ips);
IPList ipList = new FileBasedIPList("ips.txt");
assertTrue ("10.119.103.112 is not in the list",
ipList.isIn("10.119.103.112"));
assertFalse ("10.119.103.113 is in the list",
ipList.isIn("10.119.103.113"));
assertTrue ("10.221.102.0 is not in the list",
ipList.isIn("10.221.102.0"));
assertTrue ("10.221.102.1 is not in the list",
ipList.isIn("10.221.102.1"));
assertTrue ("10.221.103.1 is not in the list",
ipList.isIn("10.221.103.1"));
assertTrue ("10.221.103.255 is not in the list",
ipList.isIn("10.221.103.255"));
assertFalse("10.221.104.0 is in the list",
ipList.isIn("10.221.104.0"));
assertFalse("10.221.104.1 is in the list",
ipList.isIn("10.221.104.1"));
}
/**
* Add a bunch of IPs to the file
* Check that a null IP is not reported as included
*/
@Test
public void testNullIP() throws IOException {
String[] ips = {"10.119.103.112", "10.221.102.0/23"};
createFileWithEntries ("ips.txt", ips);
IPList ipList = new FileBasedIPList("ips.txt");
assertFalse ("Null Ip is in the list",
ipList.isIn(null));
}
/**
* Add a bunch of subnets and IPs to the file
* Check for inclusion
* Check for exclusion
*/
@Test
public void testWithMultipleSubnetAndIPs() throws IOException {
String[] ips = {"10.119.103.112", "10.221.102.0/23", "10.222.0.0/16",
"10.113.221.221"};
createFileWithEntries ("ips.txt", ips);
IPList ipList = new FileBasedIPList("ips.txt");
assertTrue ("10.119.103.112 is not in the list",
ipList.isIn("10.119.103.112"));
assertFalse ("10.119.103.113 is in the list",
ipList.isIn("10.119.103.113"));
assertTrue ("10.221.103.121 is not in the list",
ipList.isIn("10.221.103.121"));
assertFalse("10.221.104.0 is in the list",
ipList.isIn("10.221.104.0"));
assertTrue ("10.222.103.121 is not in the list",
ipList.isIn("10.222.103.121"));
assertFalse("10.223.104.0 is in the list",
ipList.isIn("10.223.104.0"));
assertTrue ("10.113.221.221 is not in the list",
ipList.isIn("10.113.221.221"));
assertFalse("10.113.221.222 is in the list",
ipList.isIn("10.113.221.222"));
}
/**
* Do not specify a file
* The inclusion check should return false, as if the feature were turned off
*/
public void testFileNotSpecified() {
IPList ipl = new FileBasedIPList(null);
assertFalse("110.113.221.222 is in the list",
ipl.isIn("110.113.221.222"));
}
/**
* Specify a non-existent file
* The inclusion check should return false, as if the feature were turned off
*/
public void testFileMissing() {
IPList ipl = new FileBasedIPList("missingips.txt");
assertFalse("110.113.221.222 is in the list",
ipl.isIn("110.113.221.222"));
}
/**
* Specify an existing but empty file
* The inclusion check should return false, as if the feature were turned off
*/
public void testWithEmptyList() throws IOException {
String[] ips = {};
createFileWithEntries ("ips.txt", ips);
IPList ipl = new FileBasedIPList("ips.txt");
assertFalse("110.113.221.222 is in the list",
ipl.isIn("110.113.221.222"));
}
/**
* Specify an existing file with an IP in the wrong format
* Constructing the list should fail
*/
public void testForBadFile() throws IOException {
String[] ips = { "10.221.102/23"};
createFileWithEntries ("ips.txt", ips);
try {
new FileBasedIPList("ips.txt");
fail();
} catch (Exception e) {
//expects Exception
}
}
/**
* Add a bunch of subnets and IPs to the file, keeping one entry malformed.
* Constructing the list should fail even though the other entries are valid.
*/
public void testWithAWrongEntry() throws IOException {
String[] ips = {"10.119.103.112", "10.221.102/23", "10.221.204.1/23"};
createFileWithEntries ("ips.txt", ips);
try {
new FileBasedIPList("ips.txt");
fail();
} catch (Exception e) {
//expects Exception
}
}
public static void createFileWithEntries(String fileName, String[] ips)
throws IOException {
FileUtils.writeLines(new File(fileName), Arrays.asList(ips));
}
public static void removeFile(String fileName) {
File file = new File(fileName);
if (file.exists()) {
file.delete();
}
}
}
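
A minimal sketch of the file-backed list on its own, as exercised above (the file name, addresses, and sketch class name are illustrative):

package org.apache.hadoop.util;

public class FileBasedIPListSketch {
  public static void main(String[] args) {
    // ips.txt holds one IP or CIDR subnet (e.g. 10.221.102.0/23) per line.
    IPList list = new FileBasedIPList("ips.txt");
    System.out.println(list.isIn("10.221.102.1"));  // true when covered by an entry
    System.out.println(list.isIn("192.168.1.1"));   // false otherwise
  }
}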

View File

@ -21,11 +21,14 @@ import java.io.File;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.io.IOException; import java.io.IOException;
import java.net.URI; import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.List;
import java.util.Map; import java.util.Map;
import junit.framework.TestCase; import junit.framework.TestCase;
import org.apache.commons.math3.util.Pair;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
@ -34,12 +37,14 @@ import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.commons.cli.Option; import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder; import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options; import org.apache.commons.cli.Options;
import org.junit.Assert; import org.junit.Assert;
import com.google.common.collect.Maps; import com.google.common.collect.Maps;
import static org.junit.Assert.fail;
public class TestGenericOptionsParser extends TestCase { public class TestGenericOptionsParser extends TestCase {
File testDir; File testDir;
@ -92,6 +97,67 @@ public class TestGenericOptionsParser extends TestCase {
assertNull("files is not null", files); assertNull("files is not null", files);
} }
/**
* Test the case where the libjars, files and archives arguments
* contain an empty token, which should trigger an IllegalArgumentException.
*/
public void testEmptyFilenames() throws Exception {
List<Pair<String, String>> argsAndConfNames = new ArrayList<Pair<String, String>>();
argsAndConfNames.add(new Pair<String, String>("-libjars", "tmpjars"));
argsAndConfNames.add(new Pair<String, String>("-files", "tmpfiles"));
argsAndConfNames.add(new Pair<String, String>("-archives", "tmparchives"));
for (Pair<String, String> argAndConfName : argsAndConfNames) {
String arg = argAndConfName.getFirst();
String configName = argAndConfName.getSecond();
File tmpFileOne = new File(testDir, "tmpfile1");
Path tmpPathOne = new Path(tmpFileOne.toString());
File tmpFileTwo = new File(testDir, "tmpfile2");
Path tmpPathTwo = new Path(tmpFileTwo.toString());
localFs.create(tmpPathOne);
localFs.create(tmpPathTwo);
String[] args = new String[2];
args[0] = arg;
// create an empty path in between two valid files,
// which prior to HADOOP-10820 used to result in the
// working directory being added to "tmpjars" (or equivalent)
args[1] = String.format("%s,,%s",
tmpFileOne.toURI().toString(), tmpFileTwo.toURI().toString());
try {
new GenericOptionsParser(conf, args);
fail("Expected exception for empty filename");
} catch (IllegalArgumentException e) {
// expect to receive an IllegalArgumentException
GenericTestUtils.assertExceptionContains("File name can't be"
+ " empty string", e);
}
// test a zero-length file list - it should also raise an exception
args[1] = ",,";
try {
new GenericOptionsParser(conf, args);
fail("Expected exception for zero file list length");
} catch (IllegalArgumentException e) {
// expect to receive an IllegalArgumentException
GenericTestUtils.assertExceptionContains("File name can't be"
+ " empty string", e);
}
// test filename with space character
// it should create exception from parser in URI class
// due to URI syntax error
args[1] = String.format("%s, ,%s",
tmpFileOne.toURI().toString(), tmpFileTwo.toURI().toString());
try {
new GenericOptionsParser(conf, args);
fail("Expected exception for filename with space character");
} catch (IllegalArgumentException e) {
// expect to receive an IllegalArgumentException
GenericTestUtils.assertExceptionContains("URISyntaxException", e);
}
}
}
/** /**
* Test that options passed to the constructor are used. * Test that options passed to the constructor are used.
*/ */
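
A minimal sketch of the new empty-token check from the caller's side, mirroring the ",," case above (the sketch class name is illustrative):

package org.apache.hadoop.util;

import org.apache.hadoop.conf.Configuration;

public class EmptyFilenameSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // An empty token in the -files list is now rejected instead of silently
    // pulling the working directory into "tmpfiles".
    String[] cliArgs = { "-files", ",," };
    try {
      new GenericOptionsParser(conf, cliArgs);
    } catch (IllegalArgumentException e) {
      System.out.println("rejected: " + e.getMessage());  // "File name can't be empty string"
    }
  }
}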

View File

@ -238,7 +238,7 @@
<comparators> <comparators>
<comparator> <comparator>
<type>RegexpComparator</type> <type>RegexpComparator</type>
<expected-output>^-count \[-q\] &lt;path&gt; \.\.\. :\s*</expected-output> <expected-output>^-count \[-q\] \[-h\] &lt;path&gt; \.\.\. :( )*</expected-output>
</comparator> </comparator>
<comparator> <comparator>
<type>RegexpComparator</type> <type>RegexpComparator</type>
@ -260,6 +260,10 @@
<type>RegexpComparator</type> <type>RegexpComparator</type>
<expected-output>^( |\t)*DIR_COUNT FILE_COUNT CONTENT_SIZE FILE_NAME( )*</expected-output> <expected-output>^( |\t)*DIR_COUNT FILE_COUNT CONTENT_SIZE FILE_NAME( )*</expected-output>
</comparator> </comparator>
<comparator>
<type>RegexpComparator</type>
<expected-output>^( |\t)*The -h option shows file sizes in human readable format.( )*</expected-output>
</comparator>
</comparators> </comparators>
</test> </test>

View File

@ -25,9 +25,10 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.crypto.key.kms.KMSRESTConstants; import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider; import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;
import javax.ws.rs.Consumes; import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE; import javax.ws.rs.DELETE;
@ -38,15 +39,13 @@ import javax.ws.rs.Path;
import javax.ws.rs.PathParam; import javax.ws.rs.PathParam;
import javax.ws.rs.Produces; import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam; import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType; import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response; import javax.ws.rs.core.Response;
import javax.ws.rs.core.SecurityContext;
import java.io.IOException; import java.io.IOException;
import java.net.URI; import java.net.URI;
import java.net.URISyntaxException; import java.net.URISyntaxException;
import java.security.Principal; import java.security.PrivilegedExceptionAction;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.LinkedList; import java.util.LinkedList;
import java.util.List; import java.util.List;
@ -74,15 +73,6 @@ public class KMS {
kmsAudit= KMSWebApp.getKMSAudit(); kmsAudit= KMSWebApp.getKMSAudit();
} }
private static Principal getPrincipal(SecurityContext securityContext)
throws AuthenticationException{
Principal user = securityContext.getUserPrincipal();
if (user == null) {
throw new AuthenticationException("User must be authenticated");
}
return user;
}
private static final String UNAUTHORIZED_MSG_WITH_KEY = private static final String UNAUTHORIZED_MSG_WITH_KEY =
"User:%s not allowed to do '%s' on '%s'"; "User:%s not allowed to do '%s' on '%s'";
@ -90,20 +80,21 @@ public class KMS {
private static final String UNAUTHORIZED_MSG_WITHOUT_KEY = private static final String UNAUTHORIZED_MSG_WITHOUT_KEY =
"User:%s not allowed to do '%s'"; "User:%s not allowed to do '%s'";
private void assertAccess(KMSACLs.Type aclType, Principal principal, private void assertAccess(KMSACLs.Type aclType, UserGroupInformation ugi,
KMSOp operation) throws AccessControlException { KMSOp operation) throws AccessControlException {
assertAccess(aclType, principal, operation, null); assertAccess(aclType, ugi, operation, null);
} }
private void assertAccess(KMSACLs.Type aclType, Principal principal, private void assertAccess(KMSACLs.Type aclType,
KMSOp operation, String key) throws AccessControlException { UserGroupInformation ugi, KMSOp operation, String key)
if (!KMSWebApp.getACLs().hasAccess(aclType, principal.getName())) { throws AccessControlException {
if (!KMSWebApp.getACLs().hasAccess(aclType, ugi)) {
KMSWebApp.getUnauthorizedCallsMeter().mark(); KMSWebApp.getUnauthorizedCallsMeter().mark();
kmsAudit.unauthorized(principal, operation, key); kmsAudit.unauthorized(ugi, operation, key);
throw new AuthorizationException(String.format( throw new AuthorizationException(String.format(
(key != null) ? UNAUTHORIZED_MSG_WITH_KEY (key != null) ? UNAUTHORIZED_MSG_WITH_KEY
: UNAUTHORIZED_MSG_WITHOUT_KEY, : UNAUTHORIZED_MSG_WITHOUT_KEY,
principal.getName(), operation, key)); ugi.getShortUserName(), operation, key));
} }
} }
@ -123,15 +114,14 @@ public class KMS {
@Consumes(MediaType.APPLICATION_JSON) @Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON)
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
public Response createKey(@Context SecurityContext securityContext, public Response createKey(Map jsonKey) throws Exception {
Map jsonKey) throws Exception {
KMSWebApp.getAdminCallsMeter().mark(); KMSWebApp.getAdminCallsMeter().mark();
Principal user = getPrincipal(securityContext); UserGroupInformation user = HttpUserGroupInformation.get();
String name = (String) jsonKey.get(KMSRESTConstants.NAME_FIELD); final String name = (String) jsonKey.get(KMSRESTConstants.NAME_FIELD);
KMSClientProvider.checkNotEmpty(name, KMSRESTConstants.NAME_FIELD); KMSClientProvider.checkNotEmpty(name, KMSRESTConstants.NAME_FIELD);
assertAccess(KMSACLs.Type.CREATE, user, KMSOp.CREATE_KEY, name); assertAccess(KMSACLs.Type.CREATE, user, KMSOp.CREATE_KEY, name);
String cipher = (String) jsonKey.get(KMSRESTConstants.CIPHER_FIELD); String cipher = (String) jsonKey.get(KMSRESTConstants.CIPHER_FIELD);
String material = (String) jsonKey.get(KMSRESTConstants.MATERIAL_FIELD); final String material = (String) jsonKey.get(KMSRESTConstants.MATERIAL_FIELD);
int length = (jsonKey.containsKey(KMSRESTConstants.LENGTH_FIELD)) int length = (jsonKey.containsKey(KMSRESTConstants.LENGTH_FIELD))
? (Integer) jsonKey.get(KMSRESTConstants.LENGTH_FIELD) : 0; ? (Integer) jsonKey.get(KMSRESTConstants.LENGTH_FIELD) : 0;
String description = (String) String description = (String)
@ -142,7 +132,7 @@ public class KMS {
assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user, assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
KMSOp.CREATE_KEY, name); KMSOp.CREATE_KEY, name);
} }
KeyProvider.Options options = new KeyProvider.Options( final KeyProvider.Options options = new KeyProvider.Options(
KMSWebApp.getConfiguration()); KMSWebApp.getConfiguration());
if (cipher != null) { if (cipher != null) {
options.setCipher(cipher); options.setCipher(cipher);
@ -153,16 +143,23 @@ public class KMS {
options.setDescription(description); options.setDescription(description);
options.setAttributes(attributes); options.setAttributes(attributes);
KeyProvider.KeyVersion keyVersion = (material != null) KeyProvider.KeyVersion keyVersion = user.doAs(
? provider.createKey(name, Base64.decodeBase64(material), options) new PrivilegedExceptionAction<KeyVersion>() {
: provider.createKey(name, options); @Override
public KeyVersion run() throws Exception {
provider.flush(); KeyProvider.KeyVersion keyVersion = (material != null)
? provider.createKey(name, Base64.decodeBase64(material), options)
: provider.createKey(name, options);
provider.flush();
return keyVersion;
}
}
);
kmsAudit.ok(user, KMSOp.CREATE_KEY, name, "UserProvidedMaterial:" + kmsAudit.ok(user, KMSOp.CREATE_KEY, name, "UserProvidedMaterial:" +
(material != null) + " Description:" + description); (material != null) + " Description:" + description);
if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) { if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user)) {
keyVersion = removeKeyMaterial(keyVersion); keyVersion = removeKeyMaterial(keyVersion);
} }
Map json = KMSServerJSONUtils.toJSON(keyVersion); Map json = KMSServerJSONUtils.toJSON(keyVersion);
@ -176,14 +173,21 @@ public class KMS {
@DELETE @DELETE
@Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}") @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}")
public Response deleteKey(@Context SecurityContext securityContext, public Response deleteKey(@PathParam("name") final String name)
@PathParam("name") String name) throws Exception { throws Exception {
KMSWebApp.getAdminCallsMeter().mark(); KMSWebApp.getAdminCallsMeter().mark();
Principal user = getPrincipal(securityContext); UserGroupInformation user = HttpUserGroupInformation.get();
assertAccess(KMSACLs.Type.DELETE, user, KMSOp.DELETE_KEY, name); assertAccess(KMSACLs.Type.DELETE, user, KMSOp.DELETE_KEY, name);
KMSClientProvider.checkNotEmpty(name, "name"); KMSClientProvider.checkNotEmpty(name, "name");
provider.deleteKey(name);
provider.flush(); user.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
provider.deleteKey(name);
provider.flush();
return null;
}
});
kmsAudit.ok(user, KMSOp.DELETE_KEY, name, ""); kmsAudit.ok(user, KMSOp.DELETE_KEY, name, "");
@ -194,29 +198,36 @@ public class KMS {
@Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}") @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}")
@Consumes(MediaType.APPLICATION_JSON) @Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON)
public Response rolloverKey(@Context SecurityContext securityContext, public Response rolloverKey(@PathParam("name") final String name,
@PathParam("name") String name, Map jsonMaterial) Map jsonMaterial) throws Exception {
throws Exception {
KMSWebApp.getAdminCallsMeter().mark(); KMSWebApp.getAdminCallsMeter().mark();
Principal user = getPrincipal(securityContext); UserGroupInformation user = HttpUserGroupInformation.get();
assertAccess(KMSACLs.Type.ROLLOVER, user, KMSOp.ROLL_NEW_VERSION, name); assertAccess(KMSACLs.Type.ROLLOVER, user, KMSOp.ROLL_NEW_VERSION, name);
KMSClientProvider.checkNotEmpty(name, "name"); KMSClientProvider.checkNotEmpty(name, "name");
String material = (String) final String material = (String)
jsonMaterial.get(KMSRESTConstants.MATERIAL_FIELD); jsonMaterial.get(KMSRESTConstants.MATERIAL_FIELD);
if (material != null) { if (material != null) {
assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user, assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
KMSOp.ROLL_NEW_VERSION, name); KMSOp.ROLL_NEW_VERSION, name);
} }
KeyProvider.KeyVersion keyVersion = (material != null)
? provider.rollNewVersion(name, Base64.decodeBase64(material))
: provider.rollNewVersion(name);
provider.flush(); KeyProvider.KeyVersion keyVersion = user.doAs(
new PrivilegedExceptionAction<KeyVersion>() {
@Override
public KeyVersion run() throws Exception {
KeyVersion keyVersion = (material != null)
? provider.rollNewVersion(name, Base64.decodeBase64(material))
: provider.rollNewVersion(name);
provider.flush();
return keyVersion;
}
}
);
kmsAudit.ok(user, KMSOp.ROLL_NEW_VERSION, name, "UserProvidedMaterial:" + kmsAudit.ok(user, KMSOp.ROLL_NEW_VERSION, name, "UserProvidedMaterial:" +
(material != null) + " NewVersion:" + keyVersion.getVersionName()); (material != null) + " NewVersion:" + keyVersion.getVersionName());
if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) { if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user)) {
keyVersion = removeKeyMaterial(keyVersion); keyVersion = removeKeyMaterial(keyVersion);
} }
Map json = KMSServerJSONUtils.toJSON(keyVersion); Map json = KMSServerJSONUtils.toJSON(keyVersion);
@ -226,14 +237,23 @@ public class KMS {
@GET @GET
@Path(KMSRESTConstants.KEYS_METADATA_RESOURCE) @Path(KMSRESTConstants.KEYS_METADATA_RESOURCE)
@Produces(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON)
public Response getKeysMetadata(@Context SecurityContext securityContext, public Response getKeysMetadata(@QueryParam(KMSRESTConstants.KEY)
@QueryParam(KMSRESTConstants.KEY) List<String> keyNamesList) List<String> keyNamesList) throws Exception {
throws Exception {
KMSWebApp.getAdminCallsMeter().mark(); KMSWebApp.getAdminCallsMeter().mark();
Principal user = getPrincipal(securityContext); UserGroupInformation user = HttpUserGroupInformation.get();
String[] keyNames = keyNamesList.toArray(new String[keyNamesList.size()]); final String[] keyNames = keyNamesList.toArray(
new String[keyNamesList.size()]);
assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_KEYS_METADATA); assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_KEYS_METADATA);
KeyProvider.Metadata[] keysMeta = provider.getKeysMetadata(keyNames);
KeyProvider.Metadata[] keysMeta = user.doAs(
new PrivilegedExceptionAction<KeyProvider.Metadata[]>() {
@Override
public KeyProvider.Metadata[] run() throws Exception {
return provider.getKeysMetadata(keyNames);
}
}
);
Object json = KMSServerJSONUtils.toJSON(keyNames, keysMeta); Object json = KMSServerJSONUtils.toJSON(keyNames, keysMeta);
kmsAudit.ok(user, KMSOp.GET_KEYS_METADATA, ""); kmsAudit.ok(user, KMSOp.GET_KEYS_METADATA, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
@ -242,36 +262,52 @@ public class KMS {
@GET @GET
@Path(KMSRESTConstants.KEYS_NAMES_RESOURCE) @Path(KMSRESTConstants.KEYS_NAMES_RESOURCE)
@Produces(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON)
public Response getKeyNames(@Context SecurityContext securityContext) public Response getKeyNames() throws Exception {
throws Exception {
KMSWebApp.getAdminCallsMeter().mark(); KMSWebApp.getAdminCallsMeter().mark();
Principal user = getPrincipal(securityContext); UserGroupInformation user = HttpUserGroupInformation.get();
assertAccess(KMSACLs.Type.GET_KEYS, user, KMSOp.GET_KEYS); assertAccess(KMSACLs.Type.GET_KEYS, user, KMSOp.GET_KEYS);
Object json = provider.getKeys();
List<String> json = user.doAs(
new PrivilegedExceptionAction<List<String>>() {
@Override
public List<String> run() throws Exception {
return provider.getKeys();
}
}
);
kmsAudit.ok(user, KMSOp.GET_KEYS, ""); kmsAudit.ok(user, KMSOp.GET_KEYS, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
} }
@GET @GET
@Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}") @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}")
public Response getKey(@Context SecurityContext securityContext, public Response getKey(@PathParam("name") String name)
@PathParam("name") String name)
throws Exception { throws Exception {
return getMetadata(securityContext, name); return getMetadata(name);
} }
@GET @GET
@Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" + @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" +
KMSRESTConstants.METADATA_SUB_RESOURCE) KMSRESTConstants.METADATA_SUB_RESOURCE)
@Produces(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON)
public Response getMetadata(@Context SecurityContext securityContext, public Response getMetadata(@PathParam("name") final String name)
@PathParam("name") String name)
throws Exception { throws Exception {
Principal user = getPrincipal(securityContext); UserGroupInformation user = HttpUserGroupInformation.get();
KMSClientProvider.checkNotEmpty(name, "name"); KMSClientProvider.checkNotEmpty(name, "name");
KMSWebApp.getAdminCallsMeter().mark(); KMSWebApp.getAdminCallsMeter().mark();
assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_METADATA, name); assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_METADATA, name);
Object json = KMSServerJSONUtils.toJSON(name, provider.getMetadata(name));
KeyProvider.Metadata metadata = user.doAs(
new PrivilegedExceptionAction<KeyProvider.Metadata>() {
@Override
public KeyProvider.Metadata run() throws Exception {
return provider.getMetadata(name);
}
}
);
Object json = KMSServerJSONUtils.toJSON(name, metadata);
kmsAudit.ok(user, KMSOp.GET_METADATA, name, ""); kmsAudit.ok(user, KMSOp.GET_METADATA, name, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
} }
@ -280,14 +316,23 @@ public class KMS {
@Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" + @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" +
KMSRESTConstants.CURRENT_VERSION_SUB_RESOURCE) KMSRESTConstants.CURRENT_VERSION_SUB_RESOURCE)
@Produces(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON)
public Response getCurrentVersion(@Context SecurityContext securityContext, public Response getCurrentVersion(@PathParam("name") final String name)
@PathParam("name") String name)
throws Exception { throws Exception {
Principal user = getPrincipal(securityContext); UserGroupInformation user = HttpUserGroupInformation.get();
KMSClientProvider.checkNotEmpty(name, "name"); KMSClientProvider.checkNotEmpty(name, "name");
KMSWebApp.getKeyCallsMeter().mark(); KMSWebApp.getKeyCallsMeter().mark();
assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_CURRENT_KEY, name); assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_CURRENT_KEY, name);
Object json = KMSServerJSONUtils.toJSON(provider.getCurrentKey(name));
KeyVersion keyVersion = user.doAs(
new PrivilegedExceptionAction<KeyVersion>() {
@Override
public KeyVersion run() throws Exception {
return provider.getCurrentKey(name);
}
}
);
Object json = KMSServerJSONUtils.toJSON(keyVersion);
kmsAudit.ok(user, KMSOp.GET_CURRENT_KEY, name, ""); kmsAudit.ok(user, KMSOp.GET_CURRENT_KEY, name, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
} }
@ -295,14 +340,22 @@ public class KMS {
@GET @GET
@Path(KMSRESTConstants.KEY_VERSION_RESOURCE + "/{versionName:.*}") @Path(KMSRESTConstants.KEY_VERSION_RESOURCE + "/{versionName:.*}")
@Produces(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON)
public Response getKeyVersion(@Context SecurityContext securityContext, public Response getKeyVersion(
@PathParam("versionName") String versionName) @PathParam("versionName") final String versionName) throws Exception {
throws Exception { UserGroupInformation user = HttpUserGroupInformation.get();
Principal user = getPrincipal(securityContext);
KMSClientProvider.checkNotEmpty(versionName, "versionName"); KMSClientProvider.checkNotEmpty(versionName, "versionName");
KMSWebApp.getKeyCallsMeter().mark(); KMSWebApp.getKeyCallsMeter().mark();
KeyVersion keyVersion = provider.getKeyVersion(versionName);
assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSION); assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSION);
KeyVersion keyVersion = user.doAs(
new PrivilegedExceptionAction<KeyVersion>() {
@Override
public KeyVersion run() throws Exception {
return provider.getKeyVersion(versionName);
}
}
);
if (keyVersion != null) { if (keyVersion != null) {
kmsAudit.ok(user, KMSOp.GET_KEY_VERSION, keyVersion.getName(), ""); kmsAudit.ok(user, KMSOp.GET_KEY_VERSION, keyVersion.getName(), "");
} }
@ -316,13 +369,12 @@ public class KMS {
KMSRESTConstants.EEK_SUB_RESOURCE) KMSRESTConstants.EEK_SUB_RESOURCE)
@Produces(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON)
public Response generateEncryptedKeys( public Response generateEncryptedKeys(
@Context SecurityContext securityContext, @PathParam("name") final String name,
@PathParam("name") String name,
@QueryParam(KMSRESTConstants.EEK_OP) String edekOp, @QueryParam(KMSRESTConstants.EEK_OP) String edekOp,
@DefaultValue("1") @DefaultValue("1")
@QueryParam(KMSRESTConstants.EEK_NUM_KEYS) int numKeys) @QueryParam(KMSRESTConstants.EEK_NUM_KEYS) final int numKeys)
throws Exception { throws Exception {
Principal user = getPrincipal(securityContext); UserGroupInformation user = HttpUserGroupInformation.get();
KMSClientProvider.checkNotEmpty(name, "name"); KMSClientProvider.checkNotEmpty(name, "name");
KMSClientProvider.checkNotNull(edekOp, "eekOp"); KMSClientProvider.checkNotNull(edekOp, "eekOp");
@ -330,12 +382,22 @@ public class KMS {
if (edekOp.equals(KMSRESTConstants.EEK_GENERATE)) { if (edekOp.equals(KMSRESTConstants.EEK_GENERATE)) {
assertAccess(KMSACLs.Type.GENERATE_EEK, user, KMSOp.GENERATE_EEK, name); assertAccess(KMSACLs.Type.GENERATE_EEK, user, KMSOp.GENERATE_EEK, name);
List<EncryptedKeyVersion> retEdeks = final List<EncryptedKeyVersion> retEdeks =
new LinkedList<EncryptedKeyVersion>(); new LinkedList<EncryptedKeyVersion>();
try { try {
for (int i = 0; i < numKeys; i ++) {
retEdeks.add(provider.generateEncryptedKey(name)); user.doAs(
} new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
for (int i = 0; i < numKeys; i++) {
retEdeks.add(provider.generateEncryptedKey(name));
}
return null;
}
}
);
} catch (Exception e) { } catch (Exception e) {
throw new IOException(e); throw new IOException(e);
} }
@ -359,16 +421,17 @@ public class KMS {
@Path(KMSRESTConstants.KEY_VERSION_RESOURCE + "/{versionName:.*}/" + @Path(KMSRESTConstants.KEY_VERSION_RESOURCE + "/{versionName:.*}/" +
KMSRESTConstants.EEK_SUB_RESOURCE) KMSRESTConstants.EEK_SUB_RESOURCE)
@Produces(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON)
public Response decryptEncryptedKey(@Context SecurityContext securityContext, public Response decryptEncryptedKey(
@PathParam("versionName") String versionName, @PathParam("versionName") final String versionName,
@QueryParam(KMSRESTConstants.EEK_OP) String eekOp, @QueryParam(KMSRESTConstants.EEK_OP) String eekOp,
Map jsonPayload) Map jsonPayload)
throws Exception { throws Exception {
Principal user = getPrincipal(securityContext); UserGroupInformation user = HttpUserGroupInformation.get();
KMSClientProvider.checkNotEmpty(versionName, "versionName"); KMSClientProvider.checkNotEmpty(versionName, "versionName");
KMSClientProvider.checkNotNull(eekOp, "eekOp"); KMSClientProvider.checkNotNull(eekOp, "eekOp");
String keyName = (String) jsonPayload.get(KMSRESTConstants.NAME_FIELD); final String keyName = (String) jsonPayload.get(
KMSRESTConstants.NAME_FIELD);
String ivStr = (String) jsonPayload.get(KMSRESTConstants.IV_FIELD); String ivStr = (String) jsonPayload.get(KMSRESTConstants.IV_FIELD);
String encMaterialStr = String encMaterialStr =
(String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD); (String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD);
@ -376,14 +439,24 @@ public class KMS {
if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) { if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) {
assertAccess(KMSACLs.Type.DECRYPT_EEK, user, KMSOp.DECRYPT_EEK, keyName); assertAccess(KMSACLs.Type.DECRYPT_EEK, user, KMSOp.DECRYPT_EEK, keyName);
KMSClientProvider.checkNotNull(ivStr, KMSRESTConstants.IV_FIELD); KMSClientProvider.checkNotNull(ivStr, KMSRESTConstants.IV_FIELD);
byte[] iv = Base64.decodeBase64(ivStr); final byte[] iv = Base64.decodeBase64(ivStr);
KMSClientProvider.checkNotNull(encMaterialStr, KMSClientProvider.checkNotNull(encMaterialStr,
KMSRESTConstants.MATERIAL_FIELD); KMSRESTConstants.MATERIAL_FIELD);
byte[] encMaterial = Base64.decodeBase64(encMaterialStr); final byte[] encMaterial = Base64.decodeBase64(encMaterialStr);
KeyProvider.KeyVersion retKeyVersion =
provider.decryptEncryptedKey( KeyProvider.KeyVersion retKeyVersion = user.doAs(
new KMSClientProvider.KMSEncryptedKeyVersion(keyName, versionName, new PrivilegedExceptionAction<KeyVersion>() {
iv, KeyProviderCryptoExtension.EEK, encMaterial)); @Override
public KeyVersion run() throws Exception {
return provider.decryptEncryptedKey(
new KMSClientProvider.KMSEncryptedKeyVersion(keyName,
versionName, iv, KeyProviderCryptoExtension.EEK,
encMaterial)
);
}
}
);
retJSON = KMSServerJSONUtils.toJSON(retKeyVersion); retJSON = KMSServerJSONUtils.toJSON(retKeyVersion);
kmsAudit.ok(user, KMSOp.DECRYPT_EEK, keyName, ""); kmsAudit.ok(user, KMSOp.DECRYPT_EEK, keyName, "");
} else { } else {
@ -400,14 +473,23 @@ public class KMS {
@Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" + @Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" +
KMSRESTConstants.VERSIONS_SUB_RESOURCE) KMSRESTConstants.VERSIONS_SUB_RESOURCE)
@Produces(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON)
public Response getKeyVersions(@Context SecurityContext securityContext, public Response getKeyVersions(@PathParam("name") final String name)
@PathParam("name") String name)
throws Exception { throws Exception {
Principal user = getPrincipal(securityContext); UserGroupInformation user = HttpUserGroupInformation.get();
KMSClientProvider.checkNotEmpty(name, "name"); KMSClientProvider.checkNotEmpty(name, "name");
KMSWebApp.getKeyCallsMeter().mark(); KMSWebApp.getKeyCallsMeter().mark();
assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSIONS, name); assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSIONS, name);
Object json = KMSServerJSONUtils.toJSON(provider.getKeyVersions(name));
List<KeyVersion> ret = user.doAs(
new PrivilegedExceptionAction<List<KeyVersion>>() {
@Override
public List<KeyVersion> run() throws Exception {
return provider.getKeyVersions(name);
}
}
);
Object json = KMSServerJSONUtils.toJSON(ret);
kmsAudit.ok(user, KMSOp.GET_KEY_VERSIONS, name, ""); kmsAudit.ok(user, KMSOp.GET_KEY_VERSIONS, name, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
} }

@ -113,8 +113,7 @@ public class KMSACLs implements Runnable {
return conf; return conf;
} }
public boolean hasAccess(Type type, String user) { public boolean hasAccess(Type type, UserGroupInformation ugi) {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
return acls.get(type).isUserAllowed(ugi); return acls.get(type).isUserAllowed(ugi);
} }

@ -17,6 +17,7 @@
*/ */
package org.apache.hadoop.crypto.key.kms.server; package org.apache.hadoop.crypto.key.kms.server;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
@ -29,7 +30,6 @@ import com.google.common.cache.RemovalNotification;
import com.google.common.collect.Sets; import com.google.common.collect.Sets;
import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.google.common.util.concurrent.ThreadFactoryBuilder;
import java.security.Principal;
import java.util.LinkedList; import java.util.LinkedList;
import java.util.List; import java.util.List;
import java.util.Set; import java.util.Set;
@ -186,22 +186,22 @@ public class KMSAudit {
} }
} }
public void ok(Principal user, KMS.KMSOp op, String key, public void ok(UserGroupInformation user, KMS.KMSOp op, String key,
String extraMsg) { String extraMsg) {
op(OpStatus.OK, op, user.getName(), key, extraMsg); op(OpStatus.OK, op, user.getShortUserName(), key, extraMsg);
} }
public void ok(Principal user, KMS.KMSOp op, String extraMsg) { public void ok(UserGroupInformation user, KMS.KMSOp op, String extraMsg) {
op(OpStatus.OK, op, user.getName(), null, extraMsg); op(OpStatus.OK, op, user.getShortUserName(), null, extraMsg);
} }
public void unauthorized(Principal user, KMS.KMSOp op, String key) { public void unauthorized(UserGroupInformation user, KMS.KMSOp op, String key) {
op(OpStatus.UNAUTHORIZED, op, user.getName(), key, ""); op(OpStatus.UNAUTHORIZED, op, user.getShortUserName(), key, "");
} }
public void error(Principal user, String method, String url, public void error(UserGroupInformation user, String method, String url,
String extraMsg) { String extraMsg) {
op(OpStatus.ERROR, null, user.getName(), null, "Method:'" + method op(OpStatus.ERROR, null, user.getShortUserName(), null, "Method:'" + method
+ "' Exception:'" + extraMsg + "'"); + "' Exception:'" + extraMsg + "'");
} }

@ -19,7 +19,13 @@ package org.apache.hadoop.crypto.key.kms.server;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter; import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
import org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticationHandler;
import javax.servlet.FilterChain; import javax.servlet.FilterChain;
import javax.servlet.FilterConfig; import javax.servlet.FilterConfig;
@ -38,7 +44,8 @@ import java.util.Properties;
* file. * file.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class KMSAuthenticationFilter extends AuthenticationFilter { public class KMSAuthenticationFilter
extends DelegationTokenAuthenticationFilter {
private static final String CONF_PREFIX = KMSConfiguration.CONFIG_PREFIX + private static final String CONF_PREFIX = KMSConfiguration.CONFIG_PREFIX +
"authentication."; "authentication.";
@ -55,9 +62,30 @@ public class KMSAuthenticationFilter extends AuthenticationFilter {
props.setProperty(name, value); props.setProperty(name, value);
} }
} }
String authType = props.getProperty(AUTH_TYPE);
if (authType.equals(PseudoAuthenticationHandler.TYPE)) {
props.setProperty(AUTH_TYPE,
PseudoDelegationTokenAuthenticationHandler.class.getName());
} else if (authType.equals(KerberosAuthenticationHandler.TYPE)) {
props.setProperty(AUTH_TYPE,
KerberosDelegationTokenAuthenticationHandler.class.getName());
}
props.setProperty(DelegationTokenAuthenticationHandler.TOKEN_KIND,
KMSClientProvider.TOKEN_KIND);
return props; return props;
} }
protected Configuration getProxyuserConfiguration(FilterConfig filterConfig) {
Map<String, String> proxyuserConf = KMSWebApp.getConfiguration().
getValByRegex("hadoop\\.kms\\.proxyuser\\.");
Configuration conf = new Configuration(false);
for (Map.Entry<String, String> entry : proxyuserConf.entrySet()) {
conf.set(entry.getKey().substring("hadoop.kms.".length()),
entry.getValue());
}
return conf;
}
private static class KMSResponse extends HttpServletResponseWrapper { private static class KMSResponse extends HttpServletResponseWrapper {
public int statusCode; public int statusCode;
public String msg; public String msg;

@ -23,6 +23,7 @@ import com.sun.jersey.api.container.ContainerException;
import org.apache.hadoop.crypto.key.kms.KMSRESTConstants; import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.security.authorize.AuthorizationException;
import org.slf4j.Logger; import org.slf4j.Logger;
@ -34,7 +35,6 @@ import javax.ws.rs.ext.ExceptionMapper;
import javax.ws.rs.ext.Provider; import javax.ws.rs.ext.Provider;
import java.io.IOException; import java.io.IOException;
import java.security.Principal;
import java.util.LinkedHashMap; import java.util.LinkedHashMap;
import java.util.Map; import java.util.Map;
@ -102,7 +102,7 @@ public class KMSExceptionsProvider implements ExceptionMapper<Exception> {
status = Response.Status.INTERNAL_SERVER_ERROR; status = Response.Status.INTERNAL_SERVER_ERROR;
} }
if (doAudit) { if (doAudit) {
KMSWebApp.getKMSAudit().error(KMSMDCFilter.getPrincipal(), KMSWebApp.getKMSAudit().error(KMSMDCFilter.getUgi(),
KMSMDCFilter.getMethod(), KMSMDCFilter.getMethod(),
KMSMDCFilter.getURL(), getOneLineMessage(exception)); KMSMDCFilter.getURL(), getOneLineMessage(exception));
} }
@ -110,11 +110,11 @@ public class KMSExceptionsProvider implements ExceptionMapper<Exception> {
} }
protected void log(Response.Status status, Throwable ex) { protected void log(Response.Status status, Throwable ex) {
Principal principal = KMSMDCFilter.getPrincipal(); UserGroupInformation ugi = KMSMDCFilter.getUgi();
String method = KMSMDCFilter.getMethod(); String method = KMSMDCFilter.getMethod();
String url = KMSMDCFilter.getURL(); String url = KMSMDCFilter.getURL();
String msg = getOneLineMessage(ex); String msg = getOneLineMessage(ex);
LOG.warn("User:{} Method:{} URL:{} Response:{}-{}", principal, method, url, LOG.warn("User:'{}' Method:{} URL:{} Response:{}-{}", ugi, method, url,
status, msg, ex); status, msg, ex);
} }

@ -18,6 +18,8 @@
package org.apache.hadoop.crypto.key.kms.server; package org.apache.hadoop.crypto.key.kms.server;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;
import javax.servlet.Filter; import javax.servlet.Filter;
import javax.servlet.FilterChain; import javax.servlet.FilterChain;
@ -27,7 +29,6 @@ import javax.servlet.ServletRequest;
import javax.servlet.ServletResponse; import javax.servlet.ServletResponse;
import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletRequest;
import java.io.IOException; import java.io.IOException;
import java.security.Principal;
/** /**
* Servlet filter that captures context of the HTTP request to be use in the * Servlet filter that captures context of the HTTP request to be use in the
@ -37,12 +38,12 @@ import java.security.Principal;
public class KMSMDCFilter implements Filter { public class KMSMDCFilter implements Filter {
private static class Data { private static class Data {
private Principal principal; private UserGroupInformation ugi;
private String method; private String method;
private StringBuffer url; private StringBuffer url;
private Data(Principal principal, String method, StringBuffer url) { private Data(UserGroupInformation ugi, String method, StringBuffer url) {
this.principal = principal; this.ugi = ugi;
this.method = method; this.method = method;
this.url = url; this.url = url;
} }
@ -50,8 +51,8 @@ public class KMSMDCFilter implements Filter {
private static ThreadLocal<Data> DATA_TL = new ThreadLocal<Data>(); private static ThreadLocal<Data> DATA_TL = new ThreadLocal<Data>();
public static Principal getPrincipal() { public static UserGroupInformation getUgi() {
return DATA_TL.get().principal; return DATA_TL.get().ugi;
} }
public static String getMethod() { public static String getMethod() {
@ -72,14 +73,14 @@ public class KMSMDCFilter implements Filter {
throws IOException, ServletException { throws IOException, ServletException {
try { try {
DATA_TL.remove(); DATA_TL.remove();
Principal principal = ((HttpServletRequest) request).getUserPrincipal(); UserGroupInformation ugi = HttpUserGroupInformation.get();
String method = ((HttpServletRequest) request).getMethod(); String method = ((HttpServletRequest) request).getMethod();
StringBuffer requestURL = ((HttpServletRequest) request).getRequestURL(); StringBuffer requestURL = ((HttpServletRequest) request).getRequestURL();
String queryString = ((HttpServletRequest) request).getQueryString(); String queryString = ((HttpServletRequest) request).getQueryString();
if (queryString != null) { if (queryString != null) {
requestURL.append("?").append(queryString); requestURL.append("?").append(queryString);
} }
DATA_TL.set(new Data(principal, method, requestURL)); DATA_TL.set(new Data(ugi, method, requestURL));
chain.doFilter(request, response); chain.doFilter(request, response);
} finally { } finally {
DATA_TL.remove(); DATA_TL.remove();

@ -195,6 +195,46 @@ hadoop-${project.version} $ sbin/kms.sh start
NOTE: You need to restart the KMS for the configuration changes to take NOTE: You need to restart the KMS for the configuration changes to take
effect. effect.
*** KMS Proxyuser Configuration
Each proxyuser must be configured in <<<etc/hadoop/kms-site.xml>>> using the
following properties:
+---+
<property>
<name>hadoop.kms.proxyuser.#USER#.users</name>
<value>*</value>
</property>
<property>
<name>hadoop.kms.proxyuser.#USER#.groups</name>
<value>*</value>
</property>
<property>
<name>hadoop.kms.proxyuser.#USER#.hosts</name>
<value>*</value>
</property>
+---+
<<<#USER#>>> is the username of the proxyuser to configure.
The <<<users>>> property indicates the users that can be impersonated.
The <<<groups>>> property indicates the groups to which the users being
impersonated must belong.
At least one of the <<<users>>> or <<<groups>>> properties must be defined.
If both are specified, the configured proxyuser will be able to impersonate
any user in the <<<users>>> list and any user belonging to one of the groups
in the <<<groups>>> list.
The <<<hosts>>> property indicates from which host the proxyuser can make
impersonation requests.
If <<<users>>>, <<<groups>>> or <<<hosts>>> has a <<<*>>>, it means there are
no restrictions for the proxyuser regarding users, groups or hosts.
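As an informal illustration (not part of this change), the sketch below shows
how a Kerberos-authenticated proxyuser could issue a request on behalf of an
impersonated user, mirroring the <<<testProxyUser>>> test added in this patch;
the KMS URI, keytab path, principal <<<client>>> and proxied user <<<foo>>>
are placeholders:
+---+
// Illustrative sketch only: host/port, keytab path and user names are
// placeholders; see TestKMS#testProxyUser in this change for the real test.
import java.net.URI;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.apache.hadoop.security.UserGroupInformation;

public class KMSProxyUserSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    final URI uri = new URI("kms://http@localhost:16000/kms");

    // Log in as the proxyuser principal configured above (here "client").
    UserGroupInformation clientUgi = UserGroupInformation
        .loginUserFromKeytabAndReturnUGI("client", "/path/to/client.keytab");

    clientUgi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        // The provider is created with the real (Kerberos) user's credentials.
        final KeyProvider kp = new KMSClientProvider(uri, conf);

        // Work done while running as "foo" is sent as a doAs request and is
        // authorized by the KMS against the hadoop.kms.proxyuser.client.*
        // properties.
        UserGroupInformation fooUgi =
            UserGroupInformation.createRemoteUser("foo");
        fooUgi.doAs(new PrivilegedExceptionAction<Void>() {
          @Override
          public Void run() throws Exception {
            kp.createKey("kFoo", new KeyProvider.Options(conf));
            return null;
          }
        });
        return null;
      }
    });
  }
}
+---+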
*** KMS over HTTPS (SSL) *** KMS over HTTPS (SSL)
To configure KMS to work over HTTPS the following 2 properties must be To configure KMS to work over HTTPS the following 2 properties must be
@ -319,6 +359,46 @@ $ keytool -genkey -alias tomcat -keyalg RSA
</configuration> </configuration>
+---+ +---+
** KMS Delegation Token Configuration
The KMS delegation token secret manager can be configured with the following
properties:
+---+
<property>
<name>hadoop.kms.authentication.delegation-token.update-interval.sec</name>
<value>86400</value>
<description>
How often the master key is rotated, in seconds. Default value 1 day.
</description>
</property>
<property>
<name>hadoop.kms.authentication.delegation-token.max-lifetime.sec</name>
<value>604800</value>
<description>
Maximum lifetime of a delegation token, in seconds. Default value 7 days.
</description>
</property>
<property>
<name>hadoop.kms.authentication.delegation-token.renew-interval.sec</name>
<value>86400</value>
<description>
Renewal interval of a delegation token, in seconds. Default value 1 day.
</description>
</property>
<property>
<name>hadoop.kms.authentication.delegation-token.removal-scan-interval.sec</name>
<value>3600</value>
<description>
Scan interval to remove expired delegation tokens.
</description>
</property>
+---+
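As an informal illustration (assuming a reachable KMS at the placeholder
address below and an arbitrary renewer name), a client can obtain a delegation
token of kind <<<kms-dt>>> through <<<KeyProviderDelegationTokenExtension>>>,
the API exercised by the tests in this change:
+---+
// Illustrative sketch only: the KMS address and renewer name are placeholders.
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.apache.hadoop.security.Credentials;

public class KMSDelegationTokenSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    URI uri = new URI("kms://http@localhost:16000/kms");

    KeyProvider kp = new KMSClientProvider(uri, conf);
    KeyProviderDelegationTokenExtension kpdte =
        KeyProviderDelegationTokenExtension
            .createKeyProviderDelegationTokenExtension(kp);

    // Fetch a "kms-dt" token for the given renewer and stash it in a
    // Credentials object, e.g. to ship with a submitted job.
    Credentials credentials = new Credentials();
    kpdte.addDelegationTokens("renewer", credentials);
    System.out.println("Obtained tokens: " + credentials.getAllTokens());
  }
}
+---+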
** KMS HTTP REST API ** KMS HTTP REST API
*** Create a Key *** Create a Key

@ -22,12 +22,18 @@ import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion; import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider; import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException; import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.junit.AfterClass; import org.junit.AfterClass;
import org.junit.Assert; import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass; import org.junit.BeforeClass;
import org.junit.Test; import org.junit.Test;
import org.mortbay.jetty.Connector; import org.mortbay.jetty.Connector;
@ -45,6 +51,7 @@ import java.io.FileWriter;
import java.io.IOException; import java.io.IOException;
import java.io.Writer; import java.io.Writer;
import java.net.InetAddress; import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.MalformedURLException; import java.net.MalformedURLException;
import java.net.ServerSocket; import java.net.ServerSocket;
import java.net.SocketTimeoutException; import java.net.SocketTimeoutException;
@ -65,6 +72,13 @@ import java.util.concurrent.Callable;
public class TestKMS { public class TestKMS {
@Before
public void cleanUp() {
// resetting kerberos security
Configuration conf = new Configuration();
UserGroupInformation.setConfiguration(conf);
}
public static File getTestDir() throws Exception { public static File getTestDir() throws Exception {
File file = new File("dummy"); File file = new File("dummy");
file = file.getAbsoluteFile(); file = file.getAbsoluteFile();
@ -255,6 +269,7 @@ public class TestKMS {
principals.add("HTTP/localhost"); principals.add("HTTP/localhost");
principals.add("client"); principals.add("client");
principals.add("client/host"); principals.add("client/host");
principals.add("client1");
for (KMSACLs.Type type : KMSACLs.Type.values()) { for (KMSACLs.Type type : KMSACLs.Type.values()) {
principals.add(type.toString()); principals.add(type.toString());
} }
@ -284,7 +299,9 @@ public class TestKMS {
try { try {
loginContext.login(); loginContext.login();
subject = loginContext.getSubject(); subject = loginContext.getSubject();
return Subject.doAs(subject, action); UserGroupInformation ugi =
UserGroupInformation.getUGIFromSubject(subject);
return ugi.doAs(action);
} finally { } finally {
loginContext.logout(); loginContext.logout();
} }
@ -292,8 +309,13 @@ public class TestKMS {
public void testStartStop(final boolean ssl, final boolean kerberos) public void testStartStop(final boolean ssl, final boolean kerberos)
throws Exception { throws Exception {
Configuration conf = new Configuration();
if (kerberos) {
conf.set("hadoop.security.authentication", "kerberos");
}
UserGroupInformation.setConfiguration(conf);
File testDir = getTestDir(); File testDir = getTestDir();
Configuration conf = createBaseKMSConf(testDir); conf = createBaseKMSConf(testDir);
final String keystore; final String keystore;
final String password; final String password;
@ -321,18 +343,18 @@ public class TestKMS {
runServer(keystore, password, testDir, new KMSCallable() { runServer(keystore, password, testDir, new KMSCallable() {
@Override @Override
public Void call() throws Exception { public Void call() throws Exception {
Configuration conf = new Configuration(); final Configuration conf = new Configuration();
URL url = getKMSUrl(); URL url = getKMSUrl();
Assert.assertEquals(keystore != null, Assert.assertEquals(keystore != null,
url.getProtocol().equals("https")); url.getProtocol().equals("https"));
URI uri = createKMSUri(getKMSUrl()); final URI uri = createKMSUri(getKMSUrl());
final KeyProvider kp = new KMSClientProvider(uri, conf);
if (kerberos) { if (kerberos) {
for (String user : new String[]{"client", "client/host"}) { for (String user : new String[]{"client", "client/host"}) {
doAs(user, new PrivilegedExceptionAction<Void>() { doAs(user, new PrivilegedExceptionAction<Void>() {
@Override @Override
public Void run() throws Exception { public Void run() throws Exception {
final KeyProvider kp = new KMSClientProvider(uri, conf);
// getKeys() empty // getKeys() empty
Assert.assertTrue(kp.getKeys().isEmpty()); Assert.assertTrue(kp.getKeys().isEmpty());
return null; return null;
@ -340,6 +362,7 @@ public class TestKMS {
}); });
} }
} else { } else {
KeyProvider kp = new KMSClientProvider(uri, conf);
// getKeys() empty // getKeys() empty
Assert.assertTrue(kp.getKeys().isEmpty()); Assert.assertTrue(kp.getKeys().isEmpty());
} }
@ -370,8 +393,11 @@ public class TestKMS {
@Test @Test
public void testKMSProvider() throws Exception { public void testKMSProvider() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
File confDir = getTestDir(); File confDir = getTestDir();
Configuration conf = createBaseKMSConf(confDir); conf = createBaseKMSConf(confDir);
writeConf(confDir, conf); writeConf(confDir, conf);
runServer(null, null, confDir, new KMSCallable() { runServer(null, null, confDir, new KMSCallable() {
@ -565,6 +591,17 @@ public class TestKMS {
Assert.assertEquals("d", meta.getDescription()); Assert.assertEquals("d", meta.getDescription());
Assert.assertEquals(attributes, meta.getAttributes()); Assert.assertEquals(attributes, meta.getAttributes());
KeyProviderDelegationTokenExtension kpdte =
KeyProviderDelegationTokenExtension.
createKeyProviderDelegationTokenExtension(kp);
Credentials credentials = new Credentials();
kpdte.addDelegationTokens("foo", credentials);
Assert.assertEquals(1, credentials.getAllTokens().size());
InetSocketAddress kmsAddr = new InetSocketAddress(getKMSUrl().getHost(),
getKMSUrl().getPort());
Assert.assertEquals(new Text("kms-dt"), credentials.getToken(
SecurityUtil.buildTokenService(kmsAddr)).getKind());
return null; return null;
} }
}); });
@ -572,8 +609,11 @@ public class TestKMS {
@Test @Test
public void testACLs() throws Exception { public void testACLs() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
final File testDir = getTestDir(); final File testDir = getTestDir();
Configuration conf = createBaseKMSConf(testDir); conf = createBaseKMSConf(testDir);
conf.set("hadoop.kms.authentication.type", "kerberos"); conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab", conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath()); keytab.getAbsolutePath());
@ -596,20 +636,20 @@ public class TestKMS {
public Void call() throws Exception { public Void call() throws Exception {
final Configuration conf = new Configuration(); final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128); conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
URI uri = createKMSUri(getKMSUrl()); final URI uri = createKMSUri(getKMSUrl());
final KeyProvider kp = new KMSClientProvider(uri, conf);
//nothing allowed //nothing allowed
doAs("client", new PrivilegedExceptionAction<Void>() { doAs("client", new PrivilegedExceptionAction<Void>() {
@Override @Override
public Void run() throws Exception { public Void run() throws Exception {
KeyProvider kp = new KMSClientProvider(uri, conf);
try { try {
kp.createKey("k", new KeyProvider.Options(conf)); kp.createKey("k", new KeyProvider.Options(conf));
Assert.fail(); Assert.fail();
} catch (AuthorizationException ex) { } catch (AuthorizationException ex) {
//NOP //NOP
} catch (Exception ex) { } catch (Exception ex) {
Assert.fail(ex.toString()); Assert.fail(ex.getMessage());
} }
try { try {
kp.createKey("k", new byte[16], new KeyProvider.Options(conf)); kp.createKey("k", new byte[16], new KeyProvider.Options(conf));
@ -617,7 +657,7 @@ public class TestKMS {
} catch (AuthorizationException ex) { } catch (AuthorizationException ex) {
//NOP //NOP
} catch (Exception ex) { } catch (Exception ex) {
Assert.fail(ex.toString()); Assert.fail(ex.getMessage());
} }
try { try {
kp.rollNewVersion("k"); kp.rollNewVersion("k");
@ -625,7 +665,7 @@ public class TestKMS {
} catch (AuthorizationException ex) { } catch (AuthorizationException ex) {
//NOP //NOP
} catch (Exception ex) { } catch (Exception ex) {
Assert.fail(ex.toString()); Assert.fail(ex.getMessage());
} }
try { try {
kp.rollNewVersion("k", new byte[16]); kp.rollNewVersion("k", new byte[16]);
@ -633,7 +673,7 @@ public class TestKMS {
} catch (AuthorizationException ex) { } catch (AuthorizationException ex) {
//NOP //NOP
} catch (Exception ex) { } catch (Exception ex) {
Assert.fail(ex.toString()); Assert.fail(ex.getMessage());
} }
try { try {
kp.getKeys(); kp.getKeys();
@ -641,7 +681,7 @@ public class TestKMS {
} catch (AuthorizationException ex) { } catch (AuthorizationException ex) {
//NOP //NOP
} catch (Exception ex) { } catch (Exception ex) {
Assert.fail(ex.toString()); Assert.fail(ex.getMessage());
} }
try { try {
kp.getKeysMetadata("k"); kp.getKeysMetadata("k");
@ -649,7 +689,7 @@ public class TestKMS {
} catch (AuthorizationException ex) { } catch (AuthorizationException ex) {
//NOP //NOP
} catch (Exception ex) { } catch (Exception ex) {
Assert.fail(ex.toString()); Assert.fail(ex.getMessage());
} }
try { try {
// we are using JavaKeyStoreProvider for testing, so we know how // we are using JavaKeyStoreProvider for testing, so we know how
@ -659,7 +699,7 @@ public class TestKMS {
} catch (AuthorizationException ex) { } catch (AuthorizationException ex) {
//NOP //NOP
} catch (Exception ex) { } catch (Exception ex) {
Assert.fail(ex.toString()); Assert.fail(ex.getMessage());
} }
try { try {
kp.getCurrentKey("k"); kp.getCurrentKey("k");
@ -667,7 +707,7 @@ public class TestKMS {
} catch (AuthorizationException ex) { } catch (AuthorizationException ex) {
//NOP //NOP
} catch (Exception ex) { } catch (Exception ex) {
Assert.fail(ex.toString()); Assert.fail(ex.getMessage());
} }
try { try {
kp.getMetadata("k"); kp.getMetadata("k");
@ -675,7 +715,7 @@ public class TestKMS {
} catch (AuthorizationException ex) { } catch (AuthorizationException ex) {
//NOP //NOP
} catch (Exception ex) { } catch (Exception ex) {
Assert.fail(ex.toString()); Assert.fail(ex.getMessage());
} }
try { try {
kp.getKeyVersions("k"); kp.getKeyVersions("k");
@ -683,7 +723,7 @@ public class TestKMS {
} catch (AuthorizationException ex) { } catch (AuthorizationException ex) {
//NOP //NOP
} catch (Exception ex) { } catch (Exception ex) {
Assert.fail(ex.toString()); Assert.fail(ex.getMessage());
} }
return null; return null;
@ -693,12 +733,13 @@ public class TestKMS {
doAs("CREATE", new PrivilegedExceptionAction<Void>() { doAs("CREATE", new PrivilegedExceptionAction<Void>() {
@Override @Override
public Void run() throws Exception { public Void run() throws Exception {
KeyProvider kp = new KMSClientProvider(uri, conf);
try { try {
KeyProvider.KeyVersion kv = kp.createKey("k0", KeyProvider.KeyVersion kv = kp.createKey("k0",
new KeyProvider.Options(conf)); new KeyProvider.Options(conf));
Assert.assertNull(kv.getMaterial()); Assert.assertNull(kv.getMaterial());
} catch (Exception ex) { } catch (Exception ex) {
Assert.fail(ex.toString()); Assert.fail(ex.getMessage());
} }
return null; return null;
} }
@ -707,10 +748,11 @@ public class TestKMS {
doAs("DELETE", new PrivilegedExceptionAction<Void>() { doAs("DELETE", new PrivilegedExceptionAction<Void>() {
@Override @Override
public Void run() throws Exception { public Void run() throws Exception {
KeyProvider kp = new KMSClientProvider(uri, conf);
try { try {
kp.deleteKey("k0"); kp.deleteKey("k0");
} catch (Exception ex) { } catch (Exception ex) {
Assert.fail(ex.toString()); Assert.fail(ex.getMessage());
} }
return null; return null;
} }
@ -719,12 +761,13 @@ public class TestKMS {
doAs("SET_KEY_MATERIAL", new PrivilegedExceptionAction<Void>() { doAs("SET_KEY_MATERIAL", new PrivilegedExceptionAction<Void>() {
@Override @Override
public Void run() throws Exception { public Void run() throws Exception {
KeyProvider kp = new KMSClientProvider(uri, conf);
try { try {
KeyProvider.KeyVersion kv = kp.createKey("k1", new byte[16], KeyProvider.KeyVersion kv = kp.createKey("k1", new byte[16],
new KeyProvider.Options(conf)); new KeyProvider.Options(conf));
Assert.assertNull(kv.getMaterial()); Assert.assertNull(kv.getMaterial());
} catch (Exception ex) { } catch (Exception ex) {
Assert.fail(ex.toString()); Assert.fail(ex.getMessage());
} }
return null; return null;
} }
@ -733,11 +776,12 @@ public class TestKMS {
doAs("ROLLOVER", new PrivilegedExceptionAction<Void>() { doAs("ROLLOVER", new PrivilegedExceptionAction<Void>() {
@Override @Override
public Void run() throws Exception { public Void run() throws Exception {
KeyProvider kp = new KMSClientProvider(uri, conf);
try { try {
KeyProvider.KeyVersion kv = kp.rollNewVersion("k1"); KeyProvider.KeyVersion kv = kp.rollNewVersion("k1");
Assert.assertNull(kv.getMaterial()); Assert.assertNull(kv.getMaterial());
} catch (Exception ex) { } catch (Exception ex) {
Assert.fail(ex.toString()); Assert.fail(ex.getMessage());
} }
return null; return null;
} }
@ -746,12 +790,13 @@ public class TestKMS {
doAs("SET_KEY_MATERIAL", new PrivilegedExceptionAction<Void>() { doAs("SET_KEY_MATERIAL", new PrivilegedExceptionAction<Void>() {
@Override @Override
public Void run() throws Exception { public Void run() throws Exception {
KeyProvider kp = new KMSClientProvider(uri, conf);
try { try {
KeyProvider.KeyVersion kv = KeyProvider.KeyVersion kv =
kp.rollNewVersion("k1", new byte[16]); kp.rollNewVersion("k1", new byte[16]);
Assert.assertNull(kv.getMaterial()); Assert.assertNull(kv.getMaterial());
} catch (Exception ex) { } catch (Exception ex) {
Assert.fail(ex.toString()); Assert.fail(ex.getMessage());
} }
return null; return null;
} }
@ -761,6 +806,7 @@ public class TestKMS {
doAs("GET", new PrivilegedExceptionAction<KeyVersion>() { doAs("GET", new PrivilegedExceptionAction<KeyVersion>() {
@Override @Override
public KeyVersion run() throws Exception { public KeyVersion run() throws Exception {
KeyProvider kp = new KMSClientProvider(uri, conf);
try { try {
kp.getKeyVersion("k1@0"); kp.getKeyVersion("k1@0");
KeyVersion kv = kp.getCurrentKey("k1"); KeyVersion kv = kp.getCurrentKey("k1");
@ -777,6 +823,7 @@ public class TestKMS {
new PrivilegedExceptionAction<EncryptedKeyVersion>() { new PrivilegedExceptionAction<EncryptedKeyVersion>() {
@Override @Override
public EncryptedKeyVersion run() throws Exception { public EncryptedKeyVersion run() throws Exception {
KeyProvider kp = new KMSClientProvider(uri, conf);
try { try {
KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension. KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.
createKeyProviderCryptoExtension(kp); createKeyProviderCryptoExtension(kp);
@ -793,12 +840,13 @@ public class TestKMS {
doAs("DECRYPT_EEK", new PrivilegedExceptionAction<Void>() { doAs("DECRYPT_EEK", new PrivilegedExceptionAction<Void>() {
@Override @Override
public Void run() throws Exception { public Void run() throws Exception {
KeyProvider kp = new KMSClientProvider(uri, conf);
try { try {
KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension. KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.
createKeyProviderCryptoExtension(kp); createKeyProviderCryptoExtension(kp);
kpCE.decryptEncryptedKey(encKv); kpCE.decryptEncryptedKey(encKv);
} catch (Exception ex) { } catch (Exception ex) {
Assert.fail(ex.toString()); Assert.fail(ex.getMessage());
} }
return null; return null;
} }
@ -807,10 +855,11 @@ public class TestKMS {
doAs("GET_KEYS", new PrivilegedExceptionAction<Void>() { doAs("GET_KEYS", new PrivilegedExceptionAction<Void>() {
@Override @Override
public Void run() throws Exception { public Void run() throws Exception {
KeyProvider kp = new KMSClientProvider(uri, conf);
try { try {
kp.getKeys(); kp.getKeys();
} catch (Exception ex) { } catch (Exception ex) {
Assert.fail(ex.toString()); Assert.fail(ex.getMessage());
} }
return null; return null;
} }
@ -819,11 +868,12 @@ public class TestKMS {
doAs("GET_METADATA", new PrivilegedExceptionAction<Void>() { doAs("GET_METADATA", new PrivilegedExceptionAction<Void>() {
@Override @Override
public Void run() throws Exception { public Void run() throws Exception {
KeyProvider kp = new KMSClientProvider(uri, conf);
try { try {
kp.getMetadata("k1"); kp.getMetadata("k1");
kp.getKeysMetadata("k1"); kp.getKeysMetadata("k1");
} catch (Exception ex) { } catch (Exception ex) {
Assert.fail(ex.toString()); Assert.fail(ex.getMessage());
} }
return null; return null;
} }
@ -836,6 +886,7 @@ public class TestKMS {
Thread.sleep(10); // to ensure the ACLs file modifiedTime is newer Thread.sleep(10); // to ensure the ACLs file modifiedTime is newer
conf.set(KMSACLs.Type.CREATE.getConfigKey(), "foo"); conf.set(KMSACLs.Type.CREATE.getConfigKey(), "foo");
writeConf(testDir, conf); writeConf(testDir, conf);
Thread.sleep(1000);
KMSWebApp.getACLs().run(); // forcing a reload by hand. KMSWebApp.getACLs().run(); // forcing a reload by hand.
@ -844,13 +895,14 @@ public class TestKMS {
@Override @Override
public Void run() throws Exception { public Void run() throws Exception {
try { try {
KeyProvider kp = new KMSClientProvider(uri, conf);
KeyProvider.KeyVersion kv = kp.createKey("k2", KeyProvider.KeyVersion kv = kp.createKey("k2",
new KeyProvider.Options(conf)); new KeyProvider.Options(conf));
Assert.fail(); Assert.fail();
} catch (AuthorizationException ex) { } catch (AuthorizationException ex) {
//NOP //NOP
} catch (Exception ex) { } catch (Exception ex) {
Assert.fail(ex.toString()); Assert.fail(ex.getMessage());
} }
return null; return null;
@ -864,8 +916,11 @@ public class TestKMS {
@Test @Test
public void testServicePrincipalACLs() throws Exception { public void testServicePrincipalACLs() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
File testDir = getTestDir(); File testDir = getTestDir();
Configuration conf = createBaseKMSConf(testDir); conf = createBaseKMSConf(testDir);
conf.set("hadoop.kms.authentication.type", "kerberos"); conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab", conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath()); keytab.getAbsolutePath());
@ -883,18 +938,19 @@ public class TestKMS {
public Void call() throws Exception { public Void call() throws Exception {
final Configuration conf = new Configuration(); final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128); conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
URI uri = createKMSUri(getKMSUrl()); conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 64);
final KeyProvider kp = new KMSClientProvider(uri, conf); final URI uri = createKMSUri(getKMSUrl());
doAs("client", new PrivilegedExceptionAction<Void>() { doAs("client", new PrivilegedExceptionAction<Void>() {
@Override @Override
public Void run() throws Exception { public Void run() throws Exception {
try { try {
KeyProvider kp = new KMSClientProvider(uri, conf);
KeyProvider.KeyVersion kv = kp.createKey("ck0", KeyProvider.KeyVersion kv = kp.createKey("ck0",
new KeyProvider.Options(conf)); new KeyProvider.Options(conf));
Assert.assertNull(kv.getMaterial()); Assert.assertNull(kv.getMaterial());
} catch (Exception ex) { } catch (Exception ex) {
Assert.fail(ex.toString()); Assert.fail(ex.getMessage());
} }
return null; return null;
} }
@ -904,11 +960,12 @@ public class TestKMS {
@Override @Override
public Void run() throws Exception { public Void run() throws Exception {
try { try {
KeyProvider kp = new KMSClientProvider(uri, conf);
KeyProvider.KeyVersion kv = kp.createKey("ck1", KeyProvider.KeyVersion kv = kp.createKey("ck1",
new KeyProvider.Options(conf)); new KeyProvider.Options(conf));
Assert.assertNull(kv.getMaterial()); Assert.assertNull(kv.getMaterial());
} catch (Exception ex) { } catch (Exception ex) {
Assert.fail(ex.toString()); Assert.fail(ex.getMessage());
} }
return null; return null;
} }
@ -982,4 +1039,142 @@ public class TestKMS {
sock.close(); sock.close();
} }
@Test
public void testDelegationTokenAccess() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
final File testDir = getTestDir();
conf = createBaseKMSConf(testDir);
conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
writeConf(testDir, conf);
runServer(null, null, testDir, new KMSCallable() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 64);
final URI uri = createKMSUri(getKMSUrl());
final Credentials credentials = new Credentials();
final UserGroupInformation nonKerberosUgi =
UserGroupInformation.getCurrentUser();
try {
KeyProvider kp = new KMSClientProvider(uri, conf);
kp.createKey("kA", new KeyProvider.Options(conf));
} catch (IOException ex) {
System.out.println(ex.getMessage());
}
doAs("client", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = new KMSClientProvider(uri, conf);
KeyProviderDelegationTokenExtension kpdte =
KeyProviderDelegationTokenExtension.
createKeyProviderDelegationTokenExtension(kp);
kpdte.addDelegationTokens("foo", credentials);
return null;
}
});
nonKerberosUgi.addCredentials(credentials);
try {
KeyProvider kp = new KMSClientProvider(uri, conf);
kp.createKey("kA", new KeyProvider.Options(conf));
} catch (IOException ex) {
System.out.println(ex.getMessage());
}
nonKerberosUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = new KMSClientProvider(uri, conf);
kp.createKey("kD", new KeyProvider.Options(conf));
return null;
}
});
return null;
}
});
}
@Test
public void testProxyUser() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
final File testDir = getTestDir();
conf = createBaseKMSConf(testDir);
conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
conf.set("hadoop.kms.proxyuser.client.users", "foo");
conf.set("hadoop.kms.proxyuser.client.hosts", "*");
writeConf(testDir, conf);
runServer(null, null, testDir, new KMSCallable() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 64);
final URI uri = createKMSUri(getKMSUrl());
// proxyuser client using kerberos credentials
UserGroupInformation clientUgi = UserGroupInformation.
loginUserFromKeytabAndReturnUGI("client", keytab.getAbsolutePath());
clientUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
final KeyProvider kp = new KMSClientProvider(uri, conf);
kp.createKey("kAA", new KeyProvider.Options(conf));
// authorized proxyuser
UserGroupInformation fooUgi =
UserGroupInformation.createRemoteUser("foo");
fooUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
Assert.assertNotNull(kp.createKey("kBB",
new KeyProvider.Options(conf)));
return null;
}
});
// unauthorized proxyuser
UserGroupInformation foo1Ugi =
UserGroupInformation.createRemoteUser("foo1");
foo1Ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
kp.createKey("kCC", new KeyProvider.Options(conf));
Assert.fail();
} catch (AuthorizationException ex) {
// OK
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
return null;
}
});
return null;
}
});
}
} }

@ -18,6 +18,7 @@
package org.apache.hadoop.crypto.key.kms.server; package org.apache.hadoop.crypto.key.kms.server;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Assert; import org.junit.Assert;
import org.junit.Test; import org.junit.Test;
@ -27,7 +28,8 @@ public class TestKMSACLs {
public void testDefaults() { public void testDefaults() {
KMSACLs acls = new KMSACLs(new Configuration(false)); KMSACLs acls = new KMSACLs(new Configuration(false));
for (KMSACLs.Type type : KMSACLs.Type.values()) { for (KMSACLs.Type type : KMSACLs.Type.values()) {
Assert.assertTrue(acls.hasAccess(type, "foo")); Assert.assertTrue(acls.hasAccess(type,
UserGroupInformation.createRemoteUser("foo")));
} }
} }
@ -39,8 +41,10 @@ public class TestKMSACLs {
} }
KMSACLs acls = new KMSACLs(conf); KMSACLs acls = new KMSACLs(conf);
for (KMSACLs.Type type : KMSACLs.Type.values()) { for (KMSACLs.Type type : KMSACLs.Type.values()) {
Assert.assertTrue(acls.hasAccess(type, type.toString())); Assert.assertTrue(acls.hasAccess(type,
Assert.assertFalse(acls.hasAccess(type, "foo")); UserGroupInformation.createRemoteUser(type.toString())));
Assert.assertFalse(acls.hasAccess(type,
UserGroupInformation.createRemoteUser("foo")));
} }
} }

@ -21,9 +21,9 @@ import java.io.ByteArrayOutputStream;
import java.io.FilterOutputStream; import java.io.FilterOutputStream;
import java.io.OutputStream; import java.io.OutputStream;
import java.io.PrintStream; import java.io.PrintStream;
import java.security.Principal;
import org.apache.hadoop.crypto.key.kms.server.KMS.KMSOp; import org.apache.hadoop.crypto.key.kms.server.KMS.KMSOp;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.LogManager; import org.apache.log4j.LogManager;
import org.apache.log4j.PropertyConfigurator; import org.apache.log4j.PropertyConfigurator;
import org.junit.After; import org.junit.After;
@ -81,8 +81,8 @@ public class TestKMSAudit {
@Test @Test
public void testAggregation() throws Exception { public void testAggregation() throws Exception {
Principal luser = Mockito.mock(Principal.class); UserGroupInformation luser = Mockito.mock(UserGroupInformation.class);
Mockito.when(luser.getName()).thenReturn("luser"); Mockito.when(luser.getShortUserName()).thenReturn("luser");
kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg"); kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg"); kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg"); kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
@ -109,8 +109,8 @@ public class TestKMSAudit {
@Test @Test
public void testAggregationUnauth() throws Exception { public void testAggregationUnauth() throws Exception {
Principal luser = Mockito.mock(Principal.class); UserGroupInformation luser = Mockito.mock(UserGroupInformation.class);
Mockito.when(luser.getName()).thenReturn("luser"); Mockito.when(luser.getShortUserName()).thenReturn("luser");
kmsAudit.unauthorized(luser, KMSOp.GENERATE_EEK, "k2"); kmsAudit.unauthorized(luser, KMSOp.GENERATE_EEK, "k2");
Thread.sleep(1000); Thread.sleep(1000);
kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg"); kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");

@ -91,4 +91,14 @@ public class HttpFSAuthenticationFilter
return props; return props;
} }
protected Configuration getProxyuserConfiguration(FilterConfig filterConfig) {
Map<String, String> proxyuserConf = HttpFSServerWebApp.get().getConfig().
getValByRegex("httpfs\\.proxyuser\\.");
Configuration conf = new Configuration(false);
for (Map.Entry<String, String> entry : proxyuserConf.entrySet()) {
conf.set(entry.getKey().substring("httpfs.".length()), entry.getValue());
}
return conf;
}
} }

@@ -30,8 +30,6 @@ import org.apache.hadoop.lib.wsrs.Param;
 import org.apache.hadoop.lib.wsrs.ParametersProvider;
 import org.apache.hadoop.lib.wsrs.ShortParam;
 import org.apache.hadoop.lib.wsrs.StringParam;
-import org.apache.hadoop.lib.wsrs.UserProvider;
-import org.slf4j.MDC;

 import javax.ws.rs.ext.Provider;
 import java.util.HashMap;
@@ -53,57 +51,44 @@ public class HttpFSParametersProvider extends ParametersProvider {
   static {
     PARAMS_DEF.put(Operation.OPEN,
-        new Class[]{DoAsParam.class, OffsetParam.class, LenParam.class});
-    PARAMS_DEF.put(Operation.GETFILESTATUS, new Class[]{DoAsParam.class});
-    PARAMS_DEF.put(Operation.LISTSTATUS,
-        new Class[]{DoAsParam.class, FilterParam.class});
-    PARAMS_DEF.put(Operation.GETHOMEDIRECTORY, new Class[]{DoAsParam.class});
-    PARAMS_DEF.put(Operation.GETCONTENTSUMMARY, new Class[]{DoAsParam.class});
-    PARAMS_DEF.put(Operation.GETFILECHECKSUM, new Class[]{DoAsParam.class});
-    PARAMS_DEF.put(Operation.GETFILEBLOCKLOCATIONS,
-        new Class[]{DoAsParam.class});
-    PARAMS_DEF.put(Operation.GETACLSTATUS, new Class[]{DoAsParam.class});
-    PARAMS_DEF.put(Operation.INSTRUMENTATION, new Class[]{DoAsParam.class});
-    PARAMS_DEF.put(Operation.APPEND,
-        new Class[]{DoAsParam.class, DataParam.class});
+        new Class[]{OffsetParam.class, LenParam.class});
+    PARAMS_DEF.put(Operation.GETFILESTATUS, new Class[]{});
+    PARAMS_DEF.put(Operation.LISTSTATUS, new Class[]{FilterParam.class});
+    PARAMS_DEF.put(Operation.GETHOMEDIRECTORY, new Class[]{});
+    PARAMS_DEF.put(Operation.GETCONTENTSUMMARY, new Class[]{});
+    PARAMS_DEF.put(Operation.GETFILECHECKSUM, new Class[]{});
+    PARAMS_DEF.put(Operation.GETFILEBLOCKLOCATIONS, new Class[]{});
+    PARAMS_DEF.put(Operation.GETACLSTATUS, new Class[]{});
+    PARAMS_DEF.put(Operation.INSTRUMENTATION, new Class[]{});
+    PARAMS_DEF.put(Operation.APPEND, new Class[]{DataParam.class});
     PARAMS_DEF.put(Operation.CONCAT, new Class[]{SourcesParam.class});
     PARAMS_DEF.put(Operation.CREATE,
-        new Class[]{DoAsParam.class, PermissionParam.class, OverwriteParam.class,
+        new Class[]{PermissionParam.class, OverwriteParam.class,
         ReplicationParam.class, BlockSizeParam.class, DataParam.class});
-    PARAMS_DEF.put(Operation.MKDIRS,
-        new Class[]{DoAsParam.class, PermissionParam.class});
-    PARAMS_DEF.put(Operation.RENAME,
-        new Class[]{DoAsParam.class, DestinationParam.class});
+    PARAMS_DEF.put(Operation.MKDIRS, new Class[]{PermissionParam.class});
+    PARAMS_DEF.put(Operation.RENAME, new Class[]{DestinationParam.class});
     PARAMS_DEF.put(Operation.SETOWNER,
-        new Class[]{DoAsParam.class, OwnerParam.class, GroupParam.class});
-    PARAMS_DEF.put(Operation.SETPERMISSION,
-        new Class[]{DoAsParam.class, PermissionParam.class});
+        new Class[]{OwnerParam.class, GroupParam.class});
+    PARAMS_DEF.put(Operation.SETPERMISSION, new Class[]{PermissionParam.class});
     PARAMS_DEF.put(Operation.SETREPLICATION,
-        new Class[]{DoAsParam.class, ReplicationParam.class});
+        new Class[]{ReplicationParam.class});
     PARAMS_DEF.put(Operation.SETTIMES,
-        new Class[]{DoAsParam.class, ModifiedTimeParam.class,
-        AccessTimeParam.class});
-    PARAMS_DEF.put(Operation.DELETE,
-        new Class[]{DoAsParam.class, RecursiveParam.class});
-    PARAMS_DEF.put(Operation.SETACL,
-        new Class[]{DoAsParam.class, AclPermissionParam.class});
-    PARAMS_DEF.put(Operation.REMOVEACL,
-        new Class[]{DoAsParam.class});
+        new Class[]{ModifiedTimeParam.class, AccessTimeParam.class});
+    PARAMS_DEF.put(Operation.DELETE, new Class[]{RecursiveParam.class});
+    PARAMS_DEF.put(Operation.SETACL, new Class[]{AclPermissionParam.class});
+    PARAMS_DEF.put(Operation.REMOVEACL, new Class[]{});
     PARAMS_DEF.put(Operation.MODIFYACLENTRIES,
-        new Class[]{DoAsParam.class, AclPermissionParam.class});
+        new Class[]{AclPermissionParam.class});
     PARAMS_DEF.put(Operation.REMOVEACLENTRIES,
-        new Class[]{DoAsParam.class, AclPermissionParam.class});
-    PARAMS_DEF.put(Operation.REMOVEDEFAULTACL,
-        new Class[]{DoAsParam.class});
+        new Class[]{AclPermissionParam.class});
+    PARAMS_DEF.put(Operation.REMOVEDEFAULTACL, new Class[]{});
     PARAMS_DEF.put(Operation.SETXATTR,
-        new Class[]{DoAsParam.class, XAttrNameParam.class, XAttrValueParam.class,
+        new Class[]{XAttrNameParam.class, XAttrValueParam.class,
         XAttrSetFlagParam.class});
-    PARAMS_DEF.put(Operation.REMOVEXATTR,
-        new Class[]{DoAsParam.class, XAttrNameParam.class});
+    PARAMS_DEF.put(Operation.REMOVEXATTR, new Class[]{XAttrNameParam.class});
     PARAMS_DEF.put(Operation.GETXATTRS,
-        new Class[]{DoAsParam.class, XAttrNameParam.class, XAttrEncodingParam.class});
-    PARAMS_DEF.put(Operation.LISTXATTRS,
-        new Class[]{DoAsParam.class});
+        new Class[]{XAttrNameParam.class, XAttrEncodingParam.class});
+    PARAMS_DEF.put(Operation.LISTXATTRS, new Class[]{});
   }

   public HttpFSParametersProvider() {
@@ -205,41 +190,6 @@ public class HttpFSParametersProvider extends ParametersProvider {
     }
   }

/**
* Class for do-as parameter.
*/
@InterfaceAudience.Private
public static class DoAsParam extends StringParam {
/**
* Parameter name.
*/
public static final String NAME = HttpFSFileSystem.DO_AS_PARAM;
/**
* Constructor.
*/
public DoAsParam() {
super(NAME, null, UserProvider.getUserPattern());
}
/**
* Delegates to parent and then adds do-as user to
* MDC context for logging purposes.
*
*
* @param str parameter value.
*
* @return parsed parameter
*/
@Override
public String parseParam(String str) {
String doAs = super.parseParam(str);
MDC.put(getName(), (doAs != null) ? doAs : "-");
return doAs;
}
}
   /**
    * Class for filter parameter.
    */

@@ -275,7 +225,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
      * Constructor.
      */
     public GroupParam() {
-      super(NAME, null, UserProvider.getUserPattern());
+      super(NAME, null);
     }
   }

@@ -371,7 +321,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
      * Constructor.
      */
     public OwnerParam() {
-      super(NAME, null, UserProvider.getUserPattern());
+      super(NAME, null);
     }
   }

View File

@@ -29,7 +29,6 @@ import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AclPermissionPa
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.BlockSizeParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DataParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DestinationParam;
-import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DoAsParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.FilterParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.GroupParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.LenParam;

@@ -50,12 +49,11 @@ import org.apache.hadoop.lib.service.FileSystemAccess;
 import org.apache.hadoop.lib.service.FileSystemAccessException;
 import org.apache.hadoop.lib.service.Groups;
 import org.apache.hadoop.lib.service.Instrumentation;
-import org.apache.hadoop.lib.service.ProxyUser;
 import org.apache.hadoop.lib.servlet.FileSystemReleaseFilter;
-import org.apache.hadoop.lib.servlet.HostnameFilter;
 import org.apache.hadoop.lib.wsrs.InputStreamEntity;
 import org.apache.hadoop.lib.wsrs.Parameters;
-import org.apache.hadoop.security.authentication.server.AuthenticationToken;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;
 import org.json.simple.JSONObject;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -79,7 +77,6 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
 import java.security.AccessControlException;
-import java.security.Principal;
 import java.text.MessageFormat;
 import java.util.EnumSet;
 import java.util.List;

@@ -96,49 +93,11 @@ import java.util.Map;
 public class HttpFSServer {
   private static Logger AUDIT_LOG = LoggerFactory.getLogger("httpfsaudit");
/**
* Resolves the effective user that will be used to request a FileSystemAccess filesystem.
* <p/>
* If the doAs-user is NULL or the same as the user, it returns the user.
* <p/>
* Otherwise it uses proxyuser rules (see {@link ProxyUser} to determine if the
* current user can impersonate the doAs-user.
* <p/>
* If the current user cannot impersonate the doAs-user an
* <code>AccessControlException</code> will be thrown.
*
* @param user principal for whom the filesystem instance is.
* @param doAs do-as user, if any.
*
* @return the effective user.
*
* @throws IOException thrown if an IO error occurrs.
* @throws AccessControlException thrown if the current user cannot impersonate
* the doAs-user.
*/
private String getEffectiveUser(Principal user, String doAs) throws IOException {
String effectiveUser = user.getName();
if (doAs != null && !doAs.equals(user.getName())) {
ProxyUser proxyUser = HttpFSServerWebApp.get().get(ProxyUser.class);
String proxyUserName;
if (user instanceof AuthenticationToken) {
proxyUserName = ((AuthenticationToken)user).getUserName();
} else {
proxyUserName = user.getName();
}
proxyUser.validate(proxyUserName, HostnameFilter.get(), doAs);
effectiveUser = doAs;
AUDIT_LOG.info("Proxy user [{}] DoAs user [{}]", proxyUserName, doAs);
}
return effectiveUser;
}
   /**
    * Executes a {@link FileSystemAccess.FileSystemExecutor} using a filesystem for the effective
    * user.
    *
-   * @param user principal making the request.
-   * @param doAs do-as user, if any.
+   * @param ugi user making the request.
    * @param executor FileSystemExecutor to execute.
    *
    * @return FileSystemExecutor response

@@ -147,12 +106,11 @@ public class HttpFSServer {
    * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
    * exceptions are handled by {@link HttpFSExceptionProvider}.
    */
-  private <T> T fsExecute(Principal user, String doAs, FileSystemAccess.FileSystemExecutor<T> executor)
+  private <T> T fsExecute(UserGroupInformation ugi, FileSystemAccess.FileSystemExecutor<T> executor)
     throws IOException, FileSystemAccessException {
-    String hadoopUser = getEffectiveUser(user, doAs);
     FileSystemAccess fsAccess = HttpFSServerWebApp.get().get(FileSystemAccess.class);
     Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getFileSystemConfiguration();
-    return fsAccess.execute(hadoopUser, conf, executor);
+    return fsAccess.execute(ugi.getShortUserName(), conf, executor);
   }

   /**
@@ -162,8 +120,7 @@ public class HttpFSServer {
    * If a do-as user is specified, the current user must be a valid proxyuser, otherwise an
    * <code>AccessControlException</code> will be thrown.
    *
-   * @param user principal for whom the filesystem instance is.
-   * @param doAs do-as user, if any.
+   * @param ugi principal for whom the filesystem instance is.
    *
    * @return a filesystem for the specified user or do-as user.
    *
@@ -172,8 +129,9 @@ public class HttpFSServer {
    * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
    * exceptions are handled by {@link HttpFSExceptionProvider}.
    */
-  private FileSystem createFileSystem(Principal user, String doAs) throws IOException, FileSystemAccessException {
-    String hadoopUser = getEffectiveUser(user, doAs);
+  private FileSystem createFileSystem(UserGroupInformation ugi)
+    throws IOException, FileSystemAccessException {
+    String hadoopUser = ugi.getShortUserName();
     FileSystemAccess fsAccess = HttpFSServerWebApp.get().get(FileSystemAccess.class);
     Configuration conf = HttpFSServerWebApp.get().get(FileSystemAccess.class).getFileSystemConfiguration();
     FileSystem fs = fsAccess.createFileSystem(hadoopUser, conf);

@@ -192,7 +150,6 @@ public class HttpFSServer {
   /**
    * Special binding for '/' as it is not handled by the wildcard binding.
    *
-   * @param user the principal of the user making the request.
    * @param op the HttpFS operation of the request.
    * @param params the HttpFS parameters of the request.
    *
@@ -206,11 +163,10 @@ public class HttpFSServer {
    */
   @GET
   @Produces(MediaType.APPLICATION_JSON)
-  public Response getRoot(@Context Principal user,
-                          @QueryParam(OperationParam.NAME) OperationParam op,
+  public Response getRoot(@QueryParam(OperationParam.NAME) OperationParam op,
                           @Context Parameters params)
     throws IOException, FileSystemAccessException {
-    return get(user, "", op, params);
+    return get("", op, params);
   }

   private String makeAbsolute(String path) {
@@ -220,7 +176,6 @@ public class HttpFSServer {
   /**
    * Binding to handle GET requests, supported operations are
    *
-   * @param user the principal of the user making the request.
    * @param path the path for operation.
    * @param op the HttpFS operation of the request.
    * @param params the HttpFS parameters of the request.

@@ -236,21 +191,20 @@ public class HttpFSServer {
   @GET
   @Path("{path:.*}")
   @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
-  public Response get(@Context Principal user,
-                      @PathParam("path") String path,
+  public Response get(@PathParam("path") String path,
                       @QueryParam(OperationParam.NAME) OperationParam op,
                       @Context Parameters params)
     throws IOException, FileSystemAccessException {
+    UserGroupInformation user = HttpUserGroupInformation.get();
     Response response;
     path = makeAbsolute(path);
     MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
-    String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
     switch (op.value()) {
       case OPEN: {
         //Invoking the command directly using an unmanaged FileSystem that is
         // released by the FileSystemReleaseFilter
         FSOperations.FSOpen command = new FSOperations.FSOpen(path);
-        FileSystem fs = createFileSystem(user, doAs);
+        FileSystem fs = createFileSystem(user);
         InputStream is = command.execute(fs);
         Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
         Long len = params.get(LenParam.NAME, LenParam.class);
@ -264,7 +218,7 @@ public class HttpFSServer {
case GETFILESTATUS: { case GETFILESTATUS: {
FSOperations.FSFileStatus command = FSOperations.FSFileStatus command =
new FSOperations.FSFileStatus(path); new FSOperations.FSFileStatus(path);
Map json = fsExecute(user, doAs, command); Map json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path); AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break; break;
@ -273,7 +227,7 @@ public class HttpFSServer {
String filter = params.get(FilterParam.NAME, FilterParam.class); String filter = params.get(FilterParam.NAME, FilterParam.class);
FSOperations.FSListStatus command = new FSOperations.FSListStatus( FSOperations.FSListStatus command = new FSOperations.FSListStatus(
path, filter); path, filter);
Map json = fsExecute(user, doAs, command); Map json = fsExecute(user, command);
AUDIT_LOG.info("[{}] filter [{}]", path, AUDIT_LOG.info("[{}] filter [{}]", path,
(filter != null) ? filter : "-"); (filter != null) ? filter : "-");
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
@ -282,7 +236,7 @@ public class HttpFSServer {
case GETHOMEDIRECTORY: { case GETHOMEDIRECTORY: {
enforceRootPath(op.value(), path); enforceRootPath(op.value(), path);
FSOperations.FSHomeDir command = new FSOperations.FSHomeDir(); FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
JSONObject json = fsExecute(user, doAs, command); JSONObject json = fsExecute(user, command);
AUDIT_LOG.info(""); AUDIT_LOG.info("");
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break; break;
@ -290,7 +244,7 @@ public class HttpFSServer {
case INSTRUMENTATION: { case INSTRUMENTATION: {
enforceRootPath(op.value(), path); enforceRootPath(op.value(), path);
Groups groups = HttpFSServerWebApp.get().get(Groups.class); Groups groups = HttpFSServerWebApp.get().get(Groups.class);
List<String> userGroups = groups.getGroups(user.getName()); List<String> userGroups = groups.getGroups(user.getShortUserName());
if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) { if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
throw new AccessControlException( throw new AccessControlException(
"User not in HttpFSServer admin group"); "User not in HttpFSServer admin group");
@ -304,7 +258,7 @@ public class HttpFSServer {
case GETCONTENTSUMMARY: { case GETCONTENTSUMMARY: {
FSOperations.FSContentSummary command = FSOperations.FSContentSummary command =
new FSOperations.FSContentSummary(path); new FSOperations.FSContentSummary(path);
Map json = fsExecute(user, doAs, command); Map json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path); AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break; break;
@ -312,7 +266,7 @@ public class HttpFSServer {
case GETFILECHECKSUM: { case GETFILECHECKSUM: {
FSOperations.FSFileChecksum command = FSOperations.FSFileChecksum command =
new FSOperations.FSFileChecksum(path); new FSOperations.FSFileChecksum(path);
Map json = fsExecute(user, doAs, command); Map json = fsExecute(user, command);
AUDIT_LOG.info("[{}]", path); AUDIT_LOG.info("[{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break; break;
@ -324,7 +278,7 @@ public class HttpFSServer {
case GETACLSTATUS: { case GETACLSTATUS: {
FSOperations.FSAclStatus command = FSOperations.FSAclStatus command =
new FSOperations.FSAclStatus(path); new FSOperations.FSAclStatus(path);
Map json = fsExecute(user, doAs, command); Map json = fsExecute(user, command);
AUDIT_LOG.info("ACL status for [{}]", path); AUDIT_LOG.info("ACL status for [{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break; break;
@ -337,7 +291,7 @@ public class HttpFSServer {
FSOperations.FSGetXAttrs command = new FSOperations.FSGetXAttrs(path, FSOperations.FSGetXAttrs command = new FSOperations.FSGetXAttrs(path,
xattrNames, encoding); xattrNames, encoding);
@SuppressWarnings("rawtypes") @SuppressWarnings("rawtypes")
Map json = fsExecute(user, doAs, command); Map json = fsExecute(user, command);
AUDIT_LOG.info("XAttrs for [{}]", path); AUDIT_LOG.info("XAttrs for [{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break; break;
@ -345,7 +299,7 @@ public class HttpFSServer {
case LISTXATTRS: { case LISTXATTRS: {
FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path); FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
@SuppressWarnings("rawtypes") @SuppressWarnings("rawtypes")
Map json = fsExecute(user, doAs, command); Map json = fsExecute(user, command);
AUDIT_LOG.info("XAttr names for [{}]", path); AUDIT_LOG.info("XAttr names for [{}]", path);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break; break;
@@ -363,7 +317,6 @@ public class HttpFSServer {
   /**
    * Binding to handle DELETE requests.
    *
-   * @param user the principal of the user making the request.
    * @param path the path for operation.
    * @param op the HttpFS operation of the request.
    * @param params the HttpFS parameters of the request.

@@ -379,15 +332,14 @@ public class HttpFSServer {
   @DELETE
   @Path("{path:.*}")
   @Produces(MediaType.APPLICATION_JSON)
-  public Response delete(@Context Principal user,
-                         @PathParam("path") String path,
-                         @QueryParam(OperationParam.NAME) OperationParam op,
-                         @Context Parameters params)
+  public Response delete(@PathParam("path") String path,
+                         @QueryParam(OperationParam.NAME) OperationParam op,
+                         @Context Parameters params)
     throws IOException, FileSystemAccessException {
+    UserGroupInformation user = HttpUserGroupInformation.get();
     Response response;
     path = makeAbsolute(path);
     MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
-    String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
     switch (op.value()) {
case DELETE: { case DELETE: {
Boolean recursive = Boolean recursive =
@ -395,7 +347,7 @@ public class HttpFSServer {
AUDIT_LOG.info("[{}] recursive [{}]", path, recursive); AUDIT_LOG.info("[{}] recursive [{}]", path, recursive);
FSOperations.FSDelete command = FSOperations.FSDelete command =
new FSOperations.FSDelete(path, recursive); new FSOperations.FSDelete(path, recursive);
JSONObject json = fsExecute(user, doAs, command); JSONObject json = fsExecute(user, command);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break; break;
} }
@@ -412,7 +364,6 @@ public class HttpFSServer {
    * Binding to handle POST requests.
    *
    * @param is the inputstream for the request payload.
-   * @param user the principal of the user making the request.
    * @param uriInfo the of the request.
    * @param path the path for operation.
    * @param op the HttpFS operation of the request.

@@ -431,18 +382,17 @@ public class HttpFSServer {
   @Consumes({"*/*"})
   @Produces({MediaType.APPLICATION_JSON})
   public Response post(InputStream is,
-                       @Context Principal user,
                        @Context UriInfo uriInfo,
                        @PathParam("path") String path,
                        @QueryParam(OperationParam.NAME) OperationParam op,
                        @Context Parameters params)
     throws IOException, FileSystemAccessException {
+    UserGroupInformation user = HttpUserGroupInformation.get();
     Response response;
     path = makeAbsolute(path);
     MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
     switch (op.value()) {
       case APPEND: {
-        String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
         Boolean hasData = params.get(DataParam.NAME, DataParam.class);
if (!hasData) { if (!hasData) {
response = Response.temporaryRedirect( response = Response.temporaryRedirect(
@ -451,7 +401,7 @@ public class HttpFSServer {
} else { } else {
FSOperations.FSAppend command = FSOperations.FSAppend command =
new FSOperations.FSAppend(is, path); new FSOperations.FSAppend(is, path);
fsExecute(user, doAs, command); fsExecute(user, command);
AUDIT_LOG.info("[{}]", path); AUDIT_LOG.info("[{}]", path);
response = Response.ok().type(MediaType.APPLICATION_JSON).build(); response = Response.ok().type(MediaType.APPLICATION_JSON).build();
} }
@ -463,7 +413,7 @@ public class HttpFSServer {
FSOperations.FSConcat command = FSOperations.FSConcat command =
new FSOperations.FSConcat(path, sources.split(",")); new FSOperations.FSConcat(path, sources.split(","));
fsExecute(user, null, command); fsExecute(user, command);
AUDIT_LOG.info("[{}]", path); AUDIT_LOG.info("[{}]", path);
System.out.println("SENT RESPONSE"); System.out.println("SENT RESPONSE");
response = Response.ok().build(); response = Response.ok().build();
@@ -498,7 +448,6 @@ public class HttpFSServer {
    * Binding to handle PUT requests.
    *
    * @param is the inputstream for the request payload.
-   * @param user the principal of the user making the request.
    * @param uriInfo the of the request.
    * @param path the path for operation.
    * @param op the HttpFS operation of the request.

@@ -517,16 +466,15 @@ public class HttpFSServer {
   @Consumes({"*/*"})
   @Produces({MediaType.APPLICATION_JSON})
   public Response put(InputStream is,
-                      @Context Principal user,
                       @Context UriInfo uriInfo,
                       @PathParam("path") String path,
                       @QueryParam(OperationParam.NAME) OperationParam op,
                       @Context Parameters params)
     throws IOException, FileSystemAccessException {
+    UserGroupInformation user = HttpUserGroupInformation.get();
     Response response;
     path = makeAbsolute(path);
     MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
-    String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
     switch (op.value()) {
case CREATE: { case CREATE: {
Boolean hasData = params.get(DataParam.NAME, DataParam.class); Boolean hasData = params.get(DataParam.NAME, DataParam.class);
@ -546,7 +494,7 @@ public class HttpFSServer {
FSOperations.FSCreate command = FSOperations.FSCreate command =
new FSOperations.FSCreate(is, path, permission, override, new FSOperations.FSCreate(is, path, permission, override,
replication, blockSize); replication, blockSize);
fsExecute(user, doAs, command); fsExecute(user, command);
AUDIT_LOG.info( AUDIT_LOG.info(
"[{}] permission [{}] override [{}] replication [{}] blockSize [{}]", "[{}] permission [{}] override [{}] replication [{}] blockSize [{}]",
new Object[]{path, permission, override, replication, blockSize}); new Object[]{path, permission, override, replication, blockSize});
@ -564,7 +512,7 @@ public class HttpFSServer {
FSOperations.FSSetXAttr command = new FSOperations.FSSetXAttr( FSOperations.FSSetXAttr command = new FSOperations.FSSetXAttr(
path, xattrName, xattrValue, flag); path, xattrName, xattrValue, flag);
fsExecute(user, doAs, command); fsExecute(user, command);
AUDIT_LOG.info("[{}] to xAttr [{}]", path, xattrName); AUDIT_LOG.info("[{}] to xAttr [{}]", path, xattrName);
response = Response.ok().build(); response = Response.ok().build();
break; break;
@ -573,7 +521,7 @@ public class HttpFSServer {
String xattrName = params.get(XAttrNameParam.NAME, XAttrNameParam.class); String xattrName = params.get(XAttrNameParam.NAME, XAttrNameParam.class);
FSOperations.FSRemoveXAttr command = new FSOperations.FSRemoveXAttr( FSOperations.FSRemoveXAttr command = new FSOperations.FSRemoveXAttr(
path, xattrName); path, xattrName);
fsExecute(user, doAs, command); fsExecute(user, command);
AUDIT_LOG.info("[{}] removed xAttr [{}]", path, xattrName); AUDIT_LOG.info("[{}] removed xAttr [{}]", path, xattrName);
response = Response.ok().build(); response = Response.ok().build();
break; break;
@ -583,7 +531,7 @@ public class HttpFSServer {
PermissionParam.class); PermissionParam.class);
FSOperations.FSMkdirs command = FSOperations.FSMkdirs command =
new FSOperations.FSMkdirs(path, permission); new FSOperations.FSMkdirs(path, permission);
JSONObject json = fsExecute(user, doAs, command); JSONObject json = fsExecute(user, command);
AUDIT_LOG.info("[{}] permission [{}]", path, permission); AUDIT_LOG.info("[{}] permission [{}]", path, permission);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break; break;
@ -592,7 +540,7 @@ public class HttpFSServer {
String toPath = params.get(DestinationParam.NAME, DestinationParam.class); String toPath = params.get(DestinationParam.NAME, DestinationParam.class);
FSOperations.FSRename command = FSOperations.FSRename command =
new FSOperations.FSRename(path, toPath); new FSOperations.FSRename(path, toPath);
JSONObject json = fsExecute(user, doAs, command); JSONObject json = fsExecute(user, command);
AUDIT_LOG.info("[{}] to [{}]", path, toPath); AUDIT_LOG.info("[{}] to [{}]", path, toPath);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build(); response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break; break;
@ -602,7 +550,7 @@ public class HttpFSServer {
String group = params.get(GroupParam.NAME, GroupParam.class); String group = params.get(GroupParam.NAME, GroupParam.class);
FSOperations.FSSetOwner command = FSOperations.FSSetOwner command =
new FSOperations.FSSetOwner(path, owner, group); new FSOperations.FSSetOwner(path, owner, group);
fsExecute(user, doAs, command); fsExecute(user, command);
AUDIT_LOG.info("[{}] to (O/G)[{}]", path, owner + ":" + group); AUDIT_LOG.info("[{}] to (O/G)[{}]", path, owner + ":" + group);
response = Response.ok().build(); response = Response.ok().build();
break; break;
@ -612,7 +560,7 @@ public class HttpFSServer {
PermissionParam.class); PermissionParam.class);
FSOperations.FSSetPermission command = FSOperations.FSSetPermission command =
new FSOperations.FSSetPermission(path, permission); new FSOperations.FSSetPermission(path, permission);
fsExecute(user, doAs, command); fsExecute(user, command);
AUDIT_LOG.info("[{}] to [{}]", path, permission); AUDIT_LOG.info("[{}] to [{}]", path, permission);
response = Response.ok().build(); response = Response.ok().build();
break; break;
@ -622,7 +570,7 @@ public class HttpFSServer {
ReplicationParam.class); ReplicationParam.class);
FSOperations.FSSetReplication command = FSOperations.FSSetReplication command =
new FSOperations.FSSetReplication(path, replication); new FSOperations.FSSetReplication(path, replication);
JSONObject json = fsExecute(user, doAs, command); JSONObject json = fsExecute(user, command);
AUDIT_LOG.info("[{}] to [{}]", path, replication); AUDIT_LOG.info("[{}] to [{}]", path, replication);
response = Response.ok(json).build(); response = Response.ok(json).build();
break; break;
@ -634,7 +582,7 @@ public class HttpFSServer {
AccessTimeParam.class); AccessTimeParam.class);
FSOperations.FSSetTimes command = FSOperations.FSSetTimes command =
new FSOperations.FSSetTimes(path, modifiedTime, accessTime); new FSOperations.FSSetTimes(path, modifiedTime, accessTime);
fsExecute(user, doAs, command); fsExecute(user, command);
AUDIT_LOG.info("[{}] to (M/A)[{}]", path, AUDIT_LOG.info("[{}] to (M/A)[{}]", path,
modifiedTime + ":" + accessTime); modifiedTime + ":" + accessTime);
response = Response.ok().build(); response = Response.ok().build();
@ -645,7 +593,7 @@ public class HttpFSServer {
AclPermissionParam.class); AclPermissionParam.class);
FSOperations.FSSetAcl command = FSOperations.FSSetAcl command =
new FSOperations.FSSetAcl(path, aclSpec); new FSOperations.FSSetAcl(path, aclSpec);
fsExecute(user, doAs, command); fsExecute(user, command);
AUDIT_LOG.info("[{}] to acl [{}]", path, aclSpec); AUDIT_LOG.info("[{}] to acl [{}]", path, aclSpec);
response = Response.ok().build(); response = Response.ok().build();
break; break;
@ -653,7 +601,7 @@ public class HttpFSServer {
case REMOVEACL: { case REMOVEACL: {
FSOperations.FSRemoveAcl command = FSOperations.FSRemoveAcl command =
new FSOperations.FSRemoveAcl(path); new FSOperations.FSRemoveAcl(path);
fsExecute(user, doAs, command); fsExecute(user, command);
AUDIT_LOG.info("[{}] removed acl", path); AUDIT_LOG.info("[{}] removed acl", path);
response = Response.ok().build(); response = Response.ok().build();
break; break;
@ -663,7 +611,7 @@ public class HttpFSServer {
AclPermissionParam.class); AclPermissionParam.class);
FSOperations.FSModifyAclEntries command = FSOperations.FSModifyAclEntries command =
new FSOperations.FSModifyAclEntries(path, aclSpec); new FSOperations.FSModifyAclEntries(path, aclSpec);
fsExecute(user, doAs, command); fsExecute(user, command);
AUDIT_LOG.info("[{}] modify acl entry with [{}]", path, aclSpec); AUDIT_LOG.info("[{}] modify acl entry with [{}]", path, aclSpec);
response = Response.ok().build(); response = Response.ok().build();
break; break;
@ -673,7 +621,7 @@ public class HttpFSServer {
AclPermissionParam.class); AclPermissionParam.class);
FSOperations.FSRemoveAclEntries command = FSOperations.FSRemoveAclEntries command =
new FSOperations.FSRemoveAclEntries(path, aclSpec); new FSOperations.FSRemoveAclEntries(path, aclSpec);
fsExecute(user, doAs, command); fsExecute(user, command);
AUDIT_LOG.info("[{}] remove acl entry [{}]", path, aclSpec); AUDIT_LOG.info("[{}] remove acl entry [{}]", path, aclSpec);
response = Response.ok().build(); response = Response.ok().build();
break; break;
@ -681,7 +629,7 @@ public class HttpFSServer {
case REMOVEDEFAULTACL: { case REMOVEDEFAULTACL: {
FSOperations.FSRemoveDefaultAcl command = FSOperations.FSRemoveDefaultAcl command =
new FSOperations.FSRemoveDefaultAcl(path); new FSOperations.FSRemoveDefaultAcl(path);
fsExecute(user, doAs, command); fsExecute(user, command);
AUDIT_LOG.info("[{}] remove default acl", path); AUDIT_LOG.info("[{}] remove default acl", path);
response = Response.ok().build(); response = Response.ok().build();
break; break;
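
A minimal sketch of the pattern these handlers now share, assuming the JAX-RS and Hadoop classes imported above; the resource path and operation below are placeholders, not the real HttpFS bindings:

    import java.io.IOException;
    import javax.ws.rs.GET;
    import javax.ws.rs.Path;
    import javax.ws.rs.PathParam;
    import javax.ws.rs.core.Response;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;

    @Path("example")
    public class UgiResourceSketch {
      @GET
      @Path("{path:.*}")
      public Response whoAmI(@PathParam("path") String path) throws IOException {
        // The authenticated (and possibly impersonated) caller is resolved by the
        // delegation-token authentication filter and exposed through this helper,
        // so the resource method no longer parses a doas parameter itself.
        UserGroupInformation caller = HttpUserGroupInformation.get();
        return Response.ok(caller.getShortUserName()).build();
      }
    }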

View File

@@ -24,7 +24,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.lib.server.ServerException;
 import org.apache.hadoop.lib.service.FileSystemAccess;
 import org.apache.hadoop.lib.servlet.ServerWebApp;
-import org.apache.hadoop.lib.wsrs.UserProvider;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -103,9 +102,6 @@ public class HttpFSServerWebApp extends ServerWebApp {
     LOG.info("Connects to Namenode [{}]",
              get().get(FileSystemAccess.class).getFileSystemConfiguration().
              get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY));
-    String userPattern = getConfig().get(UserProvider.USER_PATTERN_KEY,
-        UserProvider.USER_PATTERN_DEFAULT);
-    UserProvider.setUserPattern(userPattern);
   }

   /**

View File

@ -1,179 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.security;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.lib.lang.XException;
import org.apache.hadoop.lib.server.BaseService;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.Groups;
import org.apache.hadoop.lib.service.ProxyUser;
import org.apache.hadoop.lib.util.Check;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.InetAddress;
import java.security.AccessControlException;
import java.text.MessageFormat;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
@InterfaceAudience.Private
public class ProxyUserService extends BaseService implements ProxyUser {
private static Logger LOG = LoggerFactory.getLogger(ProxyUserService.class);
@InterfaceAudience.Private
public static enum ERROR implements XException.ERROR {
PRXU01("Could not normalize host name [{0}], {1}"),
PRXU02("Missing [{0}] property");
private String template;
ERROR(String template) {
this.template = template;
}
@Override
public String getTemplate() {
return template;
}
}
private static final String PREFIX = "proxyuser";
private static final String GROUPS = ".groups";
private static final String HOSTS = ".hosts";
private Map<String, Set<String>> proxyUserHosts = new HashMap<String, Set<String>>();
private Map<String, Set<String>> proxyUserGroups = new HashMap<String, Set<String>>();
public ProxyUserService() {
super(PREFIX);
}
@Override
public Class getInterface() {
return ProxyUser.class;
}
@Override
public Class[] getServiceDependencies() {
return new Class[]{Groups.class};
}
@Override
protected void init() throws ServiceException {
for (Map.Entry<String, String> entry : getServiceConfig()) {
String key = entry.getKey();
if (key.endsWith(GROUPS)) {
String proxyUser = key.substring(0, key.lastIndexOf(GROUPS));
if (getServiceConfig().get(proxyUser + HOSTS) == null) {
throw new ServiceException(ERROR.PRXU02, getPrefixedName(proxyUser + HOSTS));
}
String value = entry.getValue().trim();
LOG.info("Loading proxyuser settings [{}]=[{}]", key, value);
Set<String> values = null;
if (!value.equals("*")) {
values = new HashSet<String>(Arrays.asList(value.split(",")));
}
proxyUserGroups.put(proxyUser, values);
}
if (key.endsWith(HOSTS)) {
String proxyUser = key.substring(0, key.lastIndexOf(HOSTS));
if (getServiceConfig().get(proxyUser + GROUPS) == null) {
throw new ServiceException(ERROR.PRXU02, getPrefixedName(proxyUser + GROUPS));
}
String value = entry.getValue().trim();
LOG.info("Loading proxyuser settings [{}]=[{}]", key, value);
Set<String> values = null;
if (!value.equals("*")) {
String[] hosts = value.split(",");
for (int i = 0; i < hosts.length; i++) {
String originalName = hosts[i];
try {
hosts[i] = normalizeHostname(originalName);
} catch (Exception ex) {
throw new ServiceException(ERROR.PRXU01, originalName, ex.getMessage(), ex);
}
LOG.info(" Hostname, original [{}], normalized [{}]", originalName, hosts[i]);
}
values = new HashSet<String>(Arrays.asList(hosts));
}
proxyUserHosts.put(proxyUser, values);
}
}
}
@Override
public void validate(String proxyUser, String proxyHost, String doAsUser) throws IOException,
AccessControlException {
Check.notEmpty(proxyUser, "proxyUser");
Check.notEmpty(proxyHost, "proxyHost");
Check.notEmpty(doAsUser, "doAsUser");
LOG.debug("Authorization check proxyuser [{}] host [{}] doAs [{}]",
new Object[]{proxyUser, proxyHost, doAsUser});
if (proxyUserHosts.containsKey(proxyUser)) {
proxyHost = normalizeHostname(proxyHost);
validateRequestorHost(proxyUser, proxyHost, proxyUserHosts.get(proxyUser));
validateGroup(proxyUser, doAsUser, proxyUserGroups.get(proxyUser));
} else {
throw new AccessControlException(MessageFormat.format("User [{0}] not defined as proxyuser", proxyUser));
}
}
private void validateRequestorHost(String proxyUser, String hostname, Set<String> validHosts)
throws IOException, AccessControlException {
if (validHosts != null) {
if (!validHosts.contains(hostname) && !validHosts.contains(normalizeHostname(hostname))) {
throw new AccessControlException(MessageFormat.format("Unauthorized host [{0}] for proxyuser [{1}]",
hostname, proxyUser));
}
}
}
private void validateGroup(String proxyUser, String user, Set<String> validGroups) throws IOException,
AccessControlException {
if (validGroups != null) {
List<String> userGroups = getServer().get(Groups.class).getGroups(user);
for (String g : validGroups) {
if (userGroups.contains(g)) {
return;
}
}
throw new AccessControlException(
MessageFormat.format("Unauthorized proxyuser [{0}] for user [{1}], not in proxyuser groups",
proxyUser, user));
}
}
private String normalizeHostname(String name) {
try {
InetAddress address = InetAddress.getByName(name);
return address.getCanonicalHostName();
} catch (IOException ex) {
throw new AccessControlException(MessageFormat.format("Could not resolve host [{0}], {1}", name,
ex.getMessage()));
}
}
}
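
With this HttpFS-specific service removed, impersonation checks presumably fall to the common Hadoop machinery; a hedged sketch of that style of check, assuming org.apache.hadoop.security.authorize.ProxyUsers behaves as in Hadoop 2.x and using purely hypothetical rule values:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.authorize.AuthorizationException;
    import org.apache.hadoop.security.authorize.ProxyUsers;

    public class ProxyUsersSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(false);
        // Hypothetical proxyuser rules: "hue" may impersonate anyone from this host.
        conf.set("hadoop.proxyuser.hue.hosts", "gateway.example.com");
        conf.set("hadoop.proxyuser.hue.groups", "*");
        ProxyUsers.refreshSuperUserGroupsConfiguration(conf);

        UserGroupInformation realUser = UserGroupInformation.createRemoteUser("hue");
        UserGroupInformation proxyUgi =
            UserGroupInformation.createProxyUser("alice", realUser);
        try {
          // Throws AuthorizationException if "hue" may not impersonate "alice"
          // from the given remote address.
          ProxyUsers.authorize(proxyUgi, "203.0.113.10");
        } catch (AuthorizationException e) {
          System.err.println("impersonation rejected: " + e.getMessage());
        }
      }
    }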

View File

@ -1,109 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import com.sun.jersey.api.core.HttpContext;
import com.sun.jersey.core.spi.component.ComponentContext;
import com.sun.jersey.core.spi.component.ComponentScope;
import com.sun.jersey.server.impl.inject.AbstractHttpContextInjectable;
import com.sun.jersey.spi.inject.Injectable;
import com.sun.jersey.spi.inject.InjectableProvider;
import org.apache.hadoop.classification.InterfaceAudience;
import org.slf4j.MDC;
import javax.ws.rs.core.Context;
import javax.ws.rs.ext.Provider;
import java.lang.reflect.Type;
import java.security.Principal;
import java.text.MessageFormat;
import java.util.regex.Pattern;
@Provider
@InterfaceAudience.Private
public class UserProvider extends AbstractHttpContextInjectable<Principal> implements
InjectableProvider<Context, Type> {
public static final String USER_NAME_PARAM = "user.name";
public static final String USER_PATTERN_KEY
= "httpfs.user.provider.user.pattern";
public static final String USER_PATTERN_DEFAULT
= "^[A-Za-z_][A-Za-z0-9._-]*[$]?$";
private static Pattern userPattern = Pattern.compile(USER_PATTERN_DEFAULT);
public static void setUserPattern(String pattern) {
userPattern = Pattern.compile(pattern);
}
public static Pattern getUserPattern() {
return userPattern;
}
static class UserParam extends StringParam {
public UserParam(String user) {
super(USER_NAME_PARAM, user, getUserPattern());
}
@Override
public String parseParam(String str) {
if (str != null) {
int len = str.length();
if (len < 1) {
throw new IllegalArgumentException(MessageFormat.format(
"Parameter [{0}], it's length must be at least 1", getName()));
}
}
return super.parseParam(str);
}
}
@Override
public Principal getValue(HttpContext httpContext) {
Principal principal = httpContext.getRequest().getUserPrincipal();
if (principal == null) {
final String user = httpContext.getRequest().getQueryParameters().getFirst(USER_NAME_PARAM);
if (user != null) {
principal = new Principal() {
@Override
public String getName() {
return new UserParam(user).value();
}
};
}
}
if (principal != null) {
MDC.put("user", principal.getName());
}
return principal;
}
@Override
public ComponentScope getScope() {
return ComponentScope.PerRequest;
}
@Override
public Injectable getInjectable(ComponentContext componentContext, Context context, Type type) {
return (type.equals(Principal.class)) ? this : null;
}
}

View File

@@ -34,7 +34,6 @@
       org.apache.hadoop.lib.service.instrumentation.InstrumentationService,
       org.apache.hadoop.lib.service.scheduler.SchedulerService,
       org.apache.hadoop.lib.service.security.GroupsService,
-      org.apache.hadoop.lib.service.security.ProxyUserService,
       org.apache.hadoop.lib.service.hadoop.FileSystemAccessService
     </value>
     <description>

@@ -118,6 +117,10 @@
   </property>

   <!-- HttpFSServer proxy user Configuration -->
+  <!--
+  The following 2 properties within this comment are provided as an
+  example to facilitate configuring HttpFS proxyusers.

   <property>
     <name>httpfs.proxyuser.#USER#.hosts</name>

@@ -152,6 +155,7 @@
       in the property name.
     </description>
   </property>
+  -->

   <!-- HttpFS Delegation Token configuration -->
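
To turn the commented-out template above into a working rule, the two properties are typically copied (outside the comment, usually into httpfs-site.xml) with a concrete user substituted for #USER#; the user name "hue" and host below are only hypothetical examples:

    <property>
      <name>httpfs.proxyuser.hue.hosts</name>
      <value>gateway.example.com</value>
      <description>Hosts from which the 'hue' user may submit doAs requests.</description>
    </property>

    <property>
      <name>httpfs.proxyuser.hue.groups</name>
      <value>*</value>
      <description>Groups whose members the 'hue' user may impersonate.</description>
    </property>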

View File

@ -1,226 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.security;
import static org.junit.Assert.assertNotNull;
import java.security.AccessControlException;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.Groups;
import org.apache.hadoop.lib.service.ProxyUser;
import org.apache.hadoop.test.HTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.test.TestException;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
public class TestProxyUserService extends HTestCase {
@Test
@TestDir
public void service() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
assertNotNull(proxyUser);
server.destroy();
}
@Test
@TestException(exception = ServiceException.class, msgRegExp = "PRXU02.*")
@TestDir
public void wrongConfigGroups() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
conf.set("server.proxyuser.foo.hosts", "*");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestException(exception = ServiceException.class, msgRegExp = "PRXU01.*")
@TestDir
public void wrongHost() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
conf.set("server.proxyuser.foo.hosts", "otherhost");
conf.set("server.proxyuser.foo.groups", "*");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestException(exception = ServiceException.class, msgRegExp = "PRXU02.*")
@TestDir
public void wrongConfigHosts() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
conf.set("server.proxyuser.foo.groups", "*");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
}
@Test
@TestDir
public void validateAnyHostAnyUser() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
conf.set("server.proxyuser.foo.hosts", "*");
conf.set("server.proxyuser.foo.groups", "*");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
assertNotNull(proxyUser);
proxyUser.validate("foo", "localhost", "bar");
server.destroy();
}
@Test(expected = AccessControlException.class)
@TestDir
public void invalidProxyUser() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
conf.set("server.proxyuser.foo.hosts", "*");
conf.set("server.proxyuser.foo.groups", "*");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
assertNotNull(proxyUser);
proxyUser.validate("bar", "localhost", "foo");
server.destroy();
}
@Test
@TestDir
public void validateHost() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
conf.set("server.proxyuser.foo.hosts", "localhost");
conf.set("server.proxyuser.foo.groups", "*");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
assertNotNull(proxyUser);
proxyUser.validate("foo", "localhost", "bar");
server.destroy();
}
private String getGroup() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName())));
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
Groups groups = server.get(Groups.class);
List<String> g = groups.getGroups(System.getProperty("user.name"));
server.destroy();
return g.get(0);
}
@Test
@TestDir
public void validateGroup() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
conf.set("server.proxyuser.foo.hosts", "*");
conf.set("server.proxyuser.foo.groups", getGroup());
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
assertNotNull(proxyUser);
proxyUser.validate("foo", "localhost", System.getProperty("user.name"));
server.destroy();
}
@Test(expected = AccessControlException.class)
@TestDir
public void unknownHost() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
conf.set("server.proxyuser.foo.hosts", "localhost");
conf.set("server.proxyuser.foo.groups", "*");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
assertNotNull(proxyUser);
proxyUser.validate("foo", "unknownhost.bar.foo", "bar");
server.destroy();
}
@Test(expected = AccessControlException.class)
@TestDir
public void invalidHost() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
conf.set("server.proxyuser.foo.hosts", "localhost");
conf.set("server.proxyuser.foo.groups", "*");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
assertNotNull(proxyUser);
proxyUser.validate("foo", "www.yahoo.com", "bar");
server.destroy();
}
@Test(expected = AccessControlException.class)
@TestDir
public void invalidGroup() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",", Arrays.asList(GroupsService.class.getName(),
ProxyUserService.class.getName())));
conf.set("server.proxyuser.foo.hosts", "localhost");
conf.set("server.proxyuser.foo.groups", "nobody");
Server server = new Server("server", dir, dir, dir, dir, conf);
server.init();
ProxyUser proxyUser = server.get(ProxyUser.class);
assertNotNull(proxyUser);
proxyUser.validate("foo", "localhost", System.getProperty("user.name"));
server.destroy();
}
}
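The tests above drive the proxy-user rules through the server.proxyuser.<user>.hosts and server.proxyuser.<user>.groups properties. As a rough illustration of the check they exercise (a self-contained sketch with hypothetical class and field names, not the Hadoop ProxyUserService implementation): the allowed-hosts list is consulted first and the target user's group membership second, with "*" acting as a wildcard in either place.

import java.security.AccessControlException;
import java.util.Map;
import java.util.Set;

public class ProxyUserCheckSketch {
  private final Map<String, Set<String>> allowedHosts;   // proxy user -> permitted request hosts, or "*"
  private final Map<String, Set<String>> allowedGroups;  // proxy user -> permitted target-user groups, or "*"
  private final Map<String, Set<String>> userGroups;     // target user -> group memberships

  public ProxyUserCheckSketch(Map<String, Set<String>> allowedHosts,
                              Map<String, Set<String>> allowedGroups,
                              Map<String, Set<String>> userGroups) {
    this.allowedHosts = allowedHosts;
    this.allowedGroups = allowedGroups;
    this.userGroups = userGroups;
  }

  public void validate(String proxyUser, String host, String doAsUser)
      throws AccessControlException {
    Set<String> hosts = allowedHosts.get(proxyUser);
    if (hosts == null || !(hosts.contains("*") || hosts.contains(host))) {
      throw new AccessControlException(
          "Unauthorized host " + host + " for proxy user " + proxyUser);
    }
    Set<String> groups = allowedGroups.get(proxyUser);
    if (groups != null && groups.contains("*")) {
      return;  // any target user may be impersonated
    }
    Set<String> memberships = userGroups.getOrDefault(doAsUser, Set.of());
    if (groups == null || groups.stream().noneMatch(memberships::contains)) {
      throw new AccessControlException(
          "Unauthorized target user " + doAsUser + " for proxy user " + proxyUser);
    }
  }
}

With allowedHosts = {foo -> {localhost}} and allowedGroups = {foo -> {*}}, validate("foo", "localhost", "bar") passes while validate("foo", "www.yahoo.com", "bar") throws, which mirrors the validateHost and invalidHost cases above.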


@ -1,142 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.wsrs;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import java.security.Principal;
import javax.ws.rs.core.MultivaluedMap;
import org.apache.hadoop.test.TestException;
import org.apache.hadoop.test.TestExceptionHelper;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.MethodRule;
import org.mockito.Mockito;
import org.slf4j.MDC;
import com.sun.jersey.api.core.HttpContext;
import com.sun.jersey.api.core.HttpRequestContext;
import com.sun.jersey.core.spi.component.ComponentScope;
public class TestUserProvider {
@Rule
public MethodRule exceptionHelper = new TestExceptionHelper();
@Test
@SuppressWarnings("unchecked")
public void noUser() {
MDC.remove("user");
HttpRequestContext request = Mockito.mock(HttpRequestContext.class);
Mockito.when(request.getUserPrincipal()).thenReturn(null);
MultivaluedMap map = Mockito.mock(MultivaluedMap.class);
Mockito.when(map.getFirst(UserProvider.USER_NAME_PARAM)).thenReturn(null);
Mockito.when(request.getQueryParameters()).thenReturn(map);
HttpContext context = Mockito.mock(HttpContext.class);
Mockito.when(context.getRequest()).thenReturn(request);
UserProvider up = new UserProvider();
assertNull(up.getValue(context));
assertNull(MDC.get("user"));
}
@Test
@SuppressWarnings("unchecked")
public void queryStringUser() {
MDC.remove("user");
HttpRequestContext request = Mockito.mock(HttpRequestContext.class);
Mockito.when(request.getUserPrincipal()).thenReturn(null);
MultivaluedMap map = Mockito.mock(MultivaluedMap.class);
Mockito.when(map.getFirst(UserProvider.USER_NAME_PARAM)).thenReturn("foo");
Mockito.when(request.getQueryParameters()).thenReturn(map);
HttpContext context = Mockito.mock(HttpContext.class);
Mockito.when(context.getRequest()).thenReturn(request);
UserProvider up = new UserProvider();
assertEquals(up.getValue(context).getName(), "foo");
assertEquals(MDC.get("user"), "foo");
}
@Test
@SuppressWarnings("unchecked")
public void principalUser() {
MDC.remove("user");
HttpRequestContext request = Mockito.mock(HttpRequestContext.class);
Mockito.when(request.getUserPrincipal()).thenReturn(new Principal() {
@Override
public String getName() {
return "bar";
}
});
HttpContext context = Mockito.mock(HttpContext.class);
Mockito.when(context.getRequest()).thenReturn(request);
UserProvider up = new UserProvider();
assertEquals(up.getValue(context).getName(), "bar");
assertEquals(MDC.get("user"), "bar");
}
@Test
public void getters() {
UserProvider up = new UserProvider();
assertEquals(up.getScope(), ComponentScope.PerRequest);
assertEquals(up.getInjectable(null, null, Principal.class), up);
assertNull(up.getInjectable(null, null, String.class));
}
@Test
@TestException(exception = IllegalArgumentException.class)
public void userNameEmpty() {
new UserProvider.UserParam("");
}
@Test
@TestException(exception = IllegalArgumentException.class)
public void userNameInvalidStart() {
new UserProvider.UserParam("1x");
}
@Test
@TestException(exception = IllegalArgumentException.class)
public void userNameInvalidDollarSign() {
new UserProvider.UserParam("1$x");
}
@Test
public void userNameMinLength() {
new UserProvider.UserParam("a");
}
@Test
public void userNameValidDollarSign() {
new UserProvider.UserParam("a$");
}
@Test
public void customUserPattern() {
try {
UserProvider.setUserPattern("1");
new UserProvider.UserParam("1");
} finally {
UserProvider.setUserPattern(UserProvider.USER_PATTERN_DEFAULT);
}
}
}
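The UserParam cases above accept "a" and "a$" but reject an empty name and names starting with a digit, and customUserPattern shows the rule is configurable. A minimal sketch of that kind of regex-based validation follows; the default expression here is an assumption chosen to match the tested cases, not necessarily the library's USER_PATTERN_DEFAULT constant.

import java.util.regex.Pattern;

public class UserParamSketch {
  // Assumed default rule: start with a letter or underscore, then letters, digits,
  // '.', '_' or '-', optionally ending with a single '$'.
  private static volatile Pattern userPattern =
      Pattern.compile("^[A-Za-z_][A-Za-z0-9._-]*\\$?$");

  static void setUserPattern(String regex) {
    userPattern = Pattern.compile(regex);
  }

  static String validate(String name) {
    if (name == null || name.isEmpty() || !userPattern.matcher(name).matches()) {
      throw new IllegalArgumentException("Invalid user name: [" + name + "]");
    }
    return name;
  }

  public static void main(String[] args) {
    validate("a");     // minimum length, accepted
    validate("a$");    // trailing dollar sign, accepted
    try {
      validate("1x");  // starts with a digit, rejected
    } catch (IllegalArgumentException expected) {
      System.out.println(expected.getMessage());
    }
  }
}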


@ -30,6 +30,7 @@ import java.util.concurrent.ConcurrentNavigableMap;
import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration; import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS; import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS;
import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx; import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx;
@ -407,4 +408,80 @@ public class TestWrites {
} }
} }
} }
@Test
public void testOOOWrites() throws IOException, InterruptedException {
NfsConfiguration config = new NfsConfiguration();
MiniDFSCluster cluster = null;
RpcProgramNfs3 nfsd;
final int bufSize = 32;
final int numOOO = 3;
SecurityHandler securityHandler = Mockito.mock(SecurityHandler.class);
Mockito.when(securityHandler.getUser()).thenReturn(
System.getProperty("user.name"));
String currentUser = System.getProperty("user.name");
config.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(currentUser),
"*");
config.set(
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(currentUser),
"*");
ProxyUsers.refreshSuperUserGroupsConfiguration(config);
// Use ephemeral port in case tests are running in parallel
config.setInt("nfs3.mountd.port", 0);
config.setInt("nfs3.server.port", 0);
try {
cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
cluster.waitActive();
Nfs3 nfs3 = new Nfs3(config);
nfs3.startServiceInternal(false);
nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
DFSClient dfsClient = new DFSClient(NameNode.getAddress(config), config);
HdfsFileStatus status = dfsClient.getFileInfo("/");
FileHandle rootHandle = new FileHandle(status.getFileId());
CREATE3Request createReq = new CREATE3Request(rootHandle,
"out-of-order-write" + System.currentTimeMillis(),
Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
XDR createXdr = new XDR();
createReq.serialize(createXdr);
CREATE3Response createRsp = nfsd.create(createXdr.asReadOnlyWrap(),
securityHandler, new InetSocketAddress("localhost", 1234));
FileHandle handle = createRsp.getObjHandle();
byte[][] oooBuf = new byte[numOOO][bufSize];
for (int i = 0; i < numOOO; i++) {
Arrays.fill(oooBuf[i], (byte) i);
}
for (int i = 0; i < numOOO; i++) {
final long offset = (numOOO - 1 - i) * bufSize;
WRITE3Request writeReq = new WRITE3Request(handle, offset, bufSize,
WriteStableHow.UNSTABLE, ByteBuffer.wrap(oooBuf[i]));
XDR writeXdr = new XDR();
writeReq.serialize(writeXdr);
nfsd.write(writeXdr.asReadOnlyWrap(), null, 1, securityHandler,
new InetSocketAddress("localhost", 1234));
}
waitWrite(nfsd, handle, 60000);
READ3Request readReq = new READ3Request(handle, bufSize, bufSize);
XDR readXdr = new XDR();
readReq.serialize(readXdr);
READ3Response readRsp = nfsd.read(readXdr.asReadOnlyWrap(),
securityHandler, new InetSocketAddress("localhost", config.getInt(
NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY,
NfsConfigKeys.DFS_NFS_SERVER_PORT_DEFAULT)));
assertTrue(Arrays.equals(oooBuf[1], readRsp.getData().array()));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
} }


@ -137,9 +137,6 @@ Trunk (Unreleased)
BUG FIXES BUG FIXES
HDFS-6517. Remove hadoop-metrics2.properties from hdfs project (Akira
AJISAKA via aw)
HADOOP-9635 Fix potential Stack Overflow in DomainSocket.c (V. Karthik Kumar HADOOP-9635 Fix potential Stack Overflow in DomainSocket.c (V. Karthik Kumar
via cmccabe) via cmccabe)
@ -393,6 +390,18 @@ Release 2.6.0 - UNRELEASED
HDFS-6838. Code cleanup for unnecessary INode replacement. HDFS-6838. Code cleanup for unnecessary INode replacement.
(Jing Zhao via wheat9) (Jing Zhao via wheat9)
HDFS-6836. HDFS INFO logging is verbose & uses file appenders. (Xiaoyu
Yao via Arpit Agarwal)
HDFS-6567. Normalize the order of public final in HdfsFileStatus.
(Tassapol Athiapinya via wheat9)
HDFS-6849. Replace HttpFS custom proxyuser handling with common
implementation. (tucu)
HDFS-6850. Move NFS out of order write unit tests into TestWrites class.
(Zhe Zhang via atm)
OPTIMIZATIONS OPTIMIZATIONS
HDFS-6690. Deduplicate xattr names in memory. (wang) HDFS-6690. Deduplicate xattr names in memory. (wang)
@ -402,6 +411,9 @@ Release 2.6.0 - UNRELEASED
HDFS-6823. dfs.web.authentication.kerberos.principal shows up in logs for HDFS-6823. dfs.web.authentication.kerberos.principal shows up in logs for
insecure HDFS (Allen Wittenauer via raviprak) insecure HDFS (Allen Wittenauer via raviprak)
HDFS-6517. Remove hadoop-metrics2.properties from hdfs project (Akira
AJISAKA via aw)
HDFS-6617. Flake TestDFSZKFailoverController.testManualFailoverWithDFSHAAdmin HDFS-6617. Flake TestDFSZKFailoverController.testManualFailoverWithDFSHAAdmin
due to a long edit log sync op. (Liang Xie via cnauroth) due to a long edit log sync op. (Liang Xie via cnauroth)
@ -491,6 +503,17 @@ Release 2.6.0 - UNRELEASED
HDFS-6582. Missing null check in RpcProgramNfs3#read(XDR, SecurityHandler) HDFS-6582. Missing null check in RpcProgramNfs3#read(XDR, SecurityHandler)
(Abhiraj Butala via brandonli) (Abhiraj Butala via brandonli)
HDFS-6830. BlockInfo.addStorage fails when DN changes the storage for a
block replica (Arpit Agarwal)
HDFS-6247. Avoid timeouts for replaceBlock() call by sending intermediate
responses to Balancer (vinayakumarb)
HDFS-6783. Fix HDFS CacheReplicationMonitor rescan logic. (Yi Liu and Colin Patrick McCabe via umamahesh)
HDFS-6825. Edit log corruption due to delayed block removal.
(Yongjun Zhang via wang)
Release 2.5.0 - UNRELEASED Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES


@ -96,7 +96,7 @@ public class HdfsFileStatus {
* Get the length of this file, in bytes. * Get the length of this file, in bytes.
* @return the length of this file, in bytes. * @return the length of this file, in bytes.
*/ */
final public long getLen() { public final long getLen() {
return length; return length;
} }
@ -104,7 +104,7 @@ public class HdfsFileStatus {
* Is this a directory? * Is this a directory?
* @return true if this is a directory * @return true if this is a directory
*/ */
final public boolean isDir() { public final boolean isDir() {
return isdir; return isdir;
} }
@ -120,7 +120,7 @@ public class HdfsFileStatus {
* Get the block size of the file. * Get the block size of the file.
* @return the number of bytes * @return the number of bytes
*/ */
final public long getBlockSize() { public final long getBlockSize() {
return blocksize; return blocksize;
} }
@ -128,7 +128,7 @@ public class HdfsFileStatus {
* Get the replication factor of a file. * Get the replication factor of a file.
* @return the replication factor of a file. * @return the replication factor of a file.
*/ */
final public short getReplication() { public final short getReplication() {
return block_replication; return block_replication;
} }
@ -136,7 +136,7 @@ public class HdfsFileStatus {
* Get the modification time of the file. * Get the modification time of the file.
* @return the modification time of file in milliseconds since January 1, 1970 UTC. * @return the modification time of file in milliseconds since January 1, 1970 UTC.
*/ */
final public long getModificationTime() { public final long getModificationTime() {
return modification_time; return modification_time;
} }
@ -144,7 +144,7 @@ public class HdfsFileStatus {
* Get the access time of the file. * Get the access time of the file.
* @return the access time of file in milliseconds since January 1, 1970 UTC. * @return the access time of file in milliseconds since January 1, 1970 UTC.
*/ */
final public long getAccessTime() { public final long getAccessTime() {
return access_time; return access_time;
} }
@ -152,7 +152,7 @@ public class HdfsFileStatus {
* Get FsPermission associated with the file. * Get FsPermission associated with the file.
* @return permission * @return permission
*/ */
final public FsPermission getPermission() { public final FsPermission getPermission() {
return permission; return permission;
} }
@ -160,7 +160,7 @@ public class HdfsFileStatus {
* Get the owner of the file. * Get the owner of the file.
* @return owner of the file * @return owner of the file
*/ */
final public String getOwner() { public final String getOwner() {
return owner; return owner;
} }
@ -168,7 +168,7 @@ public class HdfsFileStatus {
* Get the group associated with the file. * Get the group associated with the file.
* @return group for the file. * @return group for the file.
*/ */
final public String getGroup() { public final String getGroup() {
return group; return group;
} }
@ -176,7 +176,7 @@ public class HdfsFileStatus {
* Check if the local name is empty * Check if the local name is empty
* @return true if the name is empty * @return true if the name is empty
*/ */
final public boolean isEmptyLocalName() { public final boolean isEmptyLocalName() {
return path.length == 0; return path.length == 0;
} }
@ -184,7 +184,7 @@ public class HdfsFileStatus {
* Get the string representation of the local name * Get the string representation of the local name
* @return the local name in string * @return the local name in string
*/ */
final public String getLocalName() { public final String getLocalName() {
return DFSUtil.bytes2String(path); return DFSUtil.bytes2String(path);
} }
@ -192,7 +192,7 @@ public class HdfsFileStatus {
* Get the Java UTF8 representation of the local name * Get the Java UTF8 representation of the local name
* @return the local name in java UTF8 * @return the local name in java UTF8
*/ */
final public byte[] getLocalNameInBytes() { public final byte[] getLocalNameInBytes() {
return path; return path;
} }
@ -201,7 +201,7 @@ public class HdfsFileStatus {
* @param parent the parent path * @param parent the parent path
* @return the full path in string * @return the full path in string
*/ */
final public String getFullName(final String parent) { public final String getFullName(final String parent) {
if (isEmptyLocalName()) { if (isEmptyLocalName()) {
return parent; return parent;
} }
@ -219,7 +219,7 @@ public class HdfsFileStatus {
* @param parent the parent path * @param parent the parent path
* @return the full path * @return the full path
*/ */
final public Path getFullPath(final Path parent) { public final Path getFullPath(final Path parent) {
if (isEmptyLocalName()) { if (isEmptyLocalName()) {
return parent; return parent;
} }
@ -231,27 +231,27 @@ public class HdfsFileStatus {
* Get the string representation of the symlink. * Get the string representation of the symlink.
* @return the symlink as a string. * @return the symlink as a string.
*/ */
final public String getSymlink() { public final String getSymlink() {
return DFSUtil.bytes2String(symlink); return DFSUtil.bytes2String(symlink);
} }
final public byte[] getSymlinkInBytes() { public final byte[] getSymlinkInBytes() {
return symlink; return symlink;
} }
final public long getFileId() { public final long getFileId() {
return fileId; return fileId;
} }
final public FileEncryptionInfo getFileEncryptionInfo() { public final FileEncryptionInfo getFileEncryptionInfo() {
return feInfo; return feInfo;
} }
final public int getChildrenNum() { public final int getChildrenNum() {
return childrenNum; return childrenNum;
} }
final public FileStatus makeQualified(URI defaultUri, Path path) { public final FileStatus makeQualified(URI defaultUri, Path path) {
return new FileStatus(getLen(), isDir(), getReplication(), return new FileStatus(getLen(), isDir(), getReplication(),
getBlockSize(), getModificationTime(), getBlockSize(), getModificationTime(),
getAccessTime(), getAccessTime(),


@ -69,7 +69,7 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
return locations; return locations;
} }
final public LocatedFileStatus makeQualifiedLocated(URI defaultUri, public final LocatedFileStatus makeQualifiedLocated(URI defaultUri,
Path path) { Path path) {
return new LocatedFileStatus(getLen(), isDir(), getReplication(), return new LocatedFileStatus(getLen(), isDir(), getReplication(),
getBlockSize(), getModificationTime(), getBlockSize(), getModificationTime(),


@ -87,8 +87,6 @@ public class Dispatcher {
private static final int MAX_NO_PENDING_MOVE_ITERATIONS = 5; private static final int MAX_NO_PENDING_MOVE_ITERATIONS = 5;
private static final long DELAY_AFTER_ERROR = 10 * 1000L; // 10 seconds private static final long DELAY_AFTER_ERROR = 10 * 1000L; // 10 seconds
private static final int BLOCK_MOVE_READ_TIMEOUT = 20 * 60 * 1000; // 20
// minutes
private final NameNodeConnector nnc; private final NameNodeConnector nnc;
private final SaslDataTransferClient saslClient; private final SaslDataTransferClient saslClient;
@ -278,13 +276,6 @@ public class Dispatcher {
sock.connect( sock.connect(
NetUtils.createSocketAddr(target.getDatanodeInfo().getXferAddr()), NetUtils.createSocketAddr(target.getDatanodeInfo().getXferAddr()),
HdfsServerConstants.READ_TIMEOUT); HdfsServerConstants.READ_TIMEOUT);
/*
* Unfortunately we don't have a good way to know if the Datanode is
* taking a really long time to move a block, OR something has gone
* wrong and it's never going to finish. To deal with this scenario, we
* set a long timeout (20 minutes) to avoid hanging indefinitely.
*/
sock.setSoTimeout(BLOCK_MOVE_READ_TIMEOUT);
sock.setKeepAlive(true); sock.setKeepAlive(true);
@ -341,8 +332,12 @@ public class Dispatcher {
/** Receive a block copy response from the input stream */ /** Receive a block copy response from the input stream */
private void receiveResponse(DataInputStream in) throws IOException { private void receiveResponse(DataInputStream in) throws IOException {
BlockOpResponseProto response = BlockOpResponseProto BlockOpResponseProto response =
.parseFrom(vintPrefixed(in)); BlockOpResponseProto.parseFrom(vintPrefixed(in));
while (response.getStatus() == Status.IN_PROGRESS) {
// read intermediate responses
response = BlockOpResponseProto.parseFrom(vintPrefixed(in));
}
if (response.getStatus() != Status.SUCCESS) { if (response.getStatus() != Status.SUCCESS) {
if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) { if (response.getStatus() == Status.ERROR_ACCESS_TOKEN) {
throw new IOException("block move failed due to access token error"); throw new IOException("block move failed due to access token error");


@ -194,24 +194,12 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
* Add a {@link DatanodeStorageInfo} location for a block * Add a {@link DatanodeStorageInfo} location for a block
*/ */
boolean addStorage(DatanodeStorageInfo storage) { boolean addStorage(DatanodeStorageInfo storage) {
boolean added = true;
int idx = findDatanode(storage.getDatanodeDescriptor());
if(idx >= 0) {
if (getStorageInfo(idx) == storage) { // the storage is already there
return false;
} else {
// The block is on the DN but belongs to a different storage.
// Update our state.
removeStorage(getStorageInfo(idx));
added = false; // Just updating storage. Return false.
}
}
// find the last null node // find the last null node
int lastNode = ensureCapacity(1); int lastNode = ensureCapacity(1);
setStorageInfo(lastNode, storage); setStorageInfo(lastNode, storage);
setNext(lastNode, null); setNext(lastNode, null);
setPrevious(lastNode, null); setPrevious(lastNode, null);
return added; return true;
} }
/** /**
@ -240,16 +228,18 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
* Find specified DatanodeDescriptor. * Find specified DatanodeDescriptor.
* @return index or -1 if not found. * @return index or -1 if not found.
*/ */
int findDatanode(DatanodeDescriptor dn) { boolean findDatanode(DatanodeDescriptor dn) {
int len = getCapacity(); int len = getCapacity();
for(int idx = 0; idx < len; idx++) { for(int idx = 0; idx < len; idx++) {
DatanodeDescriptor cur = getDatanode(idx); DatanodeDescriptor cur = getDatanode(idx);
if(cur == dn) if(cur == dn) {
return idx; return true;
if(cur == null) }
if(cur == null) {
break; break;
}
} }
return -1; return false;
} }
/** /**
* Find specified DatanodeStorageInfo. * Find specified DatanodeStorageInfo.


@ -373,12 +373,14 @@ public class BlockInfoUnderConstruction extends BlockInfo {
sb.append("{blockUCState=").append(blockUCState) sb.append("{blockUCState=").append(blockUCState)
.append(", primaryNodeIndex=").append(primaryNodeIndex) .append(", primaryNodeIndex=").append(primaryNodeIndex)
.append(", replicas=["); .append(", replicas=[");
Iterator<ReplicaUnderConstruction> iter = replicas.iterator(); if (replicas != null) {
if (iter.hasNext()) { Iterator<ReplicaUnderConstruction> iter = replicas.iterator();
iter.next().appendStringTo(sb); if (iter.hasNext()) {
while (iter.hasNext()) {
sb.append(", ");
iter.next().appendStringTo(sb); iter.next().appendStringTo(sb);
while (iter.hasNext()) {
sb.append(", ");
iter.next().appendStringTo(sb);
}
} }
} }
sb.append("]}"); sb.append("]}");


@ -2068,7 +2068,7 @@ public class BlockManager {
// Add replica if appropriate. If the replica was previously corrupt // Add replica if appropriate. If the replica was previously corrupt
// but now okay, it might need to be updated. // but now okay, it might need to be updated.
if (reportedState == ReplicaState.FINALIZED if (reportedState == ReplicaState.FINALIZED
&& (storedBlock.findDatanode(dn) < 0 && (!storedBlock.findDatanode(dn)
|| corruptReplicas.isReplicaCorrupt(storedBlock, dn))) { || corruptReplicas.isReplicaCorrupt(storedBlock, dn))) {
toAdd.add(storedBlock); toAdd.add(storedBlock);
} }
@ -2249,7 +2249,7 @@ public class BlockManager {
storageInfo, ucBlock.reportedBlock, ucBlock.reportedState); storageInfo, ucBlock.reportedBlock, ucBlock.reportedState);
if (ucBlock.reportedState == ReplicaState.FINALIZED && if (ucBlock.reportedState == ReplicaState.FINALIZED &&
block.findDatanode(storageInfo.getDatanodeDescriptor()) < 0) { !block.findDatanode(storageInfo.getDatanodeDescriptor())) {
addStoredBlock(block, storageInfo, null, true); addStoredBlock(block, storageInfo, null, true);
} }
} }


@ -103,22 +103,22 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
*/ */
private final Condition scanFinished; private final Condition scanFinished;
/**
* Whether there are pending CacheManager operations that necessitate a
* CacheReplicationMonitor rescan. Protected by the CRM lock.
*/
private boolean needsRescan = true;
/**
* Whether we are currently doing a rescan. Protected by the CRM lock.
*/
private boolean isScanning = false;
/** /**
* The number of rescans completed. Used to wait for scans to finish. * The number of rescans completed. Used to wait for scans to finish.
* Protected by the CacheReplicationMonitor lock. * Protected by the CacheReplicationMonitor lock.
*/ */
private long scanCount = 0; private long completedScanCount = 0;
/**
* The scan we're currently performing, or -1 if no scan is in progress.
* Protected by the CacheReplicationMonitor lock.
*/
private long curScanCount = -1;
/**
* The number of rescans we need to complete. Protected by the CRM lock.
*/
private long neededScanCount = 0;
/** /**
* True if this monitor should terminate. Protected by the CRM lock. * True if this monitor should terminate. Protected by the CRM lock.
@ -169,7 +169,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
LOG.info("Shutting down CacheReplicationMonitor"); LOG.info("Shutting down CacheReplicationMonitor");
return; return;
} }
if (needsRescan) { if (completedScanCount < neededScanCount) {
LOG.info("Rescanning because of pending operations"); LOG.info("Rescanning because of pending operations");
break; break;
} }
@ -182,8 +182,6 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
doRescan.await(delta, TimeUnit.MILLISECONDS); doRescan.await(delta, TimeUnit.MILLISECONDS);
curTimeMs = Time.monotonicNow(); curTimeMs = Time.monotonicNow();
} }
isScanning = true;
needsRescan = false;
} finally { } finally {
lock.unlock(); lock.unlock();
} }
@ -194,8 +192,8 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
// Update synchronization-related variables. // Update synchronization-related variables.
lock.lock(); lock.lock();
try { try {
isScanning = false; completedScanCount = curScanCount;
scanCount++; curScanCount = -1;
scanFinished.signalAll(); scanFinished.signalAll();
} finally { } finally {
lock.unlock(); lock.unlock();
@ -226,16 +224,15 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
"Must not hold the FSN write lock when waiting for a rescan."); "Must not hold the FSN write lock when waiting for a rescan.");
Preconditions.checkArgument(lock.isHeldByCurrentThread(), Preconditions.checkArgument(lock.isHeldByCurrentThread(),
"Must hold the CRM lock when waiting for a rescan."); "Must hold the CRM lock when waiting for a rescan.");
if (!needsRescan) { if (neededScanCount <= completedScanCount) {
return; return;
} }
// If no scan is already ongoing, mark the CRM as dirty and kick // If no scan is already ongoing, mark the CRM as dirty and kick
if (!isScanning) { if (curScanCount < 0) {
doRescan.signal(); doRescan.signal();
} }
// Wait until the scan finishes and the count advances // Wait until the scan finishes and the count advances
final long startCount = scanCount; while ((!shutdown) && (completedScanCount < neededScanCount)) {
while ((!shutdown) && (startCount >= scanCount)) {
try { try {
scanFinished.await(); scanFinished.await();
} catch (InterruptedException e) { } catch (InterruptedException e) {
@ -253,7 +250,14 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
public void setNeedsRescan() { public void setNeedsRescan() {
Preconditions.checkArgument(lock.isHeldByCurrentThread(), Preconditions.checkArgument(lock.isHeldByCurrentThread(),
"Must hold the CRM lock when setting the needsRescan bit."); "Must hold the CRM lock when setting the needsRescan bit.");
this.needsRescan = true; if (curScanCount >= 0) {
// If there is a scan in progress, we need to wait for the scan after
// that.
neededScanCount = curScanCount + 1;
} else {
// If there is no scan in progress, we need to wait for the next scan.
neededScanCount = completedScanCount + 1;
}
} }
/** /**
@ -282,12 +286,19 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
private void rescan() throws InterruptedException { private void rescan() throws InterruptedException {
scannedDirectives = 0; scannedDirectives = 0;
scannedBlocks = 0; scannedBlocks = 0;
namesystem.writeLock();
try { try {
if (shutdown) { namesystem.writeLock();
throw new InterruptedException("CacheReplicationMonitor was " + try {
"shut down."); lock.lock();
if (shutdown) {
throw new InterruptedException("CacheReplicationMonitor was " +
"shut down.");
}
curScanCount = completedScanCount + 1;
} finally {
lock.unlock();
} }
resetStatistics(); resetStatistics();
rescanCacheDirectives(); rescanCacheDirectives();
rescanCachedBlockMap(); rescanCachedBlockMap();
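The change above replaces the needsRescan and isScanning flags with three counters (completedScanCount, curScanCount, neededScanCount) so a waiter can tell whether the scan it needs has actually finished, even when a rescan was already in flight at the time of the request. A minimal sketch of that generation-counter pattern, with hypothetical names and with waking the scanner thread omitted:

import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

public class RescanCounterSketch {
  private final ReentrantLock lock = new ReentrantLock();
  private final Condition scanFinished = lock.newCondition();

  private long completedScanCount = 0;
  private long curScanCount = -1;   // -1 means no scan is in progress
  private long neededScanCount = 0;

  /** Record that state changed and at least one more full scan is required. */
  public void setNeedsRescan() {
    lock.lock();
    try {
      if (curScanCount >= 0) {
        // A scan is already running and may have missed this change,
        // so the scan after the current one is the earliest that counts.
        neededScanCount = curScanCount + 1;
      } else {
        neededScanCount = completedScanCount + 1;
      }
    } finally {
      lock.unlock();
    }
  }

  /** Block until every scan requested so far has completed. */
  public void waitForRescan() throws InterruptedException {
    lock.lock();
    try {
      while (completedScanCount < neededScanCount) {
        scanFinished.await();
      }
    } finally {
      lock.unlock();
    }
  }

  /** Called by the scanning thread around each rescan. */
  void runOneScan(Runnable scanBody) {
    lock.lock();
    try {
      curScanCount = completedScanCount + 1;
    } finally {
      lock.unlock();
    }
    scanBody.run();                 // do the actual rescan work outside the lock
    lock.lock();
    try {
      completedScanCount = curScanCount;
      curScanCount = -1;
      scanFinished.signalAll();
    } finally {
      lock.unlock();
    }
  }
}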


@ -208,12 +208,28 @@ public class DatanodeStorageInfo {
} }
public boolean addBlock(BlockInfo b) { public boolean addBlock(BlockInfo b) {
if(!b.addStorage(this)) // First check whether the block belongs to a different storage
return false; // on the same DN.
boolean replaced = false;
DatanodeStorageInfo otherStorage =
b.findStorageInfo(getDatanodeDescriptor());
if (otherStorage != null) {
if (otherStorage != this) {
// The block belongs to a different storage. Remove it first.
otherStorage.removeBlock(b);
replaced = true;
} else {
// The block is already associated with this storage.
return false;
}
}
// add to the head of the data-node list // add to the head of the data-node list
b.addStorage(this);
blockList = b.listInsert(blockList, this); blockList = b.listInsert(blockList, this);
numBlocks++; numBlocks++;
return true; return !replaced;
} }
boolean removeBlock(BlockInfo b) { boolean removeBlock(BlockInfo b) {


@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader; import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver; import org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver;
import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck; import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams; import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaInputStreams;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams; import org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams;
@ -123,6 +124,14 @@ class BlockReceiver implements Closeable {
private boolean syncOnClose; private boolean syncOnClose;
private long restartBudget; private long restartBudget;
/**
* for replaceBlock response
*/
private final long responseInterval;
private long lastResponseTime = 0;
private boolean isReplaceBlock = false;
private DataOutputStream replyOut = null;
BlockReceiver(final ExtendedBlock block, final StorageType storageType, BlockReceiver(final ExtendedBlock block, final StorageType storageType,
final DataInputStream in, final DataInputStream in,
final String inAddr, final String myAddr, final String inAddr, final String myAddr,
@ -144,6 +153,9 @@ class BlockReceiver implements Closeable {
this.isClient = !this.isDatanode; this.isClient = !this.isDatanode;
this.restartBudget = datanode.getDnConf().restartReplicaExpiry; this.restartBudget = datanode.getDnConf().restartReplicaExpiry;
this.datanodeSlowLogThresholdMs = datanode.getDnConf().datanodeSlowIoWarningThresholdMs; this.datanodeSlowLogThresholdMs = datanode.getDnConf().datanodeSlowIoWarningThresholdMs;
// For replaceBlock() calls, a response should be sent periodically to avoid
// socket timeouts at clients, so send one every 0.5 * socketTimeout.
this.responseInterval = (long) (datanode.getDnConf().socketTimeout * 0.5);
//for datanode, we have //for datanode, we have
//1: clientName.length() == 0, and //1: clientName.length() == 0, and
//2: stage == null or PIPELINE_SETUP_CREATE //2: stage == null or PIPELINE_SETUP_CREATE
@ -651,6 +663,20 @@ class BlockReceiver implements Closeable {
lastPacketInBlock, offsetInBlock, Status.SUCCESS); lastPacketInBlock, offsetInBlock, Status.SUCCESS);
} }
/*
* Send in-progress responses for the replaceBlock() calls back to the caller to
* avoid timeouts due to balancer throttling. HDFS-6247
*/
if (isReplaceBlock
&& (Time.monotonicNow() - lastResponseTime > responseInterval)) {
BlockOpResponseProto.Builder response = BlockOpResponseProto.newBuilder()
.setStatus(Status.IN_PROGRESS);
response.build().writeDelimitedTo(replyOut);
replyOut.flush();
lastResponseTime = Time.monotonicNow();
}
if (throttler != null) { // throttle I/O if (throttler != null) { // throttle I/O
throttler.throttle(len); throttler.throttle(len);
} }
@ -718,7 +744,8 @@ class BlockReceiver implements Closeable {
DataInputStream mirrIn, // input from next datanode DataInputStream mirrIn, // input from next datanode
DataOutputStream replyOut, // output to previous datanode DataOutputStream replyOut, // output to previous datanode
String mirrAddr, DataTransferThrottler throttlerArg, String mirrAddr, DataTransferThrottler throttlerArg,
DatanodeInfo[] downstreams) throws IOException { DatanodeInfo[] downstreams,
boolean isReplaceBlock) throws IOException {
syncOnClose = datanode.getDnConf().syncOnClose; syncOnClose = datanode.getDnConf().syncOnClose;
boolean responderClosed = false; boolean responderClosed = false;
@ -726,6 +753,9 @@ class BlockReceiver implements Closeable {
mirrorAddr = mirrAddr; mirrorAddr = mirrAddr;
throttler = throttlerArg; throttler = throttlerArg;
this.replyOut = replyOut;
this.isReplaceBlock = isReplaceBlock;
try { try {
if (isClient && !isTransfer) { if (isClient && !isTransfer) {
responder = new Daemon(datanode.threadGroup, responder = new Daemon(datanode.threadGroup,
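Together with the Dispatcher change earlier in this commit, the BlockReceiver change above implements HDFS-6247: during a long replaceBlock() copy the receiving datanode writes an IN_PROGRESS response every 0.5 * socketTimeout so the caller's socket does not time out, and the caller drains those keep-alives until a terminal status arrives. A self-contained sketch of the same idea, using plain int status codes instead of BlockOpResponseProto:

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class InProgressResponseSketch {
  static final int IN_PROGRESS = 12;
  static final int SUCCESS = 0;

  /** Receiver side: emit IN_PROGRESS periodically while the slow copy runs. */
  static void receiveWithKeepAlive(DataOutputStream replyOut, long socketTimeoutMs,
                                   Runnable copyOneChunk, int chunks) throws IOException {
    final long responseInterval = (long) (socketTimeoutMs * 0.5);
    long lastResponseTime = System.currentTimeMillis();
    for (int i = 0; i < chunks; i++) {
      copyOneChunk.run();                       // throttled block-copy work
      long now = System.currentTimeMillis();
      if (now - lastResponseTime > responseInterval) {
        replyOut.writeInt(IN_PROGRESS);         // keep the caller's socket alive
        replyOut.flush();
        lastResponseTime = now;
      }
    }
    replyOut.writeInt(SUCCESS);                 // final status
    replyOut.flush();
  }

  /** Caller side: drain intermediate responses until a terminal status arrives. */
  static boolean awaitFinalStatus(DataInputStream reply) throws IOException {
    int status = reply.readInt();
    while (status == IN_PROGRESS) {
      status = reply.readInt();                 // ignore keep-alives
    }
    return status == SUCCESS;
  }
}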


@ -687,7 +687,7 @@ class BlockSender implements java.io.Closeable {
// Trigger readahead of beginning of file if configured. // Trigger readahead of beginning of file if configured.
manageOsCache(); manageOsCache();
final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0; final long startTime = ClientTraceLog.isDebugEnabled() ? System.nanoTime() : 0;
try { try {
int maxChunksPerPacket; int maxChunksPerPacket;
int pktBufSize = PacketHeader.PKT_MAX_HEADER_LEN; int pktBufSize = PacketHeader.PKT_MAX_HEADER_LEN;
@ -733,9 +733,9 @@ class BlockSender implements java.io.Closeable {
sentEntireByteRange = true; sentEntireByteRange = true;
} }
} finally { } finally {
if (clientTraceFmt != null) { if ((clientTraceFmt != null) && ClientTraceLog.isDebugEnabled()) {
final long endTime = System.nanoTime(); final long endTime = System.nanoTime();
ClientTraceLog.info(String.format(clientTraceFmt, totalRead, ClientTraceLog.debug(String.format(clientTraceFmt, totalRead,
initialOffset, endTime - startTime)); initialOffset, endTime - startTime));
} }
close(); close();


@ -708,7 +708,7 @@ class DataXceiver extends Receiver implements Runnable {
if (blockReceiver != null) { if (blockReceiver != null) {
String mirrorAddr = (mirrorSock == null) ? null : mirrorNode; String mirrorAddr = (mirrorSock == null) ? null : mirrorNode;
blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut,
mirrorAddr, null, targets); mirrorAddr, null, targets, false);
// send close-ack for transfer-RBW/Finalized // send close-ack for transfer-RBW/Finalized
if (isTransfer) { if (isTransfer) {
@ -983,7 +983,7 @@ class DataXceiver extends Receiver implements Runnable {
String errMsg = null; String errMsg = null;
BlockReceiver blockReceiver = null; BlockReceiver blockReceiver = null;
DataInputStream proxyReply = null; DataInputStream proxyReply = null;
DataOutputStream replyOut = new DataOutputStream(getOutputStream());
try { try {
// get the output stream to the proxy // get the output stream to the proxy
final String dnAddr = proxySource.getXferAddr(connectToDnViaHostname); final String dnAddr = proxySource.getXferAddr(connectToDnViaHostname);
@ -1040,8 +1040,8 @@ class DataXceiver extends Receiver implements Runnable {
CachingStrategy.newDropBehind()); CachingStrategy.newDropBehind());
// receive a block // receive a block
blockReceiver.receiveBlock(null, null, null, null, blockReceiver.receiveBlock(null, null, replyOut, null,
dataXceiverServer.balanceThrottler, null); dataXceiverServer.balanceThrottler, null, true);
// notify name node // notify name node
datanode.notifyNamenodeReceivedBlock( datanode.notifyNamenodeReceivedBlock(
@ -1076,6 +1076,7 @@ class DataXceiver extends Receiver implements Runnable {
IOUtils.closeStream(proxyOut); IOUtils.closeStream(proxyOut);
IOUtils.closeStream(blockReceiver); IOUtils.closeStream(blockReceiver);
IOUtils.closeStream(proxyReply); IOUtils.closeStream(proxyReply);
IOUtils.closeStream(replyOut);
} }
//update metrics //update metrics


@ -4530,7 +4530,30 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
throw new IOException("Block (=" + lastblock + ") not found"); throw new IOException("Block (=" + lastblock + ") not found");
} }
} }
INodeFile iFile = ((INode)storedBlock.getBlockCollection()).asFile(); //
// The implementation of the delete operation (see @deleteInternal method)
// first removes the file paths from the namespace and delays the removal
// of blocks to a later time for better performance. When
// commitBlockSynchronization (this method) is called in between, the
// blockCollection of storedBlock could have been assigned to null by
// the delete operation, throw IOException here instead of NPE; if the
// file path is already removed from namespace by the delete operation,
// throw FileNotFoundException here, so as not to proceed to the end of
// this method to add a CloseOp to the edit log for an already deleted
// file (See HDFS-6825).
//
BlockCollection blockCollection = storedBlock.getBlockCollection();
if (blockCollection == null) {
throw new IOException("The blockCollection of " + storedBlock
+ " is null, likely because the file owning this block was"
+ " deleted and the block removal is delayed");
}
INodeFile iFile = ((INode)blockCollection).asFile();
if (isFileDeleted(iFile)) {
throw new FileNotFoundException("File not found: "
+ iFile.getFullPathName() + ", likely due to delayed block"
+ " removal");
}
if (!iFile.isUnderConstruction() || storedBlock.isComplete()) { if (!iFile.isUnderConstruction() || storedBlock.isComplete()) {
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("Unexpected block (=" + lastblock LOG.debug("Unexpected block (=" + lastblock
@ -6550,9 +6573,28 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
private boolean isFileDeleted(INodeFile file) { private boolean isFileDeleted(INodeFile file) {
// Not in the inodeMap or in the snapshot but marked deleted. // Not in the inodeMap or in the snapshot but marked deleted.
if (dir.getInode(file.getId()) == null || if (dir.getInode(file.getId()) == null) {
file.getParent() == null || (file.isWithSnapshot() && return true;
file.getFileWithSnapshotFeature().isCurrentFileDeleted())) { }
// Walk the path hierarchy to see if an ancestor was deleted by a recursive
// deletion.
INode tmpChild = file;
INodeDirectory tmpParent = file.getParent();
while (true) {
if (tmpParent == null ||
tmpParent.searchChildren(tmpChild.getLocalNameBytes()) < 0) {
return true;
}
if (tmpParent.isRoot()) {
break;
}
tmpChild = tmpParent;
tmpParent = tmpParent.getParent();
}
if (file.isWithSnapshot() &&
file.getFileWithSnapshotFeature().isCurrentFileDeleted()) {
return true; return true;
} }
return false; return false;
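The new isFileDeleted() logic walks from the file up through its ancestors and treats the file as deleted as soon as any link in that chain is broken, which is exactly what a recursive delete of a parent directory produces (HDFS-6825). A simplified, self-contained sketch of that walk; Node stands in for INode/INodeDirectory and the snapshot check is omitted:

import java.util.HashMap;
import java.util.Map;

public class DeletedAncestorSketch {
  static class Node {
    final String name;
    final Node parent;                       // null for the root
    final Map<String, Node> children = new HashMap<>();
    Node(String name, Node parent) {
      this.name = name;
      this.parent = parent;
      if (parent != null) {
        parent.children.put(name, this);
      }
    }
    boolean isRoot() { return parent == null; }
  }

  static boolean isDeleted(Node file) {
    Node child = file;
    Node parent = file.parent;
    while (true) {
      if (parent == null || parent.children.get(child.name) != child) {
        return true;                         // detached somewhere along the path
      }
      if (parent.isRoot()) {
        return false;                        // reached the root with all links intact
      }
      child = parent;
      parent = parent.parent;
    }
  }

  public static void main(String[] args) {
    Node root = new Node("/", null);
    Node dir = new Node("testdir", root);
    Node file = new Node("test-file", dir);
    System.out.println(isDeleted(file));     // false: still attached
    root.children.remove("testdir");         // simulate a recursive delete of /testdir
    System.out.println(isDeleted(file));     // true: an ancestor was detached
  }
}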


@ -157,7 +157,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
return quota; return quota;
} }
private int searchChildren(byte[] name) { int searchChildren(byte[] name) {
return children == null? -1: Collections.binarySearch(children, name); return children == null? -1: Collections.binarySearch(children, name);
} }


@ -207,6 +207,7 @@ enum Status {
OOB_RESERVED1 = 9; // Reserved OOB_RESERVED1 = 9; // Reserved
OOB_RESERVED2 = 10; // Reserved OOB_RESERVED2 = 10; // Reserved
OOB_RESERVED3 = 11; // Reserved OOB_RESERVED3 = 11; // Reserved
IN_PROGRESS = 12;
} }
message PipelineAckProto { message PipelineAckProto {


@ -45,6 +45,9 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
@ -1368,4 +1371,33 @@ public class DFSTestUtil {
provider.createKey(keyName, options); provider.createKey(keyName, options);
provider.flush(); provider.flush();
} }
/**
* @return the node which is expected to run the recovery of the
* given block, which is known to be under construction inside the
* given NameNode.
*/
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
ExtendedBlock blk) {
BlockManager bm0 = nn.getNamesystem().getBlockManager();
BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
assertTrue("Block " + blk + " should be under construction, " +
"got: " + storedBlock,
storedBlock instanceof BlockInfoUnderConstruction);
BlockInfoUnderConstruction ucBlock =
(BlockInfoUnderConstruction)storedBlock;
// We expect that the replica with the most recent heartbeat will be
// the one to be in charge of the synchronization / recovery protocol.
final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
DatanodeStorageInfo expectedPrimary = storages[0];
long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor().getLastUpdate();
for (int i = 1; i < storages.length; i++) {
final long lastUpdate = storages[i].getDatanodeDescriptor().getLastUpdate();
if (lastUpdate > mostRecentLastUpdate) {
expectedPrimary = storages[i];
mostRecentLastUpdate = lastUpdate;
}
}
return expectedPrimary.getDatanodeDescriptor();
}
} }


@ -17,6 +17,7 @@
*/ */
package org.apache.hadoop.hdfs.server.blockmanagement; package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNotNull;
@ -59,17 +60,24 @@ public class TestBlockInfo {
@Test @Test
public void testReplaceStorageIfDifferetnOneAlreadyExistedFromSameDataNode() throws Exception { public void testReplaceStorage() throws Exception {
BlockInfo blockInfo = new BlockInfo(3);
// Create two dummy storages.
final DatanodeStorageInfo storage1 = DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1"); final DatanodeStorageInfo storage1 = DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2")); final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
final int NUM_BLOCKS = 10;
BlockInfo[] blockInfos = new BlockInfo[NUM_BLOCKS];
blockInfo.addStorage(storage1); // Create a few dummy blocks and add them to the first storage.
boolean added = blockInfo.addStorage(storage2); for (int i = 0; i < NUM_BLOCKS; ++i) {
blockInfos[i] = new BlockInfo(3);
storage1.addBlock(blockInfos[i]);
}
Assert.assertFalse(added); // Try to move one of the blocks to a different storage.
Assert.assertEquals(storage2, blockInfo.getStorageInfo(0)); boolean added = storage2.addBlock(blockInfos[NUM_BLOCKS/2]);
Assert.assertThat(added, is(false));
Assert.assertThat(blockInfos[NUM_BLOCKS/2].getStorageInfo(0), is(storage2));
} }
@Test @Test


@ -272,8 +272,10 @@ public class TestBlockReplacement {
// receiveResponse // receiveResponse
DataInputStream reply = new DataInputStream(sock.getInputStream()); DataInputStream reply = new DataInputStream(sock.getInputStream());
BlockOpResponseProto proto = BlockOpResponseProto proto = BlockOpResponseProto.parseDelimitedFrom(reply);
BlockOpResponseProto.parseDelimitedFrom(reply); while (proto.getStatus() == Status.IN_PROGRESS) {
proto = BlockOpResponseProto.parseDelimitedFrom(reply);
}
return proto.getStatus() == Status.SUCCESS; return proto.getStatus() == Status.SUCCESS;
} }


@ -50,6 +50,17 @@ public class TestCommitBlockSynchronization {
FSNamesystem namesystem = new FSNamesystem(conf, image); FSNamesystem namesystem = new FSNamesystem(conf, image);
namesystem.setImageLoaded(true); namesystem.setImageLoaded(true);
// set file's parent as root and put the file to inodeMap, so
// FSNamesystem's isFileDeleted() method will return false on this file
if (file.getParent() == null) {
INodeDirectory parent = mock(INodeDirectory.class);
parent.setLocalName(new byte[0]);
parent.addChild(file);
file.setParent(parent);
}
namesystem.dir.getINodeMap().put(file);
FSNamesystem namesystemSpy = spy(namesystem); FSNamesystem namesystemSpy = spy(namesystem);
BlockInfoUnderConstruction blockInfo = new BlockInfoUnderConstruction( BlockInfoUnderConstruction blockInfo = new BlockInfoUnderConstruction(
block, 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets); block, 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);


@ -18,7 +18,9 @@
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import java.io.FileNotFoundException; import java.io.FileNotFoundException;
import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Map;
import java.util.Set; import java.util.Set;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
@ -27,19 +29,30 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.StorageType; import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.Node; import org.apache.hadoop.net.Node;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
import org.junit.Assert; import org.junit.Assert;
import org.junit.Test; import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox; import org.mockito.internal.util.reflection.Whitebox;
@ -49,6 +62,7 @@ import org.mockito.internal.util.reflection.Whitebox;
* whole duration. * whole duration.
*/ */
public class TestDeleteRace { public class TestDeleteRace {
private static final int BLOCK_SIZE = 4096;
private static final Log LOG = LogFactory.getLog(TestDeleteRace.class); private static final Log LOG = LogFactory.getLog(TestDeleteRace.class);
private static final Configuration conf = new HdfsConfiguration(); private static final Configuration conf = new HdfsConfiguration();
private MiniDFSCluster cluster; private MiniDFSCluster cluster;
@ -201,7 +215,126 @@ public class TestDeleteRace {
cluster.shutdown(); cluster.shutdown();
} }
} }
}
/**
* Test race between delete operation and commitBlockSynchronization method.
* See HDFS-6825.
* @param hasSnapshot
* @throws Exception
*/
private void testDeleteAndCommitBlockSynchronizationRace(boolean hasSnapshot)
throws Exception {
LOG.info("Start testing, hasSnapshot: " + hasSnapshot);
final String testPaths[] = {
"/test-file",
"/testdir/testdir1/test-file"
};
final Path rootPath = new Path("/");
final Configuration conf = new Configuration();
// Disable permissions so that another user can recover the lease.
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
FSDataOutputStream stm = null;
Map<DataNode, DatanodeProtocolClientSideTranslatorPB> dnMap =
new HashMap<DataNode, DatanodeProtocolClientSideTranslatorPB>();
try {
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(3)
.build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
int stId = 0;
for (String testPath : testPaths) {
LOG.info("test on " + testPath + " snapshot: " + hasSnapshot);
Path fPath = new Path(testPath);
// find the topmost non-root ancestor
Path grandestNonRootParent = fPath;
while (!grandestNonRootParent.getParent().equals(rootPath)) {
grandestNonRootParent = grandestNonRootParent.getParent();
}
stm = fs.create(fPath);
LOG.info("test on " + testPath + " created " + fPath);
// write a half block
AppendTestUtil.write(stm, 0, BLOCK_SIZE / 2);
stm.hflush();
if (hasSnapshot) {
SnapshotTestHelper.createSnapshot(fs, rootPath,
"st" + String.valueOf(stId));
++stId;
}
// Look into the block manager on the active node for the block
// under construction.
NameNode nn = cluster.getNameNode();
ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, fPath);
DatanodeDescriptor expectedPrimary =
DFSTestUtil.getExpectedPrimaryNode(nn, blk);
LOG.info("Expecting block recovery to be triggered on DN " +
expectedPrimary);
// Find the corresponding DN daemon, and spy on its connection to the
// active.
DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort());
DatanodeProtocolClientSideTranslatorPB nnSpy = dnMap.get(primaryDN);
if (nnSpy == null) {
nnSpy = DataNodeTestUtils.spyOnBposToNN(primaryDN, nn);
dnMap.put(primaryDN, nnSpy);
}
// Delay the commitBlockSynchronization call
DelayAnswer delayer = new DelayAnswer(LOG);
Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(
Mockito.eq(blk),
Mockito.anyInt(), // new genstamp
Mockito.anyLong(), // new length
Mockito.eq(true), // close file
Mockito.eq(false), // delete block
(DatanodeID[]) Mockito.anyObject(), // new targets
(String[]) Mockito.anyObject()); // new target storages
fs.recoverLease(fPath);
LOG.info("Waiting for commitBlockSynchronization call from primary");
delayer.waitForCall();
LOG.info("Deleting recursively " + grandestNonRootParent);
fs.delete(grandestNonRootParent, true);
delayer.proceed();
LOG.info("Now wait for result");
delayer.waitForResult();
Throwable t = delayer.getThrown();
if (t != null) {
LOG.info("Result exception (snapshot: " + hasSnapshot + "): " + t);
}
} // end of loop each fPath
LOG.info("Now check we can restart");
cluster.restartNameNodes();
LOG.info("Restart finished");
} finally {
if (stm != null) {
IOUtils.closeStream(stm);
}
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test(timeout=600000)
public void testDeleteAndCommitBlockSynchonizationRaceNoSnapshot()
throws Exception {
testDeleteAndCommitBlockSynchronizationRace(false);
}
@Test(timeout=600000)
public void testDeleteAndCommitBlockSynchronizationRaceHasSnapshot()
throws Exception {
testDeleteAndCommitBlockSynchronizationRace(true);
} }
} }


@ -356,7 +356,8 @@ public class TestPipelinesFailover {
NameNode nn0 = cluster.getNameNode(0); NameNode nn0 = cluster.getNameNode(0);
ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, TEST_PATH); ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
DatanodeDescriptor expectedPrimary = getExpectedPrimaryNode(nn0, blk); DatanodeDescriptor expectedPrimary =
DFSTestUtil.getExpectedPrimaryNode(nn0, blk);
LOG.info("Expecting block recovery to be triggered on DN " + LOG.info("Expecting block recovery to be triggered on DN " +
expectedPrimary); expectedPrimary);
@ -506,37 +507,6 @@ public class TestPipelinesFailover {
} }
} }
/**
* @return the node which is expected to run the recovery of the
* given block, which is known to be under construction inside the
* given NameNOde.
*/
private DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
ExtendedBlock blk) {
BlockManager bm0 = nn.getNamesystem().getBlockManager();
BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
assertTrue("Block " + blk + " should be under construction, " +
"got: " + storedBlock,
storedBlock instanceof BlockInfoUnderConstruction);
BlockInfoUnderConstruction ucBlock =
(BlockInfoUnderConstruction)storedBlock;
// We expect that the replica with the most recent heart beat will be
// the one to be in charge of the synchronization / recovery protocol.
final DatanodeStorageInfo[] storages = ucBlock.getExpectedStorageLocations();
DatanodeStorageInfo expectedPrimary = storages[0];
long mostRecentLastUpdate = expectedPrimary.getDatanodeDescriptor().getLastUpdate();
for (int i = 1; i < storages.length; i++) {
final long lastUpdate = storages[i].getDatanodeDescriptor().getLastUpdate();
if (lastUpdate > mostRecentLastUpdate) {
expectedPrimary = storages[i];
mostRecentLastUpdate = lastUpdate;
}
}
return expectedPrimary.getDatanodeDescriptor();
}
private DistributedFileSystem createFsAsOtherUser( private DistributedFileSystem createFsAsOtherUser(
final MiniDFSCluster cluster, final Configuration conf) final MiniDFSCluster cluster, final Configuration conf)
throws IOException, InterruptedException { throws IOException, InterruptedException {


@ -8655,6 +8655,50 @@
</comparators> </comparators>
</test> </test>
<test> <!-- TESTED -->
<description>count: file using -h option</description>
<test-commands>
<command>-fs NAMENODE -mkdir -p dir</command> <!-- make sure user home dir exists -->
<command>-fs NAMENODE -put CLITEST_DATA/data15bytes file1</command>
<command>-fs NAMENODE -put CLITEST_DATA/data1k file2</command>
<command>-fs NAMENODE -count -h file1 file2</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm file1 file2</command>
</cleanup-commands>
<comparators>
<comparator>
<type>RegexpComparator</type>
<expected-output>( |\t)*0( |\t)*1( |\t)*15 file1</expected-output>
</comparator>
</comparators>
<comparators>
<comparator>
<type>RegexpComparator</type>
<expected-output>( |\t)*0( |\t)*1( |\t)*1\.0 K file2</expected-output>
</comparator>
</comparators>
</test>
<test> <!-- TESTED -->
<description>count: directory using -q and -h options</description>
<test-commands>
<command>-fs NAMENODE -mkdir /dir1</command>
<dfs-admin-command>-fs NAMENODE -setQuota 10 /dir1 </dfs-admin-command>
<dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m /dir1 </dfs-admin-command>
<command>-fs NAMENODE -count -q -h /dir1</command>
</test-commands>
<cleanup-commands>
<command>-fs NAMENODE -rm -r /dir1</command>
</cleanup-commands>
<comparators>
<comparator>
<type>RegexpComparator</type>
<expected-output>( |\t)*10( |\t)*9( |\t)*1 M( |\t)*1 M( |\t)*1( |\t)*0( |\t)*0 /dir1</expected-output>
</comparator>
</comparators>
</test>
<!-- Tests for chmod --> <!-- Tests for chmod -->
<test> <!-- TESTED --> <test> <!-- TESTED -->
<description>chmod: change permission(octal mode) of file in absolute path</description> <description>chmod: change permission(octal mode) of file in absolute path</description>


@ -165,6 +165,15 @@ Release 2.6.0 - UNRELEASED
MAPREDUCE-5963. ShuffleHandler DB schema should be versioned with MAPREDUCE-5963. ShuffleHandler DB schema should be versioned with
compatible/incompatible changes (Junping Du via jlowe) compatible/incompatible changes (Junping Du via jlowe)
MAPREDUCE-883. harchive: Document how to unarchive (Akira AJISAKA and
Koji Noguchi via aw)
MAPREDUCE-4791. Javadoc for KeyValueTextInputFormat should include default
separator and how to change it (Akira AJISAKA via aw)
MAPREDUCE-5906. Inconsistent configuration in property
"mapreduce.reduce.shuffle.input.buffer.percent" (Akira AJISAKA via aw)
  OPTIMIZATIONS
  BUG FIXES
@ -187,6 +196,43 @@ Release 2.6.0 - UNRELEASED
    MAPREDUCE-6021. MR AM should have working directory in LD_LIBRARY_PATH
    (jlowe)
MAPREDUCE-6010. HistoryServerFileSystemStateStore fails to update tokens
(jlowe)
MAPREDUCE-5878. some standard JDK APIs are not part of system classes
defaults (Sangjin Lee via jlowe)
MAPREDUCE-5944. Remove MRv1 commands from CommandsManual.apt.vm
(Akira AJISAKA via aw)
MAPREDUCE-5943. Separate mapred commands from CommandManual.apt.vm
(Akira AJISAKA via aw)
MAPREDUCE-5363. Fix doc and spelling for TaskCompletionEvent#getTaskStatus
and getStatus (Akira AJISAKA via aw)
MAPREDUCE-5595. Typo in MergeManagerImpl.java (Akira AJISAKA via aw)
MAPREDUCE-5597. Missing alternatives in javadocs for deprecated constructors
in mapreduce.Job (Akira AJISAKA via aw)
MAPREDUCE-5950. incorrect description in distcp2 document (Akira AJISAKA
via aw)
MAPREDUCE-5998. CompositeInputFormat javadoc is broken (Akira AJISAKA via
aw)
MAPREDUCE-5999. Fix dead link in InputFormat javadoc (Akira AJISAKA via aw)
MAPREDUCE-6032. Made MR jobs write job history files on the default FS when
the current context's FS is different. (Benjamin Zhitomirsky via zjshen)
MAPREDUCE-6024. Shortened the time when Fetcher is stuck in retrying before
concluding the failure by configuration. (Yunjiong Zhao via zjshen)
MAPREDUCE-6036. TestJobEndNotifier fails intermittently in branch-2 (chang
li via jlowe)
Release 2.5.0 - UNRELEASED
  INCOMPATIBLE CHANGES
@ -268,6 +314,9 @@ Release 2.5.0 - UNRELEASED
  BUG FIXES
    MAPREDUCE-6033. Updated access check for displaying job information
    (Yu Gao via Eric Yang)
    MAPREDUCE-5759. Remove unnecessary conf load in Limits (Sandy Ryza)
    MAPREDUCE-5014. Extend Distcp to accept a custom CopyListing.


@ -73,6 +73,12 @@
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-mapreduce-client-shuffle</artifactId>
    </dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
  </dependencies>
  <build>


@ -28,13 +28,13 @@ import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
@ -74,7 +74,9 @@ public class JobHistoryEventHandler extends AbstractService
  private int eventCounter;
  //TODO Does the FS object need to be different ?
  // Those file systems may differ from the job configuration
  // See org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils
  // #ensurePathInDefaultFileSystem
  private FileSystem stagingDirFS; // log Dir FileSystem
  private FileSystem doneDirFS; // done Dir FileSystem
@ -141,7 +143,7 @@ public class JobHistoryEventHandler extends AbstractService
    //Check for the existence of the history staging dir. Maybe create it.
    try {
      stagingDirPath =
          FileSystem.get(conf).makeQualified(new Path(stagingDirStr));
          FileContext.getFileContext(conf).makeQualified(new Path(stagingDirStr));
      stagingDirFS = FileSystem.get(stagingDirPath.toUri(), conf);
      mkdir(stagingDirFS, stagingDirPath, new FsPermission(
          JobHistoryUtils.HISTORY_STAGING_DIR_PERMISSIONS));
@ -154,7 +156,7 @@ public class JobHistoryEventHandler extends AbstractService
    //Check for the existence of intermediate done dir.
    Path doneDirPath = null;
    try {
      doneDirPath = FileSystem.get(conf).makeQualified(new Path(doneDirStr));
      doneDirPath = FileContext.getFileContext(conf).makeQualified(new Path(doneDirStr));
      doneDirFS = FileSystem.get(doneDirPath.toUri(), conf);
      // This directory will be in a common location, or this may be a cluster
      // meant for a single user. Creating based on the conf. Should ideally be
@ -194,7 +196,7 @@ public class JobHistoryEventHandler extends AbstractService
    //Check/create user directory under intermediate done dir.
    try {
      doneDirPrefixPath =
          FileSystem.get(conf).makeQualified(new Path(userDoneDirStr));
          FileContext.getFileContext(conf).makeQualified(new Path(userDoneDirStr));
      mkdir(doneDirFS, doneDirPrefixPath, new FsPermission(
          JobHistoryUtils.HISTORY_INTERMEDIATE_USER_DIR_PERMISSIONS));
    } catch (IOException e) {


@ -148,10 +148,10 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
  private static final Log LOG = LogFactory.getLog(JobImpl.class);
  //The maximum fraction of fetch failures allowed for a map
  private static final double MAX_ALLOWED_FETCH_FAILURES_FRACTION = 0.5;
  private float maxAllowedFetchFailuresFraction;
  //Maximum no. of fetch-failure notifications after which map task is failed
  private static final int MAX_FETCH_FAILURES_NOTIFICATIONS = 3;
  private int maxFetchFailuresNotifications;
  public static final String JOB_KILLED_DIAG =
      "Job received Kill while in RUNNING state.";
@ -704,6 +704,13 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
    if(forcedDiagnostic != null) {
      this.diagnostics.add(forcedDiagnostic);
    }
this.maxAllowedFetchFailuresFraction = conf.getFloat(
MRJobConfig.MAX_ALLOWED_FETCH_FAILURES_FRACTION,
MRJobConfig.DEFAULT_MAX_ALLOWED_FETCH_FAILURES_FRACTION);
this.maxFetchFailuresNotifications = conf.getInt(
MRJobConfig.MAX_FETCH_FAILURES_NOTIFICATIONS,
MRJobConfig.DEFAULT_MAX_FETCH_FAILURES_NOTIFICATIONS);
  }
  protected StateMachine<JobStateInternal, JobEventType, JobEvent> getStateMachine() {
@ -730,7 +737,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
    if (jobACL == null) {
      return true;
    }
    return aclsManager.checkAccess(callerUGI, jobOperation, username, jobACL);
    return aclsManager.checkAccess(callerUGI, jobOperation, userName, jobACL);
  }
  @Override
@ -1900,9 +1907,8 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
      float failureRate = shufflingReduceTasks == 0 ? 1.0f :
          (float) fetchFailures / shufflingReduceTasks;
      // declare faulty if fetch-failures >= max-allowed-failures
      boolean isMapFaulty =
          (failureRate >= MAX_ALLOWED_FETCH_FAILURES_FRACTION);
      if (fetchFailures >= MAX_FETCH_FAILURES_NOTIFICATIONS && isMapFaulty) {
      if (fetchFailures >= job.getMaxFetchFailuresNotifications()
          && failureRate >= job.getMaxAllowedFetchFailuresFraction()) {
        LOG.info("Too many fetch-failures for output of task attempt: " +
            mapId + " ... raising fetch failure to map");
        job.eventHandler.handle(new TaskAttemptEvent(mapId,
@ -2185,4 +2191,12 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
    jobConf.addResource(fc.open(confPath), confPath.toString());
    return jobConf;
  }
public float getMaxAllowedFetchFailuresFraction() {
return maxAllowedFetchFailuresFraction;
}
public int getMaxFetchFailuresNotifications() {
return maxFetchFailuresNotifications;
}
}
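The hunk above replaces the hard-coded fetch-failure constants with per-job configuration. A minimal sketch of the resulting decision rule, written here as a standalone helper (the class and method names and their parameters are mine, not JobImpl's):

    class FetchFailurePolicy {
      // A map's output is declared faulty only when both the absolute number of
      // fetch-failure notifications and the failure fraction reach their
      // (now configurable) thresholds.
      static boolean isMapOutputFaulty(int fetchFailures, int shufflingReduceTasks,
          int maxFetchFailuresNotifications, float maxAllowedFetchFailuresFraction) {
        float failureRate = shufflingReduceTasks == 0
            ? 1.0f : (float) fetchFailures / shufflingReduceTasks;
        return fetchFailures >= maxFetchFailuresNotifications
            && failureRate >= maxAllowedFetchFailuresFraction;
      }
    }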


@ -28,6 +28,7 @@ import static org.mockito.Mockito.when;
import static org.mockito.Mockito.never;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import org.junit.Assert;
@ -35,8 +36,13 @@ import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRJobConfig;
@ -52,6 +58,10 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.junit.After;
import org.junit.AfterClass;
import static org.junit.Assert.assertFalse;
import org.junit.BeforeClass;
import org.junit.Test;
import org.mockito.Mockito;
@ -60,6 +70,26 @@ public class TestJobHistoryEventHandler {
  private static final Log LOG = LogFactory
      .getLog(TestJobHistoryEventHandler.class);
private static MiniDFSCluster dfsCluster = null;
private static String coreSitePath;
@BeforeClass
public static void setUpClass() throws Exception {
coreSitePath = "." + File.separator + "target" + File.separator +
"test-classes" + File.separator + "core-site.xml";
Configuration conf = new HdfsConfiguration();
dfsCluster = new MiniDFSCluster.Builder(conf).build();
}
@AfterClass
public static void cleanUpClass() throws Exception {
dfsCluster.shutdown();
}
@After
public void cleanTest() throws Exception {
new File(coreSitePath).delete();
}
  @Test (timeout=50000)
  public void testFirstFlushOnCompletionEvent() throws Exception {
@ -325,6 +355,50 @@ public class TestJobHistoryEventHandler {
    }
  }
@Test (timeout=50000)
public void testDefaultFsIsUsedForHistory() throws Exception {
// Create default configuration pointing to the minicluster
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
dfsCluster.getURI().toString());
FileOutputStream os = new FileOutputStream(coreSitePath);
conf.writeXml(os);
os.close();
// simulate execution under a non-default namenode
conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
"file:///");
TestParams t = new TestParams();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, t.dfsWorkDir);
JHEvenHandlerForTest realJheh =
new JHEvenHandlerForTest(t.mockAppContext, 0, false);
JHEvenHandlerForTest jheh = spy(realJheh);
jheh.init(conf);
try {
jheh.start();
handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent(
t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000)));
handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent(
TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(),
new Counters(), new Counters())));
// If we got here then event handler worked but we don't know with which
// file system. Now we check that history stuff was written to minicluster
FileSystem dfsFileSystem = dfsCluster.getFileSystem();
assertTrue("Minicluster contains some history files",
dfsFileSystem.globStatus(new Path(t.dfsWorkDir + "/*")).length != 0);
FileSystem localFileSystem = LocalFileSystem.get(conf);
assertFalse("No history directory on non-default file system",
localFileSystem.exists(new Path(t.dfsWorkDir)));
} finally {
jheh.stop();
}
}
  private void queueEvent(JHEvenHandlerForTest jheh, JobHistoryEvent event) {
    jheh.handle(event);
  }
@ -372,6 +446,7 @@ public class TestJobHistoryEventHandler {
  private class TestParams {
    boolean isLastAMRetry;
    String workDir = setupTestWorkDir();
    String dfsWorkDir = "/" + this.getClass().getCanonicalName();
    ApplicationId appId = ApplicationId.newInstance(200, 1);
    ApplicationAttemptId appAttemptId =
        ApplicationAttemptId.newInstance(appId, 1);
@ -451,10 +526,16 @@ public class TestJobHistoryEventHandler {
class JHEvenHandlerForTest extends JobHistoryEventHandler {
  private EventWriter eventWriter;
  private boolean mockHistoryProcessing = true;
  public JHEvenHandlerForTest(AppContext context, int startCount) {
    super(context, startCount);
  }
public JHEvenHandlerForTest(AppContext context, int startCount, boolean mockHistoryProcessing) {
super(context, startCount);
this.mockHistoryProcessing = mockHistoryProcessing;
}
  @Override
  protected void serviceStart() {
  }
@ -462,7 +543,12 @@ class JHEvenHandlerForTest extends JobHistoryEventHandler {
  @Override
  protected EventWriter createEventWriter(Path historyFilePath)
      throws IOException {
    this.eventWriter = mock(EventWriter.class);
    if (mockHistoryProcessing) {
      this.eventWriter = mock(EventWriter.class);
    }
    else {
      this.eventWriter = super.createEventWriter(historyFilePath);
    }
    return this.eventWriter;
  }
@ -475,8 +561,13 @@ class JHEvenHandlerForTest extends JobHistoryEventHandler {
  }
  @Override
  protected void processDoneFiles(JobId jobId){
  protected void processDoneFiles(JobId jobId) throws IOException {
    // do nothing
    if (!mockHistoryProcessing) {
      super.processDoneFiles(jobId);
    }
    else {
      // do nothing
    }
  }
}


@ -270,7 +270,8 @@ public class TestJobEndNotifier extends JobEndNotifier {
    app.waitForInternalState(job, JobStateInternal.REBOOT);
    // Now shutdown. User should see FAILED state.
    // Unregistration fails: isLastAMRetry is recalculated, this is
    app.shutDownJob();
    ///reboot will stop service internally, we don't need to shutdown twice
    app.waitForServiceToStop(10000);
    Assert.assertFalse(app.isLastAMRetry());
    // Since it's not last retry, JobEndServlet didn't called
    Assert.assertEquals(0, JobEndServlet.calledTimes);


@ -536,7 +536,7 @@ public class TestJobImpl {
    // Verify access
    JobImpl job1 = new JobImpl(jobId, null, conf1, null, null, null, null, null,
        null, null, null, true, null, 0, null, null, null, null);
        null, null, null, true, user1, 0, null, null, null, null);
    Assert.assertTrue(job1.checkAccess(ugi1, JobACL.VIEW_JOB));
    Assert.assertFalse(job1.checkAccess(ugi2, JobACL.VIEW_JOB));
@ -547,7 +547,7 @@ public class TestJobImpl {
    // Verify access
    JobImpl job2 = new JobImpl(jobId, null, conf2, null, null, null, null, null,
        null, null, null, true, null, 0, null, null, null, null);
        null, null, null, true, user1, 0, null, null, null, null);
    Assert.assertTrue(job2.checkAccess(ugi1, JobACL.VIEW_JOB));
    Assert.assertTrue(job2.checkAccess(ugi2, JobACL.VIEW_JOB));
@ -558,7 +558,7 @@ public class TestJobImpl {
    // Verify access
    JobImpl job3 = new JobImpl(jobId, null, conf3, null, null, null, null, null,
        null, null, null, true, null, 0, null, null, null, null);
        null, null, null, true, user1, 0, null, null, null, null);
    Assert.assertTrue(job3.checkAccess(ugi1, JobACL.VIEW_JOB));
    Assert.assertTrue(job3.checkAccess(ugi2, JobACL.VIEW_JOB));
@ -569,7 +569,7 @@ public class TestJobImpl {
    // Verify access
    JobImpl job4 = new JobImpl(jobId, null, conf4, null, null, null, null, null,
        null, null, null, true, null, 0, null, null, null, null);
        null, null, null, true, user1, 0, null, null, null, null);
    Assert.assertTrue(job4.checkAccess(ugi1, JobACL.VIEW_JOB));
    Assert.assertTrue(job4.checkAccess(ugi2, JobACL.VIEW_JOB));
@ -580,7 +580,7 @@ public class TestJobImpl {
    // Verify access
    JobImpl job5 = new JobImpl(jobId, null, conf5, null, null, null, null, null,
        null, null, null, true, null, 0, null, null, null, null);
        null, null, null, true, user1, 0, null, null, null, null);
    Assert.assertTrue(job5.checkAccess(ugi1, null));
    Assert.assertTrue(job5.checkAccess(ugi2, null));
  }


@ -22,20 +22,24 @@ import java.io.File;
import java.io.IOException;
import java.util.Calendar;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRJobConfig;
@ -117,6 +121,7 @@ public class JobHistoryUtils {
  public static final String TIMESTAMP_DIR_REGEX = "\\d{4}" + "\\" + Path.SEPARATOR + "\\d{2}" + "\\" + Path.SEPARATOR + "\\d{2}";
  public static final Pattern TIMESTAMP_DIR_PATTERN = Pattern.compile(TIMESTAMP_DIR_REGEX);
  private static final String TIMESTAMP_DIR_FORMAT = "%04d" + File.separator + "%02d" + File.separator + "%02d";
  private static final Log LOG = LogFactory.getLog(JobHistoryUtils.class);
  private static final PathFilter CONF_FILTER = new PathFilter() {
    @Override
@ -183,7 +188,7 @@ public class JobHistoryUtils {
    Path stagingPath = MRApps.getStagingAreaDir(conf, user);
    Path path = new Path(stagingPath, jobId);
    String logDir = path.toString();
    return logDir;
    return ensurePathInDefaultFileSystem(logDir, conf);
  }
  /**
@ -200,7 +205,7 @@ public class JobHistoryUtils {
          MRJobConfig.DEFAULT_MR_AM_STAGING_DIR)
          + "/history/done_intermediate";
    }
    return doneDirPrefix;
    return ensurePathInDefaultFileSystem(doneDirPrefix, conf);
  }
  /**
@ -216,7 +221,69 @@ public class JobHistoryUtils {
          MRJobConfig.DEFAULT_MR_AM_STAGING_DIR)
          + "/history/done";
    }
    return doneDirPrefix;
    return ensurePathInDefaultFileSystem(doneDirPrefix, conf);
}
/**
* Get default file system URI for the cluster (used to ensure consistency
* of history done/staging locations) over different context
*
* @return Default file context
*/
private static FileContext getDefaultFileContext() {
// If FS_DEFAULT_NAME_KEY was set solely by core-default.xml then we ignore
// ignore it. This prevents defaulting history paths to file system specified
// by core-default.xml which would not make sense in any case. For a test
// case to exploit this functionality it should create core-site.xml
FileContext fc = null;
Configuration defaultConf = new Configuration();
String[] sources;
sources = defaultConf.getPropertySources(
CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
if (sources != null &&
(!Arrays.asList(sources).contains("core-default.xml") ||
sources.length > 1)) {
try {
fc = FileContext.getFileContext(defaultConf);
LOG.info("Default file system [" +
fc.getDefaultFileSystem().getUri() + "]");
} catch (UnsupportedFileSystemException e) {
LOG.error("Unable to create default file context [" +
defaultConf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY) +
"]",
e);
}
}
else {
LOG.info("Default file system is set solely " +
"by core-default.xml therefore - ignoring");
}
return fc;
}
/**
* Ensure that path belongs to cluster's default file system unless
* 1. it is already fully qualified.
* 2. current job configuration uses default file system
* 3. running from a test case without core-site.xml
*
* @param sourcePath source path
* @param conf the job configuration
* @return full qualified path (if necessary) in default file system
*/
private static String ensurePathInDefaultFileSystem(String sourcePath, Configuration conf) {
Path path = new Path(sourcePath);
FileContext fc = getDefaultFileContext();
if (fc == null ||
fc.getDefaultFileSystem().getUri().toString().equals(
conf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "")) ||
path.toUri().getAuthority() != null ||
path.toUri().getScheme()!= null) {
return sourcePath;
}
return fc.makeQualified(path).toString();
  }
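A small, hypothetical illustration of the checks above: ensurePathInDefaultFileSystem only rewrites paths that carry no scheme or authority of their own, so already-qualified history locations pass through untouched. The nn.example.com address and both paths are placeholders.

    import org.apache.hadoop.fs.Path;

    public class QualifyCheck {
      public static void main(String[] args) {
        Path bare = new Path("/tmp/hadoop-yarn/staging/history/done_intermediate");
        Path qualified = new Path("hdfs://nn.example.com:8020/mr-history/done");
        System.out.println(bare.toUri().getScheme());         // null -> would be re-qualified
        System.out.println(qualified.toUri().getScheme());    // hdfs -> returned unchanged
        System.out.println(qualified.toUri().getAuthority()); // nn.example.com:8020
      }
    }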
  /**


@ -50,7 +50,7 @@ import org.apache.hadoop.fs.FileSystem;
 * bytes, of the input files. However, the {@link FileSystem} blocksize of
 * the input files is treated as an upper bound for input splits. A lower bound
 * on the split size can be set via
 * <a href="{@docRoot}/../mapred-default.html#mapreduce.input.fileinputformat.split.minsize">
 * <a href="{@docRoot}/../hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml#mapreduce.input.fileinputformat.split.minsize">
 * mapreduce.input.fileinputformat.split.minsize</a>.</p>
 *
 * <p>Clearly, logical splits based on input-size is insufficient for many


@ -90,8 +90,8 @@ public class TaskCompletionEvent
  }
  /**
   * Returns enum Status.SUCESS or Status.FAILURE.
   * Returns {@link Status}
   * @return task tracker status
   * @return task completion status
   */
  public Status getTaskStatus() {
    return Status.valueOf(super.getStatus().name());


@ -36,7 +36,6 @@ import org.apache.hadoop.mapred.Reporter;
/**
 * An InputFormat capable of performing joins over a set of data sources sorted
 * and partitioned the same way.
 * @see #setFormat
 *
 * A user may define new join types by setting the property
 * <tt>mapred.join.define.&lt;ident&gt;</tt> to a classname. In the expression
@ -44,6 +43,7 @@ import org.apache.hadoop.mapred.Reporter;
 * ComposableRecordReader.
 * <tt>mapred.join.keycomparator</tt> can be a classname used to compare keys
 * in the join.
 * @see #setFormat
 * @see JoinRecordReader
 * @see MultiFilterRecordReader
 */
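To make the relocated @see reference concrete, here is a hedged example of composing a join expression with the old mapred API; the paths, the "inner" join type, and the use of the mapred.join.expr key are assumptions for illustration only:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.KeyValueTextInputFormat;
    import org.apache.hadoop.mapred.join.CompositeInputFormat;

    public class JoinExprExample {
      public static void main(String[] args) {
        JobConf job = new JobConf();
        // inner join of two sources sorted and partitioned the same way
        job.set("mapred.join.expr", CompositeInputFormat.compose(
            "inner", KeyValueTextInputFormat.class,
            new Path("/data/a"), new Path("/data/b")));
        job.setInputFormat(CompositeInputFormat.class);
      }
    }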


@ -52,7 +52,7 @@ import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 * bytes, of the input files. However, the {@link FileSystem} blocksize of
 * the input files is treated as an upper bound for input splits. A lower bound
 * on the split size can be set via
 * <a href="{@docRoot}/../mapred-default.html#mapreduce.input.fileinputformat.split.minsize">
 * <a href="{@docRoot}/../hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml#mapreduce.input.fileinputformat.split.minsize">
 * mapreduce.input.fileinputformat.split.minsize</a>.</p>
 *
 * <p>Clearly, logical splits based on input-size is insufficient for many
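For reference, the lower bound named in the corrected link can also be set through the new-API FileInputFormat helper. This is a sketch with an arbitrary 128 MB value chosen only for illustration, not a recommended setting:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

    public class SplitSizeExample {
      public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "split-size-example");
        // backed by mapreduce.input.fileinputformat.split.minsize
        FileInputFormat.setMinInputSplitSize(job, 128L * 1024 * 1024);
      }
    }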


@ -54,7 +54,7 @@ import org.apache.hadoop.util.StringUtils;
 * <p>Here is an example on how to submit a job:</p>
 * <p><blockquote><pre>
 *     // Create a new Job
 *     Job job = new Job(new Configuration());
 *     Job job = Job.getInstance();
 *     job.setJarByClass(MyJob.class);
 *
 *     // Specify various job-specific parameters
@ -113,16 +113,25 @@ public class Job extends JobContextImpl implements JobContext {
  private long statustime;
  private Cluster cluster;
/**
* @deprecated Use {@link #getInstance()}
*/
  @Deprecated
  public Job() throws IOException {
    this(new Configuration());
  }
/**
* @deprecated Use {@link #getInstance(Configuration)}
*/
  @Deprecated
  public Job(Configuration conf) throws IOException {
    this(new JobConf(conf));
  }
/**
* @deprecated Use {@link #getInstance(Configuration, String)}
*/
  @Deprecated
  public Job(Configuration conf, String jobName) throws IOException {
    this(conf);
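The deprecation notes added above steer callers to the static factories. A minimal sketch of the replacement call, where MyJob is a placeholder driver class and the job name is arbitrary:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public class MyJob {
      public static void main(String[] args) throws Exception {
        // replaces: Job job = new Job(new Configuration(), "my job");
        Job job = Job.getInstance(new Configuration(), "my job");
        job.setJarByClass(MyJob.class);
      }
    }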

Some files were not shown because too many files have changed in this diff.