merge from trunk r1617527

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1617532 13f79535-47bb-0310-9956-ffa450edef68
Commit 74f7be0887 by Charles Lamb, 2014-08-12 17:02:07 +00:00
198 changed files with 10565 additions and 4166 deletions

View File: BUILDING.txt

@@ -210,6 +210,7 @@ Requirements:
* Maven 3.0 or later
* Findbugs 1.3.9 (if running findbugs)
* ProtocolBuffer 2.5.0
* CMake 2.6 or newer
* Windows SDK or Visual Studio 2010 Professional
* Unix command-line tools from GnuWin32 or Cygwin: sh, mkdir, rm, cp, tar, gzip
* zlib headers (if building native code bindings for zlib)

View File: pom.xml (hadoop-auth)

@@ -144,6 +144,15 @@
<artifactId>maven-jar-plugin</artifactId>
<executions>
<execution>
<id>prepare-jar</id>
<phase>prepare-package</phase>
<goals>
<goal>jar</goal>
</goals>
</execution>
<execution>
<id>prepare-test-jar</id>
<phase>prepare-package</phase>
<goals>
<goal>test-jar</goal>
</goals>

View File: AuthenticatedURL.java

@@ -120,32 +120,6 @@ public class AuthenticatedURL {
return token;
}
- /**
- * Return the hashcode for the token.
- *
- * @return the hashcode for the token.
- */
- @Override
- public int hashCode() {
- return (token != null) ? token.hashCode() : 0;
- }
- /**
- * Return if two token instances are equal.
- *
- * @param o the other token instance.
- *
- * @return if this instance and the other instance are equal.
- */
- @Override
- public boolean equals(Object o) {
- boolean eq = false;
- if (o instanceof Token) {
- Token other = (Token) o;
- eq = (token == null && other.token == null) || (token != null && this.token.equals(other.token));
- }
- return eq;
- }
}
private static Class<? extends Authenticator> DEFAULT_AUTHENTICATOR = KerberosAuthenticator.class;
@@ -208,6 +182,16 @@ public class AuthenticatedURL {
this.authenticator.setConnectionConfigurator(connConfigurator);
}
/**
* Returns the {@link Authenticator} instance used by the
* <code>AuthenticatedURL</code>.
*
* @return the {@link Authenticator} instance
*/
protected Authenticator getAuthenticator() {
return authenticator;
}
/**
* Returns an authenticated {@link HttpURLConnection}.
*

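For context, typical client usage of AuthenticatedURL looks roughly like the following sketch (the endpoint URL is hypothetical); the new protected getAuthenticator() accessor is only visible to sub-classes:

URL url = new URL("http://host:8080/service"); // hypothetical endpoint
AuthenticatedURL.Token token = new AuthenticatedURL.Token();
HttpURLConnection conn = new AuthenticatedURL().openConnection(url, token);
// the token is populated on the first authenticated request and can be
// reused on subsequent openConnection() calls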
View File: AuthenticationFilter.java

@@ -127,6 +127,7 @@ public class AuthenticationFilter implements Filter {
public static final String SIGNATURE_PROVIDER_ATTRIBUTE =
"org.apache.hadoop.security.authentication.util.SignerSecretProvider";
private Properties config;
private Signer signer;
private SignerSecretProvider secretProvider;
private AuthenticationHandler authHandler;
@@ -150,7 +151,7 @@ public class AuthenticationFilter implements Filter {
public void init(FilterConfig filterConfig) throws ServletException {
String configPrefix = filterConfig.getInitParameter(CONFIG_PREFIX);
configPrefix = (configPrefix != null) ? configPrefix + "." : "";
- Properties config = getConfiguration(configPrefix, filterConfig);
+ config = getConfiguration(configPrefix, filterConfig);
String authHandlerName = config.getProperty(AUTH_TYPE, null);
String authHandlerClassName;
if (authHandlerName == null) {
@@ -224,6 +225,17 @@ public class AuthenticationFilter implements Filter {
cookiePath = config.getProperty(COOKIE_PATH, null);
}
/**
* Returns the configuration properties of the {@link AuthenticationFilter}
* without the prefix. The returned properties are the same as those returned
* by the {@link #getConfiguration(String, FilterConfig)} method.
*
* @return the configuration properties.
*/
protected Properties getConfiguration() {
return config;
}
/**
* Returns the authentication handler being used.
*
@@ -457,7 +469,7 @@ public class AuthenticationFilter implements Filter {
createAuthCookie(httpResponse, signedToken, getCookieDomain(),
getCookiePath(), token.getExpires(), isHttps);
}
- filterChain.doFilter(httpRequest, httpResponse);
+ doFilter(filterChain, httpRequest, httpResponse);
}
} else {
unauthorizedResponse = false;
@@ -481,6 +493,15 @@ public class AuthenticationFilter implements Filter {
}
}
/**
* Delegates the call to the servlet filter chain. Subclasses may override
* this method to perform pre- and post-processing tasks.
*/
protected void doFilter(FilterChain filterChain, HttpServletRequest request,
HttpServletResponse response) throws IOException, ServletException {
filterChain.doFilter(request, response);
}
/**
* Creates the Hadoop authentication HTTP cookie.
*

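A sub-class can use the new doFilter hook to wrap the chain with pre/post processing; a minimal sketch (the class name is hypothetical):

public class AuditingAuthenticationFilter extends AuthenticationFilter {
  @Override
  protected void doFilter(FilterChain filterChain, HttpServletRequest request,
      HttpServletResponse response) throws IOException, ServletException {
    long start = System.currentTimeMillis(); // pre task
    try {
      filterChain.doFilter(request, response); // delegate to the chain
    } finally {
      long elapsedMs = System.currentTimeMillis() - start; // post task,
      // e.g. record how long the downstream chain took
    }
  }
}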
View File: KerberosAuthenticationHandler.java

@@ -142,11 +142,30 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
*/
public static final String NAME_RULES = TYPE + ".name.rules";
private String type;
private String keytab;
private GSSManager gssManager;
private Subject serverSubject = new Subject();
private List<LoginContext> loginContexts = new ArrayList<LoginContext>();
/**
* Creates a Kerberos SPNEGO authentication handler with the default
* auth-token type, <code>kerberos</code>.
*/
public KerberosAuthenticationHandler() {
this(TYPE);
}
/**
* Creates a Kerberos SPNEGO authentication handler with a custom auth-token
* type.
*
* @param type auth-token type.
*/
public KerberosAuthenticationHandler(String type) {
this.type = type;
}
/**
* Initializes the authentication handler instance.
* <p/>
@@ -249,7 +268,7 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
*/
@Override
public String getType() {
- return TYPE;
+ return type;
}
/**

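The new constructor lets a wrapping handler advertise its own auth-token type while reusing the SPNEGO logic; a minimal sketch (the class and type names are hypothetical):

public class MyKerberosHandler extends KerberosAuthenticationHandler {
  public MyKerberosHandler() {
    super("my-kerberos"); // getType() now returns "my-kerberos"
  }
}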
View File: PseudoAuthenticationHandler.java

@@ -55,6 +55,25 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
private boolean acceptAnonymous;
private String type;
/**
* Creates a Hadoop pseudo authentication handler with the default auth-token
* type, <code>simple</code>.
*/
public PseudoAuthenticationHandler() {
this(TYPE);
}
/**
* Creates a Hadoop pseudo authentication handler with a custom auth-token
* type.
*
* @param type auth-token type.
*/
public PseudoAuthenticationHandler(String type) {
this.type = type;
}
/**
* Initializes the authentication handler instance.
@@ -96,7 +115,7 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
*/
@Override
public String getType() {
- return TYPE;
+ return type;
}
/**

View File: TestAuthenticatedURL.java

@@ -33,36 +33,6 @@ public class TestAuthenticatedURL {
token = new AuthenticatedURL.Token("foo");
Assert.assertTrue(token.isSet());
Assert.assertEquals("foo", token.toString());
- AuthenticatedURL.Token token1 = new AuthenticatedURL.Token();
- AuthenticatedURL.Token token2 = new AuthenticatedURL.Token();
- Assert.assertEquals(token1.hashCode(), token2.hashCode());
- Assert.assertTrue(token1.equals(token2));
- token1 = new AuthenticatedURL.Token();
- token2 = new AuthenticatedURL.Token("foo");
- Assert.assertNotSame(token1.hashCode(), token2.hashCode());
- Assert.assertFalse(token1.equals(token2));
- token1 = new AuthenticatedURL.Token("foo");
- token2 = new AuthenticatedURL.Token();
- Assert.assertNotSame(token1.hashCode(), token2.hashCode());
- Assert.assertFalse(token1.equals(token2));
- token1 = new AuthenticatedURL.Token("foo");
- token2 = new AuthenticatedURL.Token("foo");
- Assert.assertEquals(token1.hashCode(), token2.hashCode());
- Assert.assertTrue(token1.equals(token2));
- token1 = new AuthenticatedURL.Token("bar");
- token2 = new AuthenticatedURL.Token("foo");
- Assert.assertNotSame(token1.hashCode(), token2.hashCode());
- Assert.assertFalse(token1.equals(token2));
- token1 = new AuthenticatedURL.Token("foo");
- token2 = new AuthenticatedURL.Token("bar");
- Assert.assertNotSame(token1.hashCode(), token2.hashCode());
- Assert.assertFalse(token1.equals(token2));
}
@Test
@@ -137,4 +107,12 @@ public class TestAuthenticatedURL {
Mockito.verify(connConf).configure(Mockito.<HttpURLConnection>any());
}
@Test
public void testGetAuthenticator() throws Exception {
Authenticator authenticator = Mockito.mock(Authenticator.class);
AuthenticatedURL aURL = new AuthenticatedURL(authenticator);
Assert.assertEquals(authenticator, aURL.getAuthenticator());
}
}

View File: CHANGES.txt (hadoop-common)

@@ -199,6 +199,9 @@ Trunk (Unreleased)
HADOOP-10936. Change default KeyProvider bitlength to 128. (wang)
HADOOP-10224. JavaKeyStoreProvider has to protect against corrupting
underlying store. (asuresh via tucu)
BUG FIXES
HADOOP-9451. Fault single-layer config if node group topology is enabled.
@@ -421,6 +424,9 @@ Trunk (Unreleased)
HADOOP-10939. Fix TestKeyProviderFactory testcases to use default 128 bit
length keys. (Arun Suresh via wang)
HADOOP-10862. Miscellaneous trivial corrections to KMS classes.
(asuresh via tucu)
OPTIMIZATIONS
HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -490,6 +496,12 @@ Release 2.6.0 - UNRELEASED
HADOOP-10791. AuthenticationFilter should support externalizing the
secret for signing and provide rotation support. (rkanter via tucu)
HADOOP-10771. Refactor HTTP delegation support out of httpfs to common.
(tucu)
HADOOP-10835. Implement HTTP proxyuser support in HTTP authentication
client/server libraries. (tucu)
OPTIMIZATIONS
BUG FIXES
@@ -521,9 +533,6 @@ Release 2.6.0 - UNRELEASED
HADOOP-10830. Missing lock in JavaKeyStoreProvider.createCredentialEntry.
(Benoy Antony via umamahesh)
- HADOOP-10876. The constructor of Path should not take an empty URL as a
- parameter. (Zhihai Xu via wang)
HADOOP-10928. Incorrect usage on `hadoop credential list`.
(Josh Elser via wang)
@@ -545,6 +554,12 @@ Release 2.6.0 - UNRELEASED
HADOOP-10931 compile error on tools/hadoop-openstack (xukun via stevel)
HADOOP-10929. Typo in Configuration.getPasswordFromCredentialProviders
(lmccay via brandonli)
HADOOP-10402. Configuration.getValByRegex does not substitute for
variables. (Robert Kanter via kasha)
Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES

View File: pom.xml (hadoop-common)

@@ -203,6 +203,17 @@
<artifactId>hadoop-auth</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-auth</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minikdc</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.jcraft</groupId>
<artifactId>jsch</artifactId>

View File: Configuration.java

@@ -1781,7 +1781,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
public char[] getPassword(String name) throws IOException {
char[] pass = null;
- pass = getPasswordFromCredenitalProviders(name);
+ pass = getPasswordFromCredentialProviders(name);
if (pass == null) {
pass = getPasswordFromConfig(name);
@@ -1797,7 +1797,7 @@
* @return password or null if not found
* @throws IOException
*/
- protected char[] getPasswordFromCredenitalProviders(String name)
+ protected char[] getPasswordFromCredentialProviders(String name)
throws IOException {
char[] pass = null;
try {
@@ -2755,7 +2755,8 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
item.getValue() instanceof String) {
m = p.matcher((String)item.getKey());
if(m.find()) { // match
- result.put((String) item.getKey(), (String) item.getValue());
+ result.put((String) item.getKey(),
+ substituteVars(getProps().getProperty((String) item.getKey())));
}
}
}

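With this change, values returned by getValByRegex are variable-expanded the same way get() expands them; roughly (a minimal sketch, key names illustrative):

Configuration conf = new Configuration(false);
conf.set("basedir", "/data");
conf.set("dir.logs", "${basedir}/logs");
Map<String, String> m = conf.getValByRegex("^dir\\..*");
// m.get("dir.logs") is now "/data/logs" rather than the raw "${basedir}/logs"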
View File: JavaKeyStoreProvider.java

@@ -27,8 +27,11 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.ProviderUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.crypto.spec.SecretKeySpec;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
@@ -80,6 +83,9 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
@InterfaceAudience.Private
public class JavaKeyStoreProvider extends KeyProvider {
private static final String KEY_METADATA = "KeyMetadata";
private static Logger LOG =
LoggerFactory.getLogger(JavaKeyStoreProvider.class);
public static final String SCHEME_NAME = "jceks";
public static final String KEYSTORE_PASSWORD_FILE_KEY =
@@ -115,6 +121,10 @@ public class JavaKeyStoreProvider extends KeyProvider {
if (pwFile != null) {
ClassLoader cl = Thread.currentThread().getContextClassLoader();
URL pwdFile = cl.getResource(pwFile);
if (pwdFile == null) {
// Provided Password file does not exist
throw new IOException("Password file does not exist");
}
if (pwdFile != null) {
InputStream is = pwdFile.openStream();
try {
@@ -129,19 +139,25 @@
password = KEYSTORE_PASSWORD_DEFAULT;
}
try {
Path oldPath = constructOldPath(path);
Path newPath = constructNewPath(path);
keyStore = KeyStore.getInstance(SCHEME_NAME);
FsPermission perm = null;
if (fs.exists(path)) {
- // save off permissions in case we need to
- // rewrite the keystore in flush()
- FileStatus s = fs.getFileStatus(path);
- permissions = s.getPermission();
- keyStore.load(fs.open(path), password);
+ // flush did not proceed to completion
+ // _NEW should not exist
+ if (fs.exists(newPath)) {
+ throw new IOException(
+ String.format("Keystore not loaded due to some inconsistency "
+ + "('%s' and '%s' should not exist together)!!", path, newPath));
+ }
+ perm = tryLoadFromPath(path, oldPath);
} else {
- permissions = new FsPermission("700");
- // required to create an empty keystore. *sigh*
- keyStore.load(null, password);
+ perm = tryLoadIncompleteFlush(oldPath, newPath);
}
// Need to save off permissions in case we need to
// rewrite the keystore in flush()
permissions = perm;
} catch (KeyStoreException e) {
throw new IOException("Can't create keystore", e);
} catch (NoSuchAlgorithmException e) {
@@ -154,6 +170,136 @@ public class JavaKeyStoreProvider extends KeyProvider {
writeLock = lock.writeLock();
}
/**
* Try loading from the user-specified path; if that fails for a reason
* other than a bad/wrong password, load from the backup path (_OLD)
* @param path Actual path to load from
* @param backupPath Backup path (_OLD)
* @return The permissions of the loaded file
* @throws NoSuchAlgorithmException
* @throws CertificateException
* @throws IOException
*/
private FsPermission tryLoadFromPath(Path path, Path backupPath)
throws NoSuchAlgorithmException, CertificateException,
IOException {
FsPermission perm = null;
try {
perm = loadFromPath(path, password);
// Remove _OLD if exists
if (fs.exists(backupPath)) {
fs.delete(backupPath, true);
}
LOG.debug("KeyStore loaded successfully !!");
} catch (IOException ioe) {
// If the file is corrupted for some reason other than
// wrong password, try the _OLD file if it exists
if (!isBadorWrongPassword(ioe)) {
perm = loadFromPath(backupPath, password);
// Rename CURRENT to CORRUPTED
renameOrFail(path, new Path(path.toString() + "_CORRUPTED_"
+ System.currentTimeMillis()));
renameOrFail(backupPath, path);
LOG.debug(String.format(
"KeyStore loaded successfully from '%s' since '%s'"
+ "was corrupted !!", backupPath, path));
} else {
throw ioe;
}
}
return perm;
}
/**
* A flush might have been interrupted, in which case either the _NEW or
* the _OLD file might exist. This method tries to load the KeyStore
* from one of these intermediate files.
* @param oldPath the _OLD file created during flush
* @param newPath the _NEW file created during flush
* @return The permissions of the loaded file
* @throws IOException
* @throws NoSuchAlgorithmException
* @throws CertificateException
*/
private FsPermission tryLoadIncompleteFlush(Path oldPath, Path newPath)
throws IOException, NoSuchAlgorithmException, CertificateException {
FsPermission perm = null;
// Check if _NEW exists (in case flush had finished writing but not
// completed the re-naming)
if (fs.exists(newPath)) {
perm = loadAndReturnPerm(newPath, oldPath);
}
// Try loading from _OLD (an earlier flush might not have completed
// writing)
if ((perm == null) && fs.exists(oldPath)) {
perm = loadAndReturnPerm(oldPath, newPath);
}
// If not loaded yet,
// required to create an empty keystore. *sigh*
if (perm == null) {
keyStore.load(null, password);
LOG.debug("KeyStore initialized anew successfully !!");
perm = new FsPermission("700");
}
return perm;
}
private FsPermission loadAndReturnPerm(Path pathToLoad, Path pathToDelete)
throws NoSuchAlgorithmException, CertificateException,
IOException {
FsPermission perm = null;
try {
perm = loadFromPath(pathToLoad, password);
renameOrFail(pathToLoad, path);
LOG.debug(String.format("KeyStore loaded successfully from '%s'!!",
pathToLoad));
if (fs.exists(pathToDelete)) {
fs.delete(pathToDelete, true);
}
} catch (IOException e) {
// Check for password issue : don't want to trash file due
// to wrong password
if (isBadorWrongPassword(e)) {
throw e;
}
}
return perm;
}
private boolean isBadorWrongPassword(IOException ioe) {
// As per the documentation, this is supposed to be the way to figure
// out whether the password was correct
if (ioe.getCause() instanceof UnrecoverableKeyException) {
return true;
}
// Unfortunately that doesn't seem to work.
// Workaround:
if ((ioe.getCause() == null)
&& (ioe.getMessage() != null)
&& ((ioe.getMessage().contains("Keystore was tampered")) || (ioe
.getMessage().contains("password was incorrect")))) {
return true;
}
return false;
}
private FsPermission loadFromPath(Path p, char[] password)
throws IOException, NoSuchAlgorithmException, CertificateException {
FileStatus s = fs.getFileStatus(p);
keyStore.load(fs.open(p), password);
return s.getPermission();
}
private Path constructNewPath(Path path) {
Path newPath = new Path(path.toString() + "_NEW");
return newPath;
}
private Path constructOldPath(Path path) {
Path oldPath = new Path(path.toString() + "_OLD");
return oldPath;
}
@Override
public KeyVersion getKeyVersion(String versionName) throws IOException {
readLock.lock();
@@ -352,11 +498,22 @@ public class JavaKeyStoreProvider extends KeyProvider {
@Override
public void flush() throws IOException {
Path newPath = constructNewPath(path);
Path oldPath = constructOldPath(path);
writeLock.lock();
try {
if (!changed) {
return;
}
// Might exist if a backup has been restored etc.
if (fs.exists(newPath)) {
renameOrFail(newPath, new Path(newPath.toString()
+ "_ORPHANED_" + System.currentTimeMillis()));
}
if (fs.exists(oldPath)) {
renameOrFail(oldPath, new Path(oldPath.toString()
+ "_ORPHANED_" + System.currentTimeMillis()));
}
// put all of the updates into the keystore
for(Map.Entry<String, Metadata> entry: cache.entrySet()) {
try {
@@ -366,25 +523,77 @@ public class JavaKeyStoreProvider extends KeyProvider {
throw new IOException("Can't set metadata key " + entry.getKey(),e );
}
}
// Save old File first
boolean fileExisted = backupToOld(oldPath);
// write out the keystore
- FSDataOutputStream out = FileSystem.create(fs, path, permissions);
+ // Write to _NEW path first :
try {
- keyStore.store(out, password);
- } catch (KeyStoreException e) {
- throw new IOException("Can't store keystore " + this, e);
- } catch (NoSuchAlgorithmException e) {
- throw new IOException("No such algorithm storing keystore " + this, e);
- } catch (CertificateException e) {
- throw new IOException("Certificate exception storing keystore " + this,
- e);
+ writeToNew(newPath);
+ } catch (IOException ioe) {
+ // rename _OLD back to current and throw Exception
+ revertFromOld(oldPath, fileExisted);
+ throw ioe;
}
- out.close();
+ // Rename _NEW to CURRENT and delete _OLD
+ cleanupNewAndOld(newPath, oldPath);
changed = false;
} finally {
writeLock.unlock();
}
}
private void cleanupNewAndOld(Path newPath, Path oldPath) throws IOException {
// Rename _NEW to CURRENT
renameOrFail(newPath, path);
// Delete _OLD
if (fs.exists(oldPath)) {
fs.delete(oldPath, true);
}
}
private void writeToNew(Path newPath) throws IOException {
FSDataOutputStream out =
FileSystem.create(fs, newPath, permissions);
try {
keyStore.store(out, password);
} catch (KeyStoreException e) {
throw new IOException("Can't store keystore " + this, e);
} catch (NoSuchAlgorithmException e) {
throw new IOException(
"No such algorithm storing keystore " + this, e);
} catch (CertificateException e) {
throw new IOException(
"Certificate exception storing keystore " + this, e);
}
out.close();
}
private void revertFromOld(Path oldPath, boolean fileExisted)
throws IOException {
if (fileExisted) {
renameOrFail(oldPath, path);
}
}
private boolean backupToOld(Path oldPath)
throws IOException {
boolean fileExisted = false;
if (fs.exists(path)) {
renameOrFail(path, oldPath);
fileExisted = true;
}
return fileExisted;
}
private void renameOrFail(Path src, Path dest)
throws IOException {
if (!fs.rename(src, dest)) {
throw new IOException("Rename unsuccessful : "
+ String.format("'%s' to '%s'", src, dest));
}
}
@Override
public String toString() {
return uri.toString();

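End to end, the provider is normally reached through KeyProviderFactory; a minimal sketch (the keystore path is hypothetical) showing a flush that exercises the new _NEW/_OLD protocol:

Configuration conf = new Configuration();
conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, "jceks://file/tmp/test.jceks");
KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
provider.createKey("key1", new KeyProvider.Options(conf));
provider.flush(); // backs up to _OLD, writes _NEW, renames _NEW over the
                  // current file, then deletes _OLD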
View File: KMSClientProvider.java

@@ -512,7 +512,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension {
List<String> batch = new ArrayList<String>();
int batchLen = 0;
for (String name : keyNames) {
- int additionalLen = KMSRESTConstants.KEY_OP.length() + 1 + name.length();
+ int additionalLen = KMSRESTConstants.KEY.length() + 1 + name.length();
batchLen += additionalLen;
// capping at 1500 to account for initial URL and encoded names
if (batchLen > 1500) {
@@ -536,7 +536,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension {
for (String[] keySet : keySets) {
if (keyNames.length > 0) {
Map<String, Object> queryStr = new HashMap<String, Object>();
- queryStr.put(KMSRESTConstants.KEY_OP, keySet);
+ queryStr.put(KMSRESTConstants.KEY, keySet);
URL url = createURL(KMSRESTConstants.KEYS_METADATA_RESOURCE, null,
null, queryStr);
HttpURLConnection conn = createConnection(url, HTTP_GET);

View File: KMSRESTConstants.java

@@ -37,7 +37,7 @@ public class KMSRESTConstants {
public static final String EEK_SUB_RESOURCE = "_eek";
public static final String CURRENT_VERSION_SUB_RESOURCE = "_currentversion";
- public static final String KEY_OP = "key";
+ public static final String KEY = "key";
public static final String EEK_OP = "eek_op";
public static final String EEK_GENERATE = "generate";
public static final String EEK_DECRYPT = "decrypt";

View File: Path.java

@@ -129,19 +129,6 @@ public class Path implements Comparable {
}
}
- /** check URI parameter of Path constructor. */
- private void checkPathArg(URI aUri) throws IllegalArgumentException {
- // disallow construction of a Path from an empty URI
- if (aUri == null) {
- throw new IllegalArgumentException(
- "Can not create a Path from a null URI");
- }
- if (aUri.toString().isEmpty()) {
- throw new IllegalArgumentException(
- "Can not create a Path from an empty URI");
- }
- }
/** Construct a path from a String. Path strings are URIs, but with
* unescaped elements and some additional normalization. */
public Path(String pathString) throws IllegalArgumentException {
@@ -189,7 +176,6 @@ public class Path implements Comparable {
* Construct a path from a URI
*/
public Path(URI aUri) {
- checkPathArg(aUri);
uri = aUri.normalize();
}

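With the checks removed, any non-null URI is accepted and simply normalized; for example:

Path p = new Path(new URI("/a/b/../c"));
// p.toString() is "/a/c"; an empty URI no longer throws IllegalArgumentException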
View File: MetricsCollectorImpl.java

@@ -21,14 +21,18 @@ package org.apache.hadoop.metrics2.impl;
import java.util.Iterator;
import java.util.List;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsFilter;
import static org.apache.hadoop.metrics2.lib.Interns.*;
- class MetricsCollectorImpl implements MetricsCollector,
+ @InterfaceAudience.Private
+ @VisibleForTesting
+ public class MetricsCollectorImpl implements MetricsCollector,
Iterable<MetricsRecordBuilderImpl> {
private final List<MetricsRecordBuilderImpl> rbs = Lists.newArrayList();

View File: MutableStat.java

@@ -89,6 +89,14 @@ public class MutableStat extends MutableMetric {
this(name, description, sampleName, valueName, false);
}
/**
* Set whether to display the extended stats (stdev, min/max etc.) or not
* @param extended enable/disable displaying extended stats
*/
public synchronized void setExtended(boolean extended) {
this.extended = extended;
}
/**
* Add a number of samples and their sum to the running stat
* @param numSamples number of samples

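The new setter allows toggling extended output after construction; a minimal sketch (metric names illustrative):

MetricsRegistry registry = new MetricsRegistry("test");
MutableStat opLatency = registry.newStat("op", "operation latency", "Ops", "Time");
opLatency.setExtended(true); // also emit stdev/min/max, not just num/avg
opLatency.add(42);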
View File: DelegationTokenAuthenticatedURL.java

@@ -0,0 +1,343 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URL;
import java.net.URLEncoder;
import java.util.HashMap;
import java.util.Map;
/**
* The <code>DelegationTokenAuthenticatedURL</code> is a
* {@link AuthenticatedURL} sub-class with built-in Hadoop Delegation Token
* functionality.
* <p/>
* The authentication mechanisms supported by default are Hadoop Simple
* authentication (also known as pseudo authentication) and Kerberos SPNEGO
* authentication.
* <p/>
* Additional authentication mechanisms can be supported via {@link
* DelegationTokenAuthenticator} implementations.
* <p/>
* The default {@link DelegationTokenAuthenticator} is the {@link
* KerberosDelegationTokenAuthenticator} class which supports
* automatic fallback from Kerberos SPNEGO to Hadoop Simple authentication via
* the {@link PseudoDelegationTokenAuthenticator} class.
* <p/>
* <code>AuthenticatedURL</code> instances are not thread-safe.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public class DelegationTokenAuthenticatedURL extends AuthenticatedURL {
/**
* Constant used in a URL's query string to perform a proxy user request;
* the value of the <code>DO_AS</code> parameter is the user on whose
* behalf the request will be made.
*/
static final String DO_AS = "doAs";
/**
* Client side authentication token that handles Delegation Tokens.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public static class Token extends AuthenticatedURL.Token {
private
org.apache.hadoop.security.token.Token<AbstractDelegationTokenIdentifier>
delegationToken;
org.apache.hadoop.security.token.Token<AbstractDelegationTokenIdentifier>
getDelegationToken() {
return delegationToken;
}
}
private static Class<? extends DelegationTokenAuthenticator>
DEFAULT_AUTHENTICATOR = KerberosDelegationTokenAuthenticator.class;
/**
* Sets the default {@link DelegationTokenAuthenticator} class to use when a
* {@link DelegationTokenAuthenticatedURL} instance is created without
* specifying one.
*
* The default class is {@link KerberosDelegationTokenAuthenticator}
*
* @param authenticator the authenticator class to use as default.
*/
public static void setDefaultDelegationTokenAuthenticator(
Class<? extends DelegationTokenAuthenticator> authenticator) {
DEFAULT_AUTHENTICATOR = authenticator;
}
/**
* Returns the default {@link DelegationTokenAuthenticator} class to use when
* a {@link DelegationTokenAuthenticatedURL} instance is created without
* specifying one.
* <p/>
* The default class is {@link KerberosDelegationTokenAuthenticator}
*
* @return the delegation token authenticator class to use as default.
*/
public static Class<? extends DelegationTokenAuthenticator>
getDefaultDelegationTokenAuthenticator() {
return DEFAULT_AUTHENTICATOR;
}
private static DelegationTokenAuthenticator
obtainDelegationTokenAuthenticator(DelegationTokenAuthenticator dta) {
try {
return (dta != null) ? dta : DEFAULT_AUTHENTICATOR.newInstance();
} catch (Exception ex) {
throw new IllegalArgumentException(ex);
}
}
/**
* Creates a <code>DelegationTokenAuthenticatedURL</code>.
* <p/>
* An instance of the default {@link DelegationTokenAuthenticator} will be
* used.
*/
public DelegationTokenAuthenticatedURL() {
this(null, null);
}
/**
* Creates a <code>DelegationTokenAuthenticatedURL</code>.
*
* @param authenticator the {@link DelegationTokenAuthenticator} instance to
* use, if <code>null</code> the default one will be used.
*/
public DelegationTokenAuthenticatedURL(
DelegationTokenAuthenticator authenticator) {
this(authenticator, null);
}
/**
* Creates a <code>DelegationTokenAuthenticatedURL</code> using the default
* {@link DelegationTokenAuthenticator} class.
*
* @param connConfigurator a connection configurator.
*/
public DelegationTokenAuthenticatedURL(
ConnectionConfigurator connConfigurator) {
this(null, connConfigurator);
}
/**
* Creates a <code>DelegationTokenAuthenticatedURL</code>.
*
* @param authenticator the {@link DelegationTokenAuthenticator} instance to
* use, if <code>null</code> the default one will be used.
* @param connConfigurator a connection configurator.
*/
public DelegationTokenAuthenticatedURL(
DelegationTokenAuthenticator authenticator,
ConnectionConfigurator connConfigurator) {
super(obtainDelegationTokenAuthenticator(authenticator), connConfigurator);
}
/**
* Returns an authenticated {@link HttpURLConnection}. It uses a Delegation
* Token only if the given auth token is an instance of {@link Token} and
* contains a Delegation Token; otherwise it uses the configured
* {@link DelegationTokenAuthenticator} to authenticate the connection.
*
* @param url the URL to connect to. Only HTTP/S URLs are supported.
* @param token the authentication token being used for the user.
* @return an authenticated {@link HttpURLConnection}.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
@Override
public HttpURLConnection openConnection(URL url, AuthenticatedURL.Token token)
throws IOException, AuthenticationException {
return (token instanceof Token) ? openConnection(url, (Token) token)
: super.openConnection(url, token);
}
/**
* Returns an authenticated {@link HttpURLConnection}. If the Delegation
* Token is present, it will be used taking precedence over the configured
* <code>Authenticator</code>.
*
* @param url the URL to connect to. Only HTTP/S URLs are supported.
* @param token the authentication token being used for the user.
* @return an authenticated {@link HttpURLConnection}.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public HttpURLConnection openConnection(URL url, Token token)
throws IOException, AuthenticationException {
return openConnection(url, token, null);
}
private URL augmentURL(URL url, Map<String, String> params)
throws IOException {
if (params != null && params.size() > 0) {
String urlStr = url.toExternalForm();
StringBuilder sb = new StringBuilder(urlStr);
String separator = (urlStr.contains("?")) ? "&" : "?";
for (Map.Entry<String, String> param : params.entrySet()) {
sb.append(separator).append(param.getKey()).append("=").append(
param.getValue());
separator = "&";
}
url = new URL(sb.toString());
}
return url;
}
/**
* Returns an authenticated {@link HttpURLConnection}. If the Delegation
* Token is present, it will be used taking precedence over the configured
* <code>Authenticator</code>. If the <code>doAs</code> parameter is not NULL,
* the request will be done on behalf of the specified <code>doAs</code> user.
*
* @param url the URL to connect to. Only HTTP/S URLs are supported.
* @param token the authentication token being used for the user.
* @param doAs user to do the request on behalf of, if NULL the request is
* as self.
* @return an authenticated {@link HttpURLConnection}.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public HttpURLConnection openConnection(URL url, Token token, String doAs)
throws IOException, AuthenticationException {
Preconditions.checkNotNull(url, "url");
Preconditions.checkNotNull(token, "token");
Map<String, String> extraParams = new HashMap<String, String>();
// delegation token
Credentials creds = UserGroupInformation.getCurrentUser().getCredentials();
if (!creds.getAllTokens().isEmpty()) {
InetSocketAddress serviceAddr = new InetSocketAddress(url.getHost(),
url.getPort());
Text service = SecurityUtil.buildTokenService(serviceAddr);
org.apache.hadoop.security.token.Token<? extends TokenIdentifier> dt =
creds.getToken(service);
if (dt != null) {
extraParams.put(KerberosDelegationTokenAuthenticator.DELEGATION_PARAM,
dt.encodeToUrlString());
}
}
// proxyuser
if (doAs != null) {
extraParams.put(DO_AS, URLEncoder.encode(doAs, "UTF-8"));
}
url = augmentURL(url, extraParams);
return super.openConnection(url, token);
}
/**
* Requests a delegation token using the configured <code>Authenticator</code>
* for authentication.
*
* @param url the URL to get the delegation token from. Only HTTP/S URLs are
* supported.
* @param token the authentication token being used for the user where the
* Delegation token will be stored.
* @param renewer the renewer user.
* @return a delegation token.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public org.apache.hadoop.security.token.Token<AbstractDelegationTokenIdentifier>
getDelegationToken(URL url, Token token, String renewer)
throws IOException, AuthenticationException {
Preconditions.checkNotNull(url, "url");
Preconditions.checkNotNull(token, "token");
try {
token.delegationToken =
((KerberosDelegationTokenAuthenticator) getAuthenticator()).
getDelegationToken(url, token, renewer);
return token.delegationToken;
} catch (IOException ex) {
token.delegationToken = null;
throw ex;
}
}
/**
* Renews a delegation token from the server end-point using the
* configured <code>Authenticator</code> for authentication.
*
* @param url the URL to renew the delegation token from. Only HTTP/S URLs are
* supported.
* @param token the authentication token with the Delegation Token to renew.
* @return the new expiration time of the delegation token.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public long renewDelegationToken(URL url, Token token)
throws IOException, AuthenticationException {
Preconditions.checkNotNull(url, "url");
Preconditions.checkNotNull(token, "token");
Preconditions.checkNotNull(token.delegationToken,
"No delegation token available");
try {
return ((KerberosDelegationTokenAuthenticator) getAuthenticator()).
renewDelegationToken(url, token, token.delegationToken);
} catch (IOException ex) {
token.delegationToken = null;
throw ex;
}
}
/**
* Cancels a delegation token from the server end-point. It does not require
* being authenticated by the configured <code>Authenticator</code>.
*
* @param url the URL to cancel the delegation token from. Only HTTP/S URLs
* are supported.
* @param token the authentication token with the Delegation Token to cancel.
* @throws IOException if an IO error occurred.
*/
public void cancelDelegationToken(URL url, Token token)
throws IOException {
Preconditions.checkNotNull(url, "url");
Preconditions.checkNotNull(token, "token");
Preconditions.checkNotNull(token.delegationToken,
"No delegation token available");
try {
((KerberosDelegationTokenAuthenticator) getAuthenticator()).
cancelDelegationToken(url, token, token.delegationToken);
} finally {
token.delegationToken = null;
}
}
}

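Typical client usage, sketched with a hypothetical endpoint and renewer:

DelegationTokenAuthenticatedURL aUrl = new DelegationTokenAuthenticatedURL();
DelegationTokenAuthenticatedURL.Token token =
    new DelegationTokenAuthenticatedURL.Token();
URL url = new URL("http://host:14000/service"); // hypothetical
// obtain a delegation token, cached inside 'token' for later calls
aUrl.getDelegationToken(url, token, "renewerUser");
HttpURLConnection conn = aUrl.openConnection(url, token);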
View File: DelegationTokenAuthenticationFilter.java

@@ -0,0 +1,274 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SaslRpcServer;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.codehaus.jackson.map.ObjectMapper;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.Writer;
import java.nio.charset.Charset;
import java.security.Principal;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
/**
* The <code>DelegationTokenAuthenticationFilter</code> filter is a
* {@link AuthenticationFilter} with Hadoop Delegation Token support.
* <p/>
* By default it uses its own instance of the {@link
* AbstractDelegationTokenSecretManager}. For situations where an external
* <code>AbstractDelegationTokenSecretManager</code> is required (i.e. one that
* shares the secret with <code>AbstractDelegationTokenSecretManager</code>
* instances running in other services), the external
* <code>AbstractDelegationTokenSecretManager</code> must be set as an
* attribute in the {@link ServletContext} of the web application using the
* {@link #DELEGATION_TOKEN_SECRET_MANAGER_ATTR} attribute name (
* 'hadoop.http.delegation-token-secret-manager').
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DelegationTokenAuthenticationFilter
extends AuthenticationFilter {
private static final String APPLICATION_JSON_MIME = "application/json";
private static final String ERROR_EXCEPTION_JSON = "exception";
private static final String ERROR_MESSAGE_JSON = "message";
/**
* Sets an external <code>DelegationTokenSecretManager</code> instance to
* manage creation and verification of Delegation Tokens.
* <p/>
* This is useful for use cases where secrets must be shared across multiple
* services.
*/
public static final String DELEGATION_TOKEN_SECRET_MANAGER_ATTR =
"hadoop.http.delegation-token-secret-manager";
private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
private static final ThreadLocal<UserGroupInformation> UGI_TL =
new ThreadLocal<UserGroupInformation>();
public static final String PROXYUSER_PREFIX = "proxyuser";
private SaslRpcServer.AuthMethod handlerAuthMethod;
/**
* It delegates to
* {@link AuthenticationFilter#getConfiguration(String, FilterConfig)} and
* then, if the authentication type is set to <code>simple</code> or
* <code>kerberos</code>, overrides the {@link AuthenticationHandler} so that
* the corresponding implementation with delegation token support is used.
*
* @param configPrefix parameter not used.
* @param filterConfig parameter not used.
* @return hadoop-auth de-prefixed configuration for the filter and handler.
*/
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) throws ServletException {
Properties props = super.getConfiguration(configPrefix, filterConfig);
String authType = props.getProperty(AUTH_TYPE);
if (authType.equals(PseudoAuthenticationHandler.TYPE)) {
props.setProperty(AUTH_TYPE,
PseudoDelegationTokenAuthenticationHandler.class.getName());
} else if (authType.equals(KerberosAuthenticationHandler.TYPE)) {
props.setProperty(AUTH_TYPE,
KerberosDelegationTokenAuthenticationHandler.class.getName());
}
return props;
}
/**
* Returns the proxyuser configuration. All returned properties must start
* with <code>proxyuser.</code>
* <p/>
* Subclasses may override this method if the proxyuser configuration is
* read from somewhere other than the filter init parameters.
*
* @param filterConfig filter configuration object
* @return the proxyuser configuration properties.
* @throws ServletException thrown if the configuration could not be created.
*/
protected Configuration getProxyuserConfiguration(FilterConfig filterConfig)
throws ServletException {
// this filter class gets the configuration from the filter configs, we are
// creating an empty configuration and injecting the proxyuser settings in
// it. In the initialization of the filter, the returned configuration is
// passed to the ProxyUsers, which only looks for 'proxyuser.' properties.
Configuration conf = new Configuration(false);
Enumeration<?> names = filterConfig.getInitParameterNames();
while (names.hasMoreElements()) {
String name = (String) names.nextElement();
if (name.startsWith(PROXYUSER_PREFIX + ".")) {
String value = filterConfig.getInitParameter(name);
conf.set(name, value);
}
}
return conf;
}
@Override
public void init(FilterConfig filterConfig) throws ServletException {
super.init(filterConfig);
AuthenticationHandler handler = getAuthenticationHandler();
AbstractDelegationTokenSecretManager dtSecretManager =
(AbstractDelegationTokenSecretManager) filterConfig.getServletContext().
getAttribute(DELEGATION_TOKEN_SECRET_MANAGER_ATTR);
if (dtSecretManager != null && handler
instanceof DelegationTokenAuthenticationHandler) {
DelegationTokenAuthenticationHandler dtHandler =
(DelegationTokenAuthenticationHandler) getAuthenticationHandler();
dtHandler.setExternalDelegationTokenSecretManager(dtSecretManager);
}
if (handler instanceof PseudoAuthenticationHandler ||
handler instanceof PseudoDelegationTokenAuthenticationHandler) {
setHandlerAuthMethod(SaslRpcServer.AuthMethod.SIMPLE);
}
if (handler instanceof KerberosAuthenticationHandler ||
handler instanceof KerberosDelegationTokenAuthenticationHandler) {
setHandlerAuthMethod(SaslRpcServer.AuthMethod.KERBEROS);
}
// proxyuser configuration
Configuration conf = getProxyuserConfiguration(filterConfig);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf, PROXYUSER_PREFIX);
}
protected void setHandlerAuthMethod(SaslRpcServer.AuthMethod authMethod) {
this.handlerAuthMethod = authMethod;
}
@VisibleForTesting
static String getDoAs(HttpServletRequest request) {
List<NameValuePair> list = URLEncodedUtils.parse(request.getQueryString(),
UTF8_CHARSET);
if (list != null) {
for (NameValuePair nv : list) {
if (DelegationTokenAuthenticatedURL.DO_AS.equals(nv.getName())) {
return nv.getValue();
}
}
}
return null;
}
static UserGroupInformation getHttpUserGroupInformationInContext() {
return UGI_TL.get();
}
@Override
protected void doFilter(FilterChain filterChain, HttpServletRequest request,
HttpServletResponse response) throws IOException, ServletException {
boolean requestCompleted = false;
UserGroupInformation ugi = null;
AuthenticationToken authToken = (AuthenticationToken)
request.getUserPrincipal();
if (authToken != null && authToken != AuthenticationToken.ANONYMOUS) {
// if the request was authenticated because of a delegation token,
// then we ignore proxyuser (this is the same as the RPC behavior).
ugi = (UserGroupInformation) request.getAttribute(
DelegationTokenAuthenticationHandler.DELEGATION_TOKEN_UGI_ATTRIBUTE);
if (ugi == null) {
String realUser = request.getUserPrincipal().getName();
ugi = UserGroupInformation.createRemoteUser(realUser,
handlerAuthMethod);
String doAsUser = getDoAs(request);
if (doAsUser != null) {
ugi = UserGroupInformation.createProxyUser(doAsUser, ugi);
try {
ProxyUsers.authorize(ugi, request.getRemoteHost());
} catch (AuthorizationException ex) {
String msg = String.format(
"User '%s' from host '%s' not allowed to impersonate user '%s'",
realUser, request.getRemoteHost(), doAsUser);
response.setStatus(HttpServletResponse.SC_FORBIDDEN);
response.setContentType(APPLICATION_JSON_MIME);
Map<String, String> json = new HashMap<String, String>();
json.put(ERROR_EXCEPTION_JSON,
AuthorizationException.class.getName());
json.put(ERROR_MESSAGE_JSON, msg);
Writer writer = response.getWriter();
ObjectMapper jsonMapper = new ObjectMapper();
jsonMapper.writeValue(writer, json);
requestCompleted = true;
}
}
}
UGI_TL.set(ugi);
}
if (!requestCompleted) {
final UserGroupInformation ugiF = ugi;
try {
request = new HttpServletRequestWrapper(request) {
@Override
public String getAuthType() {
return (ugiF != null) ? handlerAuthMethod.toString() : null;
}
@Override
public String getRemoteUser() {
return (ugiF != null) ? ugiF.getShortUserName() : null;
}
@Override
public Principal getUserPrincipal() {
return (ugiF != null) ? new Principal() {
@Override
public String getName() {
return ugiF.getUserName();
}
} : null;
}
};
super.doFilter(filterChain, request, response);
} finally {
UGI_TL.remove();
}
}
}
}

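Wiring an external secret manager is done through the servlet context before the filter initializes; a minimal sketch (the secret manager instance and proxy user names are assumptions):

// e.g. from a ServletContextListener
servletContext.setAttribute(
    DelegationTokenAuthenticationFilter.DELEGATION_TOKEN_SECRET_MANAGER_ATTR,
    sharedSecretManager); // an AbstractDelegationTokenSecretManager
// proxyuser rules come from filter init-params, e.g.:
//   proxyuser.knox.hosts = *
//   proxyuser.knox.users = guest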
View File: DelegationTokenAuthenticationHandler.java

@@ -0,0 +1,359 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.codehaus.jackson.map.ObjectMapper;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
import java.io.IOException;
import java.io.Writer;
import java.text.MessageFormat;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
/**
* An {@link AuthenticationHandler} that implements Kerberos SPNEGO mechanism
* for HTTP and supports Delegation Token functionality.
* <p/>
* In addition to the wrapped {@link AuthenticationHandler} configuration
* properties, this handler supports the following properties prefixed
* with the type of the wrapped <code>AuthenticationHandler</code>:
* <ul>
* <li>delegation-token.token-kind: the token kind for generated tokens
* (no default, required property).</li>
* <li>delegation-token.update-interval.sec: secret manager master key
* update interval in seconds (default 1 day).</li>
* <li>delegation-token.max-lifetime.sec: maximum life of a delegation
* token in seconds (default 7 days).</li>
* <li>delegation-token.renewal-interval.sec: renewal interval for
* delegation tokens in seconds (default 1 day).</li>
* <li>delegation-token.removal-scan-interval.sec: delegation tokens
* removal scan interval in seconds (default 1 hour).</li>
* </ul>
*
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public abstract class DelegationTokenAuthenticationHandler
implements AuthenticationHandler {
protected static final String TYPE_POSTFIX = "-dt";
public static final String PREFIX = "delegation-token.";
public static final String TOKEN_KIND = PREFIX + "token-kind.sec";
public static final String UPDATE_INTERVAL = PREFIX + "update-interval.sec";
public static final long UPDATE_INTERVAL_DEFAULT = 24 * 60 * 60;
public static final String MAX_LIFETIME = PREFIX + "max-lifetime.sec";
public static final long MAX_LIFETIME_DEFAULT = 7 * 24 * 60 * 60;
public static final String RENEW_INTERVAL = PREFIX + "renew-interval.sec";
public static final long RENEW_INTERVAL_DEFAULT = 24 * 60 * 60;
public static final String REMOVAL_SCAN_INTERVAL = PREFIX +
"removal-scan-interval.sec";
public static final long REMOVAL_SCAN_INTERVAL_DEFAULT = 60 * 60;
private static final Set<String> DELEGATION_TOKEN_OPS = new HashSet<String>();
static final String DELEGATION_TOKEN_UGI_ATTRIBUTE =
"hadoop.security.delegation-token.ugi";
static {
DELEGATION_TOKEN_OPS.add(KerberosDelegationTokenAuthenticator.
DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
DELEGATION_TOKEN_OPS.add(KerberosDelegationTokenAuthenticator.
DelegationTokenOperation.RENEWDELEGATIONTOKEN.toString());
DELEGATION_TOKEN_OPS.add(KerberosDelegationTokenAuthenticator.
DelegationTokenOperation.CANCELDELEGATIONTOKEN.toString());
}
private AuthenticationHandler authHandler;
private DelegationTokenManager tokenManager;
private String authType;
public DelegationTokenAuthenticationHandler(AuthenticationHandler handler) {
authHandler = handler;
authType = handler.getType();
}
@VisibleForTesting
DelegationTokenManager getTokenManager() {
return tokenManager;
}
@Override
public void init(Properties config) throws ServletException {
authHandler.init(config);
initTokenManager(config);
}
/**
* Sets an external <code>DelegationTokenSecretManager</code> instance to
* manage creation and verification of Delegation Tokens.
* <p/>
* This is useful for use cases where secrets must be shared across multiple
* services.
*
* @param secretManager a <code>DelegationTokenSecretManager</code> instance
*/
public void setExternalDelegationTokenSecretManager(
AbstractDelegationTokenSecretManager secretManager) {
tokenManager.setExternalDelegationTokenSecretManager(secretManager);
}
@VisibleForTesting
@SuppressWarnings("unchecked")
public void initTokenManager(Properties config) {
String configPrefix = authHandler.getType() + ".";
Configuration conf = new Configuration(false);
for (Map.Entry entry : config.entrySet()) {
conf.set((String) entry.getKey(), (String) entry.getValue());
}
String tokenKind = conf.get(TOKEN_KIND);
if (tokenKind == null) {
throw new IllegalArgumentException(
"The configuration does not define the token kind");
}
tokenKind = tokenKind.trim();
long updateInterval = conf.getLong(configPrefix + UPDATE_INTERVAL,
UPDATE_INTERVAL_DEFAULT);
long maxLifeTime = conf.getLong(configPrefix + MAX_LIFETIME,
MAX_LIFETIME_DEFAULT);
long renewInterval = conf.getLong(configPrefix + RENEW_INTERVAL,
RENEW_INTERVAL_DEFAULT);
long removalScanInterval = conf.getLong(
configPrefix + REMOVAL_SCAN_INTERVAL, REMOVAL_SCAN_INTERVAL_DEFAULT);
tokenManager = new DelegationTokenManager(new Text(tokenKind),
updateInterval * 1000, maxLifeTime * 1000, renewInterval * 1000,
removalScanInterval * 1000);
tokenManager.init();
}
@Override
public void destroy() {
tokenManager.destroy();
authHandler.destroy();
}
@Override
public String getType() {
return authType;
}
private static final String ENTER = System.getProperty("line.separator");
@Override
@SuppressWarnings("unchecked")
public boolean managementOperation(AuthenticationToken token,
HttpServletRequest request, HttpServletResponse response)
throws IOException, AuthenticationException {
boolean requestContinues = true;
String op = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.OP_PARAM);
op = (op != null) ? op.toUpperCase() : null;
if (DELEGATION_TOKEN_OPS.contains(op) &&
!request.getMethod().equals("OPTIONS")) {
KerberosDelegationTokenAuthenticator.DelegationTokenOperation dtOp =
KerberosDelegationTokenAuthenticator.
DelegationTokenOperation.valueOf(op);
if (dtOp.getHttpMethod().equals(request.getMethod())) {
boolean doManagement;
if (dtOp.requiresKerberosCredentials() && token == null) {
token = authenticate(request, response);
if (token == null) {
requestContinues = false;
doManagement = false;
} else {
doManagement = true;
}
} else {
doManagement = true;
}
if (doManagement) {
UserGroupInformation requestUgi = (token != null)
? UserGroupInformation.createRemoteUser(token.getUserName())
: null;
Map map = null;
switch (dtOp) {
case GETDELEGATIONTOKEN:
if (requestUgi == null) {
throw new IllegalStateException("request UGI cannot be NULL");
}
String renewer = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.RENEWER_PARAM);
try {
Token<?> dToken = tokenManager.createToken(requestUgi, renewer);
map = delegationTokenToJSON(dToken);
} catch (IOException ex) {
throw new AuthenticationException(ex.toString(), ex);
}
break;
case RENEWDELEGATIONTOKEN:
if (requestUgi == null) {
throw new IllegalStateException("request UGI cannot be NULL");
}
String tokenToRenew = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.TOKEN_PARAM);
if (tokenToRenew == null) {
response.sendError(HttpServletResponse.SC_BAD_REQUEST,
MessageFormat.format(
"Operation [{0}] requires the parameter [{1}]", dtOp,
KerberosDelegationTokenAuthenticator.TOKEN_PARAM)
);
requestContinues = false;
} else {
Token<DelegationTokenIdentifier> dt =
new Token<DelegationTokenIdentifier>();
try {
dt.decodeFromUrlString(tokenToRenew);
long expirationTime = tokenManager.renewToken(dt,
requestUgi.getShortUserName());
map = new HashMap();
map.put("long", expirationTime);
} catch (IOException ex) {
throw new AuthenticationException(ex.toString(), ex);
}
}
break;
case CANCELDELEGATIONTOKEN:
String tokenToCancel = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.TOKEN_PARAM);
if (tokenToCancel == null) {
response.sendError(HttpServletResponse.SC_BAD_REQUEST,
MessageFormat.format(
"Operation [{0}] requires the parameter [{1}]", dtOp,
KerberosDelegationTokenAuthenticator.TOKEN_PARAM)
);
requestContinues = false;
} else {
Token<DelegationTokenIdentifier> dt =
new Token<DelegationTokenIdentifier>();
try {
dt.decodeFromUrlString(tokenToCancel);
tokenManager.cancelToken(dt, (requestUgi != null)
? requestUgi.getShortUserName() : null);
} catch (IOException ex) {
response.sendError(HttpServletResponse.SC_NOT_FOUND,
"Invalid delegation token, cannot cancel");
requestContinues = false;
}
}
break;
}
if (requestContinues) {
response.setStatus(HttpServletResponse.SC_OK);
if (map != null) {
response.setContentType(MediaType.APPLICATION_JSON);
Writer writer = response.getWriter();
ObjectMapper jsonMapper = new ObjectMapper();
jsonMapper.writeValue(writer, map);
writer.write(ENTER);
writer.flush();
}
requestContinues = false;
}
}
} else {
response.sendError(HttpServletResponse.SC_BAD_REQUEST,
MessageFormat.format(
"Wrong HTTP method [{0}] for operation [{1}], it should be " +
"[{2}]", request.getMethod(), dtOp, dtOp.getHttpMethod()));
requestContinues = false;
}
}
return requestContinues;
}
@SuppressWarnings("unchecked")
private static Map delegationTokenToJSON(Token token) throws IOException {
Map json = new LinkedHashMap();
json.put(
KerberosDelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON,
token.encodeToUrlString());
Map response = new LinkedHashMap();
response.put(KerberosDelegationTokenAuthenticator.DELEGATION_TOKEN_JSON,
json);
return response;
}
/**
* Authenticates a request by looking for the <code>delegation</code>
* query-string parameter and verifying that it is a valid token. If there is
* no <code>delegation</code> query-string parameter, it delegates the
* authentication to the {@link KerberosAuthenticationHandler} unless it is
* disabled.
*
* @param request the HTTP client request.
* @param response the HTTP client response.
* @return the authentication token for the authenticated request.
* @throws IOException thrown if an IO error occurred.
* @throws AuthenticationException thrown if the authentication failed.
*/
@Override
public AuthenticationToken authenticate(HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token;
String delegationParam = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.DELEGATION_PARAM);
if (delegationParam != null) {
try {
Token<DelegationTokenIdentifier> dt =
new Token<DelegationTokenIdentifier>();
dt.decodeFromUrlString(delegationParam);
UserGroupInformation ugi = tokenManager.verifyToken(dt);
final String shortName = ugi.getShortUserName();
// create an ephemeral token
token = new AuthenticationToken(shortName, ugi.getUserName(),
getType());
token.setExpires(0);
request.setAttribute(DELEGATION_TOKEN_UGI_ATTRIBUTE, ugi);
} catch (Throwable ex) {
throw new AuthenticationException("Could not verify DelegationToken, " +
ex.toString(), ex);
}
} else {
token = authHandler.authenticate(request, response);
}
return token;
}
}
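
For orientation, here is a minimal wire-level sketch of what the management-operation switch above serves. The host, port, context path and user name are hypothetical; the operation names, HTTP methods and JSON keys ("Token", "urlString", "long") come from the code above.

import java.net.HttpURLConnection;
import java.net.URL;

public class DelegationTokenOpsSketch {
  public static void main(String[] args) throws Exception {
    String base = "http://localhost:14000/foo/bar"; // hypothetical endpoint

    // GETDELEGATIONTOKEN is an HTTP GET; a 200 response carries
    // {"Token":{"urlString":"<encoded-token>"}}
    HttpURLConnection get = (HttpURLConnection) new URL(base
        + "?op=GETDELEGATIONTOKEN&renewer=alice").openConnection();
    System.out.println(get.getResponseCode());

    String dt = "<encoded-token>"; // placeholder for the urlString value

    // RENEWDELEGATIONTOKEN is an HTTP PUT; a 200 response carries
    // {"long":<new-expiration-millis>}
    HttpURLConnection renew = (HttpURLConnection) new URL(base
        + "?op=RENEWDELEGATIONTOKEN&token=" + dt).openConnection();
    renew.setRequestMethod("PUT");
    System.out.println(renew.getResponseCode());

    // A request carrying the token in the "delegation" parameter is
    // authenticated by authenticate() above, bypassing the wrapped handler.
    HttpURLConnection use = (HttpURLConnection) new URL(base
        + "?delegation=" + dt).openConnection();
    System.out.println(use.getResponseCode());
  }
}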

View File

@ -0,0 +1,250 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.Authenticator;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.codehaus.jackson.map.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URL;
import java.net.URLEncoder;
import java.util.HashMap;
import java.util.Map;
/**
* {@link Authenticator} wrapper that enhances an {@link Authenticator} with
* Delegation Token support.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class DelegationTokenAuthenticator implements Authenticator {
private static final Logger LOG =
LoggerFactory.getLogger(DelegationTokenAuthenticator.class);
private static final String CONTENT_TYPE = "Content-Type";
private static final String APPLICATION_JSON_MIME = "application/json";
private static final String HTTP_GET = "GET";
private static final String HTTP_PUT = "PUT";
public static final String OP_PARAM = "op";
public static final String DELEGATION_PARAM = "delegation";
public static final String TOKEN_PARAM = "token";
public static final String RENEWER_PARAM = "renewer";
public static final String DELEGATION_TOKEN_JSON = "Token";
public static final String DELEGATION_TOKEN_URL_STRING_JSON = "urlString";
public static final String RENEW_DELEGATION_TOKEN_JSON = "long";
/**
* DelegationToken operations.
*/
@InterfaceAudience.Private
public static enum DelegationTokenOperation {
GETDELEGATIONTOKEN(HTTP_GET, true),
RENEWDELEGATIONTOKEN(HTTP_PUT, true),
CANCELDELEGATIONTOKEN(HTTP_PUT, false);
private String httpMethod;
private boolean requiresKerberosCredentials;
private DelegationTokenOperation(String httpMethod,
boolean requiresKerberosCredentials) {
this.httpMethod = httpMethod;
this.requiresKerberosCredentials = requiresKerberosCredentials;
}
public String getHttpMethod() {
return httpMethod;
}
public boolean requiresKerberosCredentials() {
return requiresKerberosCredentials;
}
}
private Authenticator authenticator;
public DelegationTokenAuthenticator(Authenticator authenticator) {
this.authenticator = authenticator;
}
@Override
public void setConnectionConfigurator(ConnectionConfigurator configurator) {
authenticator.setConnectionConfigurator(configurator);
}
private boolean hasDelegationToken(URL url) {
String queryStr = url.getQuery();
return (queryStr != null) && queryStr.contains(DELEGATION_PARAM + "=");
}
@Override
public void authenticate(URL url, AuthenticatedURL.Token token)
throws IOException, AuthenticationException {
if (!hasDelegationToken(url)) {
authenticator.authenticate(url, token);
}
}
/**
* Requests a delegation token using the configured <code>Authenticator</code>
* for authentication.
*
* @param url the URL to get the delegation token from. Only HTTP/S URLs are
* supported.
* @param token the authentication token being used for the user where the
* Delegation token will be stored.
* @param renewer the requested token renewer.
* @return a delegation token for the authenticated user.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public Token<AbstractDelegationTokenIdentifier> getDelegationToken(URL url,
AuthenticatedURL.Token token, String renewer)
throws IOException, AuthenticationException {
Map json = doDelegationTokenOperation(url, token,
DelegationTokenOperation.GETDELEGATIONTOKEN, renewer, null, true);
json = (Map) json.get(DELEGATION_TOKEN_JSON);
String tokenStr = (String) json.get(DELEGATION_TOKEN_URL_STRING_JSON);
Token<AbstractDelegationTokenIdentifier> dToken =
new Token<AbstractDelegationTokenIdentifier>();
dToken.decodeFromUrlString(tokenStr);
InetSocketAddress service = new InetSocketAddress(url.getHost(),
url.getPort());
SecurityUtil.setTokenService(dToken, service);
return dToken;
}
/**
* Renews a delegation token from the server end-point using the
* configured <code>Authenticator</code> for authentication.
*
* @param url the URL to renew the delegation token from. Only HTTP/S URLs are
* supported.
* @param token the authentication token with the Delegation Token to renew.
* @param dToken the delegation token to renew.
* @return the new expiration time of the delegation token, in milliseconds.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public long renewDelegationToken(URL url,
AuthenticatedURL.Token token,
Token<AbstractDelegationTokenIdentifier> dToken)
throws IOException, AuthenticationException {
Map json = doDelegationTokenOperation(url, token,
DelegationTokenOperation.RENEWDELEGATIONTOKEN, null, dToken, true);
return (Long) json.get(RENEW_DELEGATION_TOKEN_JSON);
}
/**
* Cancels a delegation token from the server end-point. It does not require
* being authenticated by the configured <code>Authenticator</code>.
*
* @param url the URL to cancel the delegation token from. Only HTTP/S URLs
* are supported.
* @param token the authentication token with the Delegation Token to cancel.
* @param dToken the delegation token to cancel.
* @throws IOException if an IO error occurred.
*/
public void cancelDelegationToken(URL url,
AuthenticatedURL.Token token,
Token<AbstractDelegationTokenIdentifier> dToken)
throws IOException {
try {
doDelegationTokenOperation(url, token,
DelegationTokenOperation.CANCELDELEGATIONTOKEN, null, dToken, false);
} catch (AuthenticationException ex) {
throw new IOException("This should not happen: " + ex.getMessage(), ex);
}
}
private Map doDelegationTokenOperation(URL url,
AuthenticatedURL.Token token, DelegationTokenOperation operation,
String renewer, Token<?> dToken, boolean hasResponse)
throws IOException, AuthenticationException {
Map ret = null;
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, operation.toString());
if (renewer != null) {
params.put(RENEWER_PARAM, renewer);
}
if (dToken != null) {
params.put(TOKEN_PARAM, dToken.encodeToUrlString());
}
String urlStr = url.toExternalForm();
StringBuilder sb = new StringBuilder(urlStr);
String separator = (urlStr.contains("?")) ? "&" : "?";
for (Map.Entry<String, String> entry : params.entrySet()) {
sb.append(separator).append(entry.getKey()).append("=").
append(URLEncoder.encode(entry.getValue(), "UTF8"));
separator = "&";
}
url = new URL(sb.toString());
AuthenticatedURL aUrl = new AuthenticatedURL(this);
HttpURLConnection conn = aUrl.openConnection(url, token);
conn.setRequestMethod(operation.getHttpMethod());
validateResponse(conn, HttpURLConnection.HTTP_OK);
if (hasResponse) {
String contentType = conn.getHeaderField(CONTENT_TYPE);
contentType = (contentType != null) ? contentType.toLowerCase()
: null;
if (contentType != null &&
contentType.contains(APPLICATION_JSON_MIME)) {
try {
ObjectMapper mapper = new ObjectMapper();
ret = mapper.readValue(conn.getInputStream(), Map.class);
} catch (Exception ex) {
throw new AuthenticationException(String.format(
"'%s' did not handle the '%s' delegation token operation: %s",
url.getAuthority(), operation, ex.getMessage()), ex);
}
} else {
throw new AuthenticationException(String.format("'%s' did not " +
"respond with JSON to the '%s' delegation token operation",
url.getAuthority(), operation));
}
}
return ret;
}
@SuppressWarnings("unchecked")
private static void validateResponse(HttpURLConnection conn, int expected)
throws IOException {
int status = conn.getResponseCode();
if (status != expected) {
try {
conn.getInputStream().close();
} catch (IOException ex) {
//NOP
}
String msg = String.format("HTTP status, expected [%d], got [%d]: %s",
expected, status, conn.getResponseMessage());
LOG.debug(msg);
throw new IOException(msg);
}
}
}
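
From the client side the flow goes through the public methods above; a minimal sketch, assuming a hypothetical endpoint protected by a matching server-side handler:

import java.net.URL;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;

public class DelegationTokenClientSketch {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:14000/foo/bar"); // hypothetical
    AuthenticatedURL.Token session = new AuthenticatedURL.Token();
    DelegationTokenAuthenticator authenticator =
        new KerberosDelegationTokenAuthenticator();

    // Authenticates via the wrapped Authenticator, then parses the
    // {"Token":{"urlString":...}} response and sets the token service.
    Token<AbstractDelegationTokenIdentifier> dt =
        authenticator.getDelegationToken(url, session, "alice");

    long newExpiration = authenticator.renewDelegationToken(url, session, dt);
    System.out.println(newExpiration);

    // Cancellation does not require the wrapped Authenticator.
    authenticator.cancelDelegationToken(url, session, dt);
  }
}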

View File

@ -15,21 +15,24 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service;
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
/**
* HttpFS <code>DelegationTokenIdentifier</code> implementation.
* Concrete delegation token identifier used by {@link DelegationTokenManager},
* {@link KerberosDelegationTokenAuthenticationHandler} and
* {@link DelegationTokenAuthenticationFilter}.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DelegationTokenIdentifier
extends AbstractDelegationTokenIdentifier {
extends AbstractDelegationTokenIdentifier {
private Text kind = WebHdfsFileSystem.TOKEN_KIND;
private Text kind;
public DelegationTokenIdentifier(Text kind) {
this.kind = kind;
@ -50,8 +53,8 @@ public class DelegationTokenIdentifier
}
/**
* Returns the kind, <code>TOKEN_KIND</code>.
* @return returns <code>TOKEN_KIND</code>.
* Returns the delegation token kind.
* @return the delegation token kind.
*/
@Override
public Text getKind() {

View File

@ -0,0 +1,153 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
/**
* Delegation Token Manager used by the
* {@link KerberosDelegationTokenAuthenticationHandler}.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
class DelegationTokenManager {
private static class DelegationTokenSecretManager
extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
private Text tokenKind;
public DelegationTokenSecretManager(Text tokenKind,
long delegationKeyUpdateInterval,
long delegationTokenMaxLifetime,
long delegationTokenRenewInterval,
long delegationTokenRemoverScanInterval) {
super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
this.tokenKind = tokenKind;
}
@Override
public DelegationTokenIdentifier createIdentifier() {
return new DelegationTokenIdentifier(tokenKind);
}
}
private AbstractDelegationTokenSecretManager secretManager = null;
private boolean managedSecretManager;
private Text tokenKind;
public DelegationTokenManager(Text tokenKind,
long delegationKeyUpdateInterval,
long delegationTokenMaxLifetime,
long delegationTokenRenewInterval,
long delegationTokenRemoverScanInterval) {
this.secretManager = new DelegationTokenSecretManager(tokenKind,
delegationKeyUpdateInterval, delegationTokenMaxLifetime,
delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
this.tokenKind = tokenKind;
managedSecretManager = true;
}
/**
* Sets an external <code>DelegationTokenSecretManager</code> instance to
* manage creation and verification of Delegation Tokens.
* <p/>
* This is useful for use cases where secrets must be shared across multiple
* services.
*
* @param secretManager a <code>DelegationTokenSecretManager</code> instance
*/
public void setExternalDelegationTokenSecretManager(
AbstractDelegationTokenSecretManager secretManager) {
this.secretManager.stopThreads();
this.secretManager = secretManager;
this.tokenKind = secretManager.createIdentifier().getKind();
managedSecretManager = false;
}
public void init() {
if (managedSecretManager) {
try {
secretManager.startThreads();
} catch (IOException ex) {
throw new RuntimeException("Could not start " +
secretManager.getClass() + ": " + ex.toString(), ex);
}
}
}
public void destroy() {
if (managedSecretManager) {
secretManager.stopThreads();
}
}
@SuppressWarnings("unchecked")
public Token<DelegationTokenIdentifier> createToken(UserGroupInformation ugi,
String renewer) {
renewer = (renewer == null) ? ugi.getShortUserName() : renewer;
String user = ugi.getUserName();
Text owner = new Text(user);
Text realUser = null;
if (ugi.getRealUser() != null) {
realUser = new Text(ugi.getRealUser().getUserName());
}
DelegationTokenIdentifier tokenIdentifier = new DelegationTokenIdentifier(
tokenKind, owner, new Text(renewer), realUser);
return new Token<DelegationTokenIdentifier>(tokenIdentifier, secretManager);
}
@SuppressWarnings("unchecked")
public long renewToken(Token<DelegationTokenIdentifier> token, String renewer)
throws IOException {
return secretManager.renewToken(token, renewer);
}
@SuppressWarnings("unchecked")
public void cancelToken(Token<DelegationTokenIdentifier> token,
String canceler) throws IOException {
canceler = (canceler != null) ? canceler :
verifyToken(token).getShortUserName();
secretManager.cancelToken(token, canceler);
}
@SuppressWarnings("unchecked")
public UserGroupInformation verifyToken(Token<DelegationTokenIdentifier>
token) throws IOException {
ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
DataInputStream dis = new DataInputStream(buf);
DelegationTokenIdentifier id = new DelegationTokenIdentifier(tokenKind);
id.readFields(dis);
dis.close();
secretManager.verifyToken(id, token.getPassword());
return id.getUser();
}
}
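
A minimal lifecycle sketch of the manager above. The class is package-private, so the sketch assumes it lives in the same package; the token kind "foo" and the millisecond intervals are made-up values.

package org.apache.hadoop.security.token.delegation.web;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public class DelegationTokenManagerSketch {
  public static void main(String[] args) throws Exception {
    DelegationTokenManager mgr = new DelegationTokenManager(new Text("foo"),
        86400000L, 7 * 86400000L, 86400000L, 3600000L);
    // To share secrets across services, swap in an external secret manager
    // before init(): mgr.setExternalDelegationTokenSecretManager(shared);
    mgr.init(); // starts the secret-manager threads when internally managed
    Token<DelegationTokenIdentifier> t =
        mgr.createToken(UserGroupInformation.getCurrentUser(), "alice");
    mgr.verifyToken(t); // returns the UGI encoded in the identifier
    System.out.println(mgr.renewToken(t, "alice"));
    mgr.cancelToken(t, "alice");
    mgr.destroy(); // stops the threads
  }
}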

View File

@ -15,33 +15,29 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.client;
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
import java.io.IOException;
import javax.servlet.http.HttpServletRequest;
/**
* A <code>PseudoAuthenticator</code> subclass that uses FileSystemAccess's
* <code>UserGroupInformation</code> to obtain the client user name (the UGI's login user).
* Util class that returns the remote {@link UserGroupInformation} in scope
* for the HTTP request.
*/
@InterfaceAudience.Private
public class HttpFSPseudoAuthenticator extends PseudoAuthenticator {
public class HttpUserGroupInformation {
/**
* Return the client user name.
* Returns the remote {@link UserGroupInformation} in context for the current
* HTTP request, taking into account proxy user requests.
*
* @return the client user name.
* @return the remote {@link UserGroupInformation}, <code>NULL</code> if none.
*/
@Override
protected String getUserName() {
try {
return UserGroupInformation.getLoginUser().getUserName();
} catch (IOException ex) {
throw new SecurityException("Could not obtain current user, " + ex.getMessage(), ex);
}
public static UserGroupInformation get() {
return DelegationTokenAuthenticationFilter.
getHttpUserGroupInformationInContext();
}
}
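
A minimal servlet sketch (the servlet itself is hypothetical) of how code running behind the filter picks up the remote user established for the request:

import java.io.IOException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;

public class WhoAmIServlet extends HttpServlet {
  @Override
  protected void doGet(HttpServletRequest req, HttpServletResponse resp)
      throws IOException {
    // NULL when the filter did not authenticate the request.
    UserGroupInformation ugi = HttpUserGroupInformation.get();
    resp.getWriter().write(
        (ugi != null) ? ugi.getShortUserName() : "anonymous");
  }
}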

View File

@ -0,0 +1,54 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
/**
* An {@link AuthenticationHandler} that implements Kerberos SPNEGO mechanism
* for HTTP and supports Delegation Token functionality.
* <p/>
* In addition to the {@link KerberosAuthenticationHandler} configuration
* properties, this handler supports:
* <ul>
* <li>kerberos.delegation-token.token-kind: the token kind for generated tokens
* (no default, required property).</li>
* <li>kerberos.delegation-token.update-interval.sec: secret manager master key
* update interval in seconds (default 1 day).</li>
* <li>kerberos.delegation-token.max-lifetime.sec: maximum life of a delegation
* token in seconds (default 7 days).</li>
* <li>kerberos.delegation-token.renewal-interval.sec: renewal interval for
* delegation tokens in seconds (default 1 day).</li>
* <li>kerberos.delegation-token.removal-scan-interval.sec: delegation tokens
* removal scan interval in seconds (default 1 hour).</li>
* </ul>
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class KerberosDelegationTokenAuthenticationHandler
extends DelegationTokenAuthenticationHandler {
public KerberosDelegationTokenAuthenticationHandler() {
super(new KerberosAuthenticationHandler(KerberosAuthenticationHandler.TYPE +
TYPE_POSTFIX));
}
}
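
A minimal sketch of assembling the configuration properties listed in the javadoc above; the token kind EXAMPLE_KIND and the overridden interval are made-up values, and only the token kind is required:

import java.util.Properties;

public class KerberosDTHandlerConfigSketch {
  static Properties handlerConfig() {
    Properties props = new Properties();
    // Required: the kind stamped on issued delegation tokens.
    props.setProperty("kerberos.delegation-token.token-kind", "EXAMPLE_KIND");
    // Optional; defaults are 1 day / 7 days / 1 day / 1 hour.
    props.setProperty("kerberos.delegation-token.max-lifetime.sec",
        String.valueOf(7 * 24 * 3600));
    return props;
  }
}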

View File

@ -0,0 +1,46 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.authentication.client.Authenticator;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
/**
* The <code>KerberosDelegationTokenAuthenticator</code> provides support for
* Kerberos SPNEGO authentication mechanism and support for Hadoop Delegation
* Token operations.
* <p/>
* It falls back to the {@link PseudoDelegationTokenAuthenticator} if the HTTP
* endpoint does not trigger a SPNEGO authentication
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class KerberosDelegationTokenAuthenticator
extends DelegationTokenAuthenticator {
public KerberosDelegationTokenAuthenticator() {
super(new KerberosAuthenticator() {
@Override
protected Authenticator getFallBackAuthenticator() {
return new PseudoDelegationTokenAuthenticator();
}
});
}
}

View File

@ -0,0 +1,55 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
/**
* An {@link AuthenticationHandler} that implements Hadoop's pseudo/simple
* authentication mechanism for HTTP and supports Delegation Token
* functionality.
* <p/>
* In addition to the {@link PseudoAuthenticationHandler} configuration
* properties, this handler supports:
* <ul>
* <li>simple.delegation-token.token-kind: the token kind for generated tokens
* (no default, required property).</li>
* <li>simple.delegation-token.update-interval.sec: secret manager master key
* update interval in seconds (default 1 day).</li>
* <li>simple.delegation-token.max-lifetime.sec: maximum life of a delegation
* token in seconds (default 7 days).</li>
* <li>simple.delegation-token.renewal-interval.sec: renewal interval for
* delegation tokens in seconds (default 1 day).</li>
* <li>simple.delegation-token.removal-scan-interval.sec: delegation tokens
* removal scan interval in seconds (default 1 hour).</li>
* </ul>
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class PseudoDelegationTokenAuthenticationHandler
extends DelegationTokenAuthenticationHandler {
public PseudoDelegationTokenAuthenticationHandler() {
super(new PseudoAuthenticationHandler(PseudoAuthenticationHandler.TYPE +
TYPE_POSTFIX));
}
}

View File

@ -0,0 +1,54 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
import java.io.IOException;
/**
* The <code>PseudoDelegationTokenAuthenticator</code> provides support for
* Hadoop's pseudo authentication mechanism, which accepts the user name
* specified as a query string parameter, and for Hadoop Delegation Token
* operations.
* <p/>
* This mimics the model of Hadoop Simple authentication trusting the
* {@link UserGroupInformation#getCurrentUser()} value.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class PseudoDelegationTokenAuthenticator
extends DelegationTokenAuthenticator {
public PseudoDelegationTokenAuthenticator() {
super(new PseudoAuthenticator() {
@Override
protected String getUserName() {
try {
return UserGroupInformation.getCurrentUser().getShortUserName();
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
});
}
}
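
A minimal sketch, assuming a hypothetical simple-auth endpoint: the overridden getUserName() above supplies the current process user, so delegation-token operations work without Kerberos:

import java.net.URL;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticator;

public class PseudoDelegationTokenSketch {
  public static void main(String[] args) throws Exception {
    PseudoDelegationTokenAuthenticator authenticator =
        new PseudoDelegationTokenAuthenticator();
    AuthenticatedURL.Token session = new AuthenticatedURL.Token();
    // The current short user name rides along via the pseudo mechanism.
    Token<AbstractDelegationTokenIdentifier> dt =
        authenticator.getDelegationToken(
            new URL("http://localhost:14000/foo/bar"), session, "alice");
    System.out.println(dt);
  }
}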

View File

@ -0,0 +1,59 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.List;
/**
* Servlet utility methods.
*/
@InterfaceAudience.Private
class ServletUtils {
private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
/**
* Extracts a query string parameter without triggering HTTP parameter
* processing by the servlet container.
*
* @param request the request.
* @param name the name of the parameter to get.
* @return the parameter value, or <code>NULL</code> if the parameter is not
* defined.
* @throws IOException thrown if there was an error parsing the query string.
*/
public static String getParameter(HttpServletRequest request, String name)
throws IOException {
List<NameValuePair> list = URLEncodedUtils.parse(request.getQueryString(),
UTF8_CHARSET);
if (list != null) {
for (NameValuePair nv : list) {
if (name.equals(nv.getName())) {
return nv.getValue();
}
}
}
return null;
}
}
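
A minimal usage sketch, mirroring how the handler reads operations off the query string. ServletUtils is package-private, so the sketch assumes the same package; the mocked request is only for illustration:

package org.apache.hadoop.security.token.delegation.web;

import javax.servlet.http.HttpServletRequest;
import org.mockito.Mockito;

public class ServletUtilsSketch {
  public static void main(String[] args) throws Exception {
    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
    Mockito.when(request.getQueryString())
        .thenReturn("op=GETDELEGATIONTOKEN&renewer=alice");
    // Only the raw query string is parsed; request.getParameter() is never
    // called, so the container does not consume the request parameters.
    System.out.println(ServletUtils.getParameter(request, "op"));
  }
}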

View File

@ -178,6 +178,14 @@ public class TestConfiguration extends TestCase {
// check that expansion also occurs for getInt()
assertTrue(conf.getInt("intvar", -1) == 42);
assertTrue(conf.getInt("my.int", -1) == 42);
Map<String, String> results = conf.getValByRegex("^my.*file$");
assertTrue(results.keySet().contains("my.relfile"));
assertTrue(results.keySet().contains("my.fullfile"));
assertTrue(results.keySet().contains("my.file"));
assertEquals(-1, results.get("my.relfile").indexOf("${"));
assertEquals(-1, results.get("my.fullfile").indexOf("${"));
assertEquals(-1, results.get("my.file").indexOf("${"));
}
public void testFinalParam() throws IOException {

View File

@ -220,11 +220,76 @@ public class TestKeyProviderFactory {
assertTrue(s.getPermission().toString().equals("rwx------"));
assertTrue(file + " should exist", file.isFile());
// Corrupt file and Check if JKS can reload from _OLD file
File oldFile = new File(file.getPath() + "_OLD");
file.renameTo(oldFile);
file.delete();
file.createNewFile();
assertTrue(oldFile.exists());
KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
assertTrue(file.exists());
assertTrue(oldFile + "should be deleted", !oldFile.exists());
verifyAfterReload(file, provider);
assertTrue(!oldFile.exists());
// _NEW and current file should not exist together
File newFile = new File(file.getPath() + "_NEW");
newFile.createNewFile();
try {
provider = KeyProviderFactory.getProviders(conf).get(0);
Assert.fail("_NEW and current file should not exist together !!");
} catch (Exception e) {
// Ignore
} finally {
if (newFile.exists()) {
newFile.delete();
}
}
// Load from _NEW file
file.renameTo(newFile);
file.delete();
try {
provider = KeyProviderFactory.getProviders(conf).get(0);
Assert.assertFalse(newFile.exists());
Assert.assertFalse(oldFile.exists());
} catch (Exception e) {
Assert.fail("JKS should load from _NEW file !!");
// Ignore
}
verifyAfterReload(file, provider);
// _NEW exists but corrupt.. must load from _OLD
newFile.createNewFile();
file.renameTo(oldFile);
file.delete();
try {
provider = KeyProviderFactory.getProviders(conf).get(0);
Assert.assertFalse(newFile.exists());
Assert.assertFalse(oldFile.exists());
} catch (Exception e) {
Assert.fail("JKS should load from _OLD file !!");
// Ignore
} finally {
if (newFile.exists()) {
newFile.delete();
}
}
verifyAfterReload(file, provider);
// check permission retention after explicit change
fs.setPermission(path, new FsPermission("777"));
checkPermissionRetention(conf, ourUrl, path);
}
private void verifyAfterReload(File file, KeyProvider provider)
throws IOException {
List<String> existingKeys = provider.getKeys();
assertTrue(existingKeys.contains("key4"));
assertTrue(existingKeys.contains("key3"));
assertTrue(file.exists());
}
public void checkPermissionRetention(Configuration conf, String ourUrl, Path path) throws Exception {
KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
// let's add a new key and flush and check that permissions are still set to 777

View File

@ -26,13 +26,11 @@ import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.AvroTestUtil;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Shell;
import com.google.common.base.Joiner;
import junit.framework.TestCase;
import static org.junit.Assert.fail;
public class TestPath extends TestCase {
/**
@ -307,28 +305,6 @@ public class TestPath extends TestCase {
// if the child uri is absolute path
assertEquals("foo://bar/fud#boo", new Path(new Path(new URI(
"foo://bar/baz#bud")), new Path(new URI("/fud#boo"))).toString());
// empty URI
URI uri3 = new URI("");
assertEquals("", uri3.toString());
try {
path = new Path(uri3);
fail("Expected exception for empty URI");
} catch (IllegalArgumentException e) {
// expect to receive an IllegalArgumentException
GenericTestUtils.assertExceptionContains("Can not create a Path"
+ " from an empty URI", e);
}
// null URI
uri3 = null;
try {
path = new Path(uri3);
fail("Expected exception for null URI");
} catch (IllegalArgumentException e) {
// expect to receive an IllegalArgumentException
GenericTestUtils.assertExceptionContains("Can not create a Path"
+ " from a null URI", e);
}
}
/** Test URIs created from Path objects */

View File

@ -0,0 +1,326 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.Token;
import org.codehaus.jackson.map.ObjectMapper;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.Map;
import java.util.Properties;
public class TestDelegationTokenAuthenticationHandlerWithMocks {
public static class MockDelegationTokenAuthenticationHandler
extends DelegationTokenAuthenticationHandler {
public MockDelegationTokenAuthenticationHandler() {
super(new AuthenticationHandler() {
@Override
public String getType() {
return "T";
}
@Override
public void init(Properties config) throws ServletException {
}
@Override
public void destroy() {
}
@Override
public boolean managementOperation(AuthenticationToken token,
HttpServletRequest request, HttpServletResponse response)
throws IOException, AuthenticationException {
return false;
}
@Override
public AuthenticationToken authenticate(HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, "mock");
return null;
}
});
}
}
private DelegationTokenAuthenticationHandler handler;
@Before
public void setUp() throws Exception {
Properties conf = new Properties();
conf.put(KerberosDelegationTokenAuthenticationHandler.TOKEN_KIND, "foo");
handler = new MockDelegationTokenAuthenticationHandler();
handler.initTokenManager(conf);
}
@After
public void cleanUp() {
handler.destroy();
}
@Test
public void testManagementOperations() throws Exception {
testNonManagementOperation();
testManagementOperationErrors();
testGetToken(null, new Text("foo"));
testGetToken("bar", new Text("foo"));
testCancelToken();
testRenewToken();
}
private void testNonManagementOperation() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getParameter(
DelegationTokenAuthenticator.OP_PARAM)).thenReturn(null);
Assert.assertTrue(handler.managementOperation(null, request, null));
Mockito.when(request.getParameter(
DelegationTokenAuthenticator.OP_PARAM)).thenReturn("CREATE");
Assert.assertTrue(handler.managementOperation(null, request, null));
}
private void testManagementOperationErrors() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).thenReturn(
DelegationTokenAuthenticator.OP_PARAM + "=" +
DelegationTokenAuthenticator.DelegationTokenOperation.
GETDELEGATIONTOKEN.toString()
);
Mockito.when(request.getMethod()).thenReturn("FOO");
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.startsWith("Wrong HTTP method"));
Mockito.reset(response);
Mockito.when(request.getMethod()).thenReturn(
DelegationTokenAuthenticator.DelegationTokenOperation.
GETDELEGATIONTOKEN.getHttpMethod()
);
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).setStatus(
Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED));
Mockito.verify(response).setHeader(
Mockito.eq(KerberosAuthenticator.WWW_AUTHENTICATE),
Mockito.eq("mock"));
}
private void testGetToken(String renewer, Text expectedTokenKind)
throws Exception {
DelegationTokenAuthenticator.DelegationTokenOperation op =
DelegationTokenAuthenticator.DelegationTokenOperation.
GETDELEGATIONTOKEN;
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).
thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString());
Mockito.when(request.getMethod()).thenReturn(op.getHttpMethod());
AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
Mockito.when(token.getUserName()).thenReturn("user");
Mockito.when(response.getWriter()).thenReturn(new PrintWriter(
new StringWriter()));
Assert.assertFalse(handler.managementOperation(token, request, response));
Mockito.when(request.getQueryString()).
thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString() +
"&" + DelegationTokenAuthenticator.RENEWER_PARAM + "=" + renewer);
Mockito.reset(response);
Mockito.reset(token);
Mockito.when(token.getUserName()).thenReturn("user");
StringWriter writer = new StringWriter();
PrintWriter pwriter = new PrintWriter(writer);
Mockito.when(response.getWriter()).thenReturn(pwriter);
Assert.assertFalse(handler.managementOperation(token, request, response));
Mockito.verify(token).getUserName();
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
Mockito.verify(response).setContentType(MediaType.APPLICATION_JSON);
pwriter.close();
String responseOutput = writer.toString();
String tokenLabel = DelegationTokenAuthenticator.
DELEGATION_TOKEN_JSON;
Assert.assertTrue(responseOutput.contains(tokenLabel));
Assert.assertTrue(responseOutput.contains(
DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON));
ObjectMapper jsonMapper = new ObjectMapper();
Map json = jsonMapper.readValue(responseOutput, Map.class);
json = (Map) json.get(tokenLabel);
String tokenStr = (String) json.get(
DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
Token<DelegationTokenIdentifier> dt = new Token<DelegationTokenIdentifier>();
dt.decodeFromUrlString(tokenStr);
handler.getTokenManager().verifyToken(dt);
Assert.assertEquals(expectedTokenKind, dt.getKind());
}
private void testCancelToken() throws Exception {
DelegationTokenAuthenticator.DelegationTokenOperation op =
DelegationTokenAuthenticator.DelegationTokenOperation.
CANCELDELEGATIONTOKEN;
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).thenReturn(
DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString());
Mockito.when(request.getMethod()).
thenReturn(op.getHttpMethod());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.contains("requires the parameter [token]"));
Mockito.reset(response);
Token<DelegationTokenIdentifier> token =
handler.getTokenManager().createToken(
UserGroupInformation.getCurrentUser(), "foo");
Mockito.when(request.getQueryString()).thenReturn(
DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString() + "&" +
DelegationTokenAuthenticator.TOKEN_PARAM + "=" +
token.encodeToUrlString());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
try {
handler.getTokenManager().verifyToken(token);
Assert.fail();
} catch (SecretManager.InvalidToken ex) {
//NOP
} catch (Throwable ex) {
Assert.fail();
}
}
private void testRenewToken() throws Exception {
DelegationTokenAuthenticator.DelegationTokenOperation op =
DelegationTokenAuthenticator.DelegationTokenOperation.
RENEWDELEGATIONTOKEN;
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).
thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString());
Mockito.when(request.getMethod()).
thenReturn(op.getHttpMethod());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).setStatus(
Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED));
Mockito.verify(response).setHeader(Mockito.eq(
KerberosAuthenticator.WWW_AUTHENTICATE),
Mockito.eq("mock")
);
Mockito.reset(response);
AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
Mockito.when(token.getUserName()).thenReturn("user");
Assert.assertFalse(handler.managementOperation(token, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.contains("requires the parameter [token]"));
Mockito.reset(response);
StringWriter writer = new StringWriter();
PrintWriter pwriter = new PrintWriter(writer);
Mockito.when(response.getWriter()).thenReturn(pwriter);
Token<DelegationTokenIdentifier> dToken =
handler.getTokenManager().createToken(
UserGroupInformation.getCurrentUser(), "user");
Mockito.when(request.getQueryString()).
thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString() +
"&" + DelegationTokenAuthenticator.TOKEN_PARAM + "=" +
dToken.encodeToUrlString());
Assert.assertFalse(handler.managementOperation(token, request, response));
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
pwriter.close();
Assert.assertTrue(writer.toString().contains("long"));
handler.getTokenManager().verifyToken(dToken);
}
@Test
public void testAuthenticate() throws Exception {
testValidDelegationToken();
testInvalidDelegationToken();
}
private void testValidDelegationToken() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Token<DelegationTokenIdentifier> dToken =
handler.getTokenManager().createToken(
UserGroupInformation.getCurrentUser(), "user");
Mockito.when(request.getQueryString()).thenReturn(
DelegationTokenAuthenticator.DELEGATION_PARAM + "=" +
dToken.encodeToUrlString());
AuthenticationToken token = handler.authenticate(request, response);
Assert.assertEquals(UserGroupInformation.getCurrentUser().
getShortUserName(), token.getUserName());
Assert.assertEquals(0, token.getExpires());
Assert.assertEquals(handler.getType(),
token.getType());
Assert.assertTrue(token.isExpired());
}
private void testInvalidDelegationToken() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).thenReturn(
DelegationTokenAuthenticator.DELEGATION_PARAM + "=invalid");
try {
handler.authenticate(request, response);
Assert.fail();
} catch (AuthenticationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail();
}
}
}

View File

@ -0,0 +1,59 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.StringUtils;
import org.junit.Assert;
import org.junit.Test;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.Arrays;
public class TestDelegationTokenManager {
private static final long DAY_IN_SECS = 86400;
@Test
public void testDTManager() throws Exception {
DelegationTokenManager tm = new DelegationTokenManager(new Text("foo"),
DAY_IN_SECS, DAY_IN_SECS, DAY_IN_SECS, DAY_IN_SECS);
tm.init();
Token<DelegationTokenIdentifier> token =
tm.createToken(UserGroupInformation.getCurrentUser(), "foo");
Assert.assertNotNull(token);
tm.verifyToken(token);
Assert.assertTrue(tm.renewToken(token, "foo") > System.currentTimeMillis());
tm.cancelToken(token, "foo");
try {
tm.verifyToken(token);
Assert.fail();
} catch (IOException ex) {
//NOP
} catch (Exception ex) {
Assert.fail();
}
tm.destroy();
}
}

View File

@ -0,0 +1,869 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
import org.apache.hadoop.security.authentication.util.KerberosUtil;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.codehaus.jackson.map.ObjectMapper;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.FilterHolder;
import org.mortbay.jetty.servlet.ServletHolder;
import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import javax.security.auth.login.LoginContext;
import javax.servlet.Filter;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.File;
import java.io.IOException;
import java.io.Writer;
import java.net.HttpURLConnection;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.URL;
import java.security.Principal;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Callable;
public class TestWebDelegationToken {
private static final String OK_USER = "ok-user";
private static final String FAIL_USER = "fail-user";
private static final String FOO_USER = "foo";
private Server jetty;
public static class DummyAuthenticationHandler
implements AuthenticationHandler {
@Override
public String getType() {
return "dummy";
}
@Override
public void init(Properties config) throws ServletException {
}
@Override
public void destroy() {
}
@Override
public boolean managementOperation(AuthenticationToken token,
HttpServletRequest request, HttpServletResponse response)
throws IOException, AuthenticationException {
return false;
}
@Override
public AuthenticationToken authenticate(HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token = null;
if (request.getParameter("authenticated") != null) {
token = new AuthenticationToken(request.getParameter("authenticated"),
"U", "test");
} else {
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, "dummy");
}
return token;
}
}
public static class DummyDelegationTokenAuthenticationHandler extends
DelegationTokenAuthenticationHandler {
public DummyDelegationTokenAuthenticationHandler() {
super(new DummyAuthenticationHandler());
}
@Override
public void init(Properties config) throws ServletException {
Properties conf = new Properties(config);
conf.setProperty(TOKEN_KIND, "token-kind");
initTokenManager(conf);
}
}
public static class AFilter extends DelegationTokenAuthenticationFilter {
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) {
Properties conf = new Properties();
conf.setProperty(AUTH_TYPE,
DummyDelegationTokenAuthenticationHandler.class.getName());
return conf;
}
}
public static class PingServlet extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
resp.setStatus(HttpServletResponse.SC_OK);
resp.getWriter().write("ping");
}
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
Writer writer = resp.getWriter();
writer.write("ping: ");
IOUtils.copy(req.getReader(), writer);
resp.setStatus(HttpServletResponse.SC_OK);
}
}
protected Server createJettyServer() {
try {
InetAddress localhost = InetAddress.getLocalHost();
ServerSocket ss = new ServerSocket(0, 50, localhost);
int port = ss.getLocalPort();
ss.close();
jetty = new Server(0);
jetty.getConnectors()[0].setHost("localhost");
jetty.getConnectors()[0].setPort(port);
return jetty;
} catch (Exception ex) {
throw new RuntimeException("Could not setup Jetty: " + ex.getMessage(),
ex);
}
}
protected String getJettyURL() {
Connector c = jetty.getConnectors()[0];
return "http://" + c.getHost() + ":" + c.getPort();
}
@Before
public void setUp() throws Exception {
// resetting hadoop security to simple
org.apache.hadoop.conf.Configuration conf =
new org.apache.hadoop.conf.Configuration();
UserGroupInformation.setConfiguration(conf);
jetty = createJettyServer();
}
@After
public void cleanUp() throws Exception {
jetty.stop();
// resetting hadoop security to simple
org.apache.hadoop.conf.Configuration conf =
new org.apache.hadoop.conf.Configuration();
UserGroupInformation.setConfiguration(conf);
}
protected Server getJetty() {
return jetty;
}
@Test
public void testRawHttpCalls() throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(AFilter.class), "/*", 0);
context.addServlet(new ServletHolder(PingServlet.class), "/bar");
try {
jetty.start();
URL nonAuthURL = new URL(getJettyURL() + "/foo/bar");
URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
// unauthenticated access to URL
HttpURLConnection conn = (HttpURLConnection) nonAuthURL.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
conn.getResponseCode());
// authenticated access to URL
conn = (HttpURLConnection) authURL.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
// unauthenticated access to get delegation token
URL url = new URL(nonAuthURL.toExternalForm() + "?op=GETDELEGATIONTOKEN");
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
conn.getResponseCode());
// authenticated access to get delegation token
url = new URL(authURL.toExternalForm() +
"&op=GETDELEGATIONTOKEN&renewer=foo");
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
ObjectMapper mapper = new ObjectMapper();
Map map = mapper.readValue(conn.getInputStream(), Map.class);
String dt = (String) ((Map) map.get("Token")).get("urlString");
Assert.assertNotNull(dt);
// delegation token access to URL
url = new URL(nonAuthURL.toExternalForm() + "?delegation=" + dt);
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
// delegation token and authenticated access to URL
url = new URL(authURL.toExternalForm() + "&delegation=" + dt);
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
// renew delegation token, unauthenticated access to URL
url = new URL(nonAuthURL.toExternalForm() +
"?op=RENEWDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
conn.getResponseCode());
// renew delegation token, authenticated access to URL
url = new URL(authURL.toExternalForm() +
"&op=RENEWDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
// renew delegation token, authenticated access to URL, not renewer
url = new URL(getJettyURL() +
"/foo/bar?authenticated=bar&op=RENEWDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
conn.getResponseCode());
// cancel delegation token, unauthenticated access to URL
url = new URL(nonAuthURL.toExternalForm() +
"?op=CANCELDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
// cancel canceled delegation token, unauthenticated access to URL
url = new URL(nonAuthURL.toExternalForm() +
"?op=CANCELDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_NOT_FOUND,
conn.getResponseCode());
// get new delegation token
url = new URL(authURL.toExternalForm() +
"&op=GETDELEGATIONTOKEN&renewer=foo");
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
mapper = new ObjectMapper();
map = mapper.readValue(conn.getInputStream(), Map.class);
dt = (String) ((Map) map.get("Token")).get("urlString");
Assert.assertNotNull(dt);
// cancel delegation token, authenticated access to URL
url = new URL(authURL.toExternalForm() +
"&op=CANCELDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
} finally {
jetty.stop();
}
}
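For reference, the REST exchange this test drives condenses to a tiny standalone client. The sketch below is illustrative only: the base URL is hypothetical, while the op/token query parameters and the Token/urlString JSON shape are taken from the test above (the Jackson 1.x ObjectMapper mirrors the one the test already uses).

import java.net.HttpURLConnection;
import java.net.URL;
import java.util.Map;
import org.codehaus.jackson.map.ObjectMapper;

public class DelegationTokenRestSketch {
  public static void main(String[] args) throws Exception {
    String baseUrl = "http://localhost:8080/foo/bar?authenticated=foo"; // hypothetical
    // 1. obtain a token (GET is the default request method)
    HttpURLConnection conn = (HttpURLConnection)
        new URL(baseUrl + "&op=GETDELEGATIONTOKEN&renewer=foo").openConnection();
    Map map = new ObjectMapper().readValue(conn.getInputStream(), Map.class);
    String dt = (String) ((Map) map.get("Token")).get("urlString");
    // 2. renew it (PUT)
    conn = (HttpURLConnection)
        new URL(baseUrl + "&op=RENEWDELEGATIONTOKEN&token=" + dt).openConnection();
    conn.setRequestMethod("PUT");
    System.out.println("renew status: " + conn.getResponseCode());
    // 3. cancel it (PUT)
    conn = (HttpURLConnection)
        new URL(baseUrl + "&op=CANCELDELEGATIONTOKEN&token=" + dt).openConnection();
    conn.setRequestMethod("PUT");
    System.out.println("cancel status: " + conn.getResponseCode());
  }
}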
@Test
public void testDelegationTokenAuthenticatorCalls() throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(AFilter.class), "/*", 0);
context.addServlet(new ServletHolder(PingServlet.class), "/bar");
try {
jetty.start();
URL nonAuthURL = new URL(getJettyURL() + "/foo/bar");
URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
URL authURL2 = new URL(getJettyURL() + "/foo/bar?authenticated=bar");
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
try {
aUrl.getDelegationToken(nonAuthURL, token, FOO_USER);
Assert.fail();
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage().contains("401"));
}
aUrl.getDelegationToken(authURL, token, FOO_USER);
Assert.assertNotNull(token.getDelegationToken());
Assert.assertEquals(new Text("token-kind"),
token.getDelegationToken().getKind());
aUrl.renewDelegationToken(authURL, token);
try {
aUrl.renewDelegationToken(nonAuthURL, token);
Assert.fail();
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage().contains("401"));
}
aUrl.getDelegationToken(authURL, token, FOO_USER);
try {
aUrl.renewDelegationToken(authURL2, token);
Assert.fail();
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage().contains("403"));
}
aUrl.getDelegationToken(authURL, token, FOO_USER);
aUrl.cancelDelegationToken(authURL, token);
aUrl.getDelegationToken(authURL, token, FOO_USER);
aUrl.cancelDelegationToken(nonAuthURL, token);
aUrl.getDelegationToken(authURL, token, FOO_USER);
try {
aUrl.renewDelegationToken(nonAuthURL, token);
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage().contains("401"));
}
} finally {
jetty.stop();
}
}
private static class DummyDelegationTokenSecretManager
extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
public DummyDelegationTokenSecretManager() {
super(10000, 10000, 10000, 10000);
}
@Override
public DelegationTokenIdentifier createIdentifier() {
return new DelegationTokenIdentifier(new Text("fooKind"));
}
}
@Test
public void testExternalDelegationTokenSecretManager() throws Exception {
DummyDelegationTokenSecretManager secretMgr
= new DummyDelegationTokenSecretManager();
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(AFilter.class), "/*", 0);
context.addServlet(new ServletHolder(PingServlet.class), "/bar");
try {
secretMgr.startThreads();
context.setAttribute(DelegationTokenAuthenticationFilter.
DELEGATION_TOKEN_SECRET_MANAGER_ATTR, secretMgr);
jetty.start();
URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
aUrl.getDelegationToken(authURL, token, FOO_USER);
Assert.assertNotNull(token.getDelegationToken());
Assert.assertEquals(new Text("fooKind"),
token.getDelegationToken().getKind());
} finally {
jetty.stop();
secretMgr.stopThreads();
}
}
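The only wiring the external secret manager needs is a servlet-context attribute, as the test above shows; a minimal sketch under that assumption (the wireSecretManager method name is hypothetical):

// Hedged wiring sketch: the filter looks the secret manager up as a
// servlet-context attribute, so any container can inject one at startup.
void wireSecretManager(javax.servlet.ServletContext servletContext) {
  DummyDelegationTokenSecretManager secretMgr =
      new DummyDelegationTokenSecretManager();
  secretMgr.startThreads();
  servletContext.setAttribute(
      DelegationTokenAuthenticationFilter.DELEGATION_TOKEN_SECRET_MANAGER_ATTR,
      secretMgr);
  // remember to call secretMgr.stopThreads() on shutdown
}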
public static class NoDTFilter extends AuthenticationFilter {
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) {
Properties conf = new Properties();
conf.setProperty(AUTH_TYPE, PseudoAuthenticationHandler.TYPE);
return conf;
}
}
public static class NoDTHandlerDTAFilter
extends DelegationTokenAuthenticationFilter {
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) {
Properties conf = new Properties();
conf.setProperty(AUTH_TYPE, PseudoAuthenticationHandler.TYPE);
return conf;
}
}
public static class UserServlet extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
resp.setStatus(HttpServletResponse.SC_OK);
resp.getWriter().write(req.getUserPrincipal().getName());
}
}
@Test
public void testDelegationTokenAuthenticationURLWithNoDTFilter()
throws Exception {
testDelegationTokenAuthenticatedURLWithNoDT(NoDTFilter.class);
}
@Test
public void testDelegationTokenAuthenticationURLWithNoDTHandler()
throws Exception {
testDelegationTokenAuthenticatedURLWithNoDT(NoDTHandlerDTAFilter.class);
}
// we are also implicitly testing the KerberosDelegationTokenAuthenticator
// fallback here
private void testDelegationTokenAuthenticatedURLWithNoDT(
Class<? extends Filter> filterClass) throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(filterClass), "/*", 0);
context.addServlet(new ServletHolder(UserServlet.class), "/bar");
try {
jetty.start();
final URL url = new URL(getJettyURL() + "/foo/bar");
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER);
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
HttpURLConnection conn = aUrl.openConnection(url, token);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
List<String> ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals(FOO_USER, ret.get(0));
try {
aUrl.getDelegationToken(url, token, FOO_USER);
Assert.fail();
} catch (AuthenticationException ex) {
Assert.assertTrue(ex.getMessage().contains(
"delegation token operation"));
}
return null;
}
});
} finally {
jetty.stop();
}
}
public static class PseudoDTAFilter
extends DelegationTokenAuthenticationFilter {
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) {
Properties conf = new Properties();
conf.setProperty(AUTH_TYPE,
PseudoDelegationTokenAuthenticationHandler.class.getName());
conf.setProperty(DelegationTokenAuthenticationHandler.TOKEN_KIND,
"token-kind");
return conf;
}
@Override
protected org.apache.hadoop.conf.Configuration getProxyuserConfiguration(
FilterConfig filterConfig) throws ServletException {
org.apache.hadoop.conf.Configuration conf =
new org.apache.hadoop.conf.Configuration(false);
conf.set("proxyuser.foo.users", OK_USER);
conf.set("proxyuser.foo.hosts", "localhost");
return conf;
}
}
@Test
public void testFallbackToPseudoDelegationTokenAuthenticator()
throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(PseudoDTAFilter.class), "/*", 0);
context.addServlet(new ServletHolder(UserServlet.class), "/bar");
try {
jetty.start();
final URL url = new URL(getJettyURL() + "/foo/bar");
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER);
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
HttpURLConnection conn = aUrl.openConnection(url, token);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
List<String> ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals(FOO_USER, ret.get(0));
aUrl.getDelegationToken(url, token, FOO_USER);
Assert.assertNotNull(token.getDelegationToken());
Assert.assertEquals(new Text("token-kind"),
token.getDelegationToken().getKind());
return null;
}
});
} finally {
jetty.stop();
}
}
public static class KDTAFilter extends DelegationTokenAuthenticationFilter {
static String keytabFile;
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) {
Properties conf = new Properties();
conf.setProperty(AUTH_TYPE,
KerberosDelegationTokenAuthenticationHandler.class.getName());
conf.setProperty(KerberosAuthenticationHandler.KEYTAB, keytabFile);
conf.setProperty(KerberosAuthenticationHandler.PRINCIPAL,
"HTTP/localhost");
conf.setProperty(KerberosDelegationTokenAuthenticationHandler.TOKEN_KIND,
"token-kind");
return conf;
}
}
private static class KerberosConfiguration extends Configuration {
private String principal;
private String keytab;
public KerberosConfiguration(String principal, String keytab) {
this.principal = principal;
this.keytab = keytab;
}
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
Map<String, String> options = new HashMap<String, String>();
options.put("principal", principal);
options.put("keyTab", keytab);
options.put("useKeyTab", "true");
options.put("storeKey", "true");
options.put("doNotPrompt", "true");
options.put("useTicketCache", "true");
options.put("renewTGT", "true");
options.put("refreshKrb5Config", "true");
options.put("isInitiator", "true");
String ticketCache = System.getenv("KRB5CCNAME");
if (ticketCache != null) {
options.put("ticketCache", ticketCache);
}
options.put("debug", "true");
return new AppConfigurationEntry[]{
new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
options),};
}
}
public static <T> T doAsKerberosUser(String principal, String keytab,
final Callable<T> callable) throws Exception {
LoginContext loginContext = null;
try {
Set<Principal> principals = new HashSet<Principal>();
principals.add(new KerberosPrincipal(principal));
Subject subject = new Subject(false, principals, new HashSet<Object>(),
new HashSet<Object>());
loginContext = new LoginContext("", subject, null,
new KerberosConfiguration(principal, keytab));
loginContext.login();
subject = loginContext.getSubject();
return Subject.doAs(subject, new PrivilegedExceptionAction<T>() {
@Override
public T run() throws Exception {
return callable.call();
}
});
} catch (PrivilegedActionException ex) {
throw ex.getException();
} finally {
if (loginContext != null) {
loginContext.logout();
}
}
}
@Test
public void testKerberosDelegationTokenAuthenticator() throws Exception {
// setting hadoop security to kerberos
org.apache.hadoop.conf.Configuration conf =
new org.apache.hadoop.conf.Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
File testDir = new File("target/" + UUID.randomUUID().toString());
Assert.assertTrue(testDir.mkdirs());
MiniKdc kdc = new MiniKdc(MiniKdc.createConf(), testDir);
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(KDTAFilter.class), "/*", 0);
context.addServlet(new ServletHolder(UserServlet.class), "/bar");
try {
kdc.start();
File keytabFile = new File(testDir, "test.keytab");
kdc.createPrincipal(keytabFile, "client", "HTTP/localhost");
KDTAFilter.keytabFile = keytabFile.getAbsolutePath();
jetty.start();
final DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
final DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
final URL url = new URL(getJettyURL() + "/foo/bar");
try {
aUrl.getDelegationToken(url, token, FOO_USER);
Assert.fail();
} catch (AuthenticationException ex) {
Assert.assertTrue(ex.getMessage().contains("GSSException"));
}
doAsKerberosUser("client", keytabFile.getAbsolutePath(),
new Callable<Void>() {
@Override
public Void call() throws Exception {
aUrl.getDelegationToken(url, token, "client");
Assert.assertNotNull(token.getDelegationToken());
aUrl.renewDelegationToken(url, token);
Assert.assertNotNull(token.getDelegationToken());
aUrl.getDelegationToken(url, token, FOO_USER);
Assert.assertNotNull(token.getDelegationToken());
try {
aUrl.renewDelegationToken(url, token);
Assert.fail();
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage().contains("403"));
}
aUrl.getDelegationToken(url, token, FOO_USER);
aUrl.cancelDelegationToken(url, token);
Assert.assertNull(token.getDelegationToken());
return null;
}
});
} finally {
jetty.stop();
kdc.stop();
}
}
@Test
public void testProxyUser() throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(PseudoDTAFilter.class), "/*", 0);
context.addServlet(new ServletHolder(UserServlet.class), "/bar");
try {
jetty.start();
final URL url = new URL(getJettyURL() + "/foo/bar");
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER);
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
// proxyuser using authentication handler authentication
HttpURLConnection conn = aUrl.openConnection(url, token, OK_USER);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
List<String> ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals(OK_USER, ret.get(0));
// unauthorized proxy user using authentication handler authentication
conn = aUrl.openConnection(url, token, FAIL_USER);
Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
conn.getResponseCode());
// proxy using delegation token authentication
aUrl.getDelegationToken(url, token, FOO_USER);
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
ugi.addToken(token.getDelegationToken());
token = new DelegationTokenAuthenticatedURL.Token();
// requests using delegation token as auth do not honor doAs
conn = aUrl.openConnection(url, token, OK_USER);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals(FOO_USER, ret.get(0));
return null;
}
});
} finally {
jetty.stop();
}
}
public static class UGIServlet extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
UserGroupInformation ugi = HttpUserGroupInformation.get();
if (ugi != null) {
String ret = "remoteuser=" + req.getRemoteUser() + ":ugi=" +
ugi.getShortUserName();
if (ugi.getAuthenticationMethod() ==
UserGroupInformation.AuthenticationMethod.PROXY) {
ret = "realugi=" + ugi.getRealUser().getShortUserName() + ":" + ret;
}
resp.setStatus(HttpServletResponse.SC_OK);
resp.getWriter().write(ret);
} else {
resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
}
}
}
@Test
public void testHttpUGI() throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(PseudoDTAFilter.class), "/*", 0);
context.addServlet(new ServletHolder(UGIServlet.class), "/bar");
try {
jetty.start();
final URL url = new URL(getJettyURL() + "/foo/bar");
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER);
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
// user foo
HttpURLConnection conn = aUrl.openConnection(url, token);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
List<String> ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals("remoteuser=" + FOO_USER+ ":ugi=" + FOO_USER,
ret.get(0));
// user ok-user via proxyuser foo
conn = aUrl.openConnection(url, token, OK_USER);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals("realugi=" + FOO_USER +":remoteuser=" + OK_USER +
":ugi=" + OK_USER, ret.get(0));
return null;
}
});
} finally {
jetty.stop();
}
}
}

View File

@ -47,7 +47,6 @@ import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.Principal;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
@ -59,18 +58,13 @@ import java.util.Map;
@Path(KMSRESTConstants.SERVICE_VERSION)
@InterfaceAudience.Private
public class KMS {
public static final String CREATE_KEY = "CREATE_KEY";
public static final String DELETE_KEY = "DELETE_KEY";
public static final String ROLL_NEW_VERSION = "ROLL_NEW_VERSION";
public static final String GET_KEYS = "GET_KEYS";
public static final String GET_KEYS_METADATA = "GET_KEYS_METADATA";
public static final String GET_KEY_VERSIONS = "GET_KEY_VERSIONS";
public static final String GET_METADATA = "GET_METADATA";
public static final String GET_KEY_VERSION = "GET_KEY_VERSION";
public static final String GET_CURRENT_KEY = "GET_CURRENT_KEY";
public static final String GENERATE_EEK = "GENERATE_EEK";
public static final String DECRYPT_EEK = "DECRYPT_EEK";
public static enum KMSOp {
CREATE_KEY, DELETE_KEY, ROLL_NEW_VERSION,
GET_KEYS, GET_KEYS_METADATA,
GET_KEY_VERSIONS, GET_METADATA, GET_KEY_VERSION, GET_CURRENT_KEY,
GENERATE_EEK, DECRYPT_EEK
}
private KeyProviderCryptoExtension provider;
private KMSAudit kmsAudit;
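A quick sketch of what the new KMSOp enum buys over the removed String constants: whitelist membership becomes a typed check, and a missing op is an explicit null test. EnumSet here is an illustrative alternative to the Sets.newHashSet the patch actually uses in KMSAudit:

import java.util.EnumSet;
import java.util.Set;

// Sketch only: typed whitelist membership by enum identity; a null op is
// handled explicitly instead of via Strings.isNullOrEmpty on a String op.
static boolean shouldAggregate(KMS.KMSOp op) {
  Set<KMS.KMSOp> whitelist = EnumSet.of(
      KMS.KMSOp.GET_KEY_VERSION, KMS.KMSOp.GET_CURRENT_KEY,
      KMS.KMSOp.DECRYPT_EEK, KMS.KMSOp.GENERATE_EEK);
  return op != null && whitelist.contains(op);
}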
@ -91,22 +85,22 @@ public class KMS {
private static final String UNAUTHORIZED_MSG_WITH_KEY =
"User:{0} not allowed to do ''{1}'' on ''{2}''";
"User:%s not allowed to do '%s' on '%s'";
private static final String UNAUTHORIZED_MSG_WITHOUT_KEY =
"User:{0} not allowed to do ''{1}''";
"User:%s not allowed to do '%s'";
private void assertAccess(KMSACLs.Type aclType, Principal principal,
String operation) throws AccessControlException {
KMSOp operation) throws AccessControlException {
assertAccess(aclType, principal, operation, null);
}
private void assertAccess(KMSACLs.Type aclType, Principal principal,
String operation, String key) throws AccessControlException {
KMSOp operation, String key) throws AccessControlException {
if (!KMSWebApp.getACLs().hasAccess(aclType, principal.getName())) {
KMSWebApp.getUnauthorizedCallsMeter().mark();
kmsAudit.unauthorized(principal, operation, key);
throw new AuthorizationException(MessageFormat.format(
throw new AuthorizationException(String.format(
(key != null) ? UNAUTHORIZED_MSG_WITH_KEY
: UNAUTHORIZED_MSG_WITHOUT_KEY,
principal.getName(), operation, key));
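The template syntax changes with the formatter: MessageFormat uses indexed {n} placeholders and writes a literal single quote as '', while String.format uses %s and needs no quote escaping. A small sketch of the equivalence (values illustrative):

import java.text.MessageFormat;

public class FormatSketch {
  public static void main(String[] args) {
    // MessageFormat: indexed {n} placeholders; '' yields a literal quote
    String a = MessageFormat.format(
        "User:{0} not allowed to do ''{1}'' on ''{2}''",
        "alice", "DELETE_KEY", "k1");
    // String.format: positional %s placeholders; quotes need no escaping
    String b = String.format(
        "User:%s not allowed to do '%s' on '%s'",
        "alice", "DELETE_KEY", "k1");
    // both print: User:alice not allowed to do 'DELETE_KEY' on 'k1'
    System.out.println(a);
    System.out.println(b);
  }
}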
@ -135,7 +129,7 @@ public class KMS {
Principal user = getPrincipal(securityContext);
String name = (String) jsonKey.get(KMSRESTConstants.NAME_FIELD);
KMSClientProvider.checkNotEmpty(name, KMSRESTConstants.NAME_FIELD);
assertAccess(KMSACLs.Type.CREATE, user, CREATE_KEY, name);
assertAccess(KMSACLs.Type.CREATE, user, KMSOp.CREATE_KEY, name);
String cipher = (String) jsonKey.get(KMSRESTConstants.CIPHER_FIELD);
String material = (String) jsonKey.get(KMSRESTConstants.MATERIAL_FIELD);
int length = (jsonKey.containsKey(KMSRESTConstants.LENGTH_FIELD))
@ -146,7 +140,7 @@ public class KMS {
jsonKey.get(KMSRESTConstants.ATTRIBUTES_FIELD);
if (material != null) {
assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
CREATE_KEY + " with user provided material", name);
KMSOp.CREATE_KEY, name);
}
KeyProvider.Options options = new KeyProvider.Options(
KMSWebApp.getConfiguration());
@ -165,7 +159,7 @@ public class KMS {
provider.flush();
kmsAudit.ok(user, CREATE_KEY, name, "UserProvidedMaterial:" +
kmsAudit.ok(user, KMSOp.CREATE_KEY, name, "UserProvidedMaterial:" +
(material != null) + " Description:" + description);
if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) {
@ -186,12 +180,12 @@ public class KMS {
@PathParam("name") String name) throws Exception {
KMSWebApp.getAdminCallsMeter().mark();
Principal user = getPrincipal(securityContext);
assertAccess(KMSACLs.Type.DELETE, user, DELETE_KEY, name);
assertAccess(KMSACLs.Type.DELETE, user, KMSOp.DELETE_KEY, name);
KMSClientProvider.checkNotEmpty(name, "name");
provider.deleteKey(name);
provider.flush();
kmsAudit.ok(user, DELETE_KEY, name, "");
kmsAudit.ok(user, KMSOp.DELETE_KEY, name, "");
return Response.ok().build();
}
@ -205,13 +199,13 @@ public class KMS {
throws Exception {
KMSWebApp.getAdminCallsMeter().mark();
Principal user = getPrincipal(securityContext);
assertAccess(KMSACLs.Type.ROLLOVER, user, ROLL_NEW_VERSION, name);
assertAccess(KMSACLs.Type.ROLLOVER, user, KMSOp.ROLL_NEW_VERSION, name);
KMSClientProvider.checkNotEmpty(name, "name");
String material = (String)
jsonMaterial.get(KMSRESTConstants.MATERIAL_FIELD);
if (material != null) {
assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
ROLL_NEW_VERSION + " with user provided material", name);
KMSOp.ROLL_NEW_VERSION, name);
}
KeyProvider.KeyVersion keyVersion = (material != null)
? provider.rollNewVersion(name, Base64.decodeBase64(material))
@ -219,7 +213,7 @@ public class KMS {
provider.flush();
kmsAudit.ok(user, ROLL_NEW_VERSION, name, "UserProvidedMaterial:" +
kmsAudit.ok(user, KMSOp.ROLL_NEW_VERSION, name, "UserProvidedMaterial:" +
(material != null) + " NewVersion:" + keyVersion.getVersionName());
if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) {
@ -233,15 +227,15 @@ public class KMS {
@Path(KMSRESTConstants.KEYS_METADATA_RESOURCE)
@Produces(MediaType.APPLICATION_JSON)
public Response getKeysMetadata(@Context SecurityContext securityContext,
@QueryParam(KMSRESTConstants.KEY_OP) List<String> keyNamesList)
@QueryParam(KMSRESTConstants.KEY) List<String> keyNamesList)
throws Exception {
KMSWebApp.getAdminCallsMeter().mark();
Principal user = getPrincipal(securityContext);
String[] keyNames = keyNamesList.toArray(new String[keyNamesList.size()]);
assertAccess(KMSACLs.Type.GET_METADATA, user, GET_KEYS_METADATA);
assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_KEYS_METADATA);
KeyProvider.Metadata[] keysMeta = provider.getKeysMetadata(keyNames);
Object json = KMSServerJSONUtils.toJSON(keyNames, keysMeta);
kmsAudit.ok(user, GET_KEYS_METADATA, "");
kmsAudit.ok(user, KMSOp.GET_KEYS_METADATA, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
}
@ -252,9 +246,9 @@ public class KMS {
throws Exception {
KMSWebApp.getAdminCallsMeter().mark();
Principal user = getPrincipal(securityContext);
assertAccess(KMSACLs.Type.GET_KEYS, user, GET_KEYS);
assertAccess(KMSACLs.Type.GET_KEYS, user, KMSOp.GET_KEYS);
Object json = provider.getKeys();
kmsAudit.ok(user, GET_KEYS, "");
kmsAudit.ok(user, KMSOp.GET_KEYS, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
}
@ -276,9 +270,9 @@ public class KMS {
Principal user = getPrincipal(securityContext);
KMSClientProvider.checkNotEmpty(name, "name");
KMSWebApp.getAdminCallsMeter().mark();
assertAccess(KMSACLs.Type.GET_METADATA, user, GET_METADATA, name);
assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_METADATA, name);
Object json = KMSServerJSONUtils.toJSON(name, provider.getMetadata(name));
kmsAudit.ok(user, GET_METADATA, name, "");
kmsAudit.ok(user, KMSOp.GET_METADATA, name, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
}
@ -292,9 +286,9 @@ public class KMS {
Principal user = getPrincipal(securityContext);
KMSClientProvider.checkNotEmpty(name, "name");
KMSWebApp.getKeyCallsMeter().mark();
assertAccess(KMSACLs.Type.GET, user, GET_CURRENT_KEY, name);
assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_CURRENT_KEY, name);
Object json = KMSServerJSONUtils.toJSON(provider.getCurrentKey(name));
kmsAudit.ok(user, GET_CURRENT_KEY, name, "");
kmsAudit.ok(user, KMSOp.GET_CURRENT_KEY, name, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
}
@ -308,9 +302,9 @@ public class KMS {
KMSClientProvider.checkNotEmpty(versionName, "versionName");
KMSWebApp.getKeyCallsMeter().mark();
KeyVersion keyVersion = provider.getKeyVersion(versionName);
assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSION);
assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSION);
if (keyVersion != null) {
kmsAudit.ok(user, GET_KEY_VERSION, keyVersion.getName(), "");
kmsAudit.ok(user, KMSOp.GET_KEY_VERSION, keyVersion.getName(), "");
}
Object json = KMSServerJSONUtils.toJSON(keyVersion);
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
@ -334,7 +328,7 @@ public class KMS {
Object retJSON;
if (edekOp.equals(KMSRESTConstants.EEK_GENERATE)) {
assertAccess(KMSACLs.Type.GENERATE_EEK, user, GENERATE_EEK, name);
assertAccess(KMSACLs.Type.GENERATE_EEK, user, KMSOp.GENERATE_EEK, name);
List<EncryptedKeyVersion> retEdeks =
new LinkedList<EncryptedKeyVersion>();
@ -345,7 +339,7 @@ public class KMS {
} catch (Exception e) {
throw new IOException(e);
}
kmsAudit.ok(user, GENERATE_EEK, name, "");
kmsAudit.ok(user, KMSOp.GENERATE_EEK, name, "");
retJSON = new ArrayList();
for (EncryptedKeyVersion edek : retEdeks) {
((ArrayList)retJSON).add(KMSServerJSONUtils.toJSON(edek));
@ -380,7 +374,7 @@ public class KMS {
(String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD);
Object retJSON;
if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) {
assertAccess(KMSACLs.Type.DECRYPT_EEK, user, DECRYPT_EEK, keyName);
assertAccess(KMSACLs.Type.DECRYPT_EEK, user, KMSOp.DECRYPT_EEK, keyName);
KMSClientProvider.checkNotNull(ivStr, KMSRESTConstants.IV_FIELD);
byte[] iv = Base64.decodeBase64(ivStr);
KMSClientProvider.checkNotNull(encMaterialStr,
@ -391,7 +385,7 @@ public class KMS {
new KMSClientProvider.KMSEncryptedKeyVersion(keyName, versionName,
iv, KeyProviderCryptoExtension.EEK, encMaterial));
retJSON = KMSServerJSONUtils.toJSON(retKeyVersion);
kmsAudit.ok(user, DECRYPT_EEK, keyName, "");
kmsAudit.ok(user, KMSOp.DECRYPT_EEK, keyName, "");
} else {
throw new IllegalArgumentException("Wrong " + KMSRESTConstants.EEK_OP +
" value, it must be " + KMSRESTConstants.EEK_GENERATE + " or " +
@ -412,9 +406,9 @@ public class KMS {
Principal user = getPrincipal(securityContext);
KMSClientProvider.checkNotEmpty(name, "name");
KMSWebApp.getKeyCallsMeter().mark();
assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSIONS, name);
assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSIONS, name);
Object json = KMSServerJSONUtils.toJSON(provider.getKeyVersions(name));
kmsAudit.ok(user, GET_KEY_VERSIONS, name, "");
kmsAudit.ok(user, KMSOp.GET_KEY_VERSIONS, name, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
}

View File

@ -50,11 +50,11 @@ public class KMSAudit {
private final AtomicLong accessCount = new AtomicLong(-1);
private final String keyName;
private final String user;
private final String op;
private final KMS.KMSOp op;
private final String extraMsg;
private final long startTime = System.currentTimeMillis();
private AuditEvent(String keyName, String user, String op, String msg) {
private AuditEvent(String keyName, String user, KMS.KMSOp op, String msg) {
this.keyName = keyName;
this.user = user;
this.op = op;
@ -77,7 +77,7 @@ public class KMSAudit {
return user;
}
public String getOp() {
public KMS.KMSOp getOp() {
return op;
}
@ -90,8 +90,9 @@ public class KMSAudit {
OK, UNAUTHORIZED, UNAUTHENTICATED, ERROR;
}
private static Set<String> AGGREGATE_OPS_WHITELIST = Sets.newHashSet(
KMS.GET_KEY_VERSION, KMS.GET_CURRENT_KEY, KMS.DECRYPT_EEK, KMS.GENERATE_EEK
private static Set<KMS.KMSOp> AGGREGATE_OPS_WHITELIST = Sets.newHashSet(
KMS.KMSOp.GET_KEY_VERSION, KMS.KMSOp.GET_CURRENT_KEY,
KMS.KMSOp.DECRYPT_EEK, KMS.KMSOp.GENERATE_EEK
);
private Cache<String, AuditEvent> cache;
@ -137,10 +138,10 @@ public class KMSAudit {
event.getExtraMsg());
}
private void op(OpStatus opStatus, final String op, final String user,
private void op(OpStatus opStatus, final KMS.KMSOp op, final String user,
final String key, final String extraMsg) {
if (!Strings.isNullOrEmpty(user) && !Strings.isNullOrEmpty(key)
&& !Strings.isNullOrEmpty(op)
&& (op != null)
&& AGGREGATE_OPS_WHITELIST.contains(op)) {
String cacheKey = createCacheKey(user, key, op);
if (opStatus == OpStatus.UNAUTHORIZED) {
@ -167,7 +168,7 @@ public class KMSAudit {
}
} else {
List<String> kvs = new LinkedList<String>();
if (!Strings.isNullOrEmpty(op)) {
if (op != null) {
kvs.add("op=" + op);
}
if (!Strings.isNullOrEmpty(key)) {
@ -185,16 +186,16 @@ public class KMSAudit {
}
}
public void ok(Principal user, String op, String key,
public void ok(Principal user, KMS.KMSOp op, String key,
String extraMsg) {
op(OpStatus.OK, op, user.getName(), key, extraMsg);
}
public void ok(Principal user, String op, String extraMsg) {
public void ok(Principal user, KMS.KMSOp op, String extraMsg) {
op(OpStatus.OK, op, user.getName(), null, extraMsg);
}
public void unauthorized(Principal user, String op, String key) {
public void unauthorized(Principal user, KMS.KMSOp op, String key) {
op(OpStatus.UNAUTHORIZED, op, user.getName(), key, "");
}
@ -211,7 +212,7 @@ public class KMSAudit {
+ " URL:" + url + " ErrorMsg:'" + extraMsg + "'");
}
private static String createCacheKey(String user, String key, String op) {
private static String createCacheKey(String user, String key, KMS.KMSOp op) {
return user + "#" + key + "#" + op;
}
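The aggregation machinery itself is untouched by this patch; for orientation, a hedged sketch of the coalescing idea behind the cache and createCacheKey above (the cache construction and the one-second window are hypothetical):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

public class AuditAggregationSketch {
  public static void main(String[] args) {
    // Entries expire after a window; the real window is configurable.
    Cache<String, AtomicLong> cache = CacheBuilder.newBuilder()
        .expireAfterWrite(1, TimeUnit.SECONDS)
        .build();
    String cacheKey = "luser#k1#DECRYPT_EEK"; // user + "#" + key + "#" + op
    for (int i = 0; i < 3; i++) {
      AtomicLong count = cache.getIfPresent(cacheKey);
      if (count == null) {
        cache.put(cacheKey, new AtomicLong(1)); // first hit: log immediately
      } else {
        count.incrementAndGet(); // later hits aggregate silently
      }
    }
    System.out.println("aggregated=" + cache.getIfPresent(cacheKey));
  }
}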

View File

@ -17,6 +17,7 @@
*/
package org.apache.hadoop.crypto.key.kms.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import java.io.File;
@ -26,6 +27,7 @@ import java.net.URL;
/**
* Utility class to load KMS configuration files.
*/
@InterfaceAudience.Private
public class KMSConfiguration {
public static final String KMS_CONFIG_DIR = "kms.config.dir";

View File

@ -17,12 +17,15 @@
*/
package org.apache.hadoop.crypto.key.kms.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.jmx.JMXJsonServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
@InterfaceAudience.Private
public class KMSJMXServlet extends JMXJsonServlet {
@Override

View File

@ -23,6 +23,7 @@ import java.io.OutputStream;
import java.io.PrintStream;
import java.security.Principal;
import org.apache.hadoop.crypto.key.kms.server.KMS.KMSOp;
import org.apache.log4j.LogManager;
import org.apache.log4j.PropertyConfigurator;
import org.junit.After;
@ -82,16 +83,16 @@ public class TestKMSAudit {
public void testAggregation() throws Exception {
Principal luser = Mockito.mock(Principal.class);
Mockito.when(luser.getName()).thenReturn("luser");
kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMS.DELETE_KEY, "k1", "testmsg");
kmsAudit.ok(luser, KMS.ROLL_NEW_VERSION, "k1", "testmsg");
kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMSOp.DELETE_KEY, "k1", "testmsg");
kmsAudit.ok(luser, KMSOp.ROLL_NEW_VERSION, "k1", "testmsg");
kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
Thread.sleep(1500);
kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
Thread.sleep(1500);
String out = getAndResetLogOutput();
System.out.println(out);
@ -110,15 +111,15 @@ public class TestKMSAudit {
public void testAggregationUnauth() throws Exception {
Principal luser = Mockito.mock(Principal.class);
Mockito.when(luser.getName()).thenReturn("luser");
kmsAudit.unauthorized(luser, KMS.GENERATE_EEK, "k2");
kmsAudit.unauthorized(luser, KMSOp.GENERATE_EEK, "k2");
Thread.sleep(1000);
kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
kmsAudit.unauthorized(luser, KMS.GENERATE_EEK, "k3");
kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
kmsAudit.unauthorized(luser, KMSOp.GENERATE_EEK, "k3");
kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
Thread.sleep(2000);
String out = getAndResetLogOutput();
System.out.println(out);

View File

@ -39,12 +39,14 @@ import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.lib.wsrs.EnumSetParam;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.Authenticator;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
@ -67,7 +69,6 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
@ -75,7 +76,6 @@ import java.security.PrivilegedExceptionAction;
import java.text.MessageFormat;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Callable;
/**
* HttpFSServer implementation of the FileSystemAccess FileSystem.
@ -217,34 +217,15 @@ public class HttpFSFileSystem extends FileSystem
}
private AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
private DelegationTokenAuthenticatedURL authURL;
private DelegationTokenAuthenticatedURL.Token authToken =
new DelegationTokenAuthenticatedURL.Token();
private URI uri;
private InetSocketAddress httpFSAddr;
private Path workingDir;
private UserGroupInformation realUser;
private String doAs;
private Token<?> delegationToken;
// This method enables handling UGI doAs with SPNEGO; we have to
// fall back to the real user who logged in with Kerberos credentials
private <T> T doAsRealUserIfNecessary(final Callable<T> callable)
throws IOException {
try {
if (realUser.getShortUserName().equals(doAs)) {
return callable.call();
} else {
return realUser.doAs(new PrivilegedExceptionAction<T>() {
@Override
public T run() throws Exception {
return callable.call();
}
});
}
} catch (Exception ex) {
throw new IOException(ex.toString(), ex);
}
}
/**
* Convenience method that creates a <code>HttpURLConnection</code> for the
@ -291,20 +272,26 @@ public class HttpFSFileSystem extends FileSystem
private HttpURLConnection getConnection(final String method,
Map<String, String> params, Map<String, List<String>> multiValuedParams,
Path path, boolean makeQualified) throws IOException {
if (!realUser.getShortUserName().equals(doAs)) {
params.put(DO_AS_PARAM, doAs);
}
HttpFSKerberosAuthenticator.injectDelegationToken(params, delegationToken);
if (makeQualified) {
path = makeQualified(path);
}
final URL url = HttpFSUtils.createURL(path, params, multiValuedParams);
return doAsRealUserIfNecessary(new Callable<HttpURLConnection>() {
@Override
public HttpURLConnection call() throws Exception {
return getConnection(url, method);
try {
return UserGroupInformation.getCurrentUser().doAs(
new PrivilegedExceptionAction<HttpURLConnection>() {
@Override
public HttpURLConnection run() throws Exception {
return getConnection(url, method);
}
}
);
} catch (Exception ex) {
if (ex instanceof IOException) {
throw (IOException) ex;
} else {
throw new IOException(ex);
}
});
}
}
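The doAs-and-unwrap idiom above is repeated in getDelegationToken and renewDelegationToken below; purely as a sketch (not part of the patch), it could be factored into a helper like:

// Hypothetical helper: run an action as the current UGI and rethrow
// anything that is not already an IOException wrapped as one.
private static <T> T doAsCurrentUser(PrivilegedExceptionAction<T> action)
    throws IOException {
  try {
    return UserGroupInformation.getCurrentUser().doAs(action);
  } catch (IOException ex) {
    throw ex;                  // preserve IOExceptions as-is
  } catch (Exception ex) {
    throw new IOException(ex); // wrap anything else
  }
}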
/**
@ -321,12 +308,8 @@ public class HttpFSFileSystem extends FileSystem
* @throws IOException thrown if an IO error occurs.
*/
private HttpURLConnection getConnection(URL url, String method) throws IOException {
Class<? extends Authenticator> klass =
getConf().getClass("httpfs.authenticator.class",
HttpFSKerberosAuthenticator.class, Authenticator.class);
Authenticator authenticator = ReflectionUtils.newInstance(klass, getConf());
try {
HttpURLConnection conn = new AuthenticatedURL(authenticator).openConnection(url, authToken);
HttpURLConnection conn = authURL.openConnection(url, authToken);
conn.setRequestMethod(method);
if (method.equals(HTTP_POST) || method.equals(HTTP_PUT)) {
conn.setDoOutput(true);
@ -357,10 +340,17 @@ public class HttpFSFileSystem extends FileSystem
super.initialize(name, conf);
try {
uri = new URI(name.getScheme() + "://" + name.getAuthority());
httpFSAddr = NetUtils.createSocketAddr(getCanonicalUri().toString());
} catch (URISyntaxException ex) {
throw new IOException(ex);
}
Class<? extends DelegationTokenAuthenticator> klass =
getConf().getClass("httpfs.authenticator.class",
KerberosDelegationTokenAuthenticator.class,
DelegationTokenAuthenticator.class);
DelegationTokenAuthenticator authenticator =
ReflectionUtils.newInstance(klass, getConf());
authURL = new DelegationTokenAuthenticatedURL(authenticator);
}
@Override
@ -1059,38 +1049,57 @@ public class HttpFSFileSystem extends FileSystem
@Override
public Token<?> getDelegationToken(final String renewer)
throws IOException {
return doAsRealUserIfNecessary(new Callable<Token<?>>() {
@Override
public Token<?> call() throws Exception {
return HttpFSKerberosAuthenticator.
getDelegationToken(uri, httpFSAddr, authToken, renewer);
try {
return UserGroupInformation.getCurrentUser().doAs(
new PrivilegedExceptionAction<Token<?>>() {
@Override
public Token<?> run() throws Exception {
return authURL.getDelegationToken(uri.toURL(), authToken,
renewer);
}
}
);
} catch (Exception ex) {
if (ex instanceof IOException) {
throw (IOException) ex;
} else {
throw new IOException(ex);
}
});
}
}
public long renewDelegationToken(final Token<?> token) throws IOException {
return doAsRealUserIfNecessary(new Callable<Long>() {
@Override
public Long call() throws Exception {
return HttpFSKerberosAuthenticator.
renewDelegationToken(uri, authToken, token);
try {
return UserGroupInformation.getCurrentUser().doAs(
new PrivilegedExceptionAction<Long>() {
@Override
public Long run() throws Exception {
return authURL.renewDelegationToken(uri.toURL(), authToken);
}
}
);
} catch (Exception ex) {
if (ex instanceof IOException) {
throw (IOException) ex;
} else {
throw new IOException(ex);
}
});
}
}
public void cancelDelegationToken(final Token<?> token) throws IOException {
HttpFSKerberosAuthenticator.
cancelDelegationToken(uri, authToken, token);
authURL.cancelDelegationToken(uri.toURL(), authToken);
}
@Override
public Token<?> getRenewToken() {
return delegationToken;
return null; // TODO: for renewer
}
@Override
@SuppressWarnings("unchecked")
public <T extends TokenIdentifier> void setDelegationToken(Token<T> token) {
delegationToken = token;
// TODO: for renewer
}
@Override

View File

@ -1,188 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.Authenticator;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.json.simple.JSONObject;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
/**
* A <code>KerberosAuthenticator</code> subclass that falls back to
* {@link HttpFSPseudoAuthenticator}.
*/
@InterfaceAudience.Private
public class HttpFSKerberosAuthenticator extends KerberosAuthenticator {
/**
* Returns the fallback authenticator if the server does not use
* Kerberos SPNEGO HTTP authentication.
*
* @return a {@link HttpFSPseudoAuthenticator} instance.
*/
@Override
protected Authenticator getFallBackAuthenticator() {
return new HttpFSPseudoAuthenticator();
}
private static final String HTTP_GET = "GET";
private static final String HTTP_PUT = "PUT";
public static final String DELEGATION_PARAM = "delegation";
public static final String TOKEN_PARAM = "token";
public static final String RENEWER_PARAM = "renewer";
public static final String DELEGATION_TOKEN_JSON = "Token";
public static final String DELEGATION_TOKEN_URL_STRING_JSON = "urlString";
public static final String RENEW_DELEGATION_TOKEN_JSON = "long";
/**
* DelegationToken operations.
*/
@InterfaceAudience.Private
public static enum DelegationTokenOperation {
GETDELEGATIONTOKEN(HTTP_GET, true),
RENEWDELEGATIONTOKEN(HTTP_PUT, true),
CANCELDELEGATIONTOKEN(HTTP_PUT, false);
private String httpMethod;
private boolean requiresKerberosCredentials;
private DelegationTokenOperation(String httpMethod,
boolean requiresKerberosCredentials) {
this.httpMethod = httpMethod;
this.requiresKerberosCredentials = requiresKerberosCredentials;
}
public String getHttpMethod() {
return httpMethod;
}
public boolean requiresKerberosCredentials() {
return requiresKerberosCredentials;
}
}
public static void injectDelegationToken(Map<String, String> params,
Token<?> dtToken)
throws IOException {
if (dtToken != null) {
params.put(DELEGATION_PARAM, dtToken.encodeToUrlString());
}
}
private boolean hasDelegationToken(URL url) {
return url.getQuery().contains(DELEGATION_PARAM + "=");
}
@Override
public void authenticate(URL url, AuthenticatedURL.Token token)
throws IOException, AuthenticationException {
if (!hasDelegationToken(url)) {
super.authenticate(url, token);
}
}
public static final String OP_PARAM = "op";
public static Token<?> getDelegationToken(URI fsURI,
InetSocketAddress httpFSAddr, AuthenticatedURL.Token token,
String renewer) throws IOException {
DelegationTokenOperation op =
DelegationTokenOperation.GETDELEGATIONTOKEN;
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, op.toString());
params.put(RENEWER_PARAM, renewer);
URL url = HttpFSUtils.createURL(new Path(fsURI), params);
AuthenticatedURL aUrl =
new AuthenticatedURL(new HttpFSKerberosAuthenticator());
try {
HttpURLConnection conn = aUrl.openConnection(url, token);
conn.setRequestMethod(op.getHttpMethod());
HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) ((JSONObject)
HttpFSUtils.jsonParse(conn)).get(DELEGATION_TOKEN_JSON);
String tokenStr = (String)
json.get(DELEGATION_TOKEN_URL_STRING_JSON);
Token<AbstractDelegationTokenIdentifier> dToken =
new Token<AbstractDelegationTokenIdentifier>();
dToken.decodeFromUrlString(tokenStr);
SecurityUtil.setTokenService(dToken, httpFSAddr);
return dToken;
} catch (AuthenticationException ex) {
throw new IOException(ex.toString(), ex);
}
}
public static long renewDelegationToken(URI fsURI,
AuthenticatedURL.Token token, Token<?> dToken) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM,
DelegationTokenOperation.RENEWDELEGATIONTOKEN.toString());
params.put(TOKEN_PARAM, dToken.encodeToUrlString());
URL url = HttpFSUtils.createURL(new Path(fsURI), params);
AuthenticatedURL aUrl =
new AuthenticatedURL(new HttpFSKerberosAuthenticator());
try {
HttpURLConnection conn = aUrl.openConnection(url, token);
conn.setRequestMethod(
DelegationTokenOperation.RENEWDELEGATIONTOKEN.getHttpMethod());
HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) ((JSONObject)
HttpFSUtils.jsonParse(conn)).get(DELEGATION_TOKEN_JSON);
return (Long)(json.get(RENEW_DELEGATION_TOKEN_JSON));
} catch (AuthenticationException ex) {
throw new IOException(ex.toString(), ex);
}
}
public static void cancelDelegationToken(URI fsURI,
AuthenticatedURL.Token token, Token<?> dToken) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM,
DelegationTokenOperation.CANCELDELEGATIONTOKEN.toString());
params.put(TOKEN_PARAM, dToken.encodeToUrlString());
URL url = HttpFSUtils.createURL(new Path(fsURI), params);
AuthenticatedURL aUrl =
new AuthenticatedURL(new HttpFSKerberosAuthenticator());
try {
HttpURLConnection conn = aUrl.openConnection(url, token);
conn.setRequestMethod(
DelegationTokenOperation.CANCELDELEGATIONTOKEN.getHttpMethod());
HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
} catch (AuthenticationException ex) {
throw new IOException(ex.toString(), ex);
}
}
}

View File

@ -20,7 +20,10 @@ package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
@ -32,7 +35,9 @@ import java.util.Properties;
* from HttpFSServer's server configuration.
*/
@InterfaceAudience.Private
public class HttpFSAuthenticationFilter extends AuthenticationFilter {
public class HttpFSAuthenticationFilter
extends DelegationTokenAuthenticationFilter {
private static final String CONF_PREFIX = "httpfs.authentication.";
private static final String SIGNATURE_SECRET_FILE = SIGNATURE_SECRET + ".file";
@ -50,7 +55,8 @@ public class HttpFSAuthenticationFilter extends AuthenticationFilter {
* @return hadoop-auth configuration read from HttpFSServer's configuration.
*/
@Override
protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig) {
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) throws ServletException {
Properties props = new Properties();
Configuration conf = HttpFSServerWebApp.get().getConfig();
@ -64,11 +70,6 @@ public class HttpFSAuthenticationFilter extends AuthenticationFilter {
}
}
if (props.getProperty(AUTH_TYPE).equals("kerberos")) {
props.setProperty(AUTH_TYPE,
HttpFSKerberosAuthenticationHandler.class.getName());
}
String signatureSecretFile = props.getProperty(SIGNATURE_SECRET_FILE, null);
if (signatureSecretFile == null) {
throw new RuntimeException("Undefined property: " + SIGNATURE_SECRET_FILE);

View File

@ -1,230 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator.DelegationTokenOperation;
import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
import org.apache.hadoop.lib.service.DelegationTokenManager;
import org.apache.hadoop.lib.service.DelegationTokenManagerException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.token.Token;
import org.json.simple.JSONObject;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
import java.io.IOException;
import java.io.Writer;
import java.text.MessageFormat;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
/**
* Server side <code>AuthenticationHandler</code> that authenticates requests
* using the incoming delegation token as a 'delegation' query string parameter.
* <p/>
* If no delegation token is present in the request, it delegates to the
* {@link KerberosAuthenticationHandler}
*/
@InterfaceAudience.Private
public class HttpFSKerberosAuthenticationHandler
extends KerberosAuthenticationHandler {
static final Set<String> DELEGATION_TOKEN_OPS =
new HashSet<String>();
static {
DELEGATION_TOKEN_OPS.add(
DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
DELEGATION_TOKEN_OPS.add(
DelegationTokenOperation.RENEWDELEGATIONTOKEN.toString());
DELEGATION_TOKEN_OPS.add(
DelegationTokenOperation.CANCELDELEGATIONTOKEN.toString());
}
public static final String TYPE = "kerberos-dt";
/**
* Returns the authentication type of the handler.
*
* @return <code>kerberos-dt</code>
*/
@Override
public String getType() {
return TYPE;
}
private static final String ENTER = System.getProperty("line.separator");
@Override
@SuppressWarnings("unchecked")
public boolean managementOperation(AuthenticationToken token,
HttpServletRequest request, HttpServletResponse response)
throws IOException, AuthenticationException {
boolean requestContinues = true;
String op = request.getParameter(HttpFSFileSystem.OP_PARAM);
op = (op != null) ? op.toUpperCase() : null;
if (DELEGATION_TOKEN_OPS.contains(op) &&
!request.getMethod().equals("OPTIONS")) {
DelegationTokenOperation dtOp =
DelegationTokenOperation.valueOf(op);
if (dtOp.getHttpMethod().equals(request.getMethod())) {
if (dtOp.requiresKerberosCredentials() && token == null) {
response.sendError(HttpServletResponse.SC_UNAUTHORIZED,
MessageFormat.format(
"Operation [{0}] requires SPNEGO authentication established",
dtOp));
requestContinues = false;
} else {
DelegationTokenManager tokenManager =
HttpFSServerWebApp.get().get(DelegationTokenManager.class);
try {
Map map = null;
switch (dtOp) {
case GETDELEGATIONTOKEN:
String renewerParam =
request.getParameter(HttpFSKerberosAuthenticator.RENEWER_PARAM);
if (renewerParam == null) {
renewerParam = token.getUserName();
}
Token<?> dToken = tokenManager.createToken(
UserGroupInformation.getCurrentUser(), renewerParam);
map = delegationTokenToJSON(dToken);
break;
case RENEWDELEGATIONTOKEN:
case CANCELDELEGATIONTOKEN:
String tokenParam =
request.getParameter(HttpFSKerberosAuthenticator.TOKEN_PARAM);
if (tokenParam == null) {
response.sendError(HttpServletResponse.SC_BAD_REQUEST,
MessageFormat.format(
"Operation [{0}] requires the parameter [{1}]",
dtOp, HttpFSKerberosAuthenticator.TOKEN_PARAM));
requestContinues = false;
} else {
if (dtOp == DelegationTokenOperation.CANCELDELEGATIONTOKEN) {
Token<DelegationTokenIdentifier> dt =
new Token<DelegationTokenIdentifier>();
dt.decodeFromUrlString(tokenParam);
tokenManager.cancelToken(dt,
UserGroupInformation.getCurrentUser().getUserName());
} else {
Token<DelegationTokenIdentifier> dt =
new Token<DelegationTokenIdentifier>();
dt.decodeFromUrlString(tokenParam);
long expirationTime =
tokenManager.renewToken(dt, token.getUserName());
map = new HashMap();
map.put("long", expirationTime);
}
}
break;
}
if (requestContinues) {
response.setStatus(HttpServletResponse.SC_OK);
if (map != null) {
response.setContentType(MediaType.APPLICATION_JSON);
Writer writer = response.getWriter();
JSONObject.writeJSONString(map, writer);
writer.write(ENTER);
writer.flush();
}
requestContinues = false;
}
} catch (DelegationTokenManagerException ex) {
throw new AuthenticationException(ex.toString(), ex);
}
}
} else {
response.sendError(HttpServletResponse.SC_BAD_REQUEST,
MessageFormat.format(
"Wrong HTTP method [{0}] for operation [{1}], it should be [{2}]",
request.getMethod(), dtOp, dtOp.getHttpMethod()));
requestContinues = false;
}
}
return requestContinues;
}
@SuppressWarnings("unchecked")
private static Map delegationTokenToJSON(Token token) throws IOException {
Map json = new LinkedHashMap();
json.put(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON,
token.encodeToUrlString());
Map response = new LinkedHashMap();
response.put(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON, json);
return response;
}
/**
* Authenticates a request looking for the <code>delegation</code>
* query-string parameter and verifying it is a valid token. If there is no
* <code>delegation</code> query-string parameter, it delegates the
* authentication to the {@link KerberosAuthenticationHandler} unless it is
* disabled.
*
* @param request the HTTP client request.
* @param response the HTTP client response.
*
* @return the authentication token for the authenticated request.
* @throws IOException thrown if an IO error occurred.
* @throws AuthenticationException thrown if the authentication failed.
*/
@Override
public AuthenticationToken authenticate(HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token;
String delegationParam =
request.getParameter(HttpFSKerberosAuthenticator.DELEGATION_PARAM);
if (delegationParam != null) {
try {
Token<DelegationTokenIdentifier> dt =
new Token<DelegationTokenIdentifier>();
dt.decodeFromUrlString(delegationParam);
DelegationTokenManager tokenManager =
HttpFSServerWebApp.get().get(DelegationTokenManager.class);
UserGroupInformation ugi = tokenManager.verifyToken(dt);
final String shortName = ugi.getShortUserName();
// creating an ephemeral token
token = new AuthenticationToken(shortName, ugi.getUserName(),
getType());
token.setExpires(0);
} catch (Throwable ex) {
throw new AuthenticationException("Could not verify DelegationToken, " +
ex.toString(), ex);
}
} else {
token = super.authenticate(request, response);
}
return token;
}
}

View File

@ -1,78 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
/**
* Service interface to manage HttpFS delegation tokens.
*/
@InterfaceAudience.Private
public interface DelegationTokenManager {
/**
* Creates a delegation token.
*
* @param ugi UGI creating the token.
* @param renewer token renewer.
* @return new delegation token.
* @throws DelegationTokenManagerException thrown if the token could not be
* created.
*/
public Token<DelegationTokenIdentifier> createToken(UserGroupInformation ugi,
String renewer)
throws DelegationTokenManagerException;
/**
* Renews a delegation token.
*
* @param token delegation token to renew.
* @param renewer token renewer.
* @return epoch expiration time.
* @throws DelegationTokenManagerException thrown if the token could not be
* renewed.
*/
public long renewToken(Token<DelegationTokenIdentifier> token, String renewer)
throws DelegationTokenManagerException;
/**
* Cancels a delegation token.
*
* @param token delegation token to cancel.
* @param canceler token canceler.
* @throws DelegationTokenManagerException thrown if the token could not be
* canceled.
*/
public void cancelToken(Token<DelegationTokenIdentifier> token,
String canceler)
throws DelegationTokenManagerException;
/**
* Verifies a delegation token.
*
* @param token delegation token to verify.
* @return the UGI for the token.
* @throws DelegationTokenManagerException thrown if the token could not be
* verified.
*/
public UserGroupInformation verifyToken(Token<DelegationTokenIdentifier> token)
throws DelegationTokenManagerException;
}
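A short lifecycle sketch of this contract, mirroring TestDelegationTokenManagerService later in this diff; the running HttpFS server instance is assumed:

  DelegationTokenManager tm = server.get(DelegationTokenManager.class);
  Token<DelegationTokenIdentifier> t =
      tm.createToken(UserGroupInformation.getCurrentUser(), "foo");
  tm.verifyToken(t);                       // valid right after creation
  long expires = tm.renewToken(t, "foo");  // new expiration, epoch millis
  tm.cancelToken(t, "foo");                // a later verifyToken(t) now fails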

View File

@ -1,51 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.lib.lang.XException;
/**
* Exception thrown by the {@link DelegationTokenManager} service implementation.
*/
@InterfaceAudience.Private
public class DelegationTokenManagerException extends XException {
public enum ERROR implements XException.ERROR {
DT01("Could not verify delegation token, {0}"),
DT02("Could not renew delegation token, {0}"),
DT03("Could not cancel delegation token, {0}"),
DT04("Could not create delegation token, {0}");
private String template;
ERROR(String template) {
this.template = template;
}
@Override
public String getTemplate() {
return template;
}
}
public DelegationTokenManagerException(ERROR error, Object... params) {
super(error, params);
}
}
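Each ERROR template carries a single {0} placeholder filled from the constructor varargs; the service implementation later in this diff throws these like so:

  throw new DelegationTokenManagerException(
      DelegationTokenManagerException.ERROR.DT02, ex.toString(), ex);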

View File

@ -1,242 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.security;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.lib.server.BaseService;
import org.apache.hadoop.lib.server.ServerException;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
import org.apache.hadoop.lib.service.DelegationTokenManager;
import org.apache.hadoop.lib.service.DelegationTokenManagerException;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
/**
* DelegationTokenManager service implementation.
*/
@InterfaceAudience.Private
public class DelegationTokenManagerService extends BaseService
implements DelegationTokenManager {
private static final String PREFIX = "delegation.token.manager";
private static final String UPDATE_INTERVAL = "update.interval";
private static final String MAX_LIFETIME = "max.lifetime";
private static final String RENEW_INTERVAL = "renew.interval";
private static final long HOUR = 60 * 60 * 1000;
private static final long DAY = 24 * HOUR;
DelegationTokenSecretManager secretManager = null;
private Text tokenKind;
public DelegationTokenManagerService() {
super(PREFIX);
}
/**
* Initializes the service.
*
* @throws ServiceException thrown if the service could not be initialized.
*/
@Override
protected void init() throws ServiceException {
long updateInterval = getServiceConfig().getLong(UPDATE_INTERVAL, DAY);
long maxLifetime = getServiceConfig().getLong(MAX_LIFETIME, 7 * DAY);
long renewInterval = getServiceConfig().getLong(RENEW_INTERVAL, DAY);
tokenKind = (HttpFSServerWebApp.get().isSslEnabled())
? SWebHdfsFileSystem.TOKEN_KIND : WebHdfsFileSystem.TOKEN_KIND;
secretManager = new DelegationTokenSecretManager(tokenKind, updateInterval,
maxLifetime,
renewInterval, HOUR);
try {
secretManager.startThreads();
} catch (IOException ex) {
throw new ServiceException(ServiceException.ERROR.S12,
DelegationTokenManager.class.getSimpleName(),
ex.toString(), ex);
}
}
/**
* Destroys the service.
*/
@Override
public void destroy() {
secretManager.stopThreads();
super.destroy();
}
/**
* Returns the service interface.
*
* @return the service interface.
*/
@Override
public Class getInterface() {
return DelegationTokenManager.class;
}
/**
* Creates a delegation token.
*
* @param ugi UGI creating the token.
* @param renewer token renewer.
* @return new delegation token.
* @throws DelegationTokenManagerException thrown if the token could not be
* created.
*/
@Override
public Token<DelegationTokenIdentifier> createToken(UserGroupInformation ugi,
String renewer)
throws DelegationTokenManagerException {
renewer = (renewer == null) ? ugi.getShortUserName() : renewer;
String user = ugi.getUserName();
Text owner = new Text(user);
Text realUser = null;
if (ugi.getRealUser() != null) {
realUser = new Text(ugi.getRealUser().getUserName());
}
DelegationTokenIdentifier tokenIdentifier =
new DelegationTokenIdentifier(tokenKind, owner, new Text(renewer), realUser);
Token<DelegationTokenIdentifier> token =
new Token<DelegationTokenIdentifier>(tokenIdentifier, secretManager);
try {
SecurityUtil.setTokenService(token,
HttpFSServerWebApp.get().getAuthority());
} catch (ServerException ex) {
throw new DelegationTokenManagerException(
DelegationTokenManagerException.ERROR.DT04, ex.toString(), ex);
}
return token;
}
/**
* Renews a delegation token.
*
* @param token delegation token to renew.
* @param renewer token renewer.
* @return epoch expiration time.
* @throws DelegationTokenManagerException thrown if the token could not be
* renewed.
*/
@Override
public long renewToken(Token<DelegationTokenIdentifier> token, String renewer)
throws DelegationTokenManagerException {
try {
return secretManager.renewToken(token, renewer);
} catch (IOException ex) {
throw new DelegationTokenManagerException(
DelegationTokenManagerException.ERROR.DT02, ex.toString(), ex);
}
}
/**
* Cancels a delegation token.
*
* @param token delegation token to cancel.
* @param canceler token canceler.
* @throws DelegationTokenManagerException thrown if the token could not be
* canceled.
*/
@Override
public void cancelToken(Token<DelegationTokenIdentifier> token,
String canceler)
throws DelegationTokenManagerException {
try {
secretManager.cancelToken(token, canceler);
} catch (IOException ex) {
throw new DelegationTokenManagerException(
DelegationTokenManagerException.ERROR.DT03, ex.toString(), ex);
}
}
/**
* Verifies a delegation token.
*
* @param token delegation token to verify.
* @return the UGI for the token.
* @throws DelegationTokenManagerException thrown if the token could not be
* verified.
*/
@Override
public UserGroupInformation verifyToken(Token<DelegationTokenIdentifier> token)
throws DelegationTokenManagerException {
ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
DataInputStream dis = new DataInputStream(buf);
DelegationTokenIdentifier id = new DelegationTokenIdentifier(tokenKind);
try {
id.readFields(dis);
dis.close();
secretManager.verifyToken(id, token.getPassword());
} catch (Exception ex) {
throw new DelegationTokenManagerException(
DelegationTokenManagerException.ERROR.DT01, ex.toString(), ex);
}
return id.getUser();
}
private static class DelegationTokenSecretManager
extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
private Text tokenKind;
/**
* Create a secret manager
*
* @param delegationKeyUpdateInterval the number of milliseconds for rolling
* new secret keys.
* @param delegationTokenMaxLifetime the maximum lifetime of the delegation
* tokens
* @param delegationTokenRenewInterval how often the tokens must be renewed
* @param delegationTokenRemoverScanInterval how often the tokens are
* scanned
* for expired tokens
*/
public DelegationTokenSecretManager(Text tokenKind, long delegationKeyUpdateInterval,
long delegationTokenMaxLifetime,
long delegationTokenRenewInterval,
long delegationTokenRemoverScanInterval) {
super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
this.tokenKind = tokenKind;
}
@Override
public DelegationTokenIdentifier createIdentifier() {
return new DelegationTokenIdentifier(tokenKind);
}
}
}
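A minimal wiring sketch for this service, assuming the same test directory layout used by TestDelegationTokenManagerService below; the "server.services" key and the HttpFSServerWebApp constructor come from that test:

  String dir = TestDirHelper.getTestDir().getAbsolutePath();
  Configuration conf = new Configuration(false);
  conf.set("server.services", DelegationTokenManagerService.class.getName());
  HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir, conf);
  server.setAuthority(new InetSocketAddress(InetAddress.getLocalHost(), 14000));
  server.init();
  DelegationTokenManager tm = server.get(DelegationTokenManager.class);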

View File

@ -35,7 +35,6 @@
org.apache.hadoop.lib.service.scheduler.SchedulerService,
org.apache.hadoop.lib.service.security.GroupsService,
org.apache.hadoop.lib.service.security.ProxyUserService,
org.apache.hadoop.lib.service.security.DelegationTokenManagerService,
org.apache.hadoop.lib.service.hadoop.FileSystemAccessService
</value>
<description>
@ -226,12 +225,4 @@
</description>
</property>
<property>
<name>httpfs.user.provider.user.pattern</name>
<value>^[A-Za-z_][A-Za-z0-9._-]*[$]?$</value>
<description>
Valid pattern for user and group names; it must be a valid Java regex.
</description>
</property>
</configuration>
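The removed httpfs.user.provider.user.pattern default above is a plain java.util.regex expression; a quick illustration of what it accepts (java.util.regex.Pattern assumed imported):

  Pattern p = Pattern.compile("^[A-Za-z_][A-Za-z0-9._-]*[$]?$");
  p.matcher("hdfs").matches();    // true
  p.matcher("host1$").matches();  // true: a trailing '$' is allowed
  p.matcher("1user").matches();   // false: must not start with a digit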

View File

@ -17,15 +17,19 @@
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
import javax.servlet.ServletException;
import java.util.Properties;
public class HttpFSKerberosAuthenticationHandlerForTesting
extends HttpFSKerberosAuthenticationHandler {
extends KerberosDelegationTokenAuthenticationHandler {
@Override
public void init(Properties config) throws ServletException {
// NOP override to avoid Kerberos initialization
config.setProperty(TOKEN_KIND, "t");
initTokenManager(config);
}
@Override

View File

@ -1,94 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
import org.apache.hadoop.lib.server.Service;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.Groups;
import org.apache.hadoop.lib.wsrs.UserProvider;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.util.Signer;
import org.apache.hadoop.test.HFSTestCase;
import org.apache.hadoop.test.HadoopUsersConfTestHelper;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.test.TestHdfs;
import org.apache.hadoop.test.TestHdfsHelper;
import org.apache.hadoop.test.TestJetty;
import org.apache.hadoop.test.TestJettyHelper;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.junit.Assert;
import org.junit.Test;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.webapp.WebAppContext;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.Writer;
import java.net.HttpURLConnection;
import java.net.URL;
import java.text.MessageFormat;
import java.util.Arrays;
import java.util.List;
public class TestHttpFSCustomUserName extends HFSTestCase {
@Test
@TestDir
@TestJetty
public void defaultUserName() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration httpfsConf = new Configuration(false);
HttpFSServerWebApp server =
new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
server.init();
Assert.assertEquals(UserProvider.USER_PATTERN_DEFAULT,
UserProvider.getUserPattern().pattern());
server.destroy();
}
@Test
@TestDir
@TestJetty
public void customUserName() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration httpfsConf = new Configuration(false);
httpfsConf.set(UserProvider.USER_PATTERN_KEY, "1");
HttpFSServerWebApp server =
new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
server.init();
Assert.assertEquals("1", UserProvider.getUserPattern().pattern());
server.destroy();
}
}

View File

@ -1,316 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator.DelegationTokenOperation;
import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
import org.apache.hadoop.lib.service.DelegationTokenManager;
import org.apache.hadoop.lib.service.DelegationTokenManagerException;
import org.apache.hadoop.lib.servlet.ServerWebApp;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.HFSTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.net.InetAddress;
import java.net.InetSocketAddress;
public class TestHttpFSKerberosAuthenticationHandler extends HFSTestCase {
@Test
@TestDir
public void testManagementOperationsWebHdfsFileSystem() throws Exception {
testManagementOperations(WebHdfsFileSystem.TOKEN_KIND);
}
@Test
@TestDir
public void testManagementOperationsSWebHdfsFileSystem() throws Exception {
try {
System.setProperty(HttpFSServerWebApp.NAME +
ServerWebApp.SSL_ENABLED, "true");
testManagementOperations(SWebHdfsFileSystem.TOKEN_KIND);
} finally {
System.getProperties().remove(HttpFSServerWebApp.NAME +
ServerWebApp.SSL_ENABLED);
}
}
private void testManagementOperations(Text expectedTokenKind) throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration httpfsConf = new Configuration(false);
HttpFSServerWebApp server =
new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
server.setAuthority(new InetSocketAddress(InetAddress.getLocalHost(),
14000));
AuthenticationHandler handler =
new HttpFSKerberosAuthenticationHandlerForTesting();
try {
server.init();
handler.init(null);
testNonManagementOperation(handler);
testManagementOperationErrors(handler);
testGetToken(handler, null, expectedTokenKind);
testGetToken(handler, "foo", expectedTokenKind);
testCancelToken(handler);
testRenewToken(handler);
} finally {
if (handler != null) {
handler.destroy();
}
server.destroy();
}
}
private void testNonManagementOperation(AuthenticationHandler handler)
throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
thenReturn(null);
Assert.assertTrue(handler.managementOperation(null, request, null));
Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
thenReturn(HttpFSFileSystem.Operation.CREATE.toString());
Assert.assertTrue(handler.managementOperation(null, request, null));
}
private void testManagementOperationErrors(AuthenticationHandler handler)
throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
thenReturn(DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
Mockito.when(request.getMethod()).thenReturn("FOO");
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.startsWith("Wrong HTTP method"));
Mockito.reset(response);
Mockito.when(request.getMethod()).
thenReturn(DelegationTokenOperation.GETDELEGATIONTOKEN.getHttpMethod());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED),
Mockito.contains("requires SPNEGO"));
}
private void testGetToken(AuthenticationHandler handler, String renewer,
Text expectedTokenKind) throws Exception {
DelegationTokenOperation op = DelegationTokenOperation.GETDELEGATIONTOKEN;
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
thenReturn(op.toString());
Mockito.when(request.getMethod()).
thenReturn(op.getHttpMethod());
AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
Mockito.when(token.getUserName()).thenReturn("user");
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.RENEWER_PARAM)).
thenReturn(renewer);
Mockito.reset(response);
StringWriter writer = new StringWriter();
PrintWriter pwriter = new PrintWriter(writer);
Mockito.when(response.getWriter()).thenReturn(pwriter);
Assert.assertFalse(handler.managementOperation(token, request, response));
if (renewer == null) {
Mockito.verify(token).getUserName();
} else {
Mockito.verify(token, Mockito.never()).getUserName();
}
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
Mockito.verify(response).setContentType(MediaType.APPLICATION_JSON);
pwriter.close();
String responseOutput = writer.toString();
String tokenLabel = HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON;
Assert.assertTrue(responseOutput.contains(tokenLabel));
Assert.assertTrue(responseOutput.contains(
HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON));
JSONObject json = (JSONObject) new JSONParser().parse(responseOutput);
json = (JSONObject) json.get(tokenLabel);
String tokenStr;
tokenStr = (String)
json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
Token<DelegationTokenIdentifier> dt = new Token<DelegationTokenIdentifier>();
dt.decodeFromUrlString(tokenStr);
HttpFSServerWebApp.get().get(DelegationTokenManager.class).verifyToken(dt);
Assert.assertEquals(expectedTokenKind, dt.getKind());
}
private void testCancelToken(AuthenticationHandler handler)
throws Exception {
DelegationTokenOperation op =
DelegationTokenOperation.CANCELDELEGATIONTOKEN;
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
thenReturn(op.toString());
Mockito.when(request.getMethod()).
thenReturn(op.getHttpMethod());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.contains("requires the parameter [token]"));
Mockito.reset(response);
Token<DelegationTokenIdentifier> token =
HttpFSServerWebApp.get().get(DelegationTokenManager.class).createToken(
UserGroupInformation.getCurrentUser(), "foo");
Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.TOKEN_PARAM)).
thenReturn(token.encodeToUrlString());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
try {
HttpFSServerWebApp.get().get(DelegationTokenManager.class).verifyToken(token);
Assert.fail();
}
catch (DelegationTokenManagerException ex) {
Assert.assertTrue(ex.toString().contains("DT01"));
}
}
private void testRenewToken(AuthenticationHandler handler)
throws Exception {
DelegationTokenOperation op =
DelegationTokenOperation.RENEWDELEGATIONTOKEN;
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
thenReturn(op.toString());
Mockito.when(request.getMethod()).
thenReturn(op.getHttpMethod());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED),
Mockito.contains("equires SPNEGO authentication established"));
Mockito.reset(response);
AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
Mockito.when(token.getUserName()).thenReturn("user");
Assert.assertFalse(handler.managementOperation(token, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.contains("requires the parameter [token]"));
Mockito.reset(response);
StringWriter writer = new StringWriter();
PrintWriter pwriter = new PrintWriter(writer);
Mockito.when(response.getWriter()).thenReturn(pwriter);
Token<DelegationTokenIdentifier> dToken =
HttpFSServerWebApp.get().get(DelegationTokenManager.class).createToken(
UserGroupInformation.getCurrentUser(), "user");
Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.TOKEN_PARAM)).
thenReturn(dToken.encodeToUrlString());
Assert.assertFalse(handler.managementOperation(token, request, response));
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
pwriter.close();
Assert.assertTrue(writer.toString().contains("long"));
HttpFSServerWebApp.get().get(DelegationTokenManager.class).verifyToken(dToken);
}
@Test
@TestDir
public void testAuthenticate() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration httpfsConf = new Configuration(false);
HttpFSServerWebApp server =
new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
server.setAuthority(new InetSocketAddress(InetAddress.getLocalHost(),
14000));
AuthenticationHandler handler =
new HttpFSKerberosAuthenticationHandlerForTesting();
try {
server.init();
handler.init(null);
testValidDelegationToken(handler);
testInvalidDelegationToken(handler);
} finally {
if (handler != null) {
handler.destroy();
}
server.destroy();
}
}
private void testValidDelegationToken(AuthenticationHandler handler)
throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Token<DelegationTokenIdentifier> dToken =
HttpFSServerWebApp.get().get(DelegationTokenManager.class).createToken(
UserGroupInformation.getCurrentUser(), "user");
Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.DELEGATION_PARAM)).
thenReturn(dToken.encodeToUrlString());
AuthenticationToken token = handler.authenticate(request, response);
Assert.assertEquals(UserGroupInformation.getCurrentUser().getShortUserName(),
token.getUserName());
Assert.assertEquals(0, token.getExpires());
Assert.assertEquals(HttpFSKerberosAuthenticationHandler.TYPE,
token.getType());
Assert.assertTrue(token.isExpired());
}
private void testInvalidDelegationToken(AuthenticationHandler handler)
throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.DELEGATION_PARAM)).
thenReturn("invalid");
try {
handler.authenticate(request, response);
Assert.fail();
} catch (AuthenticationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail();
}
}
}

View File

@ -18,6 +18,8 @@
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
import org.json.simple.JSONArray;
import org.junit.Assert;
@ -43,7 +45,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
import org.apache.hadoop.lib.server.Service;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.Groups;
@ -682,7 +683,7 @@ public class TestHttpFSServer extends HFSTestCase {
AuthenticationToken token =
new AuthenticationToken("u", "p",
HttpFSKerberosAuthenticationHandlerForTesting.TYPE);
new KerberosDelegationTokenAuthenticationHandler().getType());
token.setExpires(System.currentTimeMillis() + 100000000);
Signer signer = new Signer(new StringSignerSecretProvider("secret"));
String tokenSigned = signer.sign(token.toString());
@ -706,9 +707,9 @@ public class TestHttpFSServer extends HFSTestCase {
JSONObject json = (JSONObject)
new JSONParser().parse(new InputStreamReader(conn.getInputStream()));
json = (JSONObject)
json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON);
json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
String tokenStr = (String)
json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
url = new URL(TestJettyHelper.getJettyURL(),
"/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);

View File

@ -23,11 +23,11 @@ import org.apache.hadoop.fs.DelegationTokenRenewer;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
import org.apache.hadoop.test.HFSTestCase;
import org.apache.hadoop.test.KerberosTestUtils;
import org.apache.hadoop.test.TestDir;
@ -166,9 +166,9 @@ public class TestHttpFSWithKerberos extends HFSTestCase {
.parse(new InputStreamReader(conn.getInputStream()));
json =
(JSONObject) json
.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON);
.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
String tokenStr = (String) json
.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
//access httpfs using the delegation token
url = new URL(TestJettyHelper.getJettyURL(),

View File

@ -1,89 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.security;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.service.DelegationTokenManager;
import org.apache.hadoop.lib.service.DelegationTokenManagerException;
import org.apache.hadoop.lib.service.hadoop.FileSystemAccessService;
import org.apache.hadoop.lib.service.instrumentation.InstrumentationService;
import org.apache.hadoop.lib.service.scheduler.SchedulerService;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.HTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.util.StringUtils;
import org.junit.Assert;
import org.junit.Test;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.Arrays;
public class TestDelegationTokenManagerService extends HTestCase {
@Test
@TestDir
public void service() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("httpfs.services", StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName(),
DelegationTokenManagerService.class.getName())));
Server server = new HttpFSServerWebApp(dir, dir, dir, dir, conf);
server.init();
DelegationTokenManager tm = server.get(DelegationTokenManager.class);
Assert.assertNotNull(tm);
server.destroy();
}
@Test
@TestDir
@SuppressWarnings("unchecked")
public void tokens() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",",
Arrays.asList(DelegationTokenManagerService.class.getName())));
HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir, conf);
server.setAuthority(new InetSocketAddress(InetAddress.getLocalHost(), 14000));
server.init();
DelegationTokenManager tm = server.get(DelegationTokenManager.class);
Token token = tm.createToken(UserGroupInformation.getCurrentUser(), "foo");
Assert.assertNotNull(token);
tm.verifyToken(token);
Assert.assertTrue(tm.renewToken(token, "foo") > System.currentTimeMillis());
tm.cancelToken(token, "foo");
try {
tm.verifyToken(token);
Assert.fail();
} catch (DelegationTokenManagerException ex) {
//NOP
} catch (Exception ex) {
Assert.fail();
}
server.destroy();
}
}

View File

@ -724,6 +724,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
FSDataInputStream fis = clientCache.getDfsInputStream(userName,
Nfs3Utils.getFileIdPath(handle));
if (fis == null) {
return new READ3Response(Nfs3Status.NFS3ERR_ACCES);
}
try {
readCount = fis.read(offset, readbuffer, 0, count);
} catch (IOException e) {

View File

@ -278,13 +278,11 @@ public class TestRpcProgramNfs3 {
readReq.serialize(xdr_req);
// Attempt by an unprivileged user should fail.
/* Hits HDFS-6582. It needs to be fixed first.
READ3Response response1 = nfsd.read(xdr_req.asReadOnlyWrap(),
securityHandlerUnpriviledged,
new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
response1.getStatus());
*/
// Attempt by a privileged user should pass.
READ3Response response2 = nfsd.read(xdr_req.asReadOnlyWrap(),

View File

@ -370,12 +370,38 @@ Release 2.6.0 - UNRELEASED
HDFS-6781. Separate HDFS commands from CommandsManual.apt.vm. (Akira
Ajisaka via Arpit Agarwal)
HDFS-6728. Dynamically add new volumes to DataStorage, formatted if
necessary. (Lei Xu via atm)
HDFS-6740. Make FSDataset support adding data volumes dynamically. (Lei
Xu via atm)
HDFS-6722. Display readable last contact time for dead nodes on NN webUI.
(Ming Ma via wheat9)
HDFS-6772. Get DN storages out of blockContentsStale state faster after
NN restarts. (Ming Ma via Arpit Agarwal)
HDFS-573. Porting libhdfs to Windows. (cnauroth)
HDFS-6828. Separate block replica dispatching from Balancer. (szetszwo via
jing9)
HDFS-6837. Code cleanup for Balancer and Dispatcher. (szetszwo via
jing9)
HDFS-6838. Code cleanup for unnecessary INode replacement.
(Jing Zhao via wheat9)
OPTIMIZATIONS
HDFS-6690. Deduplicate xattr names in memory. (wang)
BUG FIXES
HDFS-6823. dfs.web.authentication.kerberos.principal shows up in logs for
insecure HDFS (Allen Wittenauer via raviprak)
HDFS-6617. Flake TestDFSZKFailoverController.testManualFailoverWithDFSHAAdmin
due to a long edit log sync op. (Liang Xie via cnauroth)
@ -462,6 +488,9 @@ Release 2.6.0 - UNRELEASED
HDFS-6791. A block could remain under replicated if all of its replicas are on
decommissioned nodes. (Ming Ma via jing9)
HDFS-6582. Missing null check in RpcProgramNfs3#read(XDR, SecurityHandler)
(Abhiraj Butala via brandonli)
Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -361,16 +361,97 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<profiles>
<profile>
<id>windows</id>
<id>native-win</id>
<activation>
<activeByDefault>false</activeByDefault>
<os>
<family>windows</family>
</os>
</activation>
<properties>
<windows.build>true</windows.build>
</properties>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
<executions>
<execution>
<id>enforce-os</id>
<goals>
<goal>enforce</goal>
</goals>
<configuration>
<rules>
<requireOS>
<family>windows</family>
<message>native-win build only supported on Windows</message>
</requireOS>
</rules>
<fail>true</fail>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<executions>
<execution>
<id>make</id>
<phase>compile</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<mkdir dir="${project.build.directory}/native"/>
<exec executable="cmake" dir="${project.build.directory}/native"
failonerror="true">
<arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_LIBWEBHDFS=${require.libwebhdfs} -DREQUIRE_FUSE=${require.fuse} -G 'Visual Studio 10 Win64'"/>
</exec>
<exec executable="msbuild" dir="${project.build.directory}/native"
failonerror="true">
<arg line="ALL_BUILD.vcxproj /nologo /p:Configuration=Release"/>
</exec>
<!-- Copy for inclusion in distribution. -->
<copy todir="${project.build.directory}/bin">
<fileset dir="${project.build.directory}/native/target/bin/Release"/>
</copy>
</target>
</configuration>
</execution>
<execution>
<id>native_tests</id>
<phase>test</phase>
<goals><goal>run</goal></goals>
<configuration>
<skip>${skipTests}</skip>
<target>
<property name="compile_classpath" refid="maven.compile.classpath"/>
<property name="test_classpath" refid="maven.test.classpath"/>
<macrodef name="run-test">
<attribute name="test"/>
<sequential>
<echo message="Running @{test}"/>
<exec executable="${project.build.directory}/native/Release/@{test}" failonerror="true" dir="${project.build.directory}/native/">
<env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
<!-- HADOOP_HOME required to find winutils. -->
<env key="HADOOP_HOME" value="${hadoop.common.build.dir}"/>
<!-- Make sure hadoop.dll and jvm.dll are on PATH. -->
<env key="PATH" value="${env.PATH};${hadoop.common.build.dir}/bin;${java.home}/jre/bin/server;${java.home}/bin/server"/>
</exec>
<echo message="Finished @{test}"/>
</sequential>
</macrodef>
<run-test test="test_libhdfs_threaded"/>
<echo message="Skipping test_libhdfs_zerocopy"/>
<run-test test="test_native_mini_dfs"/>
</target>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>native</id>
@ -408,21 +489,25 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<phase>test</phase>
<goals><goal>run</goal></goals>
<configuration>
<skip>${skipTests}</skip>
<target>
<property name="compile_classpath" refid="maven.compile.classpath"/>
<property name="test_classpath" refid="maven.test.classpath"/>
<exec executable="sh" failonerror="true" dir="${project.build.directory}/native/">
<arg value="-c"/>
<arg value="[ x$SKIPTESTS = xtrue ] || ${project.build.directory}/native/test_libhdfs_threaded"/>
<env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
<env key="SKIPTESTS" value="${skipTests}"/>
</exec>
<exec executable="sh" failonerror="true" dir="${project.build.directory}/native/">
<arg value="-c"/>
<arg value="[ x$SKIPTESTS = xtrue ] || ${project.build.directory}/native/test_native_mini_dfs"/>
<env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
<env key="SKIPTESTS" value="${skipTests}"/>
</exec>
<macrodef name="run-test">
<attribute name="test"/>
<sequential>
<echo message="Running @{test}"/>
<exec executable="${project.build.directory}/native/@{test}" failonerror="true" dir="${project.build.directory}/native/">
<env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
<!-- Make sure libhadoop.so is on LD_LIBRARY_PATH. -->
<env key="LD_LIBRARY_PATH" value="${env.LD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib"/>
</exec>
<echo message="Finished @{test}"/>
</sequential>
</macrodef>
<run-test test="test_libhdfs_threaded"/>
<run-test test="test_libhdfs_zerocopy"/>
<run-test test="test_native_mini_dfs"/>
</target>
</configuration>
</execution>

View File

@ -76,9 +76,39 @@ if (NOT GENERATED_JAVAH)
MESSAGE(FATAL_ERROR "You must set the CMake variable GENERATED_JAVAH")
endif (NOT GENERATED_JAVAH)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_GNU_SOURCE")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64")
if (WIN32)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /O2")
# Set warning level 4.
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W4")
# Skip "unreferenced formal parameter".
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4100")
# Skip "conditional expression is constant".
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4127")
# Skip deprecated POSIX function warnings.
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_CRT_NONSTDC_NO_DEPRECATE")
# Skip CRT non-secure function warnings. If we can convert usage of
# strerror, getenv and ctime to their secure CRT equivalents, then we can
# re-enable the CRT non-secure function warnings.
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_CRT_SECURE_NO_WARNINGS")
# Omit unneeded headers.
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DWIN32_LEAN_AND_MEAN")
set(OS_DIR main/native/libhdfs/os/windows)
set(OUT_DIR target/bin)
else (WIN32)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_GNU_SOURCE")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64")
set(OS_DIR main/native/libhdfs/os/posix)
set(OS_LINK_LIBRARIES pthread)
set(OUT_DIR target/usr/local/lib)
endif (WIN32)
include_directories(
${GENERATED_JAVAH}
@ -87,6 +117,7 @@ include_directories(
${JNI_INCLUDE_DIRS}
main/native
main/native/libhdfs
${OS_DIR}
)
set(_FUSE_DFS_VERSION 0.1.0)
@ -96,6 +127,9 @@ add_dual_library(hdfs
main/native/libhdfs/exception.c
main/native/libhdfs/jni_helper.c
main/native/libhdfs/hdfs.c
main/native/libhdfs/common/htable.c
${OS_DIR}/mutexes.c
${OS_DIR}/thread_local_storage.c
)
if (NEED_LINK_DL)
set(LIB_DL dl)
@ -104,17 +138,14 @@ endif(NEED_LINK_DL)
target_link_dual_libraries(hdfs
${JAVA_JVM_LIBRARY}
${LIB_DL}
pthread
${OS_LINK_LIBRARIES}
)
dual_output_directory(hdfs target/usr/local/lib)
dual_output_directory(hdfs ${OUT_DIR})
set(LIBHDFS_VERSION "0.0.0")
set_target_properties(hdfs PROPERTIES
SOVERSION ${LIBHDFS_VERSION})
add_library(posix_util
main/native/util/posix_util.c
)
add_executable(test_libhdfs_ops
main/native/libhdfs/test/test_libhdfs_ops.c
)
@ -156,11 +187,12 @@ target_link_libraries(test_native_mini_dfs
add_executable(test_libhdfs_threaded
main/native/libhdfs/expect.c
main/native/libhdfs/test_libhdfs_threaded.c
${OS_DIR}/thread.c
)
target_link_libraries(test_libhdfs_threaded
hdfs
native_mini_dfs
pthread
${OS_LINK_LIBRARIES}
)
add_executable(test_libhdfs_zerocopy
@ -170,17 +202,21 @@ add_executable(test_libhdfs_zerocopy
target_link_libraries(test_libhdfs_zerocopy
hdfs
native_mini_dfs
pthread
${OS_LINK_LIBRARIES}
)
add_executable(test_libhdfs_vecsum
main/native/libhdfs/test/vecsum.c
)
target_link_libraries(test_libhdfs_vecsum
hdfs
pthread
rt
)
# Skip vecsum on Windows. This could be made to work in the future by
# introducing an abstraction layer over the sys/mman.h functions.
if (NOT WIN32)
add_executable(test_libhdfs_vecsum
main/native/libhdfs/test/vecsum.c
)
target_link_libraries(test_libhdfs_vecsum
hdfs
pthread
rt
)
endif(NOT WIN32)
IF(REQUIRE_LIBWEBHDFS)
add_subdirectory(contrib/libwebhdfs)

View File

@ -1671,9 +1671,11 @@ public class DFSUtil {
.setKeytabConfKey(getSpnegoKeytabKey(conf, spnegoKeytabFileKey));
// initialize the webserver for uploading/downloading files.
LOG.info("Starting web server as: "
+ SecurityUtil.getServerPrincipal(conf.get(spnegoUserNameKey),
httpAddr.getHostName()));
if (UserGroupInformation.isSecurityEnabled()) {
LOG.info("Starting web server as: "
+ SecurityUtil.getServerPrincipal(conf.get(spnegoUserNameKey),
httpAddr.getHostName()));
}
if (policy.isHttpEnabled()) {
if (httpAddr.getPort() == 0) {

View File

@ -1,4 +1,4 @@
/*
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -15,25 +15,30 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
public class RMAppAttemptNewSavedEvent extends RMAppAttemptEvent {
final Exception storedException;
public RMAppAttemptNewSavedEvent(ApplicationAttemptId appAttemptId,
Exception storedException) {
super(appAttemptId, RMAppAttemptEventType.ATTEMPT_NEW_SAVED);
this.storedException = storedException;
}
public Exception getStoredException() {
return storedException;
}
}
package org.apache.hadoop.hdfs.server.balancer;
/**
* Exit status - The value associated with each exit status is directly mapped
* to the process's exit code on the command line.
*/
public enum ExitStatus {
SUCCESS(0),
IN_PROGRESS(1),
ALREADY_RUNNING(-1),
NO_MOVE_BLOCK(-2),
NO_MOVE_PROGRESS(-3),
IO_EXCEPTION(-4),
ILLEGAL_ARGUMENTS(-5),
INTERRUPTED(-6);
private final int code;
private ExitStatus(int code) {
this.code = code;
}
/** @return the command line exit code. */
public int getExitCode() {
return code;
}
}
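The enum's contract in one line: callers exit the process with the code carried by the status (a sketch; the Balancer's actual main() is not shown in this diff):

  System.exit(ExitStatus.IO_EXCEPTION.getExitCode()); // process exits with -4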

View File

@ -34,6 +34,10 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.io.IOUtils;
@ -90,14 +94,16 @@ public class NameNodeConnector implements Closeable {
return blockpoolID;
}
/** @return the namenode proxy. */
public NamenodeProtocol getNamenode() {
return namenode;
/** @return blocks with locations. */
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
throws IOException {
return namenode.getBlocks(datanode, size);
}
/** @return the client proxy. */
public ClientProtocol getClient() {
return client;
/** @return live datanode storage reports. */
public DatanodeStorageReport[] getLiveDatanodeStorageReport()
throws IOException {
return client.getDatanodeStorageReport(DatanodeReportType.LIVE);
}
/** @return the key manager */

View File

@ -136,6 +136,9 @@ public class DatanodeManager {
/** The number of stale DataNodes */
private volatile int numStaleNodes;
/** The number of stale storages */
private volatile int numStaleStorages;
/**
* Whether or not this cluster has ever consisted of more than 1 rack,
* according to the NetworkTopology.
@ -1142,6 +1145,22 @@ public class DatanodeManager {
return this.numStaleNodes;
}
/**
* Get the number of content stale storages.
*/
public int getNumStaleStorages() {
return numStaleStorages;
}
/**
* Set the number of content stale storages.
*
* @param numStaleStorages The number of content stale storages.
*/
void setNumStaleStorages(int numStaleStorages) {
this.numStaleStorages = numStaleStorages;
}
/** Fetch live and dead datanodes. */
public void fetchDatanodes(final List<DatanodeDescriptor> live,
final List<DatanodeDescriptor> dead, final boolean removeDecommissionNode) {

View File

@ -256,6 +256,7 @@ class HeartbeatManager implements DatanodeStatistics {
DatanodeID dead = null;
// check the number of stale nodes
int numOfStaleNodes = 0;
int numOfStaleStorages = 0;
synchronized(this) {
for (DatanodeDescriptor d : datanodes) {
if (dead == null && dm.isDatanodeDead(d)) {
@ -265,10 +266,17 @@ class HeartbeatManager implements DatanodeStatistics {
if (d.isStale(dm.getStaleInterval())) {
numOfStaleNodes++;
}
DatanodeStorageInfo[] storageInfos = d.getStorageInfos();
for(DatanodeStorageInfo storageInfo : storageInfos) {
if (storageInfo.areBlockContentsStale()) {
numOfStaleStorages++;
}
}
}
// Set the number of stale nodes in the DatanodeManager
dm.setNumStaleNodes(numOfStaleNodes);
dm.setNumStaleStorages(numOfStaleStorages);
}
allAlive = dead == null;

View File

@ -601,7 +601,7 @@ class BPOfferService {
LOG.info("DatanodeCommand action : DNA_REGISTER from " + actor.nnAddr
+ " with " + actor.state + " state");
actor.reRegister();
return true;
return false;
}
writeLock();
try {

View File

@ -223,6 +223,18 @@ class BPServiceActor implements Runnable {
register();
}
// This is useful to make sure NN gets Heartbeat before Blockreport
// upon NN restart while DN keeps retrying. Otherwise:
// 1. NN restarts.
// 2. Heartbeat RPC will retry and succeed. NN asks DN to reregister.
// 3. After reregistration completes, DN will send Blockreport first.
// 4. Given NN receives Blockreport after Heartbeat, it won't mark
// DatanodeStorageInfo#blockContentsStale to false until the next
// Blockreport.
void scheduleHeartbeat() {
lastHeartbeat = 0;
}
/**
* This methods arranges for the data node to send the block report at
* the next heartbeat.
@ -902,6 +914,7 @@ class BPServiceActor implements Runnable {
retrieveNamespaceInfo();
// and re-register
register();
scheduleHeartbeat();
}
}

View File

@ -36,8 +36,10 @@ import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Properties;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@ -106,13 +108,22 @@ public class BlockPoolSliceStorage extends Storage {
void recoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo,
Collection<File> dataDirs, StartupOption startOpt) throws IOException {
LOG.info("Analyzing storage directories for bpid " + nsInfo.getBlockPoolID());
Set<String> existingStorageDirs = new HashSet<String>();
for (int i = 0; i < getNumStorageDirs(); i++) {
existingStorageDirs.add(getStorageDir(i).getRoot().getAbsolutePath());
}
// 1. For each BP data directory analyze the state and
// check whether all is consistent before transitioning.
this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
ArrayList<StorageState> dataDirStates = new ArrayList<StorageState>(
dataDirs.size());
for (Iterator<File> it = dataDirs.iterator(); it.hasNext();) {
File dataDir = it.next();
if (existingStorageDirs.contains(dataDir.getAbsolutePath())) {
LOG.info("Storage directory " + dataDir + " has already been used.");
it.remove();
continue;
}
StorageDirectory sd = new StorageDirectory(dataDir, null, true);
StorageState curState;
try {

View File

@ -55,6 +55,7 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@ -171,6 +172,170 @@ public class DataStorage extends Storage {
return null;
}
/**
* Writes the current layout version and storage properties to each of the
* given directories; see {@link org.apache.hadoop.hdfs.server.common.Storage#writeAll()}.
*/
private void writeAll(Collection<StorageDirectory> dirs) throws IOException {
this.layoutVersion = getServiceLayoutVersion();
for (StorageDirectory dir : dirs) {
writeProperties(dir);
}
}
/**
* Add a list of volumes to be managed by DataStorage. If a volume is empty,
* format it; otherwise recover it from previous transitions if required.
*
* @param datanode the reference to DataNode.
* @param nsInfo namespace information
* @param dataDirs array of data storage directories
* @param startOpt startup option
* @throws IOException
*/
synchronized void addStorageLocations(DataNode datanode,
NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs,
StartupOption startOpt)
throws IOException {
// Similar to recoverTransitionRead, it first ensures the datanode level
// format is completed.
List<StorageLocation> tmpDataDirs =
new ArrayList<StorageLocation>(dataDirs);
addStorageLocations(datanode, nsInfo, tmpDataDirs, startOpt, false, true);
Collection<File> bpDataDirs = new ArrayList<File>();
String bpid = nsInfo.getBlockPoolID();
for (StorageLocation dir : dataDirs) {
File dnRoot = dir.getFile();
File bpRoot = BlockPoolSliceStorage.getBpRoot(bpid, new File(dnRoot,
STORAGE_DIR_CURRENT));
bpDataDirs.add(bpRoot);
}
// mkdir for the list of BlockPoolStorage
makeBlockPoolDataDir(bpDataDirs, null);
BlockPoolSliceStorage bpStorage = this.bpStorageMap.get(bpid);
if (bpStorage == null) {
bpStorage = new BlockPoolSliceStorage(
nsInfo.getNamespaceID(), bpid, nsInfo.getCTime(),
nsInfo.getClusterID());
}
bpStorage.recoverTransitionRead(datanode, nsInfo, bpDataDirs, startOpt);
addBlockPoolStorage(bpid, bpStorage);
}
/**
* Add a list of volumes to be managed by this DataStorage. If the volume is
* empty, it formats the volume; otherwise it recovers it from previous
* transitions if required.
*
* If isInitialize is false, only the directories that have finished the
* doTransition() process will be added into DataStorage.
*
* @param datanode the reference to DataNode.
* @param nsInfo namespace information
* @param dataDirs array of data storage directories
* @param startOpt startup option
* @param isInitialize whether it is called when DataNode starts up.
* @throws IOException
*/
private synchronized void addStorageLocations(DataNode datanode,
NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs,
StartupOption startOpt, boolean isInitialize, boolean ignoreExistingDirs)
throws IOException {
Set<String> existingStorageDirs = new HashSet<String>();
for (int i = 0; i < getNumStorageDirs(); i++) {
existingStorageDirs.add(getStorageDir(i).getRoot().getAbsolutePath());
}
// 1. For each data directory calculate its state and check whether all is
// consistent before transitioning. Format and recover.
ArrayList<StorageState> dataDirStates =
new ArrayList<StorageState>(dataDirs.size());
List<StorageDirectory> addedStorageDirectories =
new ArrayList<StorageDirectory>();
for(Iterator<StorageLocation> it = dataDirs.iterator(); it.hasNext();) {
File dataDir = it.next().getFile();
if (existingStorageDirs.contains(dataDir.getAbsolutePath())) {
LOG.info("Storage directory " + dataDir + " has already been used.");
it.remove();
continue;
}
StorageDirectory sd = new StorageDirectory(dataDir);
StorageState curState;
try {
curState = sd.analyzeStorage(startOpt, this);
// sd is locked but not opened
switch (curState) {
case NORMAL:
break;
case NON_EXISTENT:
// ignore this storage
LOG.info("Storage directory " + dataDir + " does not exist");
it.remove();
continue;
case NOT_FORMATTED: // format
LOG.info("Storage directory " + dataDir + " is not formatted for "
+ nsInfo.getBlockPoolID());
LOG.info("Formatting ...");
format(sd, nsInfo, datanode.getDatanodeUuid());
break;
default: // recovery part is common
sd.doRecover(curState);
}
} catch (IOException ioe) {
sd.unlock();
LOG.warn("Ignoring storage directory " + dataDir
+ " due to an exception", ioe);
//continue with other good dirs
continue;
}
if (isInitialize) {
addStorageDir(sd);
}
addedStorageDirectories.add(sd);
dataDirStates.add(curState);
}
if (dataDirs.size() == 0 || dataDirStates.size() == 0) {
// none of the data dirs exist
if (ignoreExistingDirs) {
return;
}
throw new IOException(
"All specified directories are not accessible or do not exist.");
}
// 2. Do transitions
// Each storage directory is treated individually.
// During startup some of them can upgrade or rollback
// while others could be up-to-date for the regular startup.
for (Iterator<StorageDirectory> it = addedStorageDirectories.iterator();
it.hasNext(); ) {
StorageDirectory sd = it.next();
try {
doTransition(datanode, sd, nsInfo, startOpt);
createStorageID(sd);
} catch (IOException e) {
if (!isInitialize) {
sd.unlock();
it.remove();
continue;
}
unlockAll();
throw e;
}
}
// 3. Update all successfully loaded storages. Some of them might have just
// been formatted.
this.writeAll(addedStorageDirectories);
// 4. Make newly loaded storage directories visible for service.
if (!isInitialize) {
this.storageDirs.addAll(addedStorageDirectories);
}
}
/**
* Analyze storage directories.
* Recover from previous transitions if required.
@ -193,71 +358,13 @@ public class DataStorage extends Storage {
// DN storage has been initialized, no need to do anything
return;
}
LOG.info("Data-node version: " + HdfsConstants.DATANODE_LAYOUT_VERSION
+ " and name-node layout version: " + nsInfo.getLayoutVersion());
LOG.info("DataNode version: " + HdfsConstants.DATANODE_LAYOUT_VERSION
+ " and NameNode layout version: " + nsInfo.getLayoutVersion());
// 1. For each data directory calculate its state and
// check whether all is consistent before transitioning.
// Format and recover.
this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
ArrayList<StorageState> dataDirStates = new ArrayList<StorageState>(dataDirs.size());
for(Iterator<StorageLocation> it = dataDirs.iterator(); it.hasNext();) {
File dataDir = it.next().getFile();
StorageDirectory sd = new StorageDirectory(dataDir);
StorageState curState;
try {
curState = sd.analyzeStorage(startOpt, this);
// sd is locked but not opened
switch(curState) {
case NORMAL:
break;
case NON_EXISTENT:
// ignore this storage
LOG.info("Storage directory " + dataDir + " does not exist");
it.remove();
continue;
case NOT_FORMATTED: // format
LOG.info("Storage directory " + dataDir + " is not formatted");
LOG.info("Formatting ...");
format(sd, nsInfo, datanode.getDatanodeUuid());
break;
default: // recovery part is common
sd.doRecover(curState);
}
} catch (IOException ioe) {
sd.unlock();
LOG.warn("Ignoring storage directory " + dataDir
+ " due to an exception", ioe);
//continue with other good dirs
continue;
}
// add to the storage list
addStorageDir(sd);
dataDirStates.add(curState);
}
addStorageLocations(datanode, nsInfo, dataDirs, startOpt, true, false);
if (dataDirs.size() == 0 || dataDirStates.size() == 0) // none of the data dirs exist
throw new IOException(
"All specified directories are not accessible or do not exist.");
// 2. Do transitions
// Each storage directory is treated individually.
// During startup some of them can upgrade or rollback
// while others could be up-to-date for the regular startup.
try {
for (int idx = 0; idx < getNumStorageDirs(); idx++) {
doTransition(datanode, getStorageDir(idx), nsInfo, startOpt);
createStorageID(getStorageDir(idx));
}
} catch (IOException e) {
unlockAll();
throw e;
}
// 3. Update all storages. Some of them might have just been formatted.
this.writeAll();
// 4. mark DN storage is initialized
// mark DN storage is initialized
this.initialized = true;
}


@ -78,7 +78,7 @@ public class StorageLocation {
* @return A StorageLocation object if successfully parsed, null otherwise.
* Does not throw any exceptions.
*/
static StorageLocation parse(String rawLocation)
public static StorageLocation parse(String rawLocation)
throws IOException, SecurityException {
Matcher matcher = regex.matcher(rawLocation);
StorageType storageType = StorageType.DEFAULT;


@ -22,6 +22,7 @@ import java.io.File;
import java.io.FileDescriptor;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collection;
import java.util.List;
import java.util.Map;
@ -39,6 +40,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.apache.hadoop.hdfs.server.datanode.Replica;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory;
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
@ -91,6 +93,10 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
/** @return a list of volumes. */
public List<V> getVolumes();
/** Add a collection of StorageLocations to FsDataset. */
public void addVolumes(Collection<StorageLocation> volumes)
throws IOException;
/** @return a storage with the given storage ID */
public DatanodeStorage getStorage(final String storageUuid);


@ -61,6 +61,7 @@ class FsDatasetAsyncDiskService {
private static final long THREADS_KEEP_ALIVE_SECONDS = 60;
private final DataNode datanode;
private final ThreadGroup threadGroup;
private Map<File, ThreadPoolExecutor> executors
= new HashMap<File, ThreadPoolExecutor>();
@ -70,42 +71,52 @@ class FsDatasetAsyncDiskService {
*
 * The AsyncDiskService uses one ThreadPool per volume to perform the async
 * disk operations.
*
* @param volumes The roots of the data volumes.
*/
FsDatasetAsyncDiskService(DataNode datanode, File[] volumes) {
FsDatasetAsyncDiskService(DataNode datanode) {
this.datanode = datanode;
this.threadGroup = new ThreadGroup(getClass().getSimpleName());
}
final ThreadGroup threadGroup = new ThreadGroup(getClass().getSimpleName());
// Create one ThreadPool per volume
for (int v = 0 ; v < volumes.length; v++) {
final File vol = volumes[v];
ThreadFactory threadFactory = new ThreadFactory() {
int counter = 0;
private void addExecutorForVolume(final File volume) {
ThreadFactory threadFactory = new ThreadFactory() {
int counter = 0;
@Override
public Thread newThread(Runnable r) {
int thisIndex;
synchronized (this) {
thisIndex = counter++;
}
Thread t = new Thread(threadGroup, r);
t.setName("Async disk worker #" + thisIndex +
" for volume " + vol);
return t;
}
};
@Override
public Thread newThread(Runnable r) {
int thisIndex;
synchronized (this) {
thisIndex = counter++;
}
Thread t = new Thread(threadGroup, r);
t.setName("Async disk worker #" + thisIndex +
" for volume " + volume);
return t;
}
};
ThreadPoolExecutor executor = new ThreadPoolExecutor(
CORE_THREADS_PER_VOLUME, MAXIMUM_THREADS_PER_VOLUME,
THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>(), threadFactory);
ThreadPoolExecutor executor = new ThreadPoolExecutor(
CORE_THREADS_PER_VOLUME, MAXIMUM_THREADS_PER_VOLUME,
THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>(), threadFactory);
// This can reduce the number of running threads
executor.allowCoreThreadTimeOut(true);
executors.put(vol, executor);
// This can reduce the number of running threads
executor.allowCoreThreadTimeOut(true);
executors.put(volume, executor);
}
/**
* Starts AsyncDiskService for a new volume
* @param volume the root of the new data volume.
*/
synchronized void addVolume(File volume) {
if (executors == null) {
throw new RuntimeException("AsyncDiskService is already shutdown");
}
ThreadPoolExecutor executor = executors.get(volume);
if (executor != null) {
throw new RuntimeException("Volume " + volume + " is already existed.");
}
addExecutorForVolume(volume);
}
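FsDatasetAsyncDiskService keeps one small thread pool per volume, so a slow or failing disk cannot starve async deletions on the other disks. A minimal standalone sketch of that pattern follows; the class name, pool sizes, and the submit() helper are illustrative assumptions, not code from this patch.

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

// Hypothetical sketch: one executor per volume, mirroring the scheme above.
class PerVolumeExecutors {
  private final Map<String, ThreadPoolExecutor> executors =
      new HashMap<String, ThreadPoolExecutor>();

  synchronized void addVolume(String volume) {
    if (executors.containsKey(volume)) {
      throw new IllegalStateException("Volume " + volume + " already exists");
    }
    ThreadPoolExecutor executor = new ThreadPoolExecutor(
        1, 4,                               // core and max threads per volume
        60L, TimeUnit.SECONDS,
        new LinkedBlockingQueue<Runnable>());
    executor.allowCoreThreadTimeOut(true);  // idle volumes hold no threads
    executors.put(volume, executor);
  }

  synchronized void submit(String volume, Runnable task) {
    executors.get(volume).execute(task);    // tasks for one disk queue here
  }
}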
synchronized long countPendingDeletions() {


@ -202,6 +202,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
final Map<String, DatanodeStorage> storageMap;
final FsDatasetAsyncDiskService asyncDiskService;
final FsDatasetCache cacheManager;
private final Configuration conf;
private final int validVolsRequired;
final ReplicaMap volumeMap;
@ -216,6 +217,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
) throws IOException {
this.datanode = datanode;
this.dataStorage = storage;
this.conf = conf;
// The number of volumes required for operation is the total number
// of volumes minus the number of failed volumes we can tolerate.
final int volFailuresTolerated =
@ -242,38 +244,76 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
}
storageMap = new HashMap<String, DatanodeStorage>();
final List<FsVolumeImpl> volArray = new ArrayList<FsVolumeImpl>(
storage.getNumStorageDirs());
for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
Storage.StorageDirectory sd = storage.getStorageDir(idx);
final File dir = sd.getCurrentDir();
final StorageType storageType = getStorageTypeFromLocations(dataLocations, sd.getRoot());
volArray.add(new FsVolumeImpl(this, sd.getStorageUuid(), dir, conf,
storageType));
LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
storageMap.put(sd.getStorageUuid(),
new DatanodeStorage(sd.getStorageUuid(), DatanodeStorage.State.NORMAL, storageType));
}
volumeMap = new ReplicaMap(this);
@SuppressWarnings("unchecked")
final VolumeChoosingPolicy<FsVolumeImpl> blockChooserImpl =
ReflectionUtils.newInstance(conf.getClass(
DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY,
RoundRobinVolumeChoosingPolicy.class,
VolumeChoosingPolicy.class), conf);
volumes = new FsVolumeList(volArray, volsFailed, blockChooserImpl);
volumes.initializeReplicaMaps(volumeMap);
volumes = new FsVolumeList(volsFailed, blockChooserImpl);
asyncDiskService = new FsDatasetAsyncDiskService(datanode);
File[] roots = new File[storage.getNumStorageDirs()];
for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
roots[idx] = storage.getStorageDir(idx).getCurrentDir();
addVolume(dataLocations, storage.getStorageDir(idx));
}
asyncDiskService = new FsDatasetAsyncDiskService(datanode, roots);
cacheManager = new FsDatasetCache(this);
registerMBean(datanode.getDatanodeUuid());
}
private void addVolume(Collection<StorageLocation> dataLocations,
Storage.StorageDirectory sd) throws IOException {
final File dir = sd.getCurrentDir();
final StorageType storageType =
getStorageTypeFromLocations(dataLocations, sd.getRoot());
// If an IOException is raised from FsVolumeImpl() or getVolumeMap(), there
// is nothing to roll back to keep the various data structures, e.g.,
// storageMap and asyncDiskService, consistent.
FsVolumeImpl fsVolume = new FsVolumeImpl(
this, sd.getStorageUuid(), dir, this.conf, storageType);
fsVolume.getVolumeMap(volumeMap);
volumes.addVolume(fsVolume);
storageMap.put(sd.getStorageUuid(),
new DatanodeStorage(sd.getStorageUuid(),
DatanodeStorage.State.NORMAL,
storageType));
asyncDiskService.addVolume(sd.getCurrentDir());
LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
}
/**
* Add a collection of StorageLocations to FsDataset.
*
* @pre dataStorage must have these volumes.
* @param volumes the storage locations to add
* @throws IOException
*/
@Override
public synchronized void addVolumes(Collection<StorageLocation> volumes)
throws IOException {
final Collection<StorageLocation> dataLocations =
DataNode.getStorageLocations(this.conf);
Map<String, Storage.StorageDirectory> allStorageDirs =
new HashMap<String, Storage.StorageDirectory>();
for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
allStorageDirs.put(sd.getRoot().getAbsolutePath(), sd);
}
for (StorageLocation vol : volumes) {
String key = vol.getFile().getAbsolutePath();
if (!allStorageDirs.containsKey(key)) {
LOG.warn("Attempt to add an invalid volume: " + vol.getFile());
} else {
addVolume(dataLocations, allStorageDirs.get(key));
}
}
}
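Combined with the StorageLocation.parse() visibility change earlier in this diff, the new addVolumes() entry point could be exercised roughly as follows. This is a hypothetical caller, not part of the patch: the dataset reference and disk paths are made up, and per the @pre note, DataStorage must already manage these directories.

import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;

import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;

public class AddVolumesExample {
  // 'dataset' would come from a running DataNode; the paths are illustrative.
  static void addNewDisks(FsDatasetSpi<?> dataset) throws IOException {
    Collection<StorageLocation> newVolumes = Arrays.asList(
        StorageLocation.parse("[DISK]file:///data/disk3"),
        StorageLocation.parse("/data/disk4"));
    // Volumes not known to DataStorage are logged and skipped, per above.
    dataset.addVolumes(newVolumes);
  }
}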
private StorageType getStorageTypeFromLocations(
Collection<StorageLocation> dataLocations, File dir) {
for (StorageLocation dataLocation : dataLocations) {


@ -40,9 +40,8 @@ class FsVolumeList {
private final VolumeChoosingPolicy<FsVolumeImpl> blockChooser;
private volatile int numFailedVolumes;
FsVolumeList(List<FsVolumeImpl> volumes, int failedVols,
FsVolumeList(int failedVols,
VolumeChoosingPolicy<FsVolumeImpl> blockChooser) {
this.volumes = Collections.unmodifiableList(volumes);
this.blockChooser = blockChooser;
this.numFailedVolumes = failedVols;
}
@ -102,12 +101,6 @@ class FsVolumeList {
return remaining;
}
void initializeReplicaMaps(ReplicaMap globalReplicaMap) throws IOException {
for (FsVolumeImpl v : volumes) {
v.getVolumeMap(globalReplicaMap);
}
}
void getAllVolumesMap(final String bpid, final ReplicaMap volumeMap) throws IOException {
long totalStartTime = Time.monotonicNow();
final List<IOException> exceptions = Collections.synchronizedList(
@ -205,6 +198,19 @@ class FsVolumeList {
return volumes.toString();
}
/**
* Dynamically add new volumes to the existing volumes that this DN manages.
* @param newVolume the instance of new FsVolumeImpl.
*/
synchronized void addVolume(FsVolumeImpl newVolume) {
// Make a copy of the volume list, add the new volume, and republish it.
final List<FsVolumeImpl> volumeList = volumes == null ?
new ArrayList<FsVolumeImpl>() :
new ArrayList<FsVolumeImpl>(volumes);
volumeList.add(newVolume);
volumes = Collections.unmodifiableList(volumeList);
FsDatasetImpl.LOG.info("Added new volume: " + newVolume.toString());
}
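addVolume() above uses a copy-on-write discipline: the writer builds a fresh list under the lock and republishes it through an unmodifiable reference, so readers can iterate the volume list without synchronization. A stripped-down sketch of the same idiom, with an assumed generic class standing in for FsVolumeList:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Illustrative copy-on-write container; not code from the patch.
class CopyOnWriteRegistry<T> {
  private volatile List<T> items = Collections.emptyList();

  synchronized void add(T item) {
    List<T> copy = new ArrayList<T>(items);      // snapshot the current list
    copy.add(item);
    items = Collections.unmodifiableList(copy);  // publish atomically
  }

  List<T> snapshot() {
    return items;  // safe to iterate without holding a lock
  }
}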
void addBlockPool(final String bpid, final Configuration conf) throws IOException {
long totalStartTime = Time.monotonicNow();


@ -786,8 +786,6 @@ public class FSDirectory implements Closeable {
checkSnapshot(srcInode, null);
}
private class RenameOperation {
private final INodesInPath srcIIP;
private final INodesInPath dstIIP;
@ -820,7 +818,7 @@ public class FSDirectory implements Closeable {
// snapshot is taken on the dst tree, changes will be recorded in the latest
// snapshot of the src tree.
if (isSrcInSnapshot) {
srcChild = srcChild.recordModification(srcIIP.getLatestSnapshotId());
srcChild.recordModification(srcIIP.getLatestSnapshotId());
}
// check srcChild for reference
@ -950,8 +948,7 @@ public class FSDirectory implements Closeable {
updateCount(iip, 0, dsDelta, true);
}
file = file.setFileReplication(replication, iip.getLatestSnapshotId(),
inodeMap);
file.setFileReplication(replication, iip.getLatestSnapshotId());
final short newBR = file.getBlockReplication();
// check newBR < oldBR case.
@ -1234,8 +1231,7 @@ public class FSDirectory implements Closeable {
// record modification
final int latestSnapshot = iip.getLatestSnapshotId();
targetNode = targetNode.recordModification(latestSnapshot);
iip.setLastINode(targetNode);
targetNode.recordModification(latestSnapshot);
// Remove the node from the namespace
long removed = removeLastINode(iip);
@ -2161,7 +2157,7 @@ public class FSDirectory implements Closeable {
}
final int latest = iip.getLatestSnapshotId();
dirNode = dirNode.recordModification(latest);
dirNode.recordModification(latest);
dirNode.setQuota(nsQuota, dsQuota);
return dirNode;
}


@ -2733,7 +2733,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
boolean writeToEditLog,
int latestSnapshot, boolean logRetryCache)
throws IOException {
file = file.recordModification(latestSnapshot);
file.recordModification(latestSnapshot);
final INodeFile cons = file.toUnderConstruction(leaseHolder, clientMachine);
leaseManager.addLease(cons.getFileUnderConstructionFeature()
@ -4441,7 +4441,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
Preconditions.checkArgument(uc != null);
leaseManager.removeLease(uc.getClientName(), src);
pendingFile = pendingFile.recordModification(latestSnapshot);
pendingFile.recordModification(latestSnapshot);
// The file is no longer pending.
// Create permanent INode, update blocks. No need to replace the inode here
@ -6342,7 +6342,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
}
@Override // FSNamesystemMBean
public int getNumLiveDataNodes() {
return getBlockManager().getDatanodeManager().getNumLiveDataNodes();
@ -6388,6 +6387,15 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return getBlockManager().getDatanodeManager().getNumStaleNodes();
}
/**
* Storages are marked as "content stale" after a NameNode restarts or fails
* over, and until the NameNode receives the first heartbeat followed by the
* first block report.
*/
@Override // FSNamesystemMBean
public int getNumStaleStorages() {
return getBlockManager().getDatanodeManager().getNumStaleStorages();
}
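Since FSNamesystemMBean backs the NameNode's JMX tree, the new counter is visible to any JMX client. A hypothetical probe is sketched below; the object name is the conventional FSNamesystemState bean and should be verified against a running NameNode.

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class StaleStorageProbe {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    // Assumed bean name; check the NameNode's /jmx output to confirm.
    ObjectName name = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");
    Object stale = mbs.getAttribute(name, "NumStaleStorages");
    System.out.println("Content-stale storages: " + stale);
  }
}

A real monitor would attach to the NameNode's JMX port with a JMXConnector rather than query the local platform server as this sketch does.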
/**
* Sets the current generation stamp for legacy blocks
*/


@ -97,9 +97,9 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
/** Set user */
final INode setUser(String user, int latestSnapshotId)
throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.setUser(user);
return nodeToUpdate;
recordModification(latestSnapshotId);
setUser(user);
return this;
}
/**
* @param snapshotId
@ -122,9 +122,9 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
/** Set group */
final INode setGroup(String group, int latestSnapshotId)
throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.setGroup(group);
return nodeToUpdate;
recordModification(latestSnapshotId);
setGroup(group);
return this;
}
/**
@ -148,9 +148,9 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
/** Set the {@link FsPermission} of this {@link INode} */
INode setPermission(FsPermission permission, int latestSnapshotId)
throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.setPermission(permission);
return nodeToUpdate;
recordModification(latestSnapshotId);
setPermission(permission);
return this;
}
abstract AclFeature getAclFeature(int snapshotId);
@ -164,18 +164,18 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
final INode addAclFeature(AclFeature aclFeature, int latestSnapshotId)
throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.addAclFeature(aclFeature);
return nodeToUpdate;
recordModification(latestSnapshotId);
addAclFeature(aclFeature);
return this;
}
abstract void removeAclFeature();
final INode removeAclFeature(int latestSnapshotId)
throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.removeAclFeature();
return nodeToUpdate;
recordModification(latestSnapshotId);
removeAclFeature();
return this;
}
/**
@ -199,9 +199,9 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
final INode addXAttrFeature(XAttrFeature xAttrFeature, int latestSnapshotId)
throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.addXAttrFeature(xAttrFeature);
return nodeToUpdate;
recordModification(latestSnapshotId);
addXAttrFeature(xAttrFeature);
return this;
}
/**
@ -211,9 +211,9 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
final INode removeXAttrFeature(int lastestSnapshotId)
throws QuotaExceededException {
final INode nodeToUpdate = recordModification(lastestSnapshotId);
nodeToUpdate.removeXAttrFeature();
return nodeToUpdate;
recordModification(lastestSnapshotId);
removeXAttrFeature();
return this;
}
/**
@ -298,11 +298,8 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
* @param latestSnapshotId The id of the latest snapshot that has been taken.
* Note that it is {@link Snapshot#CURRENT_STATE_ID}
* if no snapshots have been taken.
* @return The current inode, which usually is the same object of this inode.
* However, in some cases, this inode may be replaced with a new inode
* for maintaining snapshots. The current inode is then the new inode.
*/
abstract INode recordModification(final int latestSnapshotId)
abstract void recordModification(final int latestSnapshotId)
throws QuotaExceededException;
/** Check whether it's a reference. */
@ -652,9 +649,9 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
/** Set the last modification time of inode. */
public final INode setModificationTime(long modificationTime,
int latestSnapshotId) throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.setModificationTime(modificationTime);
return nodeToUpdate;
recordModification(latestSnapshotId);
setModificationTime(modificationTime);
return this;
}
/**
@ -682,9 +679,9 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
*/
public final INode setAccessTime(long accessTime, int latestSnapshotId)
throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.setAccessTime(accessTime);
return nodeToUpdate;
recordModification(latestSnapshotId);
setAccessTime(accessTime);
return this;
}


@ -318,7 +318,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
}
@Override
public INodeDirectory recordModification(int latestSnapshotId)
public void recordModification(int latestSnapshotId)
throws QuotaExceededException {
if (isInLatestSnapshot(latestSnapshotId)
&& !shouldRecordInSrcSnapshot(latestSnapshotId)) {
@ -330,7 +330,6 @@ public class INodeDirectory extends INodeWithAdditionalFields
// record self in the diff list if necessary
sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null);
}
return this;
}
/**


@ -284,7 +284,7 @@ public class INodeFile extends INodeWithAdditionalFields
}
@Override
public INodeFile recordModification(final int latestSnapshotId)
public void recordModification(final int latestSnapshotId)
throws QuotaExceededException {
if (isInLatestSnapshot(latestSnapshotId)
&& !shouldRecordInSrcSnapshot(latestSnapshotId)) {
@ -296,7 +296,6 @@ public class INodeFile extends INodeWithAdditionalFields
// record self in the diff list if necessary
sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null);
}
return this;
}
public FileDiffList getDiffs() {
@ -344,11 +343,10 @@ public class INodeFile extends INodeWithAdditionalFields
/** Set the replication factor of this file. */
public final INodeFile setFileReplication(short replication,
int latestSnapshotId, final INodeMap inodeMap)
throws QuotaExceededException {
final INodeFile nodeToUpdate = recordModification(latestSnapshotId);
nodeToUpdate.setFileReplication(replication);
return nodeToUpdate;
int latestSnapshotId) throws QuotaExceededException {
recordModification(latestSnapshotId);
setFileReplication(replication);
return this;
}
/** @return preferred block size (in bytes) of the file. */


@ -93,9 +93,8 @@ public class INodeMap {
"", "", new FsPermission((short) 0)), 0, 0) {
@Override
INode recordModification(int latestSnapshotId)
void recordModification(int latestSnapshotId)
throws QuotaExceededException {
return null;
}
@Override


@ -287,11 +287,9 @@ public abstract class INodeReference extends INode {
}
@Override
final INode recordModification(int latestSnapshotId)
final void recordModification(int latestSnapshotId)
throws QuotaExceededException {
referred.recordModification(latestSnapshotId);
// reference is never replaced
return this;
}
@Override // used by WithCount


@ -47,12 +47,11 @@ public class INodeSymlink extends INodeWithAdditionalFields {
}
@Override
INode recordModification(int latestSnapshotId) throws QuotaExceededException {
void recordModification(int latestSnapshotId) throws QuotaExceededException {
if (isInLatestSnapshot(latestSnapshotId)) {
INodeDirectory parent = getParent();
parent.saveChild2Snapshot(this, latestSnapshotId, new INodeSymlink(this));
}
return this;
}
/** @return true unconditionally. */


@ -151,4 +151,11 @@ public interface FSNamesystemMBean {
* @return number of blocks pending deletion
*/
long getPendingDeletionBlocks();
/**
* Number of content stale storages.
* @return number of content stale storages
*/
public int getNumStaleStorages();
}


@ -22,6 +22,9 @@ import org.apache.hadoop.classification.InterfaceStability;
/**
* A RegisterCommand is an instruction to a datanode to register with the namenode.
* This command can't be combined with other commands in the same response.
* This is because after the datanode processes RegisterCommand, it will skip
* the rest of the DatanodeCommands in the same HeartbeatResponse.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving


@ -37,6 +37,10 @@ ELSE (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
ENDIF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
IF(FUSE_FOUND)
add_library(posix_util
../util/posix_util.c
)
add_executable(fuse_dfs
fuse_dfs.c
fuse_options.c


@ -0,0 +1,271 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/htable.h"
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
struct htable_pair {
void *key;
void *val;
};
/**
* A hash table which uses linear probing.
*/
struct htable {
uint32_t capacity;
uint32_t used;
htable_hash_fn_t hash_fun;
htable_eq_fn_t eq_fun;
struct htable_pair *elem;
};
/**
* An internal function for inserting a value into the hash table.
*
* Note: this function assumes that you have made enough space in the table.
*
* @param nelem The element array to insert into.
* @param capacity The capacity of the hash table.
* @param hash_fun The hash function to use.
* @param key The key to insert.
* @param val The value to insert.
*/
static void htable_insert_internal(struct htable_pair *nelem,
uint32_t capacity, htable_hash_fn_t hash_fun, void *key,
void *val)
{
uint32_t i;
i = hash_fun(key, capacity);
while (1) {
if (!nelem[i].key) {
nelem[i].key = key;
nelem[i].val = val;
return;
}
i++;
if (i == capacity) {
i = 0;
}
}
}
static int htable_realloc(struct htable *htable, uint32_t new_capacity)
{
struct htable_pair *nelem;
uint32_t i, old_capacity = htable->capacity;
htable_hash_fn_t hash_fun = htable->hash_fun;
nelem = calloc(new_capacity, sizeof(struct htable_pair));
if (!nelem) {
return ENOMEM;
}
for (i = 0; i < old_capacity; i++) {
struct htable_pair *pair = htable->elem + i;
htable_insert_internal(nelem, new_capacity, hash_fun,
pair->key, pair->val);
}
free(htable->elem);
htable->elem = nelem;
htable->capacity = new_capacity;
return 0;
}
struct htable *htable_alloc(uint32_t size,
htable_hash_fn_t hash_fun, htable_eq_fn_t eq_fun)
{
struct htable *htable;
htable = calloc(1, sizeof(*htable));
if (!htable) {
return NULL;
}
size = (size + 1) >> 1;
size = size << 1;
if (size < HTABLE_MIN_SIZE) {
size = HTABLE_MIN_SIZE;
}
htable->hash_fun = hash_fun;
htable->eq_fun = eq_fun;
htable->used = 0;
if (htable_realloc(htable, size)) {
free(htable);
return NULL;
}
return htable;
}
void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx)
{
uint32_t i;
for (i = 0; i != htable->capacity; ++i) {
struct htable_pair *elem = htable->elem + i;
if (elem->key) {
fun(ctx, elem->key, elem->val);
}
}
}
void htable_free(struct htable *htable)
{
if (htable) {
free(htable->elem);
free(htable);
}
}
int htable_put(struct htable *htable, void *key, void *val)
{
int ret;
uint32_t nused;
// NULL is not a valid key value.
// This helps us implement htable_get_internal efficiently, since we know
// that we can stop when we encounter the first NULL key.
if (!key) {
return EINVAL;
}
// NULL is not a valid value. Otherwise the results of htable_get would
// be confusing (does a NULL return mean entry not found, or that the
// entry was found and was NULL?)
if (!val) {
return EINVAL;
}
// Re-hash if we have used more than half of the hash table
nused = htable->used + 1;
if (nused >= (htable->capacity / 2)) {
ret = htable_realloc(htable, htable->capacity * 2);
if (ret)
return ret;
}
htable_insert_internal(htable->elem, htable->capacity,
htable->hash_fun, key, val);
htable->used++;
return 0;
}
static int htable_get_internal(const struct htable *htable,
const void *key, uint32_t *out)
{
uint32_t start_idx, idx;
start_idx = htable->hash_fun(key, htable->capacity);
idx = start_idx;
while (1) {
struct htable_pair *pair = htable->elem + idx;
if (!pair->key) {
// We always maintain the invariant that the entries corresponding
// to a given key are stored in a contiguous block, not separated
// by any NULLs. So if we encounter a NULL, our search is over.
return ENOENT;
} else if (htable->eq_fun(pair->key, key)) {
*out = idx;
return 0;
}
idx++;
if (idx == htable->capacity) {
idx = 0;
}
if (idx == start_idx) {
return ENOENT;
}
}
}
void *htable_get(const struct htable *htable, const void *key)
{
uint32_t idx;
if (htable_get_internal(htable, key, &idx)) {
return NULL;
}
return htable->elem[idx].val;
}
void htable_pop(struct htable *htable, const void *key,
void **found_key, void **found_val)
{
uint32_t hole, i;
const void *nkey;
if (htable_get_internal(htable, key, &hole)) {
*found_key = NULL;
*found_val = NULL;
return;
}
i = hole;
htable->used--;
// We need to maintain the compactness invariant used in
// htable_get_internal. This invariant specifies that the entries for any
// given key are never separated by NULLs (although they may be separated
// by entries for other keys.)
while (1) {
i++;
if (i == htable->capacity) {
i = 0;
}
nkey = htable->elem[i].key;
if (!nkey) {
*found_key = htable->elem[hole].key;
*found_val = htable->elem[hole].val;
htable->elem[hole].key = NULL;
htable->elem[hole].val = NULL;
return;
} else if (htable->eq_fun(key, nkey)) {
htable->elem[hole].key = htable->elem[i].key;
htable->elem[hole].val = htable->elem[i].val;
hole = i;
}
}
}
uint32_t htable_used(const struct htable *htable)
{
return htable->used;
}
uint32_t htable_capacity(const struct htable *htable)
{
return htable->capacity;
}
uint32_t ht_hash_string(const void *str, uint32_t max)
{
const char *s = str;
uint32_t hash = 0;
while (*s) {
hash = (hash * 31) + *s;
s++;
}
return hash % max;
}
int ht_compare_string(const void *a, const void *b)
{
return strcmp(a, b) == 0;
}
// vim: ts=4:sw=4:tw=79:et


@ -0,0 +1,161 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef HADOOP_CORE_COMMON_HASH_TABLE
#define HADOOP_CORE_COMMON_HASH_TABLE
#include <inttypes.h>
#include <stdio.h>
#include <stdint.h>
#define HTABLE_MIN_SIZE 4
struct htable;
/**
* An HTable hash function.
*
* @param key The key.
* @param capacity The total capacity.
*
* @return The hash slot. Must be less than the capacity.
*/
typedef uint32_t (*htable_hash_fn_t)(const void *key, uint32_t capacity);
/**
* An HTable equality function. Compares two keys.
*
* @param a First key.
* @param b Second key.
*
* @return nonzero if the keys are equal.
*/
typedef int (*htable_eq_fn_t)(const void *a, const void *b);
/**
* Allocate a new hash table.
*
* @param capacity The minimum suggested starting capacity.
* @param hash_fun The hash function to use in this hash table.
* @param eq_fun The equals function to use in this hash table.
*
* @return The new hash table on success; NULL on OOM.
*/
struct htable *htable_alloc(uint32_t capacity, htable_hash_fn_t hash_fun,
htable_eq_fn_t eq_fun);
typedef void (*visitor_fn_t)(void *ctx, void *key, void *val);
/**
* Visit all of the entries in the hash table.
*
* @param htable The hash table.
* @param fun The callback function to invoke on each key and value.
* @param ctx Context pointer to pass to the callback.
*/
void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx);
/**
* Free the hash table.
*
* It is up to the calling code to ensure that the keys and values inside the
* table are de-allocated, if that is necessary.
*
* @param htable The hash table.
*/
void htable_free(struct htable *htable);
/**
* Add an entry to the hash table.
*
* @param htable The hash table.
* @param key The key to add. This cannot be NULL.
* @param val The value to add. This cannot be NULL.
*
* @return 0 on success;
* EEXIST if the value already exists in the table;
* ENOMEM if there is not enough memory to add the element.
* EFBIG if the hash table has too many entries to fit in 32
* bits.
*/
int htable_put(struct htable *htable, void *key, void *val);
/**
* Get an entry from the hash table.
*
* @param htable The hash table.
* @param key The key to find.
*
* @return NULL if there is no such entry; the entry otherwise.
*/
void *htable_get(const struct htable *htable, const void *key);
/**
* Get an entry from the hash table and remove it.
*
* @param htable The hash table.
* @param key The key for the entry to find and remove.
* @param found_key (out param) NULL if the entry was not found; the found key
* otherwise.
* @param found_val (out param) NULL if the entry was not found; the found
* value otherwise.
*/
void htable_pop(struct htable *htable, const void *key,
void **found_key, void **found_val);
/**
* Get the number of entries used in the hash table.
*
* @param htable The hash table.
*
* @return The number of entries used in the hash table.
*/
uint32_t htable_used(const struct htable *htable);
/**
* Get the capacity of the hash table.
*
* @param htable The hash table.
*
* @return The capacity of the hash table.
*/
uint32_t htable_capacity(const struct htable *htable);
/**
* Hash a string.
*
* @param str The string.
* @param max Maximum hash value
*
* @return A number less than max.
*/
uint32_t ht_hash_string(const void *str, uint32_t max);
/**
* Compare two strings.
*
* @param a The first string.
* @param b The second string.
*
* @return 1 if the strings are identical; 0 otherwise.
*/
int ht_compare_string(const void *a, const void *b);
#endif
// vim: ts=4:sw=4:tw=79:et
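The contract documented above is the interesting part: lookups may stop at the first empty slot because a key's probe run is never interrupted by holes, and removal must re-insert the remainder of the run to preserve that. The same scheme, transliterated into a small Java sketch for illustration (String keys keep it short, and no attempt is made to reproduce the C API):

import java.util.Objects;

// Sketch of the probing scheme specified above. Invariant: entries reachable
// from a key's home slot are never separated by empty slots, so get() may
// stop at the first null and remove() must re-insert displaced entries.
class LinearProbingTable {
  private String[] keys = new String[8];
  private Object[] vals = new Object[8];
  private int used;

  private int home(String key, int cap) {
    return (key.hashCode() & 0x7fffffff) % cap;
  }

  void put(String key, Object val) {
    Objects.requireNonNull(key);            // null keys are invalid
    Objects.requireNonNull(val);            // null values are invalid
    if ((used + 1) * 2 >= keys.length) {    // grow at 50% load
      resize(keys.length * 2);
    }
    int i = home(key, keys.length);
    while (keys[i] != null && !keys[i].equals(key)) {
      i = (i + 1) % keys.length;
    }
    if (keys[i] == null) {
      used++;
    }
    keys[i] = key;
    vals[i] = val;
  }

  Object get(String key) {
    int i = home(key, keys.length);
    while (keys[i] != null) {               // the first hole ends the probe
      if (keys[i].equals(key)) {
        return vals[i];
      }
      i = (i + 1) % keys.length;
    }
    return null;
  }

  void remove(String key) {
    int i = home(key, keys.length);
    while (keys[i] != null && !keys[i].equals(key)) {
      i = (i + 1) % keys.length;
    }
    if (keys[i] == null) {
      return;                               // not present
    }
    keys[i] = null;
    vals[i] = null;
    used--;
    // Re-insert the rest of the run so the emptied slot does not break
    // any other key's probe sequence (the compactness invariant).
    for (int j = (i + 1) % keys.length; keys[j] != null;
         j = (j + 1) % keys.length) {
      String k = keys[j];
      Object v = vals[j];
      keys[j] = null;
      vals[j] = null;
      used--;
      put(k, v);
    }
  }

  private void resize(int cap) {
    String[] ok = keys;
    Object[] ov = vals;
    keys = new String[cap];
    vals = new Object[cap];
    used = 0;
    for (int i = 0; i < ok.length; i++) {
      if (ok[i] != null) {
        put(ok[i], ov[i]);
      }
    }
  }
}

Growing at 50% load keeps probe runs short, which is the same reason htable_put re-hashes once half of the C table's slots are used.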


@ -19,8 +19,8 @@
#include "exception.h"
#include "hdfs.h"
#include "jni_helper.h"
#include "platform.h"
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@ -35,54 +35,54 @@ struct ExceptionInfo {
static const struct ExceptionInfo gExceptionInfo[] = {
{
.name = "java.io.FileNotFoundException",
.noPrintFlag = NOPRINT_EXC_FILE_NOT_FOUND,
.excErrno = ENOENT,
"java.io.FileNotFoundException",
NOPRINT_EXC_FILE_NOT_FOUND,
ENOENT,
},
{
.name = "org.apache.hadoop.security.AccessControlException",
.noPrintFlag = NOPRINT_EXC_ACCESS_CONTROL,
.excErrno = EACCES,
"org.apache.hadoop.security.AccessControlException",
NOPRINT_EXC_ACCESS_CONTROL,
EACCES,
},
{
.name = "org.apache.hadoop.fs.UnresolvedLinkException",
.noPrintFlag = NOPRINT_EXC_UNRESOLVED_LINK,
.excErrno = ENOLINK,
"org.apache.hadoop.fs.UnresolvedLinkException",
NOPRINT_EXC_UNRESOLVED_LINK,
ENOLINK,
},
{
.name = "org.apache.hadoop.fs.ParentNotDirectoryException",
.noPrintFlag = NOPRINT_EXC_PARENT_NOT_DIRECTORY,
.excErrno = ENOTDIR,
"org.apache.hadoop.fs.ParentNotDirectoryException",
NOPRINT_EXC_PARENT_NOT_DIRECTORY,
ENOTDIR,
},
{
.name = "java.lang.IllegalArgumentException",
.noPrintFlag = NOPRINT_EXC_ILLEGAL_ARGUMENT,
.excErrno = EINVAL,
"java.lang.IllegalArgumentException",
NOPRINT_EXC_ILLEGAL_ARGUMENT,
EINVAL,
},
{
.name = "java.lang.OutOfMemoryError",
.noPrintFlag = 0,
.excErrno = ENOMEM,
"java.lang.OutOfMemoryError",
0,
ENOMEM,
},
{
.name = "org.apache.hadoop.hdfs.server.namenode.SafeModeException",
.noPrintFlag = 0,
.excErrno = EROFS,
"org.apache.hadoop.hdfs.server.namenode.SafeModeException",
0,
EROFS,
},
{
.name = "org.apache.hadoop.fs.FileAlreadyExistsException",
.noPrintFlag = 0,
.excErrno = EEXIST,
"org.apache.hadoop.fs.FileAlreadyExistsException",
0,
EEXIST,
},
{
.name = "org.apache.hadoop.hdfs.protocol.QuotaExceededException",
.noPrintFlag = 0,
.excErrno = EDQUOT,
"org.apache.hadoop.hdfs.protocol.QuotaExceededException",
0,
EDQUOT,
},
{
.name = "org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException",
.noPrintFlag = 0,
.excErrno = ESTALE,
"org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException",
0,
ESTALE,
},
};
@ -113,6 +113,7 @@ int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
jstring jStr = NULL;
jvalue jVal;
jthrowable jthr;
const char *stackTrace;
jthr = classNameOfObject(exc, env, &className);
if (jthr) {
@ -148,7 +149,7 @@ int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
destroyLocalReference(env, jthr);
} else {
jStr = jVal.l;
const char *stackTrace = (*env)->GetStringUTFChars(env, jStr, NULL);
stackTrace = (*env)->GetStringUTFChars(env, jStr, NULL);
if (!stackTrace) {
fprintf(stderr, "(unable to get stack trace for %s exception: "
"GetStringUTFChars error.)\n", className);


@ -34,13 +34,14 @@
* usually not what you want.)
*/
#include "platform.h"
#include <jni.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <search.h>
#include <pthread.h>
#include <errno.h>
/**
@ -109,7 +110,7 @@ int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
* object.
*/
int printExceptionAndFree(JNIEnv *env, jthrowable exc, int noPrintFlags,
const char *fmt, ...) __attribute__((format(printf, 4, 5)));
const char *fmt, ...) TYPE_CHECKED_PRINTF_FORMAT(4, 5);
/**
* Print out information about the pending exception and free it.
@ -124,7 +125,7 @@ int printExceptionAndFree(JNIEnv *env, jthrowable exc, int noPrintFlags,
* object.
*/
int printPendingExceptionAndFree(JNIEnv *env, int noPrintFlags,
const char *fmt, ...) __attribute__((format(printf, 3, 4)));
const char *fmt, ...) TYPE_CHECKED_PRINTF_FORMAT(3, 4);
/**
* Get a local reference to the pending exception and clear it.
@ -150,6 +151,7 @@ jthrowable getPendingExceptionAndClear(JNIEnv *env);
* @return A local reference to a RuntimeError
*/
jthrowable newRuntimeError(JNIEnv *env, const char *fmt, ...)
__attribute__((format(printf, 2, 3)));
TYPE_CHECKED_PRINTF_FORMAT(2, 3);
#undef TYPE_CHECKED_PRINTF_FORMAT
#endif


@ -49,18 +49,18 @@ int expectFileStats(hdfsFile file,
stats->totalShortCircuitBytesRead,
stats->totalZeroCopyBytesRead);
if (expectedTotalBytesRead != UINT64_MAX) {
EXPECT_INT64_EQ(expectedTotalBytesRead, stats->totalBytesRead);
EXPECT_UINT64_EQ(expectedTotalBytesRead, stats->totalBytesRead);
}
if (expectedTotalLocalBytesRead != UINT64_MAX) {
EXPECT_INT64_EQ(expectedTotalLocalBytesRead,
EXPECT_UINT64_EQ(expectedTotalLocalBytesRead,
stats->totalLocalBytesRead);
}
if (expectedTotalShortCircuitBytesRead != UINT64_MAX) {
EXPECT_INT64_EQ(expectedTotalShortCircuitBytesRead,
EXPECT_UINT64_EQ(expectedTotalShortCircuitBytesRead,
stats->totalShortCircuitBytesRead);
}
if (expectedTotalZeroCopyBytesRead != UINT64_MAX) {
EXPECT_INT64_EQ(expectedTotalZeroCopyBytesRead,
EXPECT_UINT64_EQ(expectedTotalZeroCopyBytesRead,
stats->totalZeroCopyBytesRead);
}
hdfsFileFreeReadStatistics(stats);


@ -126,6 +126,18 @@ struct hdfsFile_internal;
} \
} while (0);
#define EXPECT_UINT64_EQ(x, y) \
do { \
uint64_t __my_ret__ = y; \
int __my_errno__ = errno; \
if (__my_ret__ != (x)) { \
fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
"value %"PRIu64" (errno: %d): expected %"PRIu64"\n", \
__FILE__, __LINE__, __my_ret__, __my_errno__, (x)); \
return -1; \
} \
} while (0);
#define RETRY_ON_EINTR_GET_ERRNO(ret, expr) do { \
ret = expr; \
if (!ret) \


@ -19,20 +19,18 @@
#include "config.h"
#include "exception.h"
#include "jni_helper.h"
#include "platform.h"
#include "common/htable.h"
#include "os/mutexes.h"
#include "os/thread_local_storage.h"
#include <stdio.h>
#include <string.h>
static pthread_mutex_t hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t jvmMutex = PTHREAD_MUTEX_INITIALIZER;
static volatile int hashTableInited = 0;
#define LOCK_HASH_TABLE() pthread_mutex_lock(&hdfsHashMutex)
#define UNLOCK_HASH_TABLE() pthread_mutex_unlock(&hdfsHashMutex)
static struct htable *gClassRefHTable = NULL;
/** The Native return types that methods could return */
#define VOID 'V'
#define JVOID 'V'
#define JOBJECT 'L'
#define JARRAYOBJECT '['
#define JBOOLEAN 'Z'
@ -51,40 +49,10 @@ static volatile int hashTableInited = 0;
*/
#define MAX_HASH_TABLE_ELEM 4096
/** Key that allows us to retrieve thread-local storage */
static pthread_key_t gTlsKey;
/** nonzero if we succeeded in initializing gTlsKey. Protected by the jvmMutex */
static int gTlsKeyInitialized = 0;
/** Pthreads thread-local storage for each library thread. */
struct hdfsTls {
JNIEnv *env;
};
/**
* The function that is called whenever a thread with libhdfs thread local data
* is destroyed.
*
* @param v The thread-local data
 */
static void hdfsThreadDestructor(void *v)
{
struct hdfsTls *tls = v;
JavaVM *vm;
JNIEnv *env = tls->env;
jint ret;
ret = (*env)->GetJavaVM(env, &vm);
if (ret) {
fprintf(stderr, "hdfsThreadDestructor: GetJavaVM failed with "
"error %d\n", ret);
(*env)->ExceptionDescribe(env);
} else {
(*vm)->DetachCurrentThread(vm);
}
free(tls);
}
/**
 * Length of buffer for retrieving created JVMs. (We only ever create one.)
 */
#define VM_BUF_LENGTH 1
void destroyLocalReference(JNIEnv *env, jobject jObject)
{
@ -138,67 +106,6 @@ jthrowable newCStr(JNIEnv *env, jstring jstr, char **out)
return NULL;
}
static int hashTableInit(void)
{
if (!hashTableInited) {
LOCK_HASH_TABLE();
if (!hashTableInited) {
if (hcreate(MAX_HASH_TABLE_ELEM) == 0) {
fprintf(stderr, "error creating hashtable, <%d>: %s\n",
errno, strerror(errno));
UNLOCK_HASH_TABLE();
return 0;
}
hashTableInited = 1;
}
UNLOCK_HASH_TABLE();
}
return 1;
}
static int insertEntryIntoTable(const char *key, void *data)
{
ENTRY e, *ep;
if (key == NULL || data == NULL) {
return 0;
}
if (! hashTableInit()) {
return -1;
}
e.data = data;
e.key = (char*)key;
LOCK_HASH_TABLE();
ep = hsearch(e, ENTER);
UNLOCK_HASH_TABLE();
if (ep == NULL) {
fprintf(stderr, "warn adding key (%s) to hash table, <%d>: %s\n",
key, errno, strerror(errno));
}
return 0;
}
static void* searchEntryFromTable(const char *key)
{
ENTRY e,*ep;
if (key == NULL) {
return NULL;
}
hashTableInit();
e.key = (char*)key;
LOCK_HASH_TABLE();
ep = hsearch(e, FIND);
UNLOCK_HASH_TABLE();
if (ep != NULL) {
return ep->data;
}
return NULL;
}
jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
jobject instObj, const char *className,
const char *methName, const char *methSignature, ...)
@ -235,7 +142,7 @@ jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
}
retval->l = jobj;
}
else if (returnType == VOID) {
else if (returnType == JVOID) {
if (methType == STATIC) {
(*env)->CallStaticVoidMethodV(env, cls, mid, args);
}
@ -325,11 +232,11 @@ jthrowable methodIdFromClass(const char *className, const char *methName,
{
jclass cls;
jthrowable jthr;
jmethodID mid = 0;
jthr = globalClassReference(className, env, &cls);
if (jthr)
return jthr;
jmethodID mid = 0;
jthr = validateMethodType(env, methType);
if (jthr)
return jthr;
@ -350,25 +257,50 @@ jthrowable methodIdFromClass(const char *className, const char *methName,
jthrowable globalClassReference(const char *className, JNIEnv *env, jclass *out)
{
jclass clsLocalRef;
jclass cls = searchEntryFromTable(className);
if (cls) {
*out = cls;
return NULL;
jthrowable jthr = NULL;
jclass local_clazz = NULL;
jclass clazz = NULL;
int ret;
mutexLock(&hdfsHashMutex);
if (!gClassRefHTable) {
gClassRefHTable = htable_alloc(MAX_HASH_TABLE_ELEM, ht_hash_string,
ht_compare_string);
if (!gClassRefHTable) {
jthr = newRuntimeError(env, "htable_alloc failed\n");
goto done;
}
}
clsLocalRef = (*env)->FindClass(env,className);
if (clsLocalRef == NULL) {
return getPendingExceptionAndClear(env);
clazz = htable_get(gClassRefHTable, className);
if (clazz) {
*out = clazz;
goto done;
}
cls = (*env)->NewGlobalRef(env, clsLocalRef);
if (cls == NULL) {
(*env)->DeleteLocalRef(env, clsLocalRef);
return getPendingExceptionAndClear(env);
local_clazz = (*env)->FindClass(env,className);
if (!local_clazz) {
jthr = getPendingExceptionAndClear(env);
goto done;
}
(*env)->DeleteLocalRef(env, clsLocalRef);
insertEntryIntoTable(className, cls);
*out = cls;
return NULL;
clazz = (*env)->NewGlobalRef(env, local_clazz);
if (!clazz) {
jthr = getPendingExceptionAndClear(env);
goto done;
}
ret = htable_put(gClassRefHTable, (void*)className, clazz);
if (ret) {
jthr = newRuntimeError(env, "htable_put failed with error "
"code %d\n", ret);
goto done;
}
*out = clazz;
jthr = NULL;
done:
mutexUnlock(&hdfsHashMutex);
(*env)->DeleteLocalRef(env, local_clazz);
if (jthr && clazz) {
(*env)->DeleteGlobalRef(env, clazz);
}
return jthr;
}
jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name)
@ -436,14 +368,24 @@ done:
*/
static JNIEnv* getGlobalJNIEnv(void)
{
const jsize vmBufLength = 1;
JavaVM* vmBuf[vmBufLength];
JavaVM* vmBuf[VM_BUF_LENGTH];
JNIEnv *env;
jint rv = 0;
jint noVMs = 0;
jthrowable jthr;
char *hadoopClassPath;
const char *hadoopClassPathVMArg = "-Djava.class.path=";
size_t optHadoopClassPathLen;
char *optHadoopClassPath;
int noArgs = 1;
char *hadoopJvmArgs;
char jvmArgDelims[] = " ";
char *str, *token, *savePtr;
JavaVMInitArgs vm_args;
JavaVM *vm;
JavaVMOption *options;
rv = JNI_GetCreatedJavaVMs(&(vmBuf[0]), vmBufLength, &noVMs);
rv = JNI_GetCreatedJavaVMs(&(vmBuf[0]), VM_BUF_LENGTH, &noVMs);
if (rv != 0) {
fprintf(stderr, "JNI_GetCreatedJavaVMs failed with error: %d\n", rv);
return NULL;
@ -451,23 +393,19 @@ static JNIEnv* getGlobalJNIEnv(void)
if (noVMs == 0) {
//Get the environment variables for initializing the JVM
char *hadoopClassPath = getenv("CLASSPATH");
hadoopClassPath = getenv("CLASSPATH");
if (hadoopClassPath == NULL) {
fprintf(stderr, "Environment variable CLASSPATH not set!\n");
return NULL;
}
char *hadoopClassPathVMArg = "-Djava.class.path=";
size_t optHadoopClassPathLen = strlen(hadoopClassPath) +
optHadoopClassPathLen = strlen(hadoopClassPath) +
strlen(hadoopClassPathVMArg) + 1;
char *optHadoopClassPath = malloc(sizeof(char)*optHadoopClassPathLen);
optHadoopClassPath = malloc(sizeof(char)*optHadoopClassPathLen);
snprintf(optHadoopClassPath, optHadoopClassPathLen,
"%s%s", hadoopClassPathVMArg, hadoopClassPath);
// Determine the # of LIBHDFS_OPTS args
int noArgs = 1;
char *hadoopJvmArgs = getenv("LIBHDFS_OPTS");
char jvmArgDelims[] = " ";
char *str, *token, *savePtr;
hadoopJvmArgs = getenv("LIBHDFS_OPTS");
if (hadoopJvmArgs != NULL) {
hadoopJvmArgs = strdup(hadoopJvmArgs);
for (noArgs = 1, str = hadoopJvmArgs; ; noArgs++, str = NULL) {
@ -480,7 +418,12 @@ static JNIEnv* getGlobalJNIEnv(void)
}
// Now that we know the # args, populate the options array
JavaVMOption options[noArgs];
options = calloc(noArgs, sizeof(JavaVMOption));
if (!options) {
fputs("Call to calloc failed\n", stderr);
free(optHadoopClassPath);
return NULL;
}
options[0].optionString = optHadoopClassPath;
hadoopJvmArgs = getenv("LIBHDFS_OPTS");
if (hadoopJvmArgs != NULL) {
@ -495,8 +438,6 @@ static JNIEnv* getGlobalJNIEnv(void)
}
//Create the VM
JavaVMInitArgs vm_args;
JavaVM *vm;
vm_args.version = JNI_VERSION_1_2;
vm_args.options = options;
vm_args.nOptions = noArgs;
@ -508,6 +449,7 @@ static JNIEnv* getGlobalJNIEnv(void)
free(hadoopJvmArgs);
}
free(optHadoopClassPath);
free(options);
if (rv != 0) {
fprintf(stderr, "Call to JNI_CreateJavaVM failed "
@ -523,7 +465,7 @@ static JNIEnv* getGlobalJNIEnv(void)
}
else {
//Attach this thread to the VM
JavaVM* vm = vmBuf[0];
vm = vmBuf[0];
rv = (*vm)->AttachCurrentThread(vm, (void*)&env, 0);
if (rv != 0) {
fprintf(stderr, "Call to AttachCurrentThread "
@ -557,54 +499,27 @@ static JNIEnv* getGlobalJNIEnv(void)
JNIEnv* getJNIEnv(void)
{
JNIEnv *env;
struct hdfsTls *tls;
int ret;
#ifdef HAVE_BETTER_TLS
static __thread struct hdfsTls *quickTls = NULL;
if (quickTls)
return quickTls->env;
#endif
pthread_mutex_lock(&jvmMutex);
if (!gTlsKeyInitialized) {
ret = pthread_key_create(&gTlsKey, hdfsThreadDestructor);
if (ret) {
pthread_mutex_unlock(&jvmMutex);
fprintf(stderr, "getJNIEnv: pthread_key_create failed with "
"error %d\n", ret);
return NULL;
}
gTlsKeyInitialized = 1;
THREAD_LOCAL_STORAGE_GET_QUICK();
mutexLock(&jvmMutex);
if (threadLocalStorageGet(&env)) {
mutexUnlock(&jvmMutex);
return NULL;
}
tls = pthread_getspecific(gTlsKey);
if (tls) {
pthread_mutex_unlock(&jvmMutex);
return tls->env;
if (env) {
mutexUnlock(&jvmMutex);
return env;
}
env = getGlobalJNIEnv();
pthread_mutex_unlock(&jvmMutex);
mutexUnlock(&jvmMutex);
if (!env) {
fprintf(stderr, "getJNIEnv: getGlobalJNIEnv failed\n");
return NULL;
fprintf(stderr, "getJNIEnv: getGlobalJNIEnv failed\n");
return NULL;
}
tls = calloc(1, sizeof(struct hdfsTls));
if (!tls) {
fprintf(stderr, "getJNIEnv: OOM allocating %zd bytes\n",
sizeof(struct hdfsTls));
return NULL;
if (threadLocalStorageSet(env)) {
return NULL;
}
tls->env = env;
ret = pthread_setspecific(gTlsKey, tls);
if (ret) {
fprintf(stderr, "getJNIEnv: pthread_setspecific failed with "
"error code %d\n", ret);
hdfsThreadDestructor(tls);
return NULL;
}
#ifdef HAVE_BETTER_TLS
quickTls = tls;
#endif
THREAD_LOCAL_STORAGE_SET_QUICK(env);
return env;
}


@ -24,8 +24,6 @@
#include <stdlib.h>
#include <stdarg.h>
#include <search.h>
#include <pthread.h>
#include <errno.h>
#define PATH_SEPARATOR ':'


@ -21,6 +21,7 @@
#include "hdfs_test.h"
#include "jni_helper.h"
#include "native_mini_dfs.h"
#include "platform.h"
#include <errno.h>
#include <jni.h>
@ -347,10 +348,11 @@ error_dlr_nn:
int nmdConfigureHdfsBuilder(struct NativeMiniDfsCluster *cl,
struct hdfsBuilder *bld)
{
int port, ret;
int ret;
tPort port;
hdfsBuilderSetNameNode(bld, "localhost");
port = nmdGetNameNodePort(cl);
port = (tPort)nmdGetNameNodePort(cl);
if (port < 0) {
fprintf(stderr, "nmdGetNameNodePort failed with error %d\n", -port);
return EIO;


@ -0,0 +1,55 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBHDFS_MUTEXES_H
#define LIBHDFS_MUTEXES_H
/*
* Defines abstraction over platform-specific mutexes. libhdfs has no formal
* initialization function that users would call from a single-threaded context
* to initialize the library. This creates a challenge for bootstrapping the
* mutexes. To address this, all required mutexes are pre-defined here with
* external storage. Platform-specific implementations must guarantee that the
* mutexes are initialized via static initialization.
*/
#include "platform.h"
/** Mutex protecting the class reference hash table. */
extern mutex hdfsHashMutex;
/** Mutex protecting singleton JVM instance. */
extern mutex jvmMutex;
/**
* Locks a mutex.
*
* @param m mutex
* @return 0 if successful, non-zero otherwise
*/
int mutexLock(mutex *m);
/**
* Unlocks a mutex.
*
* @param m mutex
* @return 0 if successful, non-zero otherwise
*/
int mutexUnlock(mutex *m);
#endif


@ -16,23 +16,28 @@
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
public class RMAppAttemptUpdateSavedEvent extends RMAppAttemptEvent {
  final Exception updatedException;
  public RMAppAttemptUpdateSavedEvent(ApplicationAttemptId appAttemptId,
      Exception updatedException) {
    super(appAttemptId, RMAppAttemptEventType.ATTEMPT_UPDATE_SAVED);
    this.updatedException = updatedException;
  }
  public Exception getUpdatedException() {
    return updatedException;
  }
}
#include "os/mutexes.h"
#include <pthread.h>
#include <stdio.h>
mutex hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
mutex jvmMutex = PTHREAD_MUTEX_INITIALIZER;
int mutexLock(mutex *m) {
  int ret = pthread_mutex_lock(m);
  if (ret) {
    fprintf(stderr, "mutexLock: pthread_mutex_lock failed with error %d\n",
      ret);
  }
  return ret;
}
int mutexUnlock(mutex *m) {
  int ret = pthread_mutex_unlock(m);
  if (ret) {
    fprintf(stderr, "mutexUnlock: pthread_mutex_unlock failed with error %d\n",
      ret);
  }
  return ret;
}


@ -16,21 +16,19 @@
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
import org.apache.hadoop.yarn.api.records.ApplicationId;
public class RMAppNewSavedEvent extends RMAppEvent {
  private final Exception storedException;
  public RMAppNewSavedEvent(ApplicationId appId, Exception storedException) {
    super(appId, RMAppEventType.APP_NEW_SAVED);
    this.storedException = storedException;
  }
  public Exception getStoredException() {
    return storedException;
  }
}
#ifndef LIBHDFS_PLATFORM_H
#define LIBHDFS_PLATFORM_H
#include <pthread.h>
/* Use gcc type-checked format arguments. */
#define TYPE_CHECKED_PRINTF_FORMAT(formatArg, varArgs) \
  __attribute__((format(printf, formatArg, varArgs)))
/*
 * Mutex and thread data types defined by pthreads.
 */
typedef pthread_mutex_t mutex;
typedef pthread_t threadId;
#endif


@ -0,0 +1,52 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "os/thread.h"
#include <pthread.h>
#include <stdio.h>
/**
* Defines a helper function that adapts function pointer provided by caller to
* the type required by pthread_create.
*
* @param toRun thread to run
* @return void* result of running thread (always NULL)
*/
static void* runThread(void *toRun) {
const thread *t = toRun;
t->start(t->arg);
return NULL;
}
int threadCreate(thread *t) {
int ret;
ret = pthread_create(&t->id, NULL, runThread, t);
if (ret) {
fprintf(stderr, "threadCreate: pthread_create failed with error %d\n", ret);
}
return ret;
}
int threadJoin(const thread *t) {
int ret = pthread_join(t->id, NULL);
if (ret) {
fprintf(stderr, "threadJoin: pthread_join failed with error %d\n", ret);
}
return ret;
}


@ -0,0 +1,80 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "os/thread_local_storage.h"
#include <jni.h>
#include <pthread.h>
#include <stdio.h>
/** Key that allows us to retrieve thread-local storage */
static pthread_key_t gTlsKey;
/** nonzero if we succeeded in initializing gTlsKey. Protected by the jvmMutex */
static int gTlsKeyInitialized = 0;
/**
* The function that is called whenever a thread with libhdfs thread local data
* is destroyed.
*
* @param v The thread-local data
*/
static void hdfsThreadDestructor(void *v)
{
JavaVM *vm;
JNIEnv *env = v;
jint ret;
ret = (*env)->GetJavaVM(env, &vm);
if (ret) {
fprintf(stderr, "hdfsThreadDestructor: GetJavaVM failed with error %d\n",
ret);
(*env)->ExceptionDescribe(env);
} else {
(*vm)->DetachCurrentThread(vm);
}
}
int threadLocalStorageGet(JNIEnv **env)
{
int ret = 0;
if (!gTlsKeyInitialized) {
ret = pthread_key_create(&gTlsKey, hdfsThreadDestructor);
if (ret) {
fprintf(stderr,
"threadLocalStorageGet: pthread_key_create failed with error %d\n",
ret);
return ret;
}
gTlsKeyInitialized = 1;
}
*env = pthread_getspecific(gTlsKey);
return ret;
}
int threadLocalStorageSet(JNIEnv *env)
{
int ret = pthread_setspecific(gTlsKey, env);
if (ret) {
fprintf(stderr,
"threadLocalStorageSet: pthread_setspecific failed with error %d\n",
ret);
hdfsThreadDestructor(env);
}
return ret;
}

Some files were not shown because too many files have changed in this diff.