merge from trunk r1617527

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1617532 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Charles Lamb 2014-08-12 17:02:07 +00:00
commit 74f7be0887
198 changed files with 10565 additions and 4166 deletions

View File

@ -210,6 +210,7 @@ Requirements:
* Maven 3.0 or later * Maven 3.0 or later
* Findbugs 1.3.9 (if running findbugs) * Findbugs 1.3.9 (if running findbugs)
* ProtocolBuffer 2.5.0 * ProtocolBuffer 2.5.0
* CMake 2.6 or newer
* Windows SDK or Visual Studio 2010 Professional * Windows SDK or Visual Studio 2010 Professional
* Unix command-line tools from GnuWin32 or Cygwin: sh, mkdir, rm, cp, tar, gzip * Unix command-line tools from GnuWin32 or Cygwin: sh, mkdir, rm, cp, tar, gzip
* zlib headers (if building native code bindings for zlib) * zlib headers (if building native code bindings for zlib)

View File

@ -144,6 +144,15 @@
<artifactId>maven-jar-plugin</artifactId> <artifactId>maven-jar-plugin</artifactId>
<executions> <executions>
<execution> <execution>
<id>prepare-jar</id>
<phase>prepare-package</phase>
<goals>
<goal>jar</goal>
</goals>
</execution>
<execution>
<id>prepare-test-jar</id>
<phase>prepare-package</phase>
<goals> <goals>
<goal>test-jar</goal> <goal>test-jar</goal>
</goals> </goals>

View File

@ -120,32 +120,6 @@ public String toString() {
return token; return token;
} }
/**
* Return the hashcode for the token.
*
* @return the hashcode for the token.
*/
@Override
public int hashCode() {
return (token != null) ? token.hashCode() : 0;
}
/**
* Return if two token instances are equal.
*
* @param o the other token instance.
*
* @return if this instance and the other instance are equal.
*/
@Override
public boolean equals(Object o) {
boolean eq = false;
if (o instanceof Token) {
Token other = (Token) o;
eq = (token == null && other.token == null) || (token != null && this.token.equals(other.token));
}
return eq;
}
} }
private static Class<? extends Authenticator> DEFAULT_AUTHENTICATOR = KerberosAuthenticator.class; private static Class<? extends Authenticator> DEFAULT_AUTHENTICATOR = KerberosAuthenticator.class;
@ -208,6 +182,16 @@ public AuthenticatedURL(Authenticator authenticator,
this.authenticator.setConnectionConfigurator(connConfigurator); this.authenticator.setConnectionConfigurator(connConfigurator);
} }
/**
* Returns the {@link Authenticator} instance used by the
* <code>AuthenticatedURL</code>.
*
* @return the {@link Authenticator} instance
*/
protected Authenticator getAuthenticator() {
return authenticator;
}
/** /**
* Returns an authenticated {@link HttpURLConnection}. * Returns an authenticated {@link HttpURLConnection}.
* *
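The new protected getAuthenticator() accessor lets AuthenticatedURL subclasses reuse the configured Authenticator; the delegation-token URL class added later in this commit relies on it. A minimal sketch of a subclass using the accessor; the TracingAuthenticatedURL name and the logging call are illustrative, not part of this change:

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.Authenticator;

// Hypothetical subclass: logs which Authenticator implementation is in use
// before delegating to the normal openConnection() flow.
public class TracingAuthenticatedURL extends AuthenticatedURL {

  public TracingAuthenticatedURL(Authenticator authenticator) {
    super(authenticator);
  }

  @Override
  public HttpURLConnection openConnection(URL url, Token token)
      throws IOException, AuthenticationException {
    // getAuthenticator() is the accessor introduced by this change.
    System.out.println("Authenticating with "
        + getAuthenticator().getClass().getSimpleName());
    return super.openConnection(url, token);
  }
}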

View File

@ -127,6 +127,7 @@ public class AuthenticationFilter implements Filter {
public static final String SIGNATURE_PROVIDER_ATTRIBUTE = public static final String SIGNATURE_PROVIDER_ATTRIBUTE =
"org.apache.hadoop.security.authentication.util.SignerSecretProvider"; "org.apache.hadoop.security.authentication.util.SignerSecretProvider";
private Properties config;
private Signer signer; private Signer signer;
private SignerSecretProvider secretProvider; private SignerSecretProvider secretProvider;
private AuthenticationHandler authHandler; private AuthenticationHandler authHandler;
@ -150,7 +151,7 @@ public class AuthenticationFilter implements Filter {
public void init(FilterConfig filterConfig) throws ServletException { public void init(FilterConfig filterConfig) throws ServletException {
String configPrefix = filterConfig.getInitParameter(CONFIG_PREFIX); String configPrefix = filterConfig.getInitParameter(CONFIG_PREFIX);
configPrefix = (configPrefix != null) ? configPrefix + "." : ""; configPrefix = (configPrefix != null) ? configPrefix + "." : "";
Properties config = getConfiguration(configPrefix, filterConfig); config = getConfiguration(configPrefix, filterConfig);
String authHandlerName = config.getProperty(AUTH_TYPE, null); String authHandlerName = config.getProperty(AUTH_TYPE, null);
String authHandlerClassName; String authHandlerClassName;
if (authHandlerName == null) { if (authHandlerName == null) {
@ -224,6 +225,17 @@ public void init(FilterConfig filterConfig) throws ServletException {
cookiePath = config.getProperty(COOKIE_PATH, null); cookiePath = config.getProperty(COOKIE_PATH, null);
} }
/**
* Returns the configuration properties of the {@link AuthenticationFilter}
* without the prefix. The returned properties are the same ones that the
* {@link #getConfiguration(String, FilterConfig)} method returned.
*
* @return the configuration properties.
*/
protected Properties getConfiguration() {
return config;
}
/** /**
* Returns the authentication handler being used. * Returns the authentication handler being used.
* *
@ -457,7 +469,7 @@ public Principal getUserPrincipal() {
createAuthCookie(httpResponse, signedToken, getCookieDomain(), createAuthCookie(httpResponse, signedToken, getCookieDomain(),
getCookiePath(), token.getExpires(), isHttps); getCookiePath(), token.getExpires(), isHttps);
} }
filterChain.doFilter(httpRequest, httpResponse); doFilter(filterChain, httpRequest, httpResponse);
} }
} else { } else {
unauthorizedResponse = false; unauthorizedResponse = false;
@ -481,6 +493,15 @@ public Principal getUserPrincipal() {
} }
} }
/**
* Delegates the call to the servlet filter chain. Subclasses may override
* this method to perform pre- and post-filtering tasks.
*/
protected void doFilter(FilterChain filterChain, HttpServletRequest request,
HttpServletResponse response) throws IOException, ServletException {
filterChain.doFilter(request, response);
}
/** /**
* Creates the Hadoop authentication HTTP cookie. * Creates the Hadoop authentication HTTP cookie.
* *
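The new config field, the getConfiguration() accessor, and the protected doFilter() hook give subclasses a place to run code around the filter-chain invocation; the DelegationTokenAuthenticationFilter added later in this commit overrides doFilter() for exactly that purpose. A minimal sketch of such a subclass; the TimingAuthenticationFilter name and the timing logic are illustrative only:

import java.io.IOException;
import javax.servlet.FilterChain;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;

// Hypothetical subclass: wraps the filter chain with simple timing.
public class TimingAuthenticationFilter extends AuthenticationFilter {

  @Override
  protected void doFilter(FilterChain filterChain, HttpServletRequest request,
      HttpServletResponse response) throws IOException, ServletException {
    long start = System.currentTimeMillis();
    try {
      // Delegate to the chain exactly as the base class would.
      super.doFilter(filterChain, request, response);
    } finally {
      long elapsed = System.currentTimeMillis() - start;
      // getConfiguration() exposes the de-prefixed filter properties.
      String authType = getConfiguration().getProperty(AUTH_TYPE);
      System.out.println(authType + " request took " + elapsed + " ms");
    }
  }
}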

View File

@ -142,11 +142,30 @@ public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
*/ */
public static final String NAME_RULES = TYPE + ".name.rules"; public static final String NAME_RULES = TYPE + ".name.rules";
private String type;
private String keytab; private String keytab;
private GSSManager gssManager; private GSSManager gssManager;
private Subject serverSubject = new Subject(); private Subject serverSubject = new Subject();
private List<LoginContext> loginContexts = new ArrayList<LoginContext>(); private List<LoginContext> loginContexts = new ArrayList<LoginContext>();
/**
* Creates a Kerberos SPNEGO authentication handler with the default
* auth-token type, <code>kerberos</code>.
*/
public KerberosAuthenticationHandler() {
this(TYPE);
}
/**
* Creates a Kerberos SPNEGO authentication handler with a custom auth-token
* type.
*
* @param type auth-token type.
*/
public KerberosAuthenticationHandler(String type) {
this.type = type;
}
/** /**
* Initializes the authentication handler instance. * Initializes the authentication handler instance.
* <p/> * <p/>
@ -249,7 +268,7 @@ public void destroy() {
*/ */
@Override @Override
public String getType() { public String getType() {
return TYPE; return type;
} }
/** /**
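The new constructors let the SPNEGO logic be registered under a custom auth-token type, which is the pattern the delegation-token handlers in this commit rely on when they wrap this handler. A minimal sketch; the CustomTypeKerberosHandler class and the "kerberos-custom" type are hypothetical:

import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;

// Hypothetical subclass: reuses the Kerberos SPNEGO implementation unchanged
// but advertises itself under its own auth-token type.
public class CustomTypeKerberosHandler extends KerberosAuthenticationHandler {

  public static final String CUSTOM_TYPE = "kerberos-custom";

  public CustomTypeKerberosHandler() {
    // The new constructor stores the type; getType() now returns it instead
    // of the hard-coded "kerberos" constant.
    super(CUSTOM_TYPE);
  }
}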

View File

@ -55,6 +55,25 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
private static final Charset UTF8_CHARSET = Charset.forName("UTF-8"); private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
private boolean acceptAnonymous; private boolean acceptAnonymous;
private String type;
/**
* Creates a Hadoop pseudo authentication handler with the default auth-token
* type, <code>simple</code>.
*/
public PseudoAuthenticationHandler() {
this(TYPE);
}
/**
* Creates a Hadoop pseudo authentication handler with a custom auth-token
* type.
*
* @param type auth-token type.
*/
public PseudoAuthenticationHandler(String type) {
this.type = type;
}
/** /**
* Initializes the authentication handler instance. * Initializes the authentication handler instance.
@ -96,7 +115,7 @@ public void destroy() {
*/ */
@Override @Override
public String getType() { public String getType() {
return TYPE; return type;
} }
/** /**

View File

@ -33,36 +33,6 @@ public void testToken() throws Exception {
token = new AuthenticatedURL.Token("foo"); token = new AuthenticatedURL.Token("foo");
Assert.assertTrue(token.isSet()); Assert.assertTrue(token.isSet());
Assert.assertEquals("foo", token.toString()); Assert.assertEquals("foo", token.toString());
AuthenticatedURL.Token token1 = new AuthenticatedURL.Token();
AuthenticatedURL.Token token2 = new AuthenticatedURL.Token();
Assert.assertEquals(token1.hashCode(), token2.hashCode());
Assert.assertTrue(token1.equals(token2));
token1 = new AuthenticatedURL.Token();
token2 = new AuthenticatedURL.Token("foo");
Assert.assertNotSame(token1.hashCode(), token2.hashCode());
Assert.assertFalse(token1.equals(token2));
token1 = new AuthenticatedURL.Token("foo");
token2 = new AuthenticatedURL.Token();
Assert.assertNotSame(token1.hashCode(), token2.hashCode());
Assert.assertFalse(token1.equals(token2));
token1 = new AuthenticatedURL.Token("foo");
token2 = new AuthenticatedURL.Token("foo");
Assert.assertEquals(token1.hashCode(), token2.hashCode());
Assert.assertTrue(token1.equals(token2));
token1 = new AuthenticatedURL.Token("bar");
token2 = new AuthenticatedURL.Token("foo");
Assert.assertNotSame(token1.hashCode(), token2.hashCode());
Assert.assertFalse(token1.equals(token2));
token1 = new AuthenticatedURL.Token("foo");
token2 = new AuthenticatedURL.Token("bar");
Assert.assertNotSame(token1.hashCode(), token2.hashCode());
Assert.assertFalse(token1.equals(token2));
} }
@Test @Test
@ -137,4 +107,12 @@ public void testConnectionConfigurator() throws Exception {
Mockito.verify(connConf).configure(Mockito.<HttpURLConnection>any()); Mockito.verify(connConf).configure(Mockito.<HttpURLConnection>any());
} }
@Test
public void testGetAuthenticator() throws Exception {
Authenticator authenticator = Mockito.mock(Authenticator.class);
AuthenticatedURL aURL = new AuthenticatedURL(authenticator);
Assert.assertEquals(authenticator, aURL.getAuthenticator());
}
} }

View File

@ -199,6 +199,9 @@ Trunk (Unreleased)
HADOOP-10936. Change default KeyProvider bitlength to 128. (wang) HADOOP-10936. Change default KeyProvider bitlength to 128. (wang)
HADOOP-10224. JavaKeyStoreProvider has to protect against corrupting
underlying store. (asuresh via tucu)
BUG FIXES BUG FIXES
HADOOP-9451. Fault single-layer config if node group topology is enabled. HADOOP-9451. Fault single-layer config if node group topology is enabled.
@ -421,6 +424,9 @@ Trunk (Unreleased)
HADOOP-10939. Fix TestKeyProviderFactory testcases to use default 128 bit HADOOP-10939. Fix TestKeyProviderFactory testcases to use default 128 bit
length keys. (Arun Suresh via wang) length keys. (Arun Suresh via wang)
HADOOP-10862. Miscellaneous trivial corrections to KMS classes.
(asuresh via tucu)
OPTIMIZATIONS OPTIMIZATIONS
HADOOP-7761. Improve the performance of raw comparisons. (todd) HADOOP-7761. Improve the performance of raw comparisons. (todd)
@ -490,6 +496,12 @@ Release 2.6.0 - UNRELEASED
HADOOP-10791. AuthenticationFilter should support externalizing the HADOOP-10791. AuthenticationFilter should support externalizing the
secret for signing and provide rotation support. (rkanter via tucu) secret for signing and provide rotation support. (rkanter via tucu)
HADOOP-10771. Refactor HTTP delegation support out of httpfs to common.
(tucu)
HADOOP-10835. Implement HTTP proxyuser support in HTTP authentication
client/server libraries. (tucu)
OPTIMIZATIONS OPTIMIZATIONS
BUG FIXES BUG FIXES
@ -521,9 +533,6 @@ Release 2.6.0 - UNRELEASED
HADOOP-10830. Missing lock in JavaKeyStoreProvider.createCredentialEntry. HADOOP-10830. Missing lock in JavaKeyStoreProvider.createCredentialEntry.
(Benoy Antony via umamahesh) (Benoy Antony via umamahesh)
HADOOP-10876. The constructor of Path should not take an empty URL as a
parameter. (Zhihai Xu via wang)
HADOOP-10928. Incorrect usage on `hadoop credential list`. HADOOP-10928. Incorrect usage on `hadoop credential list`.
(Josh Elser via wang) (Josh Elser via wang)
@ -545,6 +554,12 @@ Release 2.6.0 - UNRELEASED
HADOOP-10931 compile error on tools/hadoop-openstack (xukun via stevel) HADOOP-10931 compile error on tools/hadoop-openstack (xukun via stevel)
HADOOP-10929. Typo in Configuration.getPasswordFromCredentialProviders
(lmccay via brandonli)
HADOOP-10402. Configuration.getValByRegex does not substitute for
variables. (Robert Kanter via kasha)
Release 2.5.0 - UNRELEASED Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@ -203,6 +203,17 @@
<artifactId>hadoop-auth</artifactId> <artifactId>hadoop-auth</artifactId>
<scope>compile</scope> <scope>compile</scope>
</dependency> </dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-auth</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minikdc</artifactId>
<scope>test</scope>
</dependency>
<dependency> <dependency>
<groupId>com.jcraft</groupId> <groupId>com.jcraft</groupId>
<artifactId>jsch</artifactId> <artifactId>jsch</artifactId>

View File

@ -1781,7 +1781,7 @@ public void setStrings(String name, String... values) {
public char[] getPassword(String name) throws IOException { public char[] getPassword(String name) throws IOException {
char[] pass = null; char[] pass = null;
pass = getPasswordFromCredenitalProviders(name); pass = getPasswordFromCredentialProviders(name);
if (pass == null) { if (pass == null) {
pass = getPasswordFromConfig(name); pass = getPasswordFromConfig(name);
@ -1797,7 +1797,7 @@ public char[] getPassword(String name) throws IOException {
* @return password or null if not found * @return password or null if not found
* @throws IOException * @throws IOException
*/ */
protected char[] getPasswordFromCredenitalProviders(String name) protected char[] getPasswordFromCredentialProviders(String name)
throws IOException { throws IOException {
char[] pass = null; char[] pass = null;
try { try {
@ -2755,7 +2755,8 @@ public Map<String,String> getValByRegex(String regex) {
item.getValue() instanceof String) { item.getValue() instanceof String) {
m = p.matcher((String)item.getKey()); m = p.matcher((String)item.getKey());
if(m.find()) { // match if(m.find()) { // match
result.put((String) item.getKey(), (String) item.getValue()); result.put((String) item.getKey(),
substituteVars(getProps().getProperty((String) item.getKey())));
} }
} }
} }
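With this change getValByRegex() runs matched values through substituteVars(), so ${var} references are expanded the same way Configuration.get() expands them. A small sketch illustrating the substitution; the property names are made up for the example:

import java.util.Map;
import org.apache.hadoop.conf.Configuration;

public class GetValByRegexDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("base.dir", "/data");
    conf.set("my.service.log.dir", "${base.dir}/logs");

    // Before the fix the raw value "${base.dir}/logs" was returned; with the
    // fix the variable is expanded, matching what Configuration.get() returns.
    Map<String, String> matches = conf.getValByRegex("^my\\.service\\..*");
    System.out.println(matches.get("my.service.log.dir")); // expected: /data/logs
  }
}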

View File

@ -27,8 +27,11 @@
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.ProviderUtils; import org.apache.hadoop.security.ProviderUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.crypto.spec.SecretKeySpec; import javax.crypto.spec.SecretKeySpec;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
import java.io.ObjectInputStream; import java.io.ObjectInputStream;
@ -80,6 +83,9 @@
@InterfaceAudience.Private @InterfaceAudience.Private
public class JavaKeyStoreProvider extends KeyProvider { public class JavaKeyStoreProvider extends KeyProvider {
private static final String KEY_METADATA = "KeyMetadata"; private static final String KEY_METADATA = "KeyMetadata";
private static Logger LOG =
LoggerFactory.getLogger(JavaKeyStoreProvider.class);
public static final String SCHEME_NAME = "jceks"; public static final String SCHEME_NAME = "jceks";
public static final String KEYSTORE_PASSWORD_FILE_KEY = public static final String KEYSTORE_PASSWORD_FILE_KEY =
@ -115,6 +121,10 @@ private JavaKeyStoreProvider(URI uri, Configuration conf) throws IOException {
if (pwFile != null) { if (pwFile != null) {
ClassLoader cl = Thread.currentThread().getContextClassLoader(); ClassLoader cl = Thread.currentThread().getContextClassLoader();
URL pwdFile = cl.getResource(pwFile); URL pwdFile = cl.getResource(pwFile);
if (pwdFile == null) {
// Provided Password file does not exist
throw new IOException("Password file does not exist");
}
if (pwdFile != null) { if (pwdFile != null) {
InputStream is = pwdFile.openStream(); InputStream is = pwdFile.openStream();
try { try {
@ -129,19 +139,25 @@ private JavaKeyStoreProvider(URI uri, Configuration conf) throws IOException {
password = KEYSTORE_PASSWORD_DEFAULT; password = KEYSTORE_PASSWORD_DEFAULT;
} }
try { try {
Path oldPath = constructOldPath(path);
Path newPath = constructNewPath(path);
keyStore = KeyStore.getInstance(SCHEME_NAME); keyStore = KeyStore.getInstance(SCHEME_NAME);
Old:
if (fs.exists(path)) {
// save off permissions in case we need to
// rewrite the keystore in flush()
FileStatus s = fs.getFileStatus(path);
permissions = s.getPermission();
keyStore.load(fs.open(path), password);
} else {
permissions = new FsPermission("700");
// required to create an empty keystore. *sigh*
keyStore.load(null, password);
}
New:
FsPermission perm = null;
if (fs.exists(path)) {
// flush did not proceed to completion
// _NEW should not exist
if (fs.exists(newPath)) {
throw new IOException(
String.format("Keystore not loaded due to some inconsistency "
+ "('%s' and '%s' should not exist together)!!", path, newPath));
}
perm = tryLoadFromPath(path, oldPath);
} else {
perm = tryLoadIncompleteFlush(oldPath, newPath);
}
// Need to save off permissions in case we need to
// rewrite the keystore in flush()
permissions = perm;
} catch (KeyStoreException e) { } catch (KeyStoreException e) {
throw new IOException("Can't create keystore", e); throw new IOException("Can't create keystore", e);
} catch (NoSuchAlgorithmException e) { } catch (NoSuchAlgorithmException e) {
@ -154,6 +170,136 @@ private JavaKeyStoreProvider(URI uri, Configuration conf) throws IOException {
writeLock = lock.writeLock(); writeLock = lock.writeLock();
} }
/**
* Try loading from the user-specified path; if that fails for a reason other
* than a bad/wrong password, fall back to the backup path.
* @param path Actual path to load from
* @param backupPath Backup path (_OLD)
* @return The permissions of the loaded file
* @throws NoSuchAlgorithmException
* @throws CertificateException
* @throws IOException
*/
private FsPermission tryLoadFromPath(Path path, Path backupPath)
throws NoSuchAlgorithmException, CertificateException,
IOException {
FsPermission perm = null;
try {
perm = loadFromPath(path, password);
// Remove _OLD if exists
if (fs.exists(backupPath)) {
fs.delete(backupPath, true);
}
LOG.debug("KeyStore loaded successfully !!");
} catch (IOException ioe) {
// If the file is corrupted for some reason other than a
// wrong password, try the _OLD file if it exists
if (!isBadorWrongPassword(ioe)) {
perm = loadFromPath(backupPath, password);
// Rename CURRENT to CORRUPTED
renameOrFail(path, new Path(path.toString() + "_CORRUPTED_"
+ System.currentTimeMillis()));
renameOrFail(backupPath, path);
LOG.debug(String.format(
"KeyStore loaded successfully from '%s' since '%s'"
+ "was corrupted !!", backupPath, path));
} else {
throw ioe;
}
}
return perm;
}
/**
* The KeyStore might have gone down during a flush, in which case either the
* _NEW or _OLD file might exist. This method tries to load the KeyStore
* from one of these intermediate files.
* @param oldPath the _OLD file created during flush
* @param newPath the _NEW file created during flush
* @return The permissions of the loaded file
* @throws IOException
* @throws NoSuchAlgorithmException
* @throws CertificateException
*/
private FsPermission tryLoadIncompleteFlush(Path oldPath, Path newPath)
throws IOException, NoSuchAlgorithmException, CertificateException {
FsPermission perm = null;
// Check if _NEW exists (in case flush had finished writing but not
// completed the re-naming)
if (fs.exists(newPath)) {
perm = loadAndReturnPerm(newPath, oldPath);
}
// try loading from _OLD (an earlier flush might not have finished writing)
if ((perm == null) && fs.exists(oldPath)) {
perm = loadAndReturnPerm(oldPath, newPath);
}
// If not loaded yet,
// required to create an empty keystore. *sigh*
if (perm == null) {
keyStore.load(null, password);
LOG.debug("KeyStore initialized anew successfully !!");
perm = new FsPermission("700");
}
return perm;
}
private FsPermission loadAndReturnPerm(Path pathToLoad, Path pathToDelete)
throws NoSuchAlgorithmException, CertificateException,
IOException {
FsPermission perm = null;
try {
perm = loadFromPath(pathToLoad, password);
renameOrFail(pathToLoad, path);
LOG.debug(String.format("KeyStore loaded successfully from '%s'!!",
pathToLoad));
if (fs.exists(pathToDelete)) {
fs.delete(pathToDelete, true);
}
} catch (IOException e) {
// Check for password issue : don't want to trash file due
// to wrong password
if (isBadorWrongPassword(e)) {
throw e;
}
}
return perm;
}
private boolean isBadorWrongPassword(IOException ioe) {
// As per the documentation, this is supposed to be the way to figure out
// whether the password was correct
if (ioe.getCause() instanceof UnrecoverableKeyException) {
return true;
}
// Unfortunately that doesn't seem to work..
// Workaround :
if ((ioe.getCause() == null)
&& (ioe.getMessage() != null)
&& ((ioe.getMessage().contains("Keystore was tampered")) || (ioe
.getMessage().contains("password was incorrect")))) {
return true;
}
return false;
}
private FsPermission loadFromPath(Path p, char[] password)
throws IOException, NoSuchAlgorithmException, CertificateException {
FileStatus s = fs.getFileStatus(p);
keyStore.load(fs.open(p), password);
return s.getPermission();
}
private Path constructNewPath(Path path) {
Path newPath = new Path(path.toString() + "_NEW");
return newPath;
}
private Path constructOldPath(Path path) {
Path oldPath = new Path(path.toString() + "_OLD");
return oldPath;
}
@Override @Override
public KeyVersion getKeyVersion(String versionName) throws IOException { public KeyVersion getKeyVersion(String versionName) throws IOException {
readLock.lock(); readLock.lock();
@ -352,11 +498,22 @@ public KeyVersion rollNewVersion(String name,
@Override @Override
public void flush() throws IOException { public void flush() throws IOException {
Path newPath = constructNewPath(path);
Path oldPath = constructOldPath(path);
writeLock.lock(); writeLock.lock();
try { try {
if (!changed) { if (!changed) {
return; return;
} }
// Might exist if a backup has been restored etc.
if (fs.exists(newPath)) {
renameOrFail(newPath, new Path(newPath.toString()
+ "_ORPHANED_" + System.currentTimeMillis()));
}
if (fs.exists(oldPath)) {
renameOrFail(oldPath, new Path(oldPath.toString()
+ "_ORPHANED_" + System.currentTimeMillis()));
}
// put all of the updates into the keystore // put all of the updates into the keystore
for(Map.Entry<String, Metadata> entry: cache.entrySet()) { for(Map.Entry<String, Metadata> entry: cache.entrySet()) {
try { try {
@ -366,25 +523,77 @@ public void flush() throws IOException {
throw new IOException("Can't set metadata key " + entry.getKey(),e ); throw new IOException("Can't set metadata key " + entry.getKey(),e );
} }
} }
Old:
// write out the keystore
FSDataOutputStream out = FileSystem.create(fs, path, permissions);
try {
keyStore.store(out, password);
} catch (KeyStoreException e) {
throw new IOException("Can't store keystore " + this, e);
} catch (NoSuchAlgorithmException e) {
throw new IOException("No such algorithm storing keystore " + this, e);
} catch (CertificateException e) {
throw new IOException("Certificate exception storing keystore " + this,
e);
}
out.close();
New:
// Save old File first
boolean fileExisted = backupToOld(oldPath);
// write out the keystore
// Write to _NEW path first :
try {
writeToNew(newPath);
} catch (IOException ioe) {
// rename _OLD back to current and throw Exception
revertFromOld(oldPath, fileExisted);
throw ioe;
}
// Rename _NEW to CURRENT and delete _OLD
cleanupNewAndOld(newPath, oldPath);
changed = false; changed = false;
} finally { } finally {
writeLock.unlock(); writeLock.unlock();
} }
} }
private void cleanupNewAndOld(Path newPath, Path oldPath) throws IOException {
// Rename _NEW to CURRENT
renameOrFail(newPath, path);
// Delete _OLD
if (fs.exists(oldPath)) {
fs.delete(oldPath, true);
}
}
private void writeToNew(Path newPath) throws IOException {
FSDataOutputStream out =
FileSystem.create(fs, newPath, permissions);
try {
keyStore.store(out, password);
} catch (KeyStoreException e) {
throw new IOException("Can't store keystore " + this, e);
} catch (NoSuchAlgorithmException e) {
throw new IOException(
"No such algorithm storing keystore " + this, e);
} catch (CertificateException e) {
throw new IOException(
"Certificate exception storing keystore " + this, e);
}
out.close();
}
private void revertFromOld(Path oldPath, boolean fileExisted)
throws IOException {
if (fileExisted) {
renameOrFail(oldPath, path);
}
}
private boolean backupToOld(Path oldPath)
throws IOException {
boolean fileExisted = false;
if (fs.exists(path)) {
renameOrFail(path, oldPath);
fileExisted = true;
}
return fileExisted;
}
private void renameOrFail(Path src, Path dest)
throws IOException {
if (!fs.rename(src, dest)) {
throw new IOException("Rename unsuccessful : "
+ String.format("'%s' to '%s'", src, dest));
}
}
@Override @Override
public String toString() { public String toString() {
return uri.toString(); return uri.toString();
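The reworked constructor and flush() implement a rename-based protocol: the current keystore is first renamed to _OLD, the updated keystore is written to _NEW, and only then is _NEW renamed into place and _OLD deleted, so an interrupted flush always leaves a file the next constructor call can recover. A minimal sketch of driving the provider through the standard factory; the jceks path and key name are placeholders, and the factory/options calls are assumed from the surrounding KeyProvider API rather than from this diff:

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

public class JceksFlushDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Point the factory at a local jceks keystore; the path is a placeholder.
    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
        "jceks://file/tmp/test.jceks");

    List<KeyProvider> providers = KeyProviderFactory.getProviders(conf);
    KeyProvider provider = providers.get(0);

    // Default cipher and bit length come from the configuration.
    provider.createKey("demo-key", KeyProvider.options(conf));

    // flush() renames test.jceks to test.jceks_OLD, writes test.jceks_NEW,
    // then renames _NEW into place and deletes _OLD.
    provider.flush();
  }
}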

View File

@ -512,7 +512,7 @@ private List<String[]> createKeySets(String[] keyNames) {
List<String> batch = new ArrayList<String>(); List<String> batch = new ArrayList<String>();
int batchLen = 0; int batchLen = 0;
for (String name : keyNames) { for (String name : keyNames) {
int additionalLen = KMSRESTConstants.KEY_OP.length() + 1 + name.length(); int additionalLen = KMSRESTConstants.KEY.length() + 1 + name.length();
batchLen += additionalLen; batchLen += additionalLen;
// topping at 1500 to account for initial URL and encoded names // topping at 1500 to account for initial URL and encoded names
if (batchLen > 1500) { if (batchLen > 1500) {
@ -536,7 +536,7 @@ public Metadata[] getKeysMetadata(String ... keyNames) throws IOException {
for (String[] keySet : keySets) { for (String[] keySet : keySets) {
if (keyNames.length > 0) { if (keyNames.length > 0) {
Map<String, Object> queryStr = new HashMap<String, Object>(); Map<String, Object> queryStr = new HashMap<String, Object>();
queryStr.put(KMSRESTConstants.KEY_OP, keySet); queryStr.put(KMSRESTConstants.KEY, keySet);
URL url = createURL(KMSRESTConstants.KEYS_METADATA_RESOURCE, null, URL url = createURL(KMSRESTConstants.KEYS_METADATA_RESOURCE, null,
null, queryStr); null, queryStr);
HttpURLConnection conn = createConnection(url, HTTP_GET); HttpURLConnection conn = createConnection(url, HTTP_GET);

View File

@ -37,7 +37,7 @@ public class KMSRESTConstants {
public static final String EEK_SUB_RESOURCE = "_eek"; public static final String EEK_SUB_RESOURCE = "_eek";
public static final String CURRENT_VERSION_SUB_RESOURCE = "_currentversion"; public static final String CURRENT_VERSION_SUB_RESOURCE = "_currentversion";
public static final String KEY_OP = "key"; public static final String KEY = "key";
public static final String EEK_OP = "eek_op"; public static final String EEK_OP = "eek_op";
public static final String EEK_GENERATE = "generate"; public static final String EEK_GENERATE = "generate";
public static final String EEK_DECRYPT = "decrypt"; public static final String EEK_DECRYPT = "decrypt";

View File

@ -128,20 +128,7 @@ private void checkPathArg( String path ) throws IllegalArgumentException {
"Can not create a Path from an empty string"); "Can not create a Path from an empty string");
} }
} }
/** check URI parameter of Path constructor. */
private void checkPathArg(URI aUri) throws IllegalArgumentException {
// disallow construction of a Path from an empty URI
if (aUri == null) {
throw new IllegalArgumentException(
"Can not create a Path from a null URI");
}
if (aUri.toString().isEmpty()) {
throw new IllegalArgumentException(
"Can not create a Path from an empty URI");
}
}
/** Construct a path from a String. Path strings are URIs, but with /** Construct a path from a String. Path strings are URIs, but with
* unescaped elements and some additional normalization. */ * unescaped elements and some additional normalization. */
public Path(String pathString) throws IllegalArgumentException { public Path(String pathString) throws IllegalArgumentException {
@ -189,7 +176,6 @@ public Path(String pathString) throws IllegalArgumentException {
* Construct a path from a URI * Construct a path from a URI
*/ */
public Path(URI aUri) { public Path(URI aUri) {
checkPathArg(aUri);
uri = aUri.normalize(); uri = aUri.normalize();
} }

View File

@ -21,14 +21,18 @@
import java.util.Iterator; import java.util.Iterator;
import java.util.List; import java.util.List;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists; import com.google.common.collect.Lists;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsFilter; import org.apache.hadoop.metrics2.MetricsFilter;
import static org.apache.hadoop.metrics2.lib.Interns.*; import static org.apache.hadoop.metrics2.lib.Interns.*;
Old:
class MetricsCollectorImpl implements MetricsCollector,
New:
@InterfaceAudience.Private
@VisibleForTesting
public class MetricsCollectorImpl implements MetricsCollector,
Iterable<MetricsRecordBuilderImpl> { Iterable<MetricsRecordBuilderImpl> {
private final List<MetricsRecordBuilderImpl> rbs = Lists.newArrayList(); private final List<MetricsRecordBuilderImpl> rbs = Lists.newArrayList();

View File

@ -89,6 +89,14 @@ public MutableStat(String name, String description,
this(name, description, sampleName, valueName, false); this(name, description, sampleName, valueName, false);
} }
/**
* Set whether to display the extended stats (stdev, min/max etc.) or not
* @param extended enable/disable displaying extended stats
*/
public synchronized void setExtended(boolean extended) {
this.extended = extended;
}
/** /**
* Add a number of samples and their sum to the running stat * Add a number of samples and their sum to the running stat
* @param numSamples number of samples * @param numSamples number of samples
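setExtended() makes the extended statistics (stdev, min/max, etc.) switchable after construction instead of only via the constructor flag. A small usage sketch; the registry name, metric names, and sample values are illustrative:

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableStat;

public class MutableStatDemo {
  public static void main(String[] args) {
    MetricsRegistry registry = new MetricsRegistry("demo");
    MutableStat latency =
        registry.newStat("rpcLatency", "RPC latency", "ops", "ms");
    // Toggle extended stats at runtime rather than at construction time.
    latency.setExtended(true);
    latency.add(42);
    latency.add(17);
  }
}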

View File

@ -0,0 +1,343 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URL;
import java.net.URLEncoder;
import java.util.HashMap;
import java.util.Map;
/**
* The <code>DelegationTokenAuthenticatedURL</code> is a
* {@link AuthenticatedURL} sub-class with built-in Hadoop Delegation Token
* functionality.
* <p/>
* The authentication mechanisms supported by default are Hadoop Simple
* authentication (also known as pseudo authentication) and Kerberos SPNEGO
* authentication.
* <p/>
* Additional authentication mechanisms can be supported via {@link
* DelegationTokenAuthenticator} implementations.
* <p/>
* The default {@link DelegationTokenAuthenticator} is the {@link
* KerberosDelegationTokenAuthenticator} class which supports
* automatic fallback from Kerberos SPNEGO to Hadoop Simple authentication via
* the {@link PseudoDelegationTokenAuthenticator} class.
* <p/>
* <code>AuthenticatedURL</code> instances are not thread-safe.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public class DelegationTokenAuthenticatedURL extends AuthenticatedURL {
/**
* Constant used in the URL's query string to perform a proxy user request;
* the value of the <code>DO_AS</code> parameter is the user on whose behalf
* the request will be done.
*/
static final String DO_AS = "doAs";
/**
* Client side authentication token that handles Delegation Tokens.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public static class Token extends AuthenticatedURL.Token {
private
org.apache.hadoop.security.token.Token<AbstractDelegationTokenIdentifier>
delegationToken;
org.apache.hadoop.security.token.Token<AbstractDelegationTokenIdentifier>
getDelegationToken() {
return delegationToken;
}
}
private static Class<? extends DelegationTokenAuthenticator>
DEFAULT_AUTHENTICATOR = KerberosDelegationTokenAuthenticator.class;
/**
* Sets the default {@link DelegationTokenAuthenticator} class to use when an
* {@link DelegationTokenAuthenticatedURL} instance is created without
* specifying one.
*
* The default class is {@link KerberosDelegationTokenAuthenticator}
*
* @param authenticator the authenticator class to use as default.
*/
public static void setDefaultDelegationTokenAuthenticator(
Class<? extends DelegationTokenAuthenticator> authenticator) {
DEFAULT_AUTHENTICATOR = authenticator;
}
/**
* Returns the default {@link DelegationTokenAuthenticator} class to use when
* an {@link DelegationTokenAuthenticatedURL} instance is created without
* specifying one.
* <p/>
* The default class is {@link KerberosDelegationTokenAuthenticator}
*
* @return the delegation token authenticator class to use as default.
*/
public static Class<? extends DelegationTokenAuthenticator>
getDefaultDelegationTokenAuthenticator() {
return DEFAULT_AUTHENTICATOR;
}
private static DelegationTokenAuthenticator
obtainDelegationTokenAuthenticator(DelegationTokenAuthenticator dta) {
try {
return (dta != null) ? dta : DEFAULT_AUTHENTICATOR.newInstance();
} catch (Exception ex) {
throw new IllegalArgumentException(ex);
}
}
/**
* Creates an <code>DelegationTokenAuthenticatedURL</code>.
* <p/>
* An instance of the default {@link DelegationTokenAuthenticator} will be
* used.
*/
public DelegationTokenAuthenticatedURL() {
this(null, null);
}
/**
* Creates an <code>DelegationTokenAuthenticatedURL</code>.
*
* @param authenticator the {@link DelegationTokenAuthenticator} instance to
* use, if <code>null</code> the default one will be used.
*/
public DelegationTokenAuthenticatedURL(
DelegationTokenAuthenticator authenticator) {
this(authenticator, null);
}
/**
* Creates an <code>DelegationTokenAuthenticatedURL</code> using the default
* {@link DelegationTokenAuthenticator} class.
*
* @param connConfigurator a connection configurator.
*/
public DelegationTokenAuthenticatedURL(
ConnectionConfigurator connConfigurator) {
this(null, connConfigurator);
}
/**
* Creates an <code>DelegationTokenAuthenticatedURL</code>.
*
* @param authenticator the {@link DelegationTokenAuthenticator} instance to
* use, if <code>null</code> the default one will be used.
* @param connConfigurator a connection configurator.
*/
public DelegationTokenAuthenticatedURL(
DelegationTokenAuthenticator authenticator,
ConnectionConfigurator connConfigurator) {
super(obtainDelegationTokenAuthenticator(authenticator), connConfigurator);
}
/**
* Returns an authenticated {@link HttpURLConnection}. It uses a Delegation
* Token only if the given auth token is an instance of {@link Token} and
* it contains a Delegation Token, otherwise use the configured
* {@link DelegationTokenAuthenticator} to authenticate the connection.
*
* @param url the URL to connect to. Only HTTP/S URLs are supported.
* @param token the authentication token being used for the user.
* @return an authenticated {@link HttpURLConnection}.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
@Override
public HttpURLConnection openConnection(URL url, AuthenticatedURL.Token token)
throws IOException, AuthenticationException {
return (token instanceof Token) ? openConnection(url, (Token) token)
: super.openConnection(url ,token);
}
/**
* Returns an authenticated {@link HttpURLConnection}. If the Delegation
* Token is present, it will be used taking precedence over the configured
* <code>Authenticator</code>.
*
* @param url the URL to connect to. Only HTTP/S URLs are supported.
* @param token the authentication token being used for the user.
* @return an authenticated {@link HttpURLConnection}.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public HttpURLConnection openConnection(URL url, Token token)
throws IOException, AuthenticationException {
return openConnection(url, token, null);
}
private URL augmentURL(URL url, Map<String, String> params)
throws IOException {
if (params != null && params.size() > 0) {
String urlStr = url.toExternalForm();
StringBuilder sb = new StringBuilder(urlStr);
String separator = (urlStr.contains("?")) ? "&" : "?";
for (Map.Entry<String, String> param : params.entrySet()) {
sb.append(separator).append(param.getKey()).append("=").append(
param.getValue());
separator = "&";
}
url = new URL(sb.toString());
}
return url;
}
/**
* Returns an authenticated {@link HttpURLConnection}. If the Delegation
* Token is present, it will be used taking precedence over the configured
* <code>Authenticator</code>. If the <code>doAs</code> parameter is not NULL,
* the request will be done on behalf of the specified <code>doAs</code> user.
*
* @param url the URL to connect to. Only HTTP/S URLs are supported.
* @param token the authentication token being used for the user.
* @param doAs user to do the request on behalf of, if NULL the request is
* as self.
* @return an authenticated {@link HttpURLConnection}.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public HttpURLConnection openConnection(URL url, Token token, String doAs)
throws IOException, AuthenticationException {
Preconditions.checkNotNull(url, "url");
Preconditions.checkNotNull(token, "token");
Map<String, String> extraParams = new HashMap<String, String>();
// delegation token
Credentials creds = UserGroupInformation.getCurrentUser().getCredentials();
if (!creds.getAllTokens().isEmpty()) {
InetSocketAddress serviceAddr = new InetSocketAddress(url.getHost(),
url.getPort());
Text service = SecurityUtil.buildTokenService(serviceAddr);
org.apache.hadoop.security.token.Token<? extends TokenIdentifier> dt =
creds.getToken(service);
if (dt != null) {
extraParams.put(KerberosDelegationTokenAuthenticator.DELEGATION_PARAM,
dt.encodeToUrlString());
}
}
// proxyuser
if (doAs != null) {
extraParams.put(DO_AS, URLEncoder.encode(doAs, "UTF-8"));
}
url = augmentURL(url, extraParams);
return super.openConnection(url, token);
}
/**
* Requests a delegation token using the configured <code>Authenticator</code>
* for authentication.
*
* @param url the URL to get the delegation token from. Only HTTP/S URLs are
* supported.
* @param token the authentication token being used for the user where the
* Delegation token will be stored.
* @return a delegation token.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public org.apache.hadoop.security.token.Token<AbstractDelegationTokenIdentifier>
getDelegationToken(URL url, Token token, String renewer)
throws IOException, AuthenticationException {
Preconditions.checkNotNull(url, "url");
Preconditions.checkNotNull(token, "token");
try {
token.delegationToken =
((KerberosDelegationTokenAuthenticator) getAuthenticator()).
getDelegationToken(url, token, renewer);
return token.delegationToken;
} catch (IOException ex) {
token.delegationToken = null;
throw ex;
}
}
/**
* Renews a delegation token from the server end-point using the
* configured <code>Authenticator</code> for authentication.
*
* @param url the URL to renew the delegation token from. Only HTTP/S URLs are
* supported.
* @param token the authentication token with the Delegation Token to renew.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public long renewDelegationToken(URL url, Token token)
throws IOException, AuthenticationException {
Preconditions.checkNotNull(url, "url");
Preconditions.checkNotNull(token, "token");
Preconditions.checkNotNull(token.delegationToken,
"No delegation token available");
try {
return ((KerberosDelegationTokenAuthenticator) getAuthenticator()).
renewDelegationToken(url, token, token.delegationToken);
} catch (IOException ex) {
token.delegationToken = null;
throw ex;
}
}
/**
* Cancels a delegation token from the server end-point. It does not require
* being authenticated by the configured <code>Authenticator</code>.
*
* @param url the URL to cancel the delegation token from. Only HTTP/S URLs
* are supported.
* @param token the authentication token with the Delegation Token to cancel.
* @throws IOException if an IO error occurred.
*/
public void cancelDelegationToken(URL url, Token token)
throws IOException {
Preconditions.checkNotNull(url, "url");
Preconditions.checkNotNull(token, "token");
Preconditions.checkNotNull(token.delegationToken,
"No delegation token available");
try {
((KerberosDelegationTokenAuthenticator) getAuthenticator()).
cancelDelegationToken(url, token, token.delegationToken);
} finally {
token.delegationToken = null;
}
}
}
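Putting the new class together, a typical client flow fetches a delegation token once with the configured Authenticator and reuses it on later connections. A minimal sketch, assuming a reachable HttpFS-style endpoint and valid Kerberos credentials; the URL, renewer, and class name are placeholders:

import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;

public class DelegationTokenUrlDemo {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://host:14000/webhdfs/v1/?op=GETHOMEDIRECTORY");
    DelegationTokenAuthenticatedURL.Token token =
        new DelegationTokenAuthenticatedURL.Token();
    DelegationTokenAuthenticatedURL aUrl = new DelegationTokenAuthenticatedURL();

    // Uses the configured Authenticator (Kerberos SPNEGO by default) and
    // stores the obtained delegation token inside the auth token.
    aUrl.getDelegationToken(url, token, "renewer-principal");

    // Later connections use the stored delegation token instead of
    // re-authenticating; the third argument is an optional doAs proxy user.
    HttpURLConnection conn = aUrl.openConnection(url, token, null);
    System.out.println("HTTP status: " + conn.getResponseCode());
  }
}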

View File

@ -0,0 +1,274 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.SaslRpcServer;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import org.codehaus.jackson.map.ObjectMapper;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.io.Writer;
import java.nio.charset.Charset;
import java.security.Principal;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
/**
* The <code>DelegationTokenAuthenticationFilter</code> filter is a
* {@link AuthenticationFilter} with Hadoop Delegation Token support.
* <p/>
* By default it uses its own instance of the {@link
* AbstractDelegationTokenSecretManager}. For situations where an external
* <code>AbstractDelegationTokenSecretManager</code> is required (i.e. one that
* shares the secret with an <code>AbstractDelegationTokenSecretManager</code>
* instance running in other services), the external
* <code>AbstractDelegationTokenSecretManager</code> must be set as an
* attribute in the {@link ServletContext} of the web application using the
* {@link #DELEGATION_TOKEN_SECRET_MANAGER_ATTR} attribute name (
* 'hadoop.http.delegation-token-secret-manager').
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DelegationTokenAuthenticationFilter
extends AuthenticationFilter {
private static final String APPLICATION_JSON_MIME = "application/json";
private static final String ERROR_EXCEPTION_JSON = "exception";
private static final String ERROR_MESSAGE_JSON = "message";
/**
* Sets an external <code>DelegationTokenSecretManager</code> instance to
* manage creation and verification of Delegation Tokens.
* <p/>
* This is useful for use cases where secrets must be shared across multiple
* services.
*/
public static final String DELEGATION_TOKEN_SECRET_MANAGER_ATTR =
"hadoop.http.delegation-token-secret-manager";
private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
private static final ThreadLocal<UserGroupInformation> UGI_TL =
new ThreadLocal<UserGroupInformation>();
public static final String PROXYUSER_PREFIX = "proxyuser";
private SaslRpcServer.AuthMethod handlerAuthMethod;
/**
* It delegates to
* {@link AuthenticationFilter#getConfiguration(String, FilterConfig)} and
* then overrides the {@link AuthenticationHandler} to use if authentication
* type is set to <code>simple</code> or <code>kerberos</code> in order to use
* the corresponding implementation with delegation token support.
*
* @param configPrefix parameter not used.
* @param filterConfig parameter not used.
* @return hadoop-auth de-prefixed configuration for the filter and handler.
*/
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) throws ServletException {
Properties props = super.getConfiguration(configPrefix, filterConfig);
String authType = props.getProperty(AUTH_TYPE);
if (authType.equals(PseudoAuthenticationHandler.TYPE)) {
props.setProperty(AUTH_TYPE,
PseudoDelegationTokenAuthenticationHandler.class.getName());
} else if (authType.equals(KerberosAuthenticationHandler.TYPE)) {
props.setProperty(AUTH_TYPE,
KerberosDelegationTokenAuthenticationHandler.class.getName());
}
return props;
}
/**
* Returns the proxyuser configuration. All returned properties must start
* with <code>proxyuser.</code>.
* <p/>
* Subclasses may override this method if the proxyuser configuration is
* read from a place other than the filter init parameters.
*
* @param filterConfig filter configuration object
* @return the proxyuser configuration properties.
* @throws ServletException thrown if the configuration could not be created.
*/
protected Configuration getProxyuserConfiguration(FilterConfig filterConfig)
throws ServletException {
// this filter class gets the configuration from the filter configs, we are
// creating an empty configuration and injecting the proxyuser settings in
// it. In the initialization of the filter, the returned configuration is
// passed to the ProxyUsers which only looks for 'proxyusers.' properties.
Configuration conf = new Configuration(false);
Enumeration<?> names = filterConfig.getInitParameterNames();
while (names.hasMoreElements()) {
String name = (String) names.nextElement();
if (name.startsWith(PROXYUSER_PREFIX + ".")) {
String value = filterConfig.getInitParameter(name);
conf.set(name, value);
}
}
return conf;
}
@Override
public void init(FilterConfig filterConfig) throws ServletException {
super.init(filterConfig);
AuthenticationHandler handler = getAuthenticationHandler();
AbstractDelegationTokenSecretManager dtSecretManager =
(AbstractDelegationTokenSecretManager) filterConfig.getServletContext().
getAttribute(DELEGATION_TOKEN_SECRET_MANAGER_ATTR);
if (dtSecretManager != null && handler
instanceof DelegationTokenAuthenticationHandler) {
DelegationTokenAuthenticationHandler dtHandler =
(DelegationTokenAuthenticationHandler) getAuthenticationHandler();
dtHandler.setExternalDelegationTokenSecretManager(dtSecretManager);
}
if (handler instanceof PseudoAuthenticationHandler ||
handler instanceof PseudoDelegationTokenAuthenticationHandler) {
setHandlerAuthMethod(SaslRpcServer.AuthMethod.SIMPLE);
}
if (handler instanceof KerberosAuthenticationHandler ||
handler instanceof KerberosDelegationTokenAuthenticationHandler) {
setHandlerAuthMethod(SaslRpcServer.AuthMethod.KERBEROS);
}
// proxyuser configuration
Configuration conf = getProxyuserConfiguration(filterConfig);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf, PROXYUSER_PREFIX);
}
protected void setHandlerAuthMethod(SaslRpcServer.AuthMethod authMethod) {
this.handlerAuthMethod = authMethod;
}
@VisibleForTesting
static String getDoAs(HttpServletRequest request) {
List<NameValuePair> list = URLEncodedUtils.parse(request.getQueryString(),
UTF8_CHARSET);
if (list != null) {
for (NameValuePair nv : list) {
if (DelegationTokenAuthenticatedURL.DO_AS.equals(nv.getName())) {
return nv.getValue();
}
}
}
return null;
}
static UserGroupInformation getHttpUserGroupInformationInContext() {
return UGI_TL.get();
}
@Override
protected void doFilter(FilterChain filterChain, HttpServletRequest request,
HttpServletResponse response) throws IOException, ServletException {
boolean requestCompleted = false;
UserGroupInformation ugi = null;
AuthenticationToken authToken = (AuthenticationToken)
request.getUserPrincipal();
if (authToken != null && authToken != AuthenticationToken.ANONYMOUS) {
// if the request was authenticated because of a delegation token,
// then we ignore proxyuser (this is the same as the RPC behavior).
ugi = (UserGroupInformation) request.getAttribute(
DelegationTokenAuthenticationHandler.DELEGATION_TOKEN_UGI_ATTRIBUTE);
if (ugi == null) {
String realUser = request.getUserPrincipal().getName();
ugi = UserGroupInformation.createRemoteUser(realUser,
handlerAuthMethod);
String doAsUser = getDoAs(request);
if (doAsUser != null) {
ugi = UserGroupInformation.createProxyUser(doAsUser, ugi);
try {
ProxyUsers.authorize(ugi, request.getRemoteHost());
} catch (AuthorizationException ex) {
String msg = String.format(
"User '%s' from host '%s' not allowed to impersonate user '%s'",
realUser, request.getRemoteHost(), doAsUser);
response.setStatus(HttpServletResponse.SC_FORBIDDEN);
response.setContentType(APPLICATION_JSON_MIME);
Map<String, String> json = new HashMap<String, String>();
json.put(ERROR_EXCEPTION_JSON,
AuthorizationException.class.getName());
json.put(ERROR_MESSAGE_JSON, msg);
Writer writer = response.getWriter();
ObjectMapper jsonMapper = new ObjectMapper();
jsonMapper.writeValue(writer, json);
requestCompleted = true;
}
}
}
UGI_TL.set(ugi);
}
if (!requestCompleted) {
final UserGroupInformation ugiF = ugi;
try {
request = new HttpServletRequestWrapper(request) {
@Override
public String getAuthType() {
return (ugiF != null) ? handlerAuthMethod.toString() : null;
}
@Override
public String getRemoteUser() {
return (ugiF != null) ? ugiF.getShortUserName() : null;
}
@Override
public Principal getUserPrincipal() {
return (ugiF != null) ? new Principal() {
@Override
public String getName() {
return ugiF.getUserName();
}
} : null;
}
};
super.doFilter(filterChain, request, response);
} finally {
UGI_TL.remove();
}
}
}
}
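External services plug their own secret manager into the filter by publishing it as a ServletContext attribute under DELEGATION_TOKEN_SECRET_MANAGER_ATTR before the filter's init() runs. A minimal sketch of one way to do that; the SecretManagerPublisher listener is hypothetical and assumes it is registered programmatically with the servlet container:

import javax.servlet.ServletContext;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter;

// Hypothetical listener: shares an existing secret manager with the filter by
// publishing it under the attribute name the filter looks up in init().
public class SecretManagerPublisher implements ServletContextListener {

  private final AbstractDelegationTokenSecretManager<?> secretManager;

  public SecretManagerPublisher(
      AbstractDelegationTokenSecretManager<?> secretManager) {
    this.secretManager = secretManager;
  }

  @Override
  public void contextInitialized(ServletContextEvent sce) {
    ServletContext ctx = sce.getServletContext();
    ctx.setAttribute(
        DelegationTokenAuthenticationFilter.DELEGATION_TOKEN_SECRET_MANAGER_ATTR,
        secretManager);
  }

  @Override
  public void contextDestroyed(ServletContextEvent sce) {
    // Nothing to clean up in this sketch.
  }
}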

View File

@ -0,0 +1,359 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.codehaus.jackson.map.ObjectMapper;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
import java.io.IOException;
import java.io.Writer;
import java.text.MessageFormat;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
/**
* An {@link AuthenticationHandler} that implements Kerberos SPNEGO mechanism
* for HTTP and supports Delegation Token functionality.
* <p/>
* In addition to the wrapped {@link AuthenticationHandler} configuration
* properties, this handler supports the following properties prefixed
* with the type of the wrapped <code>AuthenticationHandler</code>:
* <ul>
* <li>delegation-token.token-kind: the token kind for generated tokens
* (no default, required property).</li>
* <li>delegation-token.update-interval.sec: secret manager master key
* update interval in seconds (default 1 day).</li>
* <li>delegation-token.max-lifetime.sec: maximum life of a delegation
* token in seconds (default 7 days).</li>
* <li>delegation-token.renewal-interval.sec: renewal interval for
* delegation tokens in seconds (default 1 day).</li>
* <li>delegation-token.removal-scan-interval.sec: delegation tokens
* removal scan interval in seconds (default 1 hour).</li>
* </ul>
*
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public abstract class DelegationTokenAuthenticationHandler
implements AuthenticationHandler {
protected static final String TYPE_POSTFIX = "-dt";
public static final String PREFIX = "delegation-token.";
public static final String TOKEN_KIND = PREFIX + "token-kind.sec";
public static final String UPDATE_INTERVAL = PREFIX + "update-interval.sec";
public static final long UPDATE_INTERVAL_DEFAULT = 24 * 60 * 60;
public static final String MAX_LIFETIME = PREFIX + "max-lifetime.sec";
public static final long MAX_LIFETIME_DEFAULT = 7 * 24 * 60 * 60;
public static final String RENEW_INTERVAL = PREFIX + "renew-interval.sec";
public static final long RENEW_INTERVAL_DEFAULT = 24 * 60 * 60;
public static final String REMOVAL_SCAN_INTERVAL = PREFIX +
"removal-scan-interval.sec";
public static final long REMOVAL_SCAN_INTERVAL_DEFAULT = 60 * 60;
private static final Set<String> DELEGATION_TOKEN_OPS = new HashSet<String>();
static final String DELEGATION_TOKEN_UGI_ATTRIBUTE =
"hadoop.security.delegation-token.ugi";
static {
DELEGATION_TOKEN_OPS.add(KerberosDelegationTokenAuthenticator.
DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
DELEGATION_TOKEN_OPS.add(KerberosDelegationTokenAuthenticator.
DelegationTokenOperation.RENEWDELEGATIONTOKEN.toString());
DELEGATION_TOKEN_OPS.add(KerberosDelegationTokenAuthenticator.
DelegationTokenOperation.CANCELDELEGATIONTOKEN.toString());
}
private AuthenticationHandler authHandler;
private DelegationTokenManager tokenManager;
private String authType;
public DelegationTokenAuthenticationHandler(AuthenticationHandler handler) {
authHandler = handler;
authType = handler.getType();
}
@VisibleForTesting
DelegationTokenManager getTokenManager() {
return tokenManager;
}
@Override
public void init(Properties config) throws ServletException {
authHandler.init(config);
initTokenManager(config);
}
/**
* Sets an external <code>DelegationTokenSecretManager</code> instance to
* manage creation and verification of Delegation Tokens.
* <p/>
* This is useful for use cases where secrets must be shared across multiple
* services.
*
* @param secretManager a <code>DelegationTokenSecretManager</code> instance
*/
public void setExternalDelegationTokenSecretManager(
AbstractDelegationTokenSecretManager secretManager) {
tokenManager.setExternalDelegationTokenSecretManager(secretManager);
}
@VisibleForTesting
@SuppressWarnings("unchecked")
public void initTokenManager(Properties config) {
String configPrefix = authHandler.getType() + ".";
Configuration conf = new Configuration(false);
for (Map.Entry entry : config.entrySet()) {
conf.set((String) entry.getKey(), (String) entry.getValue());
}
String tokenKind = conf.get(TOKEN_KIND);
if (tokenKind == null) {
throw new IllegalArgumentException(
"The configuration does not define the token kind");
}
tokenKind = tokenKind.trim();
long updateInterval = conf.getLong(configPrefix + UPDATE_INTERVAL,
UPDATE_INTERVAL_DEFAULT);
long maxLifeTime = conf.getLong(configPrefix + MAX_LIFETIME,
MAX_LIFETIME_DEFAULT);
long renewInterval = conf.getLong(configPrefix + RENEW_INTERVAL,
RENEW_INTERVAL_DEFAULT);
long removalScanInterval = conf.getLong(
configPrefix + REMOVAL_SCAN_INTERVAL, REMOVAL_SCAN_INTERVAL_DEFAULT);
tokenManager = new DelegationTokenManager(new Text(tokenKind),
updateInterval * 1000, maxLifeTime * 1000, renewInterval * 1000,
removalScanInterval * 1000);
tokenManager.init();
}
@Override
public void destroy() {
tokenManager.destroy();
authHandler.destroy();
}
@Override
public String getType() {
return authType;
}
private static final String ENTER = System.getProperty("line.separator");
@Override
@SuppressWarnings("unchecked")
public boolean managementOperation(AuthenticationToken token,
HttpServletRequest request, HttpServletResponse response)
throws IOException, AuthenticationException {
boolean requestContinues = true;
String op = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.OP_PARAM);
op = (op != null) ? op.toUpperCase() : null;
if (DELEGATION_TOKEN_OPS.contains(op) &&
!request.getMethod().equals("OPTIONS")) {
KerberosDelegationTokenAuthenticator.DelegationTokenOperation dtOp =
KerberosDelegationTokenAuthenticator.
DelegationTokenOperation.valueOf(op);
if (dtOp.getHttpMethod().equals(request.getMethod())) {
boolean doManagement;
if (dtOp.requiresKerberosCredentials() && token == null) {
token = authenticate(request, response);
if (token == null) {
requestContinues = false;
doManagement = false;
} else {
doManagement = true;
}
} else {
doManagement = true;
}
if (doManagement) {
UserGroupInformation requestUgi = (token != null)
? UserGroupInformation.createRemoteUser(token.getUserName())
: null;
Map map = null;
switch (dtOp) {
case GETDELEGATIONTOKEN:
if (requestUgi == null) {
throw new IllegalStateException("request UGI cannot be NULL");
}
String renewer = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.RENEWER_PARAM);
try {
Token<?> dToken = tokenManager.createToken(requestUgi, renewer);
map = delegationTokenToJSON(dToken);
} catch (IOException ex) {
throw new AuthenticationException(ex.toString(), ex);
}
break;
case RENEWDELEGATIONTOKEN:
if (requestUgi == null) {
throw new IllegalStateException("request UGI cannot be NULL");
}
String tokenToRenew = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.TOKEN_PARAM);
if (tokenToRenew == null) {
response.sendError(HttpServletResponse.SC_BAD_REQUEST,
MessageFormat.format(
"Operation [{0}] requires the parameter [{1}]", dtOp,
KerberosDelegationTokenAuthenticator.TOKEN_PARAM)
);
requestContinues = false;
} else {
Token<DelegationTokenIdentifier> dt =
new Token<DelegationTokenIdentifier>();
try {
dt.decodeFromUrlString(tokenToRenew);
long expirationTime = tokenManager.renewToken(dt,
requestUgi.getShortUserName());
map = new HashMap();
map.put("long", expirationTime);
} catch (IOException ex) {
throw new AuthenticationException(ex.toString(), ex);
}
}
break;
case CANCELDELEGATIONTOKEN:
String tokenToCancel = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.TOKEN_PARAM);
if (tokenToCancel == null) {
response.sendError(HttpServletResponse.SC_BAD_REQUEST,
MessageFormat.format(
"Operation [{0}] requires the parameter [{1}]", dtOp,
KerberosDelegationTokenAuthenticator.TOKEN_PARAM)
);
requestContinues = false;
} else {
Token<DelegationTokenIdentifier> dt =
new Token<DelegationTokenIdentifier>();
try {
dt.decodeFromUrlString(tokenToCancel);
tokenManager.cancelToken(dt, (requestUgi != null)
? requestUgi.getShortUserName() : null);
} catch (IOException ex) {
response.sendError(HttpServletResponse.SC_NOT_FOUND,
"Invalid delegation token, cannot cancel");
requestContinues = false;
}
}
break;
}
if (requestContinues) {
response.setStatus(HttpServletResponse.SC_OK);
if (map != null) {
response.setContentType(MediaType.APPLICATION_JSON);
Writer writer = response.getWriter();
ObjectMapper jsonMapper = new ObjectMapper();
jsonMapper.writeValue(writer, map);
writer.write(ENTER);
writer.flush();
}
requestContinues = false;
}
}
} else {
response.sendError(HttpServletResponse.SC_BAD_REQUEST,
MessageFormat.format(
"Wrong HTTP method [{0}] for operation [{1}], it should be " +
"[{2}]", request.getMethod(), dtOp, dtOp.getHttpMethod()));
requestContinues = false;
}
}
return requestContinues;
}
@SuppressWarnings("unchecked")
private static Map delegationTokenToJSON(Token token) throws IOException {
Map json = new LinkedHashMap();
json.put(
KerberosDelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON,
token.encodeToUrlString());
Map response = new LinkedHashMap();
response.put(KerberosDelegationTokenAuthenticator.DELEGATION_TOKEN_JSON,
json);
return response;
}
/**
* Authenticates a request looking for the <code>delegation</code>
 * query-string parameter and verifying it is a valid token. If there is no
 * <code>delegation</code> query-string parameter, it delegates the
* authentication to the {@link KerberosAuthenticationHandler} unless it is
* disabled.
*
* @param request the HTTP client request.
* @param response the HTTP client response.
* @return the authentication token for the authenticated request.
* @throws IOException thrown if an IO error occurred.
* @throws AuthenticationException thrown if the authentication failed.
*/
@Override
public AuthenticationToken authenticate(HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token;
String delegationParam = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.DELEGATION_PARAM);
if (delegationParam != null) {
try {
Token<DelegationTokenIdentifier> dt =
new Token<DelegationTokenIdentifier>();
dt.decodeFromUrlString(delegationParam);
UserGroupInformation ugi = tokenManager.verifyToken(dt);
final String shortName = ugi.getShortUserName();
      // creating an ephemeral token
token = new AuthenticationToken(shortName, ugi.getUserName(),
getType());
token.setExpires(0);
request.setAttribute(DELEGATION_TOKEN_UGI_ATTRIBUTE, ugi);
} catch (Throwable ex) {
throw new AuthenticationException("Could not verify DelegationToken, " +
ex.toString(), ex);
}
} else {
token = authHandler.authenticate(request, response);
}
return token;
}
}
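
A minimal initialization sketch (not part of this patch), modeled on the mock-based tests later in this change: the "EXAMPLE" token kind is a placeholder, and only the token manager is initialized here, since a full init() would also need the wrapped handler's own configuration (for Kerberos, principal and keytab).

// Illustrative fragment; enclosing class and exception handling elided.
Properties props = new Properties();
props.setProperty(DelegationTokenAuthenticationHandler.TOKEN_KIND, "EXAMPLE");
DelegationTokenAuthenticationHandler handler =
    new KerberosDelegationTokenAuthenticationHandler();
handler.initTokenManager(props);  // starts the managed secret manager
// ... handler.destroy() stops the secret manager when done.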

View File

@ -0,0 +1,250 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.Authenticator;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.codehaus.jackson.map.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URL;
import java.net.URLEncoder;
import java.util.HashMap;
import java.util.Map;
/**
* {@link Authenticator} wrapper that enhances an {@link Authenticator} with
* Delegation Token support.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class DelegationTokenAuthenticator implements Authenticator {
private static Logger LOG =
LoggerFactory.getLogger(DelegationTokenAuthenticator.class);
private static final String CONTENT_TYPE = "Content-Type";
private static final String APPLICATION_JSON_MIME = "application/json";
private static final String HTTP_GET = "GET";
private static final String HTTP_PUT = "PUT";
public static final String OP_PARAM = "op";
public static final String DELEGATION_PARAM = "delegation";
public static final String TOKEN_PARAM = "token";
public static final String RENEWER_PARAM = "renewer";
public static final String DELEGATION_TOKEN_JSON = "Token";
public static final String DELEGATION_TOKEN_URL_STRING_JSON = "urlString";
public static final String RENEW_DELEGATION_TOKEN_JSON = "long";
/**
* DelegationToken operations.
*/
@InterfaceAudience.Private
public static enum DelegationTokenOperation {
GETDELEGATIONTOKEN(HTTP_GET, true),
RENEWDELEGATIONTOKEN(HTTP_PUT, true),
CANCELDELEGATIONTOKEN(HTTP_PUT, false);
private String httpMethod;
private boolean requiresKerberosCredentials;
private DelegationTokenOperation(String httpMethod,
boolean requiresKerberosCredentials) {
this.httpMethod = httpMethod;
this.requiresKerberosCredentials = requiresKerberosCredentials;
}
public String getHttpMethod() {
return httpMethod;
}
public boolean requiresKerberosCredentials() {
return requiresKerberosCredentials;
}
}
private Authenticator authenticator;
public DelegationTokenAuthenticator(Authenticator authenticator) {
this.authenticator = authenticator;
}
@Override
public void setConnectionConfigurator(ConnectionConfigurator configurator) {
authenticator.setConnectionConfigurator(configurator);
}
private boolean hasDelegationToken(URL url) {
String queryStr = url.getQuery();
return (queryStr != null) && queryStr.contains(DELEGATION_PARAM + "=");
}
@Override
public void authenticate(URL url, AuthenticatedURL.Token token)
throws IOException, AuthenticationException {
if (!hasDelegationToken(url)) {
authenticator.authenticate(url, token);
}
}
/**
* Requests a delegation token using the configured <code>Authenticator</code>
* for authentication.
*
* @param url the URL to get the delegation token from. Only HTTP/S URLs are
* supported.
* @param token the authentication token being used for the user where the
* Delegation token will be stored.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public Token<AbstractDelegationTokenIdentifier> getDelegationToken(URL url,
AuthenticatedURL.Token token, String renewer)
throws IOException, AuthenticationException {
Map json = doDelegationTokenOperation(url, token,
DelegationTokenOperation.GETDELEGATIONTOKEN, renewer, null, true);
json = (Map) json.get(DELEGATION_TOKEN_JSON);
String tokenStr = (String) json.get(DELEGATION_TOKEN_URL_STRING_JSON);
Token<AbstractDelegationTokenIdentifier> dToken =
new Token<AbstractDelegationTokenIdentifier>();
dToken.decodeFromUrlString(tokenStr);
InetSocketAddress service = new InetSocketAddress(url.getHost(),
url.getPort());
SecurityUtil.setTokenService(dToken, service);
return dToken;
}
/**
* Renews a delegation token from the server end-point using the
* configured <code>Authenticator</code> for authentication.
*
* @param url the URL to renew the delegation token from. Only HTTP/S URLs are
* supported.
* @param token the authentication token with the Delegation Token to renew.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public long renewDelegationToken(URL url,
AuthenticatedURL.Token token,
Token<AbstractDelegationTokenIdentifier> dToken)
throws IOException, AuthenticationException {
Map json = doDelegationTokenOperation(url, token,
DelegationTokenOperation.RENEWDELEGATIONTOKEN, null, dToken, true);
return (Long) json.get(RENEW_DELEGATION_TOKEN_JSON);
}
/**
* Cancels a delegation token from the server end-point. It does not require
* being authenticated by the configured <code>Authenticator</code>.
*
* @param url the URL to cancel the delegation token from. Only HTTP/S URLs
* are supported.
* @param token the authentication token with the Delegation Token to cancel.
* @throws IOException if an IO error occurred.
*/
public void cancelDelegationToken(URL url,
AuthenticatedURL.Token token,
Token<AbstractDelegationTokenIdentifier> dToken)
throws IOException {
try {
doDelegationTokenOperation(url, token,
DelegationTokenOperation.CANCELDELEGATIONTOKEN, null, dToken, false);
} catch (AuthenticationException ex) {
throw new IOException("This should not happen: " + ex.getMessage(), ex);
}
}
private Map doDelegationTokenOperation(URL url,
AuthenticatedURL.Token token, DelegationTokenOperation operation,
String renewer, Token<?> dToken, boolean hasResponse)
throws IOException, AuthenticationException {
Map ret = null;
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, operation.toString());
if (renewer != null) {
params.put(RENEWER_PARAM, renewer);
}
if (dToken != null) {
params.put(TOKEN_PARAM, dToken.encodeToUrlString());
}
String urlStr = url.toExternalForm();
StringBuilder sb = new StringBuilder(urlStr);
String separator = (urlStr.contains("?")) ? "&" : "?";
for (Map.Entry<String, String> entry : params.entrySet()) {
sb.append(separator).append(entry.getKey()).append("=").
append(URLEncoder.encode(entry.getValue(), "UTF8"));
separator = "&";
}
url = new URL(sb.toString());
AuthenticatedURL aUrl = new AuthenticatedURL(this);
HttpURLConnection conn = aUrl.openConnection(url, token);
conn.setRequestMethod(operation.getHttpMethod());
validateResponse(conn, HttpURLConnection.HTTP_OK);
if (hasResponse) {
String contentType = conn.getHeaderField(CONTENT_TYPE);
contentType = (contentType != null) ? contentType.toLowerCase()
: null;
if (contentType != null &&
contentType.contains(APPLICATION_JSON_MIME)) {
try {
ObjectMapper mapper = new ObjectMapper();
ret = mapper.readValue(conn.getInputStream(), Map.class);
} catch (Exception ex) {
throw new AuthenticationException(String.format(
"'%s' did not handle the '%s' delegation token operation: %s",
url.getAuthority(), operation, ex.getMessage()), ex);
}
} else {
throw new AuthenticationException(String.format("'%s' did not " +
"respond with JSON to the '%s' delegation token operation",
url.getAuthority(), operation));
}
}
return ret;
}
@SuppressWarnings("unchecked")
private static void validateResponse(HttpURLConnection conn, int expected)
throws IOException {
int status = conn.getResponseCode();
if (status != expected) {
try {
conn.getInputStream().close();
} catch (IOException ex) {
//NOP
}
String msg = String.format("HTTP status, expected [%d], got [%d]: %s",
expected, status, conn.getResponseMessage());
LOG.debug(msg);
throw new IOException(msg);
}
}
}
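
A hedged client-side sketch (not part of this patch) of the three token operations this class exposes, using the Kerberos subclass added later in this change; the endpoint URL and renewer name are placeholders.

// Illustrative fragment; assumes an enclosing method that throws
// IOException and AuthenticationException.
KerberosDelegationTokenAuthenticator auth =
    new KerberosDelegationTokenAuthenticator();
AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
URL endpoint = new URL("http://localhost:14000/service");  // placeholder URL
Token<AbstractDelegationTokenIdentifier> dToken =
    auth.getDelegationToken(endpoint, authToken, "renewer-user");
long newExpiration = auth.renewDelegationToken(endpoint, authToken, dToken);
auth.cancelDelegationToken(endpoint, authToken, dToken);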

View File

@ -15,21 +15,24 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.lib.service;
+package org.apache.hadoop.security.token.delegation.web;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 /**
- * HttpFS <code>DelegationTokenIdentifier</code> implementation.
+ * Concrete delegation token identifier used by {@link DelegationTokenManager},
+ * {@link KerberosDelegationTokenAuthenticationHandler} and
+ * {@link DelegationTokenAuthenticationFilter}.
  */
 @InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class DelegationTokenIdentifier
     extends AbstractDelegationTokenIdentifier {
-  private Text kind = WebHdfsFileSystem.TOKEN_KIND;
+  private Text kind;
   public DelegationTokenIdentifier(Text kind) {
     this.kind = kind;
@ -50,8 +53,8 @@ public DelegationTokenIdentifier(Text kind, Text owner, Text renewer,
   }
   /**
-   * Returns the kind, <code>TOKEN_KIND</code>.
-   * @return returns <code>TOKEN_KIND</code>.
+   * Return the delegation token kind
+   * @return returns the delegation token kind
    */
   @Override
   public Text getKind() {

View File

@ -0,0 +1,153 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
/**
* Delegation Token Manager used by the
* {@link KerberosDelegationTokenAuthenticationHandler}.
*
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
class DelegationTokenManager {
private static class DelegationTokenSecretManager
extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
private Text tokenKind;
public DelegationTokenSecretManager(Text tokenKind,
long delegationKeyUpdateInterval,
long delegationTokenMaxLifetime,
long delegationTokenRenewInterval,
long delegationTokenRemoverScanInterval) {
super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
this.tokenKind = tokenKind;
}
@Override
public DelegationTokenIdentifier createIdentifier() {
return new DelegationTokenIdentifier(tokenKind);
}
}
private AbstractDelegationTokenSecretManager secretManager = null;
private boolean managedSecretManager;
private Text tokenKind;
public DelegationTokenManager(Text tokenKind,
long delegationKeyUpdateInterval,
long delegationTokenMaxLifetime,
long delegationTokenRenewInterval,
long delegationTokenRemoverScanInterval) {
this.secretManager = new DelegationTokenSecretManager(tokenKind,
delegationKeyUpdateInterval, delegationTokenMaxLifetime,
delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
this.tokenKind = tokenKind;
managedSecretManager = true;
}
/**
* Sets an external <code>DelegationTokenSecretManager</code> instance to
* manage creation and verification of Delegation Tokens.
* <p/>
* This is useful for use cases where secrets must be shared across multiple
* services.
*
* @param secretManager a <code>DelegationTokenSecretManager</code> instance
*/
public void setExternalDelegationTokenSecretManager(
AbstractDelegationTokenSecretManager secretManager) {
this.secretManager.stopThreads();
this.secretManager = secretManager;
this.tokenKind = secretManager.createIdentifier().getKind();
managedSecretManager = false;
}
public void init() {
if (managedSecretManager) {
try {
secretManager.startThreads();
} catch (IOException ex) {
throw new RuntimeException("Could not start " +
secretManager.getClass() + ": " + ex.toString(), ex);
}
}
}
public void destroy() {
if (managedSecretManager) {
secretManager.stopThreads();
}
}
@SuppressWarnings("unchecked")
public Token<DelegationTokenIdentifier> createToken(UserGroupInformation ugi,
String renewer) {
renewer = (renewer == null) ? ugi.getShortUserName() : renewer;
String user = ugi.getUserName();
Text owner = new Text(user);
Text realUser = null;
if (ugi.getRealUser() != null) {
realUser = new Text(ugi.getRealUser().getUserName());
}
DelegationTokenIdentifier tokenIdentifier = new DelegationTokenIdentifier(
tokenKind, owner, new Text(renewer), realUser);
return new Token<DelegationTokenIdentifier>(tokenIdentifier, secretManager);
}
@SuppressWarnings("unchecked")
public long renewToken(Token<DelegationTokenIdentifier> token, String renewer)
throws IOException {
return secretManager.renewToken(token, renewer);
}
@SuppressWarnings("unchecked")
public void cancelToken(Token<DelegationTokenIdentifier> token,
String canceler) throws IOException {
canceler = (canceler != null) ? canceler :
verifyToken(token).getShortUserName();
secretManager.cancelToken(token, canceler);
}
@SuppressWarnings("unchecked")
public UserGroupInformation verifyToken(Token<DelegationTokenIdentifier>
token) throws IOException {
ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
DataInputStream dis = new DataInputStream(buf);
DelegationTokenIdentifier id = new DelegationTokenIdentifier(tokenKind);
id.readFields(dis);
dis.close();
secretManager.verifyToken(id, token.getPassword());
return id.getUser();
}
}
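
A short life-cycle sketch (not part of this patch), following TestDelegationTokenManager below; the "foo" kind and renewer are placeholders, and the intervals are handed straight to the underlying secret manager, so they are milliseconds here (the handler above converts from seconds).

// Illustrative fragment; assumes an enclosing method that throws IOException.
DelegationTokenManager tm = new DelegationTokenManager(new Text("foo"),
    24 * 60 * 60 * 1000L,      // master key update interval
    7 * 24 * 60 * 60 * 1000L,  // max token lifetime
    24 * 60 * 60 * 1000L,      // renew interval
    60 * 60 * 1000L);          // removal scan interval
tm.init();
Token<DelegationTokenIdentifier> token =
    tm.createToken(UserGroupInformation.getCurrentUser(), "foo");
tm.verifyToken(token);        // throws if the token is not valid
tm.renewToken(token, "foo");  // returns the new expiration time
tm.cancelToken(token, "foo");
tm.destroy();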

View File

@ -15,33 +15,29 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.fs.http.client;
+package org.apache.hadoop.security.token.delegation.web;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
-import java.io.IOException;
+import javax.servlet.http.HttpServletRequest;
 /**
- * A <code>PseudoAuthenticator</code> subclass that uses FileSystemAccess's
- * <code>UserGroupInformation</code> to obtain the client user name (the UGI's login user).
+ * Util class that returns the remote {@link UserGroupInformation} in scope
+ * for the HTTP request.
  */
 @InterfaceAudience.Private
-public class HttpFSPseudoAuthenticator extends PseudoAuthenticator {
+public class HttpUserGroupInformation {
   /**
-   * Return the client user name.
+   * Returns the remote {@link UserGroupInformation} in context for the current
+   * HTTP request, taking into account proxy user requests.
    *
-   * @return the client user name.
+   * @return the remote {@link UserGroupInformation}, <code>NULL</code> if none.
    */
-  @Override
-  protected String getUserName() {
-    try {
-      return UserGroupInformation.getLoginUser().getUserName();
-    } catch (IOException ex) {
-      throw new SecurityException("Could not obtain current user, " + ex.getMessage(), ex);
-    }
+  public static UserGroupInformation get() {
+    return DelegationTokenAuthenticationFilter.
+        getHttpUserGroupInformationInContext();
   }
 }
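
A hedged sketch (not part of this patch) of a servlet behind the DelegationTokenAuthenticationFilter using this helper; the servlet name and response handling are hypothetical.

// Illustrative sketch; imports as in the surrounding files.
public class WhoAmIServlet extends HttpServlet {
  @Override
  protected void doGet(HttpServletRequest req, HttpServletResponse resp)
      throws IOException {
    // Remote user for this request, with proxy-user resolution applied.
    UserGroupInformation ugi = HttpUserGroupInformation.get();
    resp.getWriter().write((ugi != null) ? ugi.getShortUserName() : "anonymous");
  }
}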

View File

@ -0,0 +1,54 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
/**
* An {@link AuthenticationHandler} that implements Kerberos SPNEGO mechanism
* for HTTP and supports Delegation Token functionality.
* <p/>
* In addition to the {@link KerberosAuthenticationHandler} configuration
* properties, this handler supports:
* <ul>
* <li>kerberos.delegation-token.token-kind: the token kind for generated tokens
* (no default, required property).</li>
* <li>kerberos.delegation-token.update-interval.sec: secret manager master key
* update interval in seconds (default 1 day).</li>
* <li>kerberos.delegation-token.max-lifetime.sec: maximum life of a delegation
* token in seconds (default 7 days).</li>
* <li>kerberos.delegation-token.renewal-interval.sec: renewal interval for
* delegation tokens in seconds (default 1 day).</li>
* <li>kerberos.delegation-token.removal-scan-interval.sec: delegation tokens
* removal scan interval in seconds (default 1 hour).</li>
* </ul>
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class KerberosDelegationTokenAuthenticationHandler
extends DelegationTokenAuthenticationHandler {
public KerberosDelegationTokenAuthenticationHandler() {
super(new KerberosAuthenticationHandler(KerberosAuthenticationHandler.TYPE +
TYPE_POSTFIX));
}
}
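
A small configuration sketch (not part of this patch), patterned on the AFilter test fixture later in this change, wiring this handler into an AuthenticationFilter; the "EXAMPLE" token kind is a placeholder.

// Illustrative fragment of filter configuration properties.
Properties conf = new Properties();
conf.setProperty(AuthenticationFilter.AUTH_TYPE,
    KerberosDelegationTokenAuthenticationHandler.class.getName());
conf.setProperty(KerberosDelegationTokenAuthenticationHandler.TOKEN_KIND,
    "EXAMPLE");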

View File

@ -0,0 +1,46 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.authentication.client.Authenticator;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
/**
* The <code>KerberosDelegationTokenAuthenticator</code> provides support for
* Kerberos SPNEGO authentication mechanism and support for Hadoop Delegation
* Token operations.
* <p/>
 * It falls back to the {@link PseudoDelegationTokenAuthenticator} if the HTTP
 * endpoint does not trigger SPNEGO authentication.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class KerberosDelegationTokenAuthenticator
extends DelegationTokenAuthenticator {
public KerberosDelegationTokenAuthenticator() {
super(new KerberosAuthenticator() {
@Override
protected Authenticator getFallBackAuthenticator() {
return new PseudoDelegationTokenAuthenticator();
}
});
}
}
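
A hedged sketch (not part of this patch) of using this authenticator with AuthenticatedURL for an ordinary authenticated request; the endpoint URL is a placeholder.

// Illustrative fragment; SPNEGO is attempted first, and the pseudo mechanism
// is the fallback when the endpoint does not trigger SPNEGO.
AuthenticatedURL.Token token = new AuthenticatedURL.Token();
AuthenticatedURL aUrl =
    new AuthenticatedURL(new KerberosDelegationTokenAuthenticator());
HttpURLConnection conn =
    aUrl.openConnection(new URL("http://localhost:14000/service"), token);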

View File

@ -0,0 +1,55 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
/**
 * An {@link AuthenticationHandler} that implements Hadoop's pseudo/simple
 * authentication mechanism for HTTP and supports Delegation Token
 * functionality.
 * <p/>
 * In addition to the {@link PseudoAuthenticationHandler} configuration
 * properties, this handler supports:
* <ul>
* <li>simple.delegation-token.token-kind: the token kind for generated tokens
* (no default, required property).</li>
* <li>simple.delegation-token.update-interval.sec: secret manager master key
* update interval in seconds (default 1 day).</li>
* <li>simple.delegation-token.max-lifetime.sec: maximum life of a delegation
* token in seconds (default 7 days).</li>
* <li>simple.delegation-token.renewal-interval.sec: renewal interval for
* delegation tokens in seconds (default 1 day).</li>
* <li>simple.delegation-token.removal-scan-interval.sec: delegation tokens
* removal scan interval in seconds (default 1 hour).</li>
* </ul>
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class PseudoDelegationTokenAuthenticationHandler
extends DelegationTokenAuthenticationHandler {
public PseudoDelegationTokenAuthenticationHandler() {
super(new PseudoAuthenticationHandler(PseudoAuthenticationHandler.TYPE +
TYPE_POSTFIX));
}
}

View File

@ -0,0 +1,54 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
import java.io.IOException;
/**
* The <code>PseudoDelegationTokenAuthenticator</code> provides support for
* Hadoop's pseudo authentication mechanism that accepts
* the user name specified as a query string parameter and support for Hadoop
* Delegation Token operations.
* <p/>
* This mimics the model of Hadoop Simple authentication trusting the
* {@link UserGroupInformation#getCurrentUser()} value.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class PseudoDelegationTokenAuthenticator
extends DelegationTokenAuthenticator {
public PseudoDelegationTokenAuthenticator() {
super(new PseudoAuthenticator() {
@Override
protected String getUserName() {
try {
return UserGroupInformation.getCurrentUser().getShortUserName();
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
});
}
}
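
As a hedged sketch (not part of this patch), the same delegation token operations work over simple authentication; the endpoint and renewer are placeholders.

// Illustrative fragment; the user name sent to the server is the current
// UGI's short user name.
PseudoDelegationTokenAuthenticator auth =
    new PseudoDelegationTokenAuthenticator();
AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
Token<AbstractDelegationTokenIdentifier> dToken = auth.getDelegationToken(
    new URL("http://localhost:14000/service"), authToken, "renewer-user");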

View File

@ -0,0 +1,59 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.List;
/**
* Servlet utility methods.
*/
@InterfaceAudience.Private
class ServletUtils {
private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
/**
 * Extract a query string parameter without triggering HTTP parameter
 * processing by the servlet container.
*
* @param request the request
* @param name the parameter to get the value.
* @return the parameter value, or <code>NULL</code> if the parameter is not
* defined.
* @throws IOException thrown if there was an error parsing the query string.
*/
public static String getParameter(HttpServletRequest request, String name)
throws IOException {
List<NameValuePair> list = URLEncodedUtils.parse(request.getQueryString(),
UTF8_CHARSET);
if (list != null) {
for (NameValuePair nv : list) {
if (name.equals(nv.getName())) {
return nv.getValue();
}
}
}
return null;
}
}
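
A one-line usage sketch (not part of this patch), mirroring how the handlers above call this helper; request is assumed to be an HttpServletRequest in scope.

// Illustrative fragment; reads the "op" query parameter without making the
// container parse request parameters (so a POST body stays untouched).
String op = ServletUtils.getParameter(request,
    KerberosDelegationTokenAuthenticator.OP_PARAM);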

View File

@ -178,6 +178,14 @@ public void testVariableSubstitution() throws IOException {
    // check that expansion also occurs for getInt()
    assertTrue(conf.getInt("intvar", -1) == 42);
    assertTrue(conf.getInt("my.int", -1) == 42);
Map<String, String> results = conf.getValByRegex("^my.*file$");
assertTrue(results.keySet().contains("my.relfile"));
assertTrue(results.keySet().contains("my.fullfile"));
assertTrue(results.keySet().contains("my.file"));
assertEquals(-1, results.get("my.relfile").indexOf("${"));
assertEquals(-1, results.get("my.fullfile").indexOf("${"));
assertEquals(-1, results.get("my.file").indexOf("${"));
  }
  public void testFinalParam() throws IOException {

View File

@ -220,11 +220,76 @@ public void testJksProvider() throws Exception {
    assertTrue(s.getPermission().toString().equals("rwx------"));
    assertTrue(file + " should exist", file.isFile());
// Corrupt file and Check if JKS can reload from _OLD file
File oldFile = new File(file.getPath() + "_OLD");
file.renameTo(oldFile);
file.delete();
file.createNewFile();
assertTrue(oldFile.exists());
KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
assertTrue(file.exists());
assertTrue(oldFile + "should be deleted", !oldFile.exists());
verifyAfterReload(file, provider);
assertTrue(!oldFile.exists());
// _NEW and current file should not exist together
File newFile = new File(file.getPath() + "_NEW");
newFile.createNewFile();
try {
provider = KeyProviderFactory.getProviders(conf).get(0);
Assert.fail("_NEW and current file should not exist together !!");
} catch (Exception e) {
// Ignore
} finally {
if (newFile.exists()) {
newFile.delete();
}
}
// Load from _NEW file
file.renameTo(newFile);
file.delete();
try {
provider = KeyProviderFactory.getProviders(conf).get(0);
Assert.assertFalse(newFile.exists());
Assert.assertFalse(oldFile.exists());
} catch (Exception e) {
Assert.fail("JKS should load from _NEW file !!");
// Ignore
}
verifyAfterReload(file, provider);
// _NEW exists but corrupt.. must load from _OLD
newFile.createNewFile();
file.renameTo(oldFile);
file.delete();
try {
provider = KeyProviderFactory.getProviders(conf).get(0);
Assert.assertFalse(newFile.exists());
Assert.assertFalse(oldFile.exists());
} catch (Exception e) {
Assert.fail("JKS should load from _OLD file !!");
// Ignore
} finally {
if (newFile.exists()) {
newFile.delete();
}
}
verifyAfterReload(file, provider);
    // check permission retention after explicit change
    fs.setPermission(path, new FsPermission("777"));
    checkPermissionRetention(conf, ourUrl, path);
  }
private void verifyAfterReload(File file, KeyProvider provider)
throws IOException {
List<String> existingKeys = provider.getKeys();
assertTrue(existingKeys.contains("key4"));
assertTrue(existingKeys.contains("key3"));
assertTrue(file.exists());
}
  public void checkPermissionRetention(Configuration conf, String ourUrl, Path path) throws Exception {
    KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
    // let's add a new key and flush and check that permissions are still set to 777

View File

@ -26,13 +26,11 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.AvroTestUtil;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Shell;
import com.google.common.base.Joiner;
import junit.framework.TestCase;
import static org.junit.Assert.fail;
public class TestPath extends TestCase {
  /**
@ -307,28 +305,6 @@ public void testURI() throws URISyntaxException, IOException {
    // if the child uri is absolute path
    assertEquals("foo://bar/fud#boo", new Path(new Path(new URI(
        "foo://bar/baz#bud")), new Path(new URI("/fud#boo"))).toString());
// empty URI
URI uri3 = new URI("");
assertEquals("", uri3.toString());
try {
path = new Path(uri3);
fail("Expected exception for empty URI");
} catch (IllegalArgumentException e) {
// expect to receive an IllegalArgumentException
GenericTestUtils.assertExceptionContains("Can not create a Path"
+ " from an empty URI", e);
}
// null URI
uri3 = null;
try {
path = new Path(uri3);
fail("Expected exception for null URI");
} catch (IllegalArgumentException e) {
// expect to receive an IllegalArgumentException
GenericTestUtils.assertExceptionContains("Can not create a Path"
+ " from a null URI", e);
}
  }
/** Test URIs created from Path objects */ /** Test URIs created from Path objects */

View File

@ -0,0 +1,326 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.Token;
import org.codehaus.jackson.map.ObjectMapper;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.Map;
import java.util.Properties;
public class TestDelegationTokenAuthenticationHandlerWithMocks {
public static class MockDelegationTokenAuthenticationHandler
extends DelegationTokenAuthenticationHandler {
public MockDelegationTokenAuthenticationHandler() {
super(new AuthenticationHandler() {
@Override
public String getType() {
return "T";
}
@Override
public void init(Properties config) throws ServletException {
}
@Override
public void destroy() {
}
@Override
public boolean managementOperation(AuthenticationToken token,
HttpServletRequest request, HttpServletResponse response)
throws IOException, AuthenticationException {
return false;
}
@Override
public AuthenticationToken authenticate(HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, "mock");
return null;
}
});
}
}
private DelegationTokenAuthenticationHandler handler;
@Before
public void setUp() throws Exception {
Properties conf = new Properties();
conf.put(KerberosDelegationTokenAuthenticationHandler.TOKEN_KIND, "foo");
handler = new MockDelegationTokenAuthenticationHandler();
handler.initTokenManager(conf);
}
@After
public void cleanUp() {
handler.destroy();
}
@Test
public void testManagementOperations() throws Exception {
testNonManagementOperation();
testManagementOperationErrors();
testGetToken(null, new Text("foo"));
testGetToken("bar", new Text("foo"));
testCancelToken();
testRenewToken();
}
private void testNonManagementOperation() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getParameter(
DelegationTokenAuthenticator.OP_PARAM)).thenReturn(null);
Assert.assertTrue(handler.managementOperation(null, request, null));
Mockito.when(request.getParameter(
DelegationTokenAuthenticator.OP_PARAM)).thenReturn("CREATE");
Assert.assertTrue(handler.managementOperation(null, request, null));
}
private void testManagementOperationErrors() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).thenReturn(
DelegationTokenAuthenticator.OP_PARAM + "=" +
DelegationTokenAuthenticator.DelegationTokenOperation.
GETDELEGATIONTOKEN.toString()
);
Mockito.when(request.getMethod()).thenReturn("FOO");
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.startsWith("Wrong HTTP method"));
Mockito.reset(response);
Mockito.when(request.getMethod()).thenReturn(
DelegationTokenAuthenticator.DelegationTokenOperation.
GETDELEGATIONTOKEN.getHttpMethod()
);
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).setStatus(
Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED));
Mockito.verify(response).setHeader(
Mockito.eq(KerberosAuthenticator.WWW_AUTHENTICATE),
Mockito.eq("mock"));
}
private void testGetToken(String renewer, Text expectedTokenKind)
throws Exception {
DelegationTokenAuthenticator.DelegationTokenOperation op =
DelegationTokenAuthenticator.DelegationTokenOperation.
GETDELEGATIONTOKEN;
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).
thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString());
Mockito.when(request.getMethod()).thenReturn(op.getHttpMethod());
AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
Mockito.when(token.getUserName()).thenReturn("user");
Mockito.when(response.getWriter()).thenReturn(new PrintWriter(
new StringWriter()));
Assert.assertFalse(handler.managementOperation(token, request, response));
Mockito.when(request.getQueryString()).
thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString() +
"&" + DelegationTokenAuthenticator.RENEWER_PARAM + "=" + renewer);
Mockito.reset(response);
Mockito.reset(token);
Mockito.when(token.getUserName()).thenReturn("user");
StringWriter writer = new StringWriter();
PrintWriter pwriter = new PrintWriter(writer);
Mockito.when(response.getWriter()).thenReturn(pwriter);
Assert.assertFalse(handler.managementOperation(token, request, response));
if (renewer == null) {
Mockito.verify(token).getUserName();
} else {
Mockito.verify(token).getUserName();
}
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
Mockito.verify(response).setContentType(MediaType.APPLICATION_JSON);
pwriter.close();
String responseOutput = writer.toString();
String tokenLabel = DelegationTokenAuthenticator.
DELEGATION_TOKEN_JSON;
Assert.assertTrue(responseOutput.contains(tokenLabel));
Assert.assertTrue(responseOutput.contains(
DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON));
ObjectMapper jsonMapper = new ObjectMapper();
Map json = jsonMapper.readValue(responseOutput, Map.class);
json = (Map) json.get(tokenLabel);
String tokenStr;
tokenStr = (String) json.get(DelegationTokenAuthenticator.
DELEGATION_TOKEN_URL_STRING_JSON);
Token<DelegationTokenIdentifier> dt = new Token<DelegationTokenIdentifier>();
dt.decodeFromUrlString(tokenStr);
handler.getTokenManager().verifyToken(dt);
Assert.assertEquals(expectedTokenKind, dt.getKind());
}
private void testCancelToken() throws Exception {
DelegationTokenAuthenticator.DelegationTokenOperation op =
DelegationTokenAuthenticator.DelegationTokenOperation.
CANCELDELEGATIONTOKEN;
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).thenReturn(
DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString());
Mockito.when(request.getMethod()).
thenReturn(op.getHttpMethod());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.contains("requires the parameter [token]"));
Mockito.reset(response);
Token<DelegationTokenIdentifier> token =
handler.getTokenManager().createToken(
UserGroupInformation.getCurrentUser(), "foo");
Mockito.when(request.getQueryString()).thenReturn(
DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString() + "&" +
DelegationTokenAuthenticator.TOKEN_PARAM + "=" +
token.encodeToUrlString());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
try {
handler.getTokenManager().verifyToken(token);
Assert.fail();
} catch (SecretManager.InvalidToken ex) {
//NOP
} catch (Throwable ex) {
Assert.fail();
}
}
private void testRenewToken() throws Exception {
DelegationTokenAuthenticator.DelegationTokenOperation op =
DelegationTokenAuthenticator.DelegationTokenOperation.
RENEWDELEGATIONTOKEN;
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).
thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString());
Mockito.when(request.getMethod()).
thenReturn(op.getHttpMethod());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).setStatus(
Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED));
Mockito.verify(response).setHeader(Mockito.eq(
KerberosAuthenticator.WWW_AUTHENTICATE),
Mockito.eq("mock")
);
Mockito.reset(response);
AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
Mockito.when(token.getUserName()).thenReturn("user");
Assert.assertFalse(handler.managementOperation(token, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.contains("requires the parameter [token]"));
Mockito.reset(response);
StringWriter writer = new StringWriter();
PrintWriter pwriter = new PrintWriter(writer);
Mockito.when(response.getWriter()).thenReturn(pwriter);
Token<DelegationTokenIdentifier> dToken =
handler.getTokenManager().createToken(
UserGroupInformation.getCurrentUser(), "user");
Mockito.when(request.getQueryString()).
thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString() +
"&" + DelegationTokenAuthenticator.TOKEN_PARAM + "=" +
dToken.encodeToUrlString());
Assert.assertFalse(handler.managementOperation(token, request, response));
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
pwriter.close();
Assert.assertTrue(writer.toString().contains("long"));
handler.getTokenManager().verifyToken(dToken);
}
@Test
public void testAuthenticate() throws Exception {
testValidDelegationToken();
testInvalidDelegationToken();
}
private void testValidDelegationToken() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Token<DelegationTokenIdentifier> dToken =
handler.getTokenManager().createToken(
UserGroupInformation.getCurrentUser(), "user");
Mockito.when(request.getQueryString()).thenReturn(
DelegationTokenAuthenticator.DELEGATION_PARAM + "=" +
dToken.encodeToUrlString());
AuthenticationToken token = handler.authenticate(request, response);
Assert.assertEquals(UserGroupInformation.getCurrentUser().
getShortUserName(), token.getUserName());
Assert.assertEquals(0, token.getExpires());
Assert.assertEquals(handler.getType(),
token.getType());
Assert.assertTrue(token.isExpired());
}
private void testInvalidDelegationToken() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).thenReturn(
DelegationTokenAuthenticator.DELEGATION_PARAM + "=invalid");
try {
handler.authenticate(request, response);
Assert.fail();
} catch (AuthenticationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail();
}
}
}

View File

@ -0,0 +1,59 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.StringUtils;
import org.junit.Assert;
import org.junit.Test;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.Arrays;
public class TestDelegationTokenManager {
private static final long DAY_IN_SECS = 86400;
@Test
public void testDTManager() throws Exception {
DelegationTokenManager tm = new DelegationTokenManager(new Text("foo"),
DAY_IN_SECS, DAY_IN_SECS, DAY_IN_SECS, DAY_IN_SECS);
tm.init();
Token<DelegationTokenIdentifier> token =
tm.createToken(UserGroupInformation.getCurrentUser(), "foo");
Assert.assertNotNull(token);
tm.verifyToken(token);
Assert.assertTrue(tm.renewToken(token, "foo") > System.currentTimeMillis());
tm.cancelToken(token, "foo");
try {
tm.verifyToken(token);
Assert.fail();
} catch (IOException ex) {
//NOP
} catch (Exception ex) {
Assert.fail();
}
tm.destroy();
}
}

View File

@ -0,0 +1,869 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
import org.apache.hadoop.security.authentication.util.KerberosUtil;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.codehaus.jackson.map.ObjectMapper;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.FilterHolder;
import org.mortbay.jetty.servlet.ServletHolder;
import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import javax.security.auth.login.LoginContext;
import javax.servlet.Filter;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.File;
import java.io.IOException;
import java.io.Writer;
import java.net.HttpURLConnection;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.URL;
import java.security.Principal;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Callable;
public class TestWebDelegationToken {
private static final String OK_USER = "ok-user";
private static final String FAIL_USER = "fail-user";
private static final String FOO_USER = "foo";
private Server jetty;
public static class DummyAuthenticationHandler
implements AuthenticationHandler {
@Override
public String getType() {
return "dummy";
}
@Override
public void init(Properties config) throws ServletException {
}
@Override
public void destroy() {
}
@Override
public boolean managementOperation(AuthenticationToken token,
HttpServletRequest request, HttpServletResponse response)
throws IOException, AuthenticationException {
return false;
}
@Override
public AuthenticationToken authenticate(HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token = null;
if (request.getParameter("authenticated") != null) {
token = new AuthenticationToken(request.getParameter("authenticated"),
"U", "test");
} else {
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, "dummy");
}
return token;
}
}
public static class DummyDelegationTokenAuthenticationHandler extends
DelegationTokenAuthenticationHandler {
public DummyDelegationTokenAuthenticationHandler() {
super(new DummyAuthenticationHandler());
}
@Override
public void init(Properties config) throws ServletException {
Properties conf = new Properties(config);
conf.setProperty(TOKEN_KIND, "token-kind");
initTokenManager(conf);
}
}
public static class AFilter extends DelegationTokenAuthenticationFilter {
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) {
Properties conf = new Properties();
conf.setProperty(AUTH_TYPE,
DummyDelegationTokenAuthenticationHandler.class.getName());
return conf;
}
}
public static class PingServlet extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
resp.setStatus(HttpServletResponse.SC_OK);
resp.getWriter().write("ping");
}
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
Writer writer = resp.getWriter();
writer.write("ping: ");
IOUtils.copy(req.getReader(), writer);
resp.setStatus(HttpServletResponse.SC_OK);
}
}
protected Server createJettyServer() {
try {
InetAddress localhost = InetAddress.getLocalHost();
ServerSocket ss = new ServerSocket(0, 50, localhost);
int port = ss.getLocalPort();
ss.close();
jetty = new Server(0);
jetty.getConnectors()[0].setHost("localhost");
jetty.getConnectors()[0].setPort(port);
return jetty;
} catch (Exception ex) {
throw new RuntimeException("Could not setup Jetty: " + ex.getMessage(),
ex);
}
}
protected String getJettyURL() {
Connector c = jetty.getConnectors()[0];
return "http://" + c.getHost() + ":" + c.getPort();
}
@Before
public void setUp() throws Exception {
// resetting hadoop security to simple
org.apache.hadoop.conf.Configuration conf =
new org.apache.hadoop.conf.Configuration();
UserGroupInformation.setConfiguration(conf);
jetty = createJettyServer();
}
@After
public void cleanUp() throws Exception {
jetty.stop();
// resetting hadoop security to simple
org.apache.hadoop.conf.Configuration conf =
new org.apache.hadoop.conf.Configuration();
UserGroupInformation.setConfiguration(conf);
}
protected Server getJetty() {
return jetty;
}
@Test
public void testRawHttpCalls() throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(AFilter.class), "/*", 0);
context.addServlet(new ServletHolder(PingServlet.class), "/bar");
try {
jetty.start();
URL nonAuthURL = new URL(getJettyURL() + "/foo/bar");
URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
// unauthenticated access to URL
HttpURLConnection conn = (HttpURLConnection) nonAuthURL.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
conn.getResponseCode());
// authenticated access to URL
conn = (HttpURLConnection) authURL.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
// unauthenticated access to get delegation token
URL url = new URL(nonAuthURL.toExternalForm() + "?op=GETDELEGATIONTOKEN");
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
conn.getResponseCode());
// authenticated access to get delegation token
url = new URL(authURL.toExternalForm() +
"&op=GETDELEGATIONTOKEN&renewer=foo");
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
ObjectMapper mapper = new ObjectMapper();
Map map = mapper.readValue(conn.getInputStream(), Map.class);
String dt = (String) ((Map) map.get("Token")).get("urlString");
Assert.assertNotNull(dt);
// delegation token access to URL
url = new URL(nonAuthURL.toExternalForm() + "?delegation=" + dt);
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
// delegation token and authenticated access to URL
url = new URL(authURL.toExternalForm() + "&delegation=" + dt);
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
// renew delegation token, unauthenticated access to URL
url = new URL(nonAuthURL.toExternalForm() +
"?op=RENEWDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
conn.getResponseCode());
// renew delegation token, authenticated access to URL
url = new URL(authURL.toExternalForm() +
"&op=RENEWDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
// renew delegation token, authenticated access to URL, not renewer
url = new URL(getJettyURL() +
"/foo/bar?authenticated=bar&op=RENEWDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
conn.getResponseCode());
// cancel delegation token, unauthenticated access to URL
url = new URL(nonAuthURL.toExternalForm() +
"?op=CANCELDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
// cancel canceled delegation token, unauthenticated access to URL
url = new URL(nonAuthURL.toExternalForm() +
"?op=CANCELDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_NOT_FOUND,
conn.getResponseCode());
// get new delegation token
url = new URL(authURL.toExternalForm() +
"&op=GETDELEGATIONTOKEN&renewer=foo");
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
mapper = new ObjectMapper();
map = mapper.readValue(conn.getInputStream(), Map.class);
dt = (String) ((Map) map.get("Token")).get("urlString");
Assert.assertNotNull(dt);
// cancel delegation token, authenticated access to URL
url = new URL(authURL.toExternalForm() +
"&op=CANCELDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
} finally {
jetty.stop();
}
}
@Test
public void testDelegationTokenAuthenticatorCalls() throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(AFilter.class), "/*", 0);
context.addServlet(new ServletHolder(PingServlet.class), "/bar");
try {
jetty.start();
URL nonAuthURL = new URL(getJettyURL() + "/foo/bar");
URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
URL authURL2 = new URL(getJettyURL() + "/foo/bar?authenticated=bar");
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
try {
aUrl.getDelegationToken(nonAuthURL, token, FOO_USER);
Assert.fail();
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage().contains("401"));
}
aUrl.getDelegationToken(authURL, token, FOO_USER);
Assert.assertNotNull(token.getDelegationToken());
Assert.assertEquals(new Text("token-kind"),
token.getDelegationToken().getKind());
aUrl.renewDelegationToken(authURL, token);
try {
aUrl.renewDelegationToken(nonAuthURL, token);
Assert.fail();
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage().contains("401"));
}
aUrl.getDelegationToken(authURL, token, FOO_USER);
try {
aUrl.renewDelegationToken(authURL2, token);
Assert.fail();
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage().contains("403"));
}
aUrl.getDelegationToken(authURL, token, FOO_USER);
aUrl.cancelDelegationToken(authURL, token);
aUrl.getDelegationToken(authURL, token, FOO_USER);
aUrl.cancelDelegationToken(nonAuthURL, token);
aUrl.getDelegationToken(authURL, token, FOO_USER);
try {
aUrl.renewDelegationToken(nonAuthURL, token);
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage().contains("401"));
}
} finally {
jetty.stop();
}
}
private static class DummyDelegationTokenSecretManager
extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
public DummyDelegationTokenSecretManager() {
super(10000, 10000, 10000, 10000);
}
@Override
public DelegationTokenIdentifier createIdentifier() {
return new DelegationTokenIdentifier(new Text("fooKind"));
}
}
@Test
public void testExternalDelegationTokenSecretManager() throws Exception {
DummyDelegationTokenSecretManager secretMgr
= new DummyDelegationTokenSecretManager();
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(AFilter.class), "/*", 0);
context.addServlet(new ServletHolder(PingServlet.class), "/bar");
try {
secretMgr.startThreads();
context.setAttribute(DelegationTokenAuthenticationFilter.
DELEGATION_TOKEN_SECRET_MANAGER_ATTR, secretMgr);
jetty.start();
URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
aUrl.getDelegationToken(authURL, token, FOO_USER);
Assert.assertNotNull(token.getDelegationToken());
Assert.assertEquals(new Text("fooKind"),
token.getDelegationToken().getKind());
} finally {
jetty.stop();
secretMgr.stopThreads();
}
}
public static class NoDTFilter extends AuthenticationFilter {
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) {
Properties conf = new Properties();
conf.setProperty(AUTH_TYPE, PseudoAuthenticationHandler.TYPE);
return conf;
}
}
public static class NoDTHandlerDTAFilter
extends DelegationTokenAuthenticationFilter {
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) {
Properties conf = new Properties();
conf.setProperty(AUTH_TYPE, PseudoAuthenticationHandler.TYPE);
return conf;
}
}
public static class UserServlet extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
resp.setStatus(HttpServletResponse.SC_OK);
resp.getWriter().write(req.getUserPrincipal().getName());
}
}
@Test
public void testDelegationTokenAuthenticationURLWithNoDTFilter()
throws Exception {
testDelegationTokenAuthenticatedURLWithNoDT(NoDTFilter.class);
}
@Test
public void testDelegationTokenAuthenticationURLWithNoDTHandler()
throws Exception {
testDelegationTokenAuthenticatedURLWithNoDT(NoDTHandlerDTAFilter.class);
}
// We are also implicitly testing the KerberosDelegationTokenAuthenticator
// fallback here.
private void testDelegationTokenAuthenticatedURLWithNoDT(
Class<? extends Filter> filterClass) throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(filterClass), "/*", 0);
context.addServlet(new ServletHolder(UserServlet.class), "/bar");
try {
jetty.start();
final URL url = new URL(getJettyURL() + "/foo/bar");
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER);
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
HttpURLConnection conn = aUrl.openConnection(url, token);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
List<String> ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals(FOO_USER, ret.get(0));
try {
aUrl.getDelegationToken(url, token, FOO_USER);
Assert.fail();
} catch (AuthenticationException ex) {
Assert.assertTrue(ex.getMessage().contains(
"delegation token operation"));
}
return null;
}
});
} finally {
jetty.stop();
}
}
public static class PseudoDTAFilter
extends DelegationTokenAuthenticationFilter {
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) {
Properties conf = new Properties();
conf.setProperty(AUTH_TYPE,
PseudoDelegationTokenAuthenticationHandler.class.getName());
conf.setProperty(DelegationTokenAuthenticationHandler.TOKEN_KIND,
"token-kind");
return conf;
}
@Override
protected org.apache.hadoop.conf.Configuration getProxyuserConfiguration(
FilterConfig filterConfig) throws ServletException {
org.apache.hadoop.conf.Configuration conf =
new org.apache.hadoop.conf.Configuration(false);
conf.set("proxyuser.foo.users", OK_USER);
conf.set("proxyuser.foo.hosts", "localhost");
return conf;
}
}
@Test
public void testFallbackToPseudoDelegationTokenAuthenticator()
throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(PseudoDTAFilter.class), "/*", 0);
context.addServlet(new ServletHolder(UserServlet.class), "/bar");
try {
jetty.start();
final URL url = new URL(getJettyURL() + "/foo/bar");
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER);
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
HttpURLConnection conn = aUrl.openConnection(url, token);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
List<String> ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals(FOO_USER, ret.get(0));
aUrl.getDelegationToken(url, token, FOO_USER);
Assert.assertNotNull(token.getDelegationToken());
Assert.assertEquals(new Text("token-kind"),
token.getDelegationToken().getKind());
return null;
}
});
} finally {
jetty.stop();
}
}
public static class KDTAFilter extends DelegationTokenAuthenticationFilter {
static String keytabFile;
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) {
Properties conf = new Properties();
conf.setProperty(AUTH_TYPE,
KerberosDelegationTokenAuthenticationHandler.class.getName());
conf.setProperty(KerberosAuthenticationHandler.KEYTAB, keytabFile);
conf.setProperty(KerberosAuthenticationHandler.PRINCIPAL,
"HTTP/localhost");
conf.setProperty(KerberosDelegationTokenAuthenticationHandler.TOKEN_KIND,
"token-kind");
return conf;
}
}
private static class KerberosConfiguration extends Configuration {
private String principal;
private String keytab;
public KerberosConfiguration(String principal, String keytab) {
this.principal = principal;
this.keytab = keytab;
}
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
Map<String, String> options = new HashMap<String, String>();
options.put("principal", principal);
options.put("keyTab", keytab);
options.put("useKeyTab", "true");
options.put("storeKey", "true");
options.put("doNotPrompt", "true");
options.put("useTicketCache", "true");
options.put("renewTGT", "true");
options.put("refreshKrb5Config", "true");
options.put("isInitiator", "true");
String ticketCache = System.getenv("KRB5CCNAME");
if (ticketCache != null) {
options.put("ticketCache", ticketCache);
}
options.put("debug", "true");
return new AppConfigurationEntry[]{
new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
options),};
}
}
public static <T> T doAsKerberosUser(String principal, String keytab,
final Callable<T> callable) throws Exception {
LoginContext loginContext = null;
try {
Set<Principal> principals = new HashSet<Principal>();
principals.add(new KerberosPrincipal(principal));
Subject subject = new Subject(false, principals, new HashSet<Object>(),
new HashSet<Object>());
loginContext = new LoginContext("", subject, null,
new KerberosConfiguration(principal, keytab));
loginContext.login();
subject = loginContext.getSubject();
return Subject.doAs(subject, new PrivilegedExceptionAction<T>() {
@Override
public T run() throws Exception {
return callable.call();
}
});
} catch (PrivilegedActionException ex) {
throw ex.getException();
} finally {
if (loginContext != null) {
loginContext.logout();
}
}
}
@Test
public void testKerberosDelegationTokenAuthenticator() throws Exception {
// setting hadoop security to kerberos
org.apache.hadoop.conf.Configuration conf =
new org.apache.hadoop.conf.Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
File testDir = new File("target/" + UUID.randomUUID().toString());
Assert.assertTrue(testDir.mkdirs());
MiniKdc kdc = new MiniKdc(MiniKdc.createConf(), testDir);
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(KDTAFilter.class), "/*", 0);
context.addServlet(new ServletHolder(UserServlet.class), "/bar");
try {
kdc.start();
File keytabFile = new File(testDir, "test.keytab");
kdc.createPrincipal(keytabFile, "client", "HTTP/localhost");
KDTAFilter.keytabFile = keytabFile.getAbsolutePath();
jetty.start();
final DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
final DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
final URL url = new URL(getJettyURL() + "/foo/bar");
try {
aUrl.getDelegationToken(url, token, FOO_USER);
Assert.fail();
} catch (AuthenticationException ex) {
Assert.assertTrue(ex.getMessage().contains("GSSException"));
}
doAsKerberosUser("client", keytabFile.getAbsolutePath(),
new Callable<Void>() {
@Override
public Void call() throws Exception {
aUrl.getDelegationToken(url, token, "client");
Assert.assertNotNull(token.getDelegationToken());
aUrl.renewDelegationToken(url, token);
Assert.assertNotNull(token.getDelegationToken());
aUrl.getDelegationToken(url, token, FOO_USER);
Assert.assertNotNull(token.getDelegationToken());
try {
aUrl.renewDelegationToken(url, token);
Assert.fail();
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage().contains("403"));
}
aUrl.getDelegationToken(url, token, FOO_USER);
aUrl.cancelDelegationToken(url, token);
Assert.assertNull(token.getDelegationToken());
return null;
}
});
} finally {
jetty.stop();
kdc.stop();
}
}
@Test
public void testProxyUser() throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(PseudoDTAFilter.class), "/*", 0);
context.addServlet(new ServletHolder(UserServlet.class), "/bar");
try {
jetty.start();
final URL url = new URL(getJettyURL() + "/foo/bar");
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER);
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
// proxyuser using authentication handler authentication
HttpURLConnection conn = aUrl.openConnection(url, token, OK_USER);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
List<String> ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals(OK_USER, ret.get(0));
// unauthorized proxy user using authentication handler authentication
conn = aUrl.openConnection(url, token, FAIL_USER);
Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
conn.getResponseCode());
// proxy using delegation token authentication
aUrl.getDelegationToken(url, token, FOO_USER);
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
ugi.addToken(token.getDelegationToken());
token = new DelegationTokenAuthenticatedURL.Token();
// requests using delegation token as auth do not honor doAs
conn = aUrl.openConnection(url, token, OK_USER);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals(FOO_USER, ret.get(0));
return null;
}
});
} finally {
jetty.stop();
}
}
public static class UGIServlet extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
UserGroupInformation ugi = HttpUserGroupInformation.get();
if (ugi != null) {
String ret = "remoteuser=" + req.getRemoteUser() + ":ugi=" +
ugi.getShortUserName();
if (ugi.getAuthenticationMethod() ==
UserGroupInformation.AuthenticationMethod.PROXY) {
ret = "realugi=" + ugi.getRealUser().getShortUserName() + ":" + ret;
}
resp.setStatus(HttpServletResponse.SC_OK);
resp.getWriter().write(ret);
} else {
resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
}
}
}
@Test
public void testHttpUGI() throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(PseudoDTAFilter.class), "/*", 0);
context.addServlet(new ServletHolder(UGIServlet.class), "/bar");
try {
jetty.start();
final URL url = new URL(getJettyURL() + "/foo/bar");
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER);
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
// user foo
HttpURLConnection conn = aUrl.openConnection(url, token);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
List<String> ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals("remoteuser=" + FOO_USER+ ":ugi=" + FOO_USER,
ret.get(0));
// user ok-user via proxyuser foo
conn = aUrl.openConnection(url, token, OK_USER);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals("realugi=" + FOO_USER +":remoteuser=" + OK_USER +
":ugi=" + OK_USER, ret.get(0));
return null;
}
});
} finally {
jetty.stop();
}
}
}
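Taken together, the tests above show the intended client-side flow. A compact sketch of that flow using DelegationTokenAuthenticatedURL, with a placeholder endpoint and renewer name (the real tests run against an embedded Jetty server configured with one of the filters above):

import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;

public class DelegationTokenClientSketch {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:14000/foo/bar");   // placeholder endpoint
    DelegationTokenAuthenticatedURL.Token token =
        new DelegationTokenAuthenticatedURL.Token();
    DelegationTokenAuthenticatedURL aUrl = new DelegationTokenAuthenticatedURL();

    // Authenticates (Kerberos or pseudo, depending on the server-side filter)
    // and opens the connection; the Token side-car caches the auth state.
    HttpURLConnection conn = aUrl.openConnection(url, token);
    System.out.println("HTTP " + conn.getResponseCode());

    // Obtain, renew and cancel a Hadoop delegation token against the same URL.
    aUrl.getDelegationToken(url, token, "example-renewer");
    aUrl.renewDelegationToken(url, token);
    aUrl.cancelDelegationToken(url, token);
  }
}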

View File

@ -47,7 +47,6 @@
import java.net.URI; import java.net.URI;
import java.net.URISyntaxException; import java.net.URISyntaxException;
import java.security.Principal; import java.security.Principal;
import java.text.MessageFormat;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.LinkedList; import java.util.LinkedList;
import java.util.List; import java.util.List;
@ -59,19 +58,14 @@
@Path(KMSRESTConstants.SERVICE_VERSION) @Path(KMSRESTConstants.SERVICE_VERSION)
@InterfaceAudience.Private @InterfaceAudience.Private
public class KMS { public class KMS {
public static final String CREATE_KEY = "CREATE_KEY";
public static final String DELETE_KEY = "DELETE_KEY";
public static final String ROLL_NEW_VERSION = "ROLL_NEW_VERSION";
public static final String GET_KEYS = "GET_KEYS";
public static final String GET_KEYS_METADATA = "GET_KEYS_METADATA";
public static final String GET_KEY_VERSIONS = "GET_KEY_VERSIONS";
public static final String GET_METADATA = "GET_METADATA";
public static final String GET_KEY_VERSION = "GET_KEY_VERSION"; public static enum KMSOp {
public static final String GET_CURRENT_KEY = "GET_CURRENT_KEY"; CREATE_KEY, DELETE_KEY, ROLL_NEW_VERSION,
public static final String GENERATE_EEK = "GENERATE_EEK"; GET_KEYS, GET_KEYS_METADATA,
public static final String DECRYPT_EEK = "DECRYPT_EEK"; GET_KEY_VERSIONS, GET_METADATA, GET_KEY_VERSION, GET_CURRENT_KEY,
GENERATE_EEK, DECRYPT_EEK
}
private KeyProviderCryptoExtension provider; private KeyProviderCryptoExtension provider;
private KMSAudit kmsAudit; private KMSAudit kmsAudit;
@ -91,22 +85,22 @@ private static Principal getPrincipal(SecurityContext securityContext)
private static final String UNAUTHORIZED_MSG_WITH_KEY = private static final String UNAUTHORIZED_MSG_WITH_KEY =
"User:{0} not allowed to do ''{1}'' on ''{2}''"; "User:%s not allowed to do '%s' on '%s'";
private static final String UNAUTHORIZED_MSG_WITHOUT_KEY = private static final String UNAUTHORIZED_MSG_WITHOUT_KEY =
"User:{0} not allowed to do ''{1}''"; "User:%s not allowed to do '%s'";
private void assertAccess(KMSACLs.Type aclType, Principal principal, private void assertAccess(KMSACLs.Type aclType, Principal principal,
String operation) throws AccessControlException { KMSOp operation) throws AccessControlException {
assertAccess(aclType, principal, operation, null); assertAccess(aclType, principal, operation, null);
} }
private void assertAccess(KMSACLs.Type aclType, Principal principal, private void assertAccess(KMSACLs.Type aclType, Principal principal,
String operation, String key) throws AccessControlException { KMSOp operation, String key) throws AccessControlException {
if (!KMSWebApp.getACLs().hasAccess(aclType, principal.getName())) { if (!KMSWebApp.getACLs().hasAccess(aclType, principal.getName())) {
KMSWebApp.getUnauthorizedCallsMeter().mark(); KMSWebApp.getUnauthorizedCallsMeter().mark();
kmsAudit.unauthorized(principal, operation, key); kmsAudit.unauthorized(principal, operation, key);
throw new AuthorizationException(MessageFormat.format( throw new AuthorizationException(String.format(
(key != null) ? UNAUTHORIZED_MSG_WITH_KEY (key != null) ? UNAUTHORIZED_MSG_WITH_KEY
: UNAUTHORIZED_MSG_WITHOUT_KEY, : UNAUTHORIZED_MSG_WITHOUT_KEY,
principal.getName(), operation, key)); principal.getName(), operation, key));
@ -135,7 +129,7 @@ public Response createKey(@Context SecurityContext securityContext,
Principal user = getPrincipal(securityContext); Principal user = getPrincipal(securityContext);
String name = (String) jsonKey.get(KMSRESTConstants.NAME_FIELD); String name = (String) jsonKey.get(KMSRESTConstants.NAME_FIELD);
KMSClientProvider.checkNotEmpty(name, KMSRESTConstants.NAME_FIELD); KMSClientProvider.checkNotEmpty(name, KMSRESTConstants.NAME_FIELD);
assertAccess(KMSACLs.Type.CREATE, user, CREATE_KEY, name); assertAccess(KMSACLs.Type.CREATE, user, KMSOp.CREATE_KEY, name);
String cipher = (String) jsonKey.get(KMSRESTConstants.CIPHER_FIELD); String cipher = (String) jsonKey.get(KMSRESTConstants.CIPHER_FIELD);
String material = (String) jsonKey.get(KMSRESTConstants.MATERIAL_FIELD); String material = (String) jsonKey.get(KMSRESTConstants.MATERIAL_FIELD);
int length = (jsonKey.containsKey(KMSRESTConstants.LENGTH_FIELD)) int length = (jsonKey.containsKey(KMSRESTConstants.LENGTH_FIELD))
@ -146,7 +140,7 @@ public Response createKey(@Context SecurityContext securityContext,
jsonKey.get(KMSRESTConstants.ATTRIBUTES_FIELD); jsonKey.get(KMSRESTConstants.ATTRIBUTES_FIELD);
if (material != null) { if (material != null) {
assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user, assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
CREATE_KEY + " with user provided material", name); KMSOp.CREATE_KEY, name);
} }
KeyProvider.Options options = new KeyProvider.Options( KeyProvider.Options options = new KeyProvider.Options(
KMSWebApp.getConfiguration()); KMSWebApp.getConfiguration());
@ -165,7 +159,7 @@ public Response createKey(@Context SecurityContext securityContext,
provider.flush(); provider.flush();
kmsAudit.ok(user, CREATE_KEY, name, "UserProvidedMaterial:" + kmsAudit.ok(user, KMSOp.CREATE_KEY, name, "UserProvidedMaterial:" +
(material != null) + " Description:" + description); (material != null) + " Description:" + description);
if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) { if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) {
@ -186,12 +180,12 @@ public Response deleteKey(@Context SecurityContext securityContext,
@PathParam("name") String name) throws Exception { @PathParam("name") String name) throws Exception {
KMSWebApp.getAdminCallsMeter().mark(); KMSWebApp.getAdminCallsMeter().mark();
Principal user = getPrincipal(securityContext); Principal user = getPrincipal(securityContext);
assertAccess(KMSACLs.Type.DELETE, user, DELETE_KEY, name); assertAccess(KMSACLs.Type.DELETE, user, KMSOp.DELETE_KEY, name);
KMSClientProvider.checkNotEmpty(name, "name"); KMSClientProvider.checkNotEmpty(name, "name");
provider.deleteKey(name); provider.deleteKey(name);
provider.flush(); provider.flush();
kmsAudit.ok(user, DELETE_KEY, name, ""); kmsAudit.ok(user, KMSOp.DELETE_KEY, name, "");
return Response.ok().build(); return Response.ok().build();
} }
@ -205,13 +199,13 @@ public Response rolloverKey(@Context SecurityContext securityContext,
throws Exception { throws Exception {
KMSWebApp.getAdminCallsMeter().mark(); KMSWebApp.getAdminCallsMeter().mark();
Principal user = getPrincipal(securityContext); Principal user = getPrincipal(securityContext);
assertAccess(KMSACLs.Type.ROLLOVER, user, ROLL_NEW_VERSION, name); assertAccess(KMSACLs.Type.ROLLOVER, user, KMSOp.ROLL_NEW_VERSION, name);
KMSClientProvider.checkNotEmpty(name, "name"); KMSClientProvider.checkNotEmpty(name, "name");
String material = (String) String material = (String)
jsonMaterial.get(KMSRESTConstants.MATERIAL_FIELD); jsonMaterial.get(KMSRESTConstants.MATERIAL_FIELD);
if (material != null) { if (material != null) {
assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user, assertAccess(KMSACLs.Type.SET_KEY_MATERIAL, user,
ROLL_NEW_VERSION + " with user provided material", name); KMSOp.ROLL_NEW_VERSION, name);
} }
KeyProvider.KeyVersion keyVersion = (material != null) KeyProvider.KeyVersion keyVersion = (material != null)
? provider.rollNewVersion(name, Base64.decodeBase64(material)) ? provider.rollNewVersion(name, Base64.decodeBase64(material))
@ -219,7 +213,7 @@ public Response rolloverKey(@Context SecurityContext securityContext,
provider.flush(); provider.flush();
kmsAudit.ok(user, ROLL_NEW_VERSION, name, "UserProvidedMaterial:" + kmsAudit.ok(user, KMSOp.ROLL_NEW_VERSION, name, "UserProvidedMaterial:" +
(material != null) + " NewVersion:" + keyVersion.getVersionName()); (material != null) + " NewVersion:" + keyVersion.getVersionName());
if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) { if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) {
@ -233,15 +227,15 @@ public Response rolloverKey(@Context SecurityContext securityContext,
@Path(KMSRESTConstants.KEYS_METADATA_RESOURCE) @Path(KMSRESTConstants.KEYS_METADATA_RESOURCE)
@Produces(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON)
public Response getKeysMetadata(@Context SecurityContext securityContext, public Response getKeysMetadata(@Context SecurityContext securityContext,
@QueryParam(KMSRESTConstants.KEY_OP) List<String> keyNamesList) @QueryParam(KMSRESTConstants.KEY) List<String> keyNamesList)
throws Exception { throws Exception {
KMSWebApp.getAdminCallsMeter().mark(); KMSWebApp.getAdminCallsMeter().mark();
Principal user = getPrincipal(securityContext); Principal user = getPrincipal(securityContext);
String[] keyNames = keyNamesList.toArray(new String[keyNamesList.size()]); String[] keyNames = keyNamesList.toArray(new String[keyNamesList.size()]);
assertAccess(KMSACLs.Type.GET_METADATA, user, GET_KEYS_METADATA); assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_KEYS_METADATA);
KeyProvider.Metadata[] keysMeta = provider.getKeysMetadata(keyNames); KeyProvider.Metadata[] keysMeta = provider.getKeysMetadata(keyNames);
Object json = KMSServerJSONUtils.toJSON(keyNames, keysMeta); Object json = KMSServerJSONUtils.toJSON(keyNames, keysMeta);
kmsAudit.ok(user, GET_KEYS_METADATA, ""); kmsAudit.ok(user, KMSOp.GET_KEYS_METADATA, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
} }
@ -252,9 +246,9 @@ public Response getKeyNames(@Context SecurityContext securityContext)
throws Exception { throws Exception {
KMSWebApp.getAdminCallsMeter().mark(); KMSWebApp.getAdminCallsMeter().mark();
Principal user = getPrincipal(securityContext); Principal user = getPrincipal(securityContext);
assertAccess(KMSACLs.Type.GET_KEYS, user, GET_KEYS); assertAccess(KMSACLs.Type.GET_KEYS, user, KMSOp.GET_KEYS);
Object json = provider.getKeys(); Object json = provider.getKeys();
kmsAudit.ok(user, GET_KEYS, ""); kmsAudit.ok(user, KMSOp.GET_KEYS, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
} }
@ -276,9 +270,9 @@ public Response getMetadata(@Context SecurityContext securityContext,
Principal user = getPrincipal(securityContext); Principal user = getPrincipal(securityContext);
KMSClientProvider.checkNotEmpty(name, "name"); KMSClientProvider.checkNotEmpty(name, "name");
KMSWebApp.getAdminCallsMeter().mark(); KMSWebApp.getAdminCallsMeter().mark();
assertAccess(KMSACLs.Type.GET_METADATA, user, GET_METADATA, name); assertAccess(KMSACLs.Type.GET_METADATA, user, KMSOp.GET_METADATA, name);
Object json = KMSServerJSONUtils.toJSON(name, provider.getMetadata(name)); Object json = KMSServerJSONUtils.toJSON(name, provider.getMetadata(name));
kmsAudit.ok(user, GET_METADATA, name, ""); kmsAudit.ok(user, KMSOp.GET_METADATA, name, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
} }
@ -292,9 +286,9 @@ public Response getCurrentVersion(@Context SecurityContext securityContext,
Principal user = getPrincipal(securityContext); Principal user = getPrincipal(securityContext);
KMSClientProvider.checkNotEmpty(name, "name"); KMSClientProvider.checkNotEmpty(name, "name");
KMSWebApp.getKeyCallsMeter().mark(); KMSWebApp.getKeyCallsMeter().mark();
assertAccess(KMSACLs.Type.GET, user, GET_CURRENT_KEY, name); assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_CURRENT_KEY, name);
Object json = KMSServerJSONUtils.toJSON(provider.getCurrentKey(name)); Object json = KMSServerJSONUtils.toJSON(provider.getCurrentKey(name));
kmsAudit.ok(user, GET_CURRENT_KEY, name, ""); kmsAudit.ok(user, KMSOp.GET_CURRENT_KEY, name, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
} }
@ -308,9 +302,9 @@ public Response getKeyVersion(@Context SecurityContext securityContext,
KMSClientProvider.checkNotEmpty(versionName, "versionName"); KMSClientProvider.checkNotEmpty(versionName, "versionName");
KMSWebApp.getKeyCallsMeter().mark(); KMSWebApp.getKeyCallsMeter().mark();
KeyVersion keyVersion = provider.getKeyVersion(versionName); KeyVersion keyVersion = provider.getKeyVersion(versionName);
assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSION); assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSION);
if (keyVersion != null) { if (keyVersion != null) {
kmsAudit.ok(user, GET_KEY_VERSION, keyVersion.getName(), ""); kmsAudit.ok(user, KMSOp.GET_KEY_VERSION, keyVersion.getName(), "");
} }
Object json = KMSServerJSONUtils.toJSON(keyVersion); Object json = KMSServerJSONUtils.toJSON(keyVersion);
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
@ -334,7 +328,7 @@ public Response generateEncryptedKeys(
Object retJSON; Object retJSON;
if (edekOp.equals(KMSRESTConstants.EEK_GENERATE)) { if (edekOp.equals(KMSRESTConstants.EEK_GENERATE)) {
assertAccess(KMSACLs.Type.GENERATE_EEK, user, GENERATE_EEK, name); assertAccess(KMSACLs.Type.GENERATE_EEK, user, KMSOp.GENERATE_EEK, name);
List<EncryptedKeyVersion> retEdeks = List<EncryptedKeyVersion> retEdeks =
new LinkedList<EncryptedKeyVersion>(); new LinkedList<EncryptedKeyVersion>();
@ -345,7 +339,7 @@ public Response generateEncryptedKeys(
} catch (Exception e) { } catch (Exception e) {
throw new IOException(e); throw new IOException(e);
} }
kmsAudit.ok(user, GENERATE_EEK, name, ""); kmsAudit.ok(user, KMSOp.GENERATE_EEK, name, "");
retJSON = new ArrayList(); retJSON = new ArrayList();
for (EncryptedKeyVersion edek : retEdeks) { for (EncryptedKeyVersion edek : retEdeks) {
((ArrayList)retJSON).add(KMSServerJSONUtils.toJSON(edek)); ((ArrayList)retJSON).add(KMSServerJSONUtils.toJSON(edek));
@ -380,7 +374,7 @@ public Response decryptEncryptedKey(@Context SecurityContext securityContext,
(String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD); (String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD);
Object retJSON; Object retJSON;
if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) { if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) {
assertAccess(KMSACLs.Type.DECRYPT_EEK, user, DECRYPT_EEK, keyName); assertAccess(KMSACLs.Type.DECRYPT_EEK, user, KMSOp.DECRYPT_EEK, keyName);
KMSClientProvider.checkNotNull(ivStr, KMSRESTConstants.IV_FIELD); KMSClientProvider.checkNotNull(ivStr, KMSRESTConstants.IV_FIELD);
byte[] iv = Base64.decodeBase64(ivStr); byte[] iv = Base64.decodeBase64(ivStr);
KMSClientProvider.checkNotNull(encMaterialStr, KMSClientProvider.checkNotNull(encMaterialStr,
@ -391,7 +385,7 @@ public Response decryptEncryptedKey(@Context SecurityContext securityContext,
new KMSClientProvider.KMSEncryptedKeyVersion(keyName, versionName, new KMSClientProvider.KMSEncryptedKeyVersion(keyName, versionName,
iv, KeyProviderCryptoExtension.EEK, encMaterial)); iv, KeyProviderCryptoExtension.EEK, encMaterial));
retJSON = KMSServerJSONUtils.toJSON(retKeyVersion); retJSON = KMSServerJSONUtils.toJSON(retKeyVersion);
kmsAudit.ok(user, DECRYPT_EEK, keyName, ""); kmsAudit.ok(user, KMSOp.DECRYPT_EEK, keyName, "");
} else { } else {
throw new IllegalArgumentException("Wrong " + KMSRESTConstants.EEK_OP + throw new IllegalArgumentException("Wrong " + KMSRESTConstants.EEK_OP +
" value, it must be " + KMSRESTConstants.EEK_GENERATE + " or " + " value, it must be " + KMSRESTConstants.EEK_GENERATE + " or " +
@ -412,9 +406,9 @@ public Response getKeyVersions(@Context SecurityContext securityContext,
Principal user = getPrincipal(securityContext); Principal user = getPrincipal(securityContext);
KMSClientProvider.checkNotEmpty(name, "name"); KMSClientProvider.checkNotEmpty(name, "name");
KMSWebApp.getKeyCallsMeter().mark(); KMSWebApp.getKeyCallsMeter().mark();
assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSIONS, name); assertAccess(KMSACLs.Type.GET, user, KMSOp.GET_KEY_VERSIONS, name);
Object json = KMSServerJSONUtils.toJSON(provider.getKeyVersions(name)); Object json = KMSServerJSONUtils.toJSON(provider.getKeyVersions(name));
kmsAudit.ok(user, GET_KEY_VERSIONS, name, ""); kmsAudit.ok(user, KMSOp.GET_KEY_VERSIONS, name, "");
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build(); return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
} }
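A side note on the message change above: the unauthorized messages move from java.text.MessageFormat placeholders to String.format specifiers, which also changes how the single quotes are written. A tiny illustration with made-up argument values (not taken from the patch):

import java.text.MessageFormat;

public class FormatSketch {
  public static void main(String[] args) {
    // Old style: {n} placeholders; '' is an escaped literal quote.
    System.out.println(MessageFormat.format(
        "User:{0} not allowed to do ''{1}'' on ''{2}''", "alice", "CREATE_KEY", "k1"));
    // New style: %s specifiers; quotes need no escaping.
    System.out.println(String.format(
        "User:%s not allowed to do '%s' on '%s'", "alice", "CREATE_KEY", "k1"));
    // Both print: User:alice not allowed to do 'CREATE_KEY' on 'k1'
  }
}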

View File

@ -50,11 +50,11 @@ private static class AuditEvent {
private final AtomicLong accessCount = new AtomicLong(-1); private final AtomicLong accessCount = new AtomicLong(-1);
private final String keyName; private final String keyName;
private final String user; private final String user;
private final String op; private final KMS.KMSOp op;
private final String extraMsg; private final String extraMsg;
private final long startTime = System.currentTimeMillis(); private final long startTime = System.currentTimeMillis();
private AuditEvent(String keyName, String user, String op, String msg) { private AuditEvent(String keyName, String user, KMS.KMSOp op, String msg) {
this.keyName = keyName; this.keyName = keyName;
this.user = user; this.user = user;
this.op = op; this.op = op;
@ -77,7 +77,7 @@ public String getUser() {
return user; return user;
} }
public String getOp() { public KMS.KMSOp getOp() {
return op; return op;
} }
@ -90,8 +90,9 @@ public static enum OpStatus {
OK, UNAUTHORIZED, UNAUTHENTICATED, ERROR; OK, UNAUTHORIZED, UNAUTHENTICATED, ERROR;
} }
private static Set<String> AGGREGATE_OPS_WHITELIST = Sets.newHashSet( private static Set<KMS.KMSOp> AGGREGATE_OPS_WHITELIST = Sets.newHashSet(
KMS.GET_KEY_VERSION, KMS.GET_CURRENT_KEY, KMS.DECRYPT_EEK, KMS.GENERATE_EEK KMS.KMSOp.GET_KEY_VERSION, KMS.KMSOp.GET_CURRENT_KEY,
KMS.KMSOp.DECRYPT_EEK, KMS.KMSOp.GENERATE_EEK
); );
private Cache<String, AuditEvent> cache; private Cache<String, AuditEvent> cache;
@ -137,10 +138,10 @@ private void logEvent(AuditEvent event) {
event.getExtraMsg()); event.getExtraMsg());
} }
private void op(OpStatus opStatus, final String op, final String user, private void op(OpStatus opStatus, final KMS.KMSOp op, final String user,
final String key, final String extraMsg) { final String key, final String extraMsg) {
if (!Strings.isNullOrEmpty(user) && !Strings.isNullOrEmpty(key) if (!Strings.isNullOrEmpty(user) && !Strings.isNullOrEmpty(key)
&& !Strings.isNullOrEmpty(op) && (op != null)
&& AGGREGATE_OPS_WHITELIST.contains(op)) { && AGGREGATE_OPS_WHITELIST.contains(op)) {
String cacheKey = createCacheKey(user, key, op); String cacheKey = createCacheKey(user, key, op);
if (opStatus == OpStatus.UNAUTHORIZED) { if (opStatus == OpStatus.UNAUTHORIZED) {
@ -167,7 +168,7 @@ public AuditEvent call() throws Exception {
} }
} else { } else {
List<String> kvs = new LinkedList<String>(); List<String> kvs = new LinkedList<String>();
if (!Strings.isNullOrEmpty(op)) { if (op != null) {
kvs.add("op=" + op); kvs.add("op=" + op);
} }
if (!Strings.isNullOrEmpty(key)) { if (!Strings.isNullOrEmpty(key)) {
@ -185,16 +186,16 @@ public AuditEvent call() throws Exception {
} }
} }
public void ok(Principal user, String op, String key, public void ok(Principal user, KMS.KMSOp op, String key,
String extraMsg) { String extraMsg) {
op(OpStatus.OK, op, user.getName(), key, extraMsg); op(OpStatus.OK, op, user.getName(), key, extraMsg);
} }
public void ok(Principal user, String op, String extraMsg) { public void ok(Principal user, KMS.KMSOp op, String extraMsg) {
op(OpStatus.OK, op, user.getName(), null, extraMsg); op(OpStatus.OK, op, user.getName(), null, extraMsg);
} }
public void unauthorized(Principal user, String op, String key) { public void unauthorized(Principal user, KMS.KMSOp op, String key) {
op(OpStatus.UNAUTHORIZED, op, user.getName(), key, ""); op(OpStatus.UNAUTHORIZED, op, user.getName(), key, "");
} }
@ -211,7 +212,7 @@ public void unauthenticated(String remoteHost, String method,
+ " URL:" + url + " ErrorMsg:'" + extraMsg + "'"); + " URL:" + url + " ErrorMsg:'" + extraMsg + "'");
} }
private static String createCacheKey(String user, String key, String op) { private static String createCacheKey(String user, String key, KMS.KMSOp op) {
return user + "#" + key + "#" + op; return user + "#" + key + "#" + op;
} }

View File

@ -17,6 +17,7 @@
*/ */
package org.apache.hadoop.crypto.key.kms.server; package org.apache.hadoop.crypto.key.kms.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import java.io.File; import java.io.File;
@ -26,6 +27,7 @@
/** /**
* Utility class to load KMS configuration files. * Utility class to load KMS configuration files.
*/ */
@InterfaceAudience.Private
public class KMSConfiguration { public class KMSConfiguration {
public static final String KMS_CONFIG_DIR = "kms.config.dir"; public static final String KMS_CONFIG_DIR = "kms.config.dir";

View File

@ -17,12 +17,15 @@
*/ */
package org.apache.hadoop.crypto.key.kms.server; package org.apache.hadoop.crypto.key.kms.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.jmx.JMXJsonServlet; import org.apache.hadoop.jmx.JMXJsonServlet;
import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpServletResponse;
import java.io.IOException; import java.io.IOException;
@InterfaceAudience.Private
public class KMSJMXServlet extends JMXJsonServlet { public class KMSJMXServlet extends JMXJsonServlet {
@Override @Override

View File

@ -23,6 +23,7 @@
import java.io.PrintStream; import java.io.PrintStream;
import java.security.Principal; import java.security.Principal;
import org.apache.hadoop.crypto.key.kms.server.KMS.KMSOp;
import org.apache.log4j.LogManager; import org.apache.log4j.LogManager;
import org.apache.log4j.PropertyConfigurator; import org.apache.log4j.PropertyConfigurator;
import org.junit.After; import org.junit.After;
@ -82,16 +83,16 @@ private String getAndResetLogOutput() {
public void testAggregation() throws Exception { public void testAggregation() throws Exception {
Principal luser = Mockito.mock(Principal.class); Principal luser = Mockito.mock(Principal.class);
Mockito.when(luser.getName()).thenReturn("luser"); Mockito.when(luser.getName()).thenReturn("luser");
kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg"); kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg"); kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg"); kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMS.DELETE_KEY, "k1", "testmsg"); kmsAudit.ok(luser, KMSOp.DELETE_KEY, "k1", "testmsg");
kmsAudit.ok(luser, KMS.ROLL_NEW_VERSION, "k1", "testmsg"); kmsAudit.ok(luser, KMSOp.ROLL_NEW_VERSION, "k1", "testmsg");
kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg"); kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg"); kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg"); kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
Thread.sleep(1500); Thread.sleep(1500);
kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg"); kmsAudit.ok(luser, KMSOp.DECRYPT_EEK, "k1", "testmsg");
Thread.sleep(1500); Thread.sleep(1500);
String out = getAndResetLogOutput(); String out = getAndResetLogOutput();
System.out.println(out); System.out.println(out);
@ -110,15 +111,15 @@ public void testAggregation() throws Exception {
public void testAggregationUnauth() throws Exception { public void testAggregationUnauth() throws Exception {
Principal luser = Mockito.mock(Principal.class); Principal luser = Mockito.mock(Principal.class);
Mockito.when(luser.getName()).thenReturn("luser"); Mockito.when(luser.getName()).thenReturn("luser");
kmsAudit.unauthorized(luser, KMS.GENERATE_EEK, "k2"); kmsAudit.unauthorized(luser, KMSOp.GENERATE_EEK, "k2");
Thread.sleep(1000); Thread.sleep(1000);
kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg"); kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg"); kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg"); kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg"); kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg"); kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
kmsAudit.unauthorized(luser, KMS.GENERATE_EEK, "k3"); kmsAudit.unauthorized(luser, KMSOp.GENERATE_EEK, "k3");
kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg"); kmsAudit.ok(luser, KMSOp.GENERATE_EEK, "k3", "testmsg");
Thread.sleep(2000); Thread.sleep(2000);
String out = getAndResetLogOutput(); String out = getAndResetLogOutput();
System.out.println(out); System.out.println(out);

View File

@ -39,12 +39,14 @@
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.lib.wsrs.EnumSetParam; import org.apache.hadoop.lib.wsrs.EnumSetParam;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL; import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.Authenticator;
import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.StringUtils;
@ -67,7 +69,6 @@
import java.io.InputStream; import java.io.InputStream;
import java.io.OutputStream; import java.io.OutputStream;
import java.net.HttpURLConnection; import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URI; import java.net.URI;
import java.net.URISyntaxException; import java.net.URISyntaxException;
import java.net.URL; import java.net.URL;
@ -75,7 +76,6 @@
import java.text.MessageFormat; import java.text.MessageFormat;
import java.util.HashMap; import java.util.HashMap;
import java.util.Map; import java.util.Map;
import java.util.concurrent.Callable;
/** /**
* HttpFSServer implementation of the FileSystemAccess FileSystem. * HttpFSServer implementation of the FileSystemAccess FileSystem.
@ -217,34 +217,15 @@ public String getMethod() {
} }
private DelegationTokenAuthenticatedURL authURL;
private AuthenticatedURL.Token authToken = new AuthenticatedURL.Token(); private DelegationTokenAuthenticatedURL.Token authToken =
new DelegationTokenAuthenticatedURL.Token();
private URI uri; private URI uri;
private InetSocketAddress httpFSAddr;
private Path workingDir; private Path workingDir;
private UserGroupInformation realUser; private UserGroupInformation realUser;
private String doAs; private String doAs;
private Token<?> delegationToken;
//This method enables handling UGI doAs with SPNEGO, we have to
//fallback to the realuser who logged in with Kerberos credentials
private <T> T doAsRealUserIfNecessary(final Callable<T> callable)
throws IOException {
try {
if (realUser.getShortUserName().equals(doAs)) {
return callable.call();
} else {
return realUser.doAs(new PrivilegedExceptionAction<T>() {
@Override
public T run() throws Exception {
return callable.call();
}
});
}
} catch (Exception ex) {
throw new IOException(ex.toString(), ex);
}
}
/** /**
* Convenience method that creates a <code>HttpURLConnection</code> for the * Convenience method that creates a <code>HttpURLConnection</code> for the
@ -291,20 +272,26 @@ private HttpURLConnection getConnection(final String method,
private HttpURLConnection getConnection(final String method, private HttpURLConnection getConnection(final String method,
Map<String, String> params, Map<String, List<String>> multiValuedParams, Map<String, String> params, Map<String, List<String>> multiValuedParams,
Path path, boolean makeQualified) throws IOException { Path path, boolean makeQualified) throws IOException {
if (!realUser.getShortUserName().equals(doAs)) {
params.put(DO_AS_PARAM, doAs);
}
HttpFSKerberosAuthenticator.injectDelegationToken(params, delegationToken);
if (makeQualified) { if (makeQualified) {
path = makeQualified(path); path = makeQualified(path);
} }
final URL url = HttpFSUtils.createURL(path, params, multiValuedParams); final URL url = HttpFSUtils.createURL(path, params, multiValuedParams);
return doAsRealUserIfNecessary(new Callable<HttpURLConnection>() { try {
@Override return UserGroupInformation.getCurrentUser().doAs(
public HttpURLConnection call() throws Exception { new PrivilegedExceptionAction<HttpURLConnection>() {
return getConnection(url, method); @Override
public HttpURLConnection run() throws Exception {
return getConnection(url, method);
}
}
);
} catch (Exception ex) {
if (ex instanceof IOException) {
throw (IOException) ex;
} else {
throw new IOException(ex);
} }
}); }
} }
/** /**
@ -321,12 +308,8 @@ public HttpURLConnection call() throws Exception {
* @throws IOException thrown if an IO error occurrs. * @throws IOException thrown if an IO error occurrs.
*/ */
private HttpURLConnection getConnection(URL url, String method) throws IOException { private HttpURLConnection getConnection(URL url, String method) throws IOException {
Class<? extends Authenticator> klass =
getConf().getClass("httpfs.authenticator.class",
HttpFSKerberosAuthenticator.class, Authenticator.class);
Authenticator authenticator = ReflectionUtils.newInstance(klass, getConf());
try { try {
HttpURLConnection conn = new AuthenticatedURL(authenticator).openConnection(url, authToken); HttpURLConnection conn = authURL.openConnection(url, authToken);
conn.setRequestMethod(method); conn.setRequestMethod(method);
if (method.equals(HTTP_POST) || method.equals(HTTP_PUT)) { if (method.equals(HTTP_POST) || method.equals(HTTP_PUT)) {
conn.setDoOutput(true); conn.setDoOutput(true);
@ -357,10 +340,17 @@ public void initialize(URI name, Configuration conf) throws IOException {
super.initialize(name, conf); super.initialize(name, conf);
try { try {
uri = new URI(name.getScheme() + "://" + name.getAuthority()); uri = new URI(name.getScheme() + "://" + name.getAuthority());
httpFSAddr = NetUtils.createSocketAddr(getCanonicalUri().toString());
} catch (URISyntaxException ex) { } catch (URISyntaxException ex) {
throw new IOException(ex); throw new IOException(ex);
} }
Class<? extends DelegationTokenAuthenticator> klass =
getConf().getClass("httpfs.authenticator.class",
KerberosDelegationTokenAuthenticator.class,
DelegationTokenAuthenticator.class);
DelegationTokenAuthenticator authenticator =
ReflectionUtils.newInstance(klass, getConf());
authURL = new DelegationTokenAuthenticatedURL(authenticator);
} }
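Side note on the hunk above: the authenticator stays pluggable through the "httpfs.authenticator.class" property. The following is a hedged, non-authoritative sketch of how a client configuration could swap in another DelegationTokenAuthenticator implementation; the custom class passed in is an assumption, while the property name and the default come from the code above.
// Hedged sketch only: "httpfs.authenticator.class" and the default
// KerberosDelegationTokenAuthenticator are taken from initialize() above;
// the authenticator class handed in here is a hypothetical subclass.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
public class AuthenticatorOverrideSketch {
  public static Configuration withAuthenticator(
      Class<? extends DelegationTokenAuthenticator> authenticatorClass) {
    Configuration conf = new Configuration();
    // Read back by getConf().getClass("httpfs.authenticator.class", ...) above.
    conf.setClass("httpfs.authenticator.class", authenticatorClass,
        DelegationTokenAuthenticator.class);
    return conf;
  }
}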
@Override @Override
@ -1059,38 +1049,57 @@ public void readFields(DataInput in) throws IOException {
@Override @Override
public Token<?> getDelegationToken(final String renewer) public Token<?> getDelegationToken(final String renewer)
throws IOException { throws IOException {
return doAsRealUserIfNecessary(new Callable<Token<?>>() { try {
@Override return UserGroupInformation.getCurrentUser().doAs(
public Token<?> call() throws Exception { new PrivilegedExceptionAction<Token<?>>() {
return HttpFSKerberosAuthenticator. @Override
getDelegationToken(uri, httpFSAddr, authToken, renewer); public Token<?> run() throws Exception {
return authURL.getDelegationToken(uri.toURL(), authToken,
renewer);
}
}
);
} catch (Exception ex) {
if (ex instanceof IOException) {
throw (IOException) ex;
} else {
throw new IOException(ex);
} }
}); }
} }
public long renewDelegationToken(final Token<?> token) throws IOException { public long renewDelegationToken(final Token<?> token) throws IOException {
return doAsRealUserIfNecessary(new Callable<Long>() { try {
@Override return UserGroupInformation.getCurrentUser().doAs(
public Long call() throws Exception { new PrivilegedExceptionAction<Long>() {
return HttpFSKerberosAuthenticator. @Override
renewDelegationToken(uri, authToken, token); public Long run() throws Exception {
return authURL.renewDelegationToken(uri.toURL(), authToken);
}
}
);
} catch (Exception ex) {
if (ex instanceof IOException) {
throw (IOException) ex;
} else {
throw new IOException(ex);
} }
}); }
} }
public void cancelDelegationToken(final Token<?> token) throws IOException { public void cancelDelegationToken(final Token<?> token) throws IOException {
HttpFSKerberosAuthenticator. authURL.cancelDelegationToken(uri.toURL(), authToken);
cancelDelegationToken(uri, authToken, token);
} }
@Override @Override
public Token<?> getRenewToken() { public Token<?> getRenewToken() {
return delegationToken; return null; //TODO : for renewer
} }
@Override @Override
@SuppressWarnings("unchecked")
public <T extends TokenIdentifier> void setDelegationToken(Token<T> token) { public <T extends TokenIdentifier> void setDelegationToken(Token<T> token) {
delegationToken = token; //TODO : for renewer
} }
@Override @Override

View File

@ -1,188 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.Authenticator;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.json.simple.JSONObject;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
/**
* A <code>KerberosAuthenticator</code> subclass that falls back to
* {@link HttpFSPseudoAuthenticator}.
*/
@InterfaceAudience.Private
public class HttpFSKerberosAuthenticator extends KerberosAuthenticator {
/**
* Returns the fallback authenticator if the server does not use
* Kerberos SPNEGO HTTP authentication.
*
* @return a {@link HttpFSPseudoAuthenticator} instance.
*/
@Override
protected Authenticator getFallBackAuthenticator() {
return new HttpFSPseudoAuthenticator();
}
private static final String HTTP_GET = "GET";
private static final String HTTP_PUT = "PUT";
public static final String DELEGATION_PARAM = "delegation";
public static final String TOKEN_PARAM = "token";
public static final String RENEWER_PARAM = "renewer";
public static final String DELEGATION_TOKEN_JSON = "Token";
public static final String DELEGATION_TOKEN_URL_STRING_JSON = "urlString";
public static final String RENEW_DELEGATION_TOKEN_JSON = "long";
/**
* DelegationToken operations.
*/
@InterfaceAudience.Private
public static enum DelegationTokenOperation {
GETDELEGATIONTOKEN(HTTP_GET, true),
RENEWDELEGATIONTOKEN(HTTP_PUT, true),
CANCELDELEGATIONTOKEN(HTTP_PUT, false);
private String httpMethod;
private boolean requiresKerberosCredentials;
private DelegationTokenOperation(String httpMethod,
boolean requiresKerberosCredentials) {
this.httpMethod = httpMethod;
this.requiresKerberosCredentials = requiresKerberosCredentials;
}
public String getHttpMethod() {
return httpMethod;
}
public boolean requiresKerberosCredentials() {
return requiresKerberosCredentials;
}
}
public static void injectDelegationToken(Map<String, String> params,
Token<?> dtToken)
throws IOException {
if (dtToken != null) {
params.put(DELEGATION_PARAM, dtToken.encodeToUrlString());
}
}
private boolean hasDelegationToken(URL url) {
return url.getQuery().contains(DELEGATION_PARAM + "=");
}
@Override
public void authenticate(URL url, AuthenticatedURL.Token token)
throws IOException, AuthenticationException {
if (!hasDelegationToken(url)) {
super.authenticate(url, token);
}
}
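To make the short-circuit above concrete, here is a minimal, non-authoritative sketch of how this (since removed) class was driven from the client side: once injectDelegationToken() has added the "delegation" query parameter, authenticate() does nothing and the request rides on the token alone. The operation name and the fresh AuthenticatedURL.Token are placeholders.
// Hedged sketch using only helpers defined in this removed class and in
// HttpFSUtils; the op value and the empty auth token are placeholders.
public static HttpURLConnection openWithDelegationToken(URI fsURI, Token<?> dtToken)
    throws IOException, AuthenticationException {
  Map<String, String> params = new HashMap<String, String>();
  params.put(HttpFSKerberosAuthenticator.OP_PARAM, "GETHOMEDIRECTORY");
  HttpFSKerberosAuthenticator.injectDelegationToken(params, dtToken);
  URL url = HttpFSUtils.createURL(new Path(fsURI), params);
  AuthenticatedURL aUrl = new AuthenticatedURL(new HttpFSKerberosAuthenticator());
  // No SPNEGO round trip happens here because the URL carries "delegation=".
  return aUrl.openConnection(url, new AuthenticatedURL.Token());
}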
public static final String OP_PARAM = "op";
public static Token<?> getDelegationToken(URI fsURI,
InetSocketAddress httpFSAddr, AuthenticatedURL.Token token,
String renewer) throws IOException {
DelegationTokenOperation op =
DelegationTokenOperation.GETDELEGATIONTOKEN;
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, op.toString());
params.put(RENEWER_PARAM,renewer);
URL url = HttpFSUtils.createURL(new Path(fsURI), params);
AuthenticatedURL aUrl =
new AuthenticatedURL(new HttpFSKerberosAuthenticator());
try {
HttpURLConnection conn = aUrl.openConnection(url, token);
conn.setRequestMethod(op.getHttpMethod());
HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) ((JSONObject)
HttpFSUtils.jsonParse(conn)).get(DELEGATION_TOKEN_JSON);
String tokenStr = (String)
json.get(DELEGATION_TOKEN_URL_STRING_JSON);
Token<AbstractDelegationTokenIdentifier> dToken =
new Token<AbstractDelegationTokenIdentifier>();
dToken.decodeFromUrlString(tokenStr);
SecurityUtil.setTokenService(dToken, httpFSAddr);
return dToken;
} catch (AuthenticationException ex) {
throw new IOException(ex.toString(), ex);
}
}
public static long renewDelegationToken(URI fsURI,
AuthenticatedURL.Token token, Token<?> dToken) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM,
DelegationTokenOperation.RENEWDELEGATIONTOKEN.toString());
params.put(TOKEN_PARAM, dToken.encodeToUrlString());
URL url = HttpFSUtils.createURL(new Path(fsURI), params);
AuthenticatedURL aUrl =
new AuthenticatedURL(new HttpFSKerberosAuthenticator());
try {
HttpURLConnection conn = aUrl.openConnection(url, token);
conn.setRequestMethod(
DelegationTokenOperation.RENEWDELEGATIONTOKEN.getHttpMethod());
HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) ((JSONObject)
HttpFSUtils.jsonParse(conn)).get(DELEGATION_TOKEN_JSON);
return (Long)(json.get(RENEW_DELEGATION_TOKEN_JSON));
} catch (AuthenticationException ex) {
throw new IOException(ex.toString(), ex);
}
}
public static void cancelDelegationToken(URI fsURI,
AuthenticatedURL.Token token, Token<?> dToken) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM,
DelegationTokenOperation.CANCELDELEGATIONTOKEN.toString());
params.put(TOKEN_PARAM, dToken.encodeToUrlString());
URL url = HttpFSUtils.createURL(new Path(fsURI), params);
AuthenticatedURL aUrl =
new AuthenticatedURL(new HttpFSKerberosAuthenticator());
try {
HttpURLConnection conn = aUrl.openConnection(url, token);
conn.setRequestMethod(
DelegationTokenOperation.CANCELDELEGATIONTOKEN.getHttpMethod());
HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
} catch (AuthenticationException ex) {
throw new IOException(ex.toString(), ex);
}
}
}

View File

@ -20,7 +20,10 @@
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter; import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter;
import javax.servlet.FilterConfig; import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import java.io.FileReader; import java.io.FileReader;
import java.io.IOException; import java.io.IOException;
import java.io.Reader; import java.io.Reader;
@ -32,7 +35,9 @@
* from HttpFSServer's server configuration. * from HttpFSServer's server configuration.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
public class HttpFSAuthenticationFilter extends AuthenticationFilter { public class HttpFSAuthenticationFilter
extends DelegationTokenAuthenticationFilter {
private static final String CONF_PREFIX = "httpfs.authentication."; private static final String CONF_PREFIX = "httpfs.authentication.";
private static final String SIGNATURE_SECRET_FILE = SIGNATURE_SECRET + ".file"; private static final String SIGNATURE_SECRET_FILE = SIGNATURE_SECRET + ".file";
@ -50,7 +55,8 @@ public class HttpFSAuthenticationFilter extends AuthenticationFilter {
* @return hadoop-auth configuration read from HttpFSServer's configuration. * @return hadoop-auth configuration read from HttpFSServer's configuration.
*/ */
@Override @Override
protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig) { protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) throws ServletException{
Properties props = new Properties(); Properties props = new Properties();
Configuration conf = HttpFSServerWebApp.get().getConfig(); Configuration conf = HttpFSServerWebApp.get().getConfig();
@ -64,11 +70,6 @@ protected Properties getConfiguration(String configPrefix, FilterConfig filterCo
} }
} }
if (props.getProperty(AUTH_TYPE).equals("kerberos")) {
props.setProperty(AUTH_TYPE,
HttpFSKerberosAuthenticationHandler.class.getName());
}
String signatureSecretFile = props.getProperty(SIGNATURE_SECRET_FILE, null); String signatureSecretFile = props.getProperty(SIGNATURE_SECRET_FILE, null);
if (signatureSecretFile == null) { if (signatureSecretFile == null) {
throw new RuntimeException("Undefined property: " + SIGNATURE_SECRET_FILE); throw new RuntimeException("Undefined property: " + SIGNATURE_SECRET_FILE);

View File

@ -1,230 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator.DelegationTokenOperation;
import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
import org.apache.hadoop.lib.service.DelegationTokenManager;
import org.apache.hadoop.lib.service.DelegationTokenManagerException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.token.Token;
import org.json.simple.JSONObject;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
import java.io.IOException;
import java.io.Writer;
import java.text.MessageFormat;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
/**
* Server side <code>AuthenticationHandler</code> that authenticates requests
* using the incoming delegation token as a 'delegation' query string parameter.
* <p/>
* If no delegation token is present in the request, it delegates to the
* {@link KerberosAuthenticationHandler}
*/
@InterfaceAudience.Private
public class HttpFSKerberosAuthenticationHandler
extends KerberosAuthenticationHandler {
static final Set<String> DELEGATION_TOKEN_OPS =
new HashSet<String>();
static {
DELEGATION_TOKEN_OPS.add(
DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
DELEGATION_TOKEN_OPS.add(
DelegationTokenOperation.RENEWDELEGATIONTOKEN.toString());
DELEGATION_TOKEN_OPS.add(
DelegationTokenOperation.CANCELDELEGATIONTOKEN.toString());
}
public static final String TYPE = "kerberos-dt";
/**
* Returns the authentication type of the handler.
*
* @return <code>kerberos-dt</code>
*/
@Override
public String getType() {
return TYPE;
}
private static final String ENTER = System.getProperty("line.separator");
@Override
@SuppressWarnings("unchecked")
public boolean managementOperation(AuthenticationToken token,
HttpServletRequest request, HttpServletResponse response)
throws IOException, AuthenticationException {
boolean requestContinues = true;
String op = request.getParameter(HttpFSFileSystem.OP_PARAM);
op = (op != null) ? op.toUpperCase() : null;
if (DELEGATION_TOKEN_OPS.contains(op) &&
!request.getMethod().equals("OPTIONS")) {
DelegationTokenOperation dtOp =
DelegationTokenOperation.valueOf(op);
if (dtOp.getHttpMethod().equals(request.getMethod())) {
if (dtOp.requiresKerberosCredentials() && token == null) {
response.sendError(HttpServletResponse.SC_UNAUTHORIZED,
MessageFormat.format(
"Operation [{0}] requires SPNEGO authentication established",
dtOp));
requestContinues = false;
} else {
DelegationTokenManager tokenManager =
HttpFSServerWebApp.get().get(DelegationTokenManager.class);
try {
Map map = null;
switch (dtOp) {
case GETDELEGATIONTOKEN:
String renewerParam =
request.getParameter(HttpFSKerberosAuthenticator.RENEWER_PARAM);
if (renewerParam == null) {
renewerParam = token.getUserName();
}
Token<?> dToken = tokenManager.createToken(
UserGroupInformation.getCurrentUser(), renewerParam);
map = delegationTokenToJSON(dToken);
break;
case RENEWDELEGATIONTOKEN:
case CANCELDELEGATIONTOKEN:
String tokenParam =
request.getParameter(HttpFSKerberosAuthenticator.TOKEN_PARAM);
if (tokenParam == null) {
response.sendError(HttpServletResponse.SC_BAD_REQUEST,
MessageFormat.format(
"Operation [{0}] requires the parameter [{1}]",
dtOp, HttpFSKerberosAuthenticator.TOKEN_PARAM));
requestContinues = false;
} else {
if (dtOp == DelegationTokenOperation.CANCELDELEGATIONTOKEN) {
Token<DelegationTokenIdentifier> dt =
new Token<DelegationTokenIdentifier>();
dt.decodeFromUrlString(tokenParam);
tokenManager.cancelToken(dt,
UserGroupInformation.getCurrentUser().getUserName());
} else {
Token<DelegationTokenIdentifier> dt =
new Token<DelegationTokenIdentifier>();
dt.decodeFromUrlString(tokenParam);
long expirationTime =
tokenManager.renewToken(dt, token.getUserName());
map = new HashMap();
map.put("long", expirationTime);
}
}
break;
}
if (requestContinues) {
response.setStatus(HttpServletResponse.SC_OK);
if (map != null) {
response.setContentType(MediaType.APPLICATION_JSON);
Writer writer = response.getWriter();
JSONObject.writeJSONString(map, writer);
writer.write(ENTER);
writer.flush();
}
requestContinues = false;
}
} catch (DelegationTokenManagerException ex) {
throw new AuthenticationException(ex.toString(), ex);
}
}
} else {
response.sendError(HttpServletResponse.SC_BAD_REQUEST,
MessageFormat.format(
"Wrong HTTP method [{0}] for operation [{1}], it should be [{2}]",
request.getMethod(), dtOp, dtOp.getHttpMethod()));
requestContinues = false;
}
}
return requestContinues;
}
@SuppressWarnings("unchecked")
private static Map delegationTokenToJSON(Token token) throws IOException {
Map json = new LinkedHashMap();
json.put(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON,
token.encodeToUrlString());
Map response = new LinkedHashMap();
response.put(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON, json);
return response;
}
/**
* Authenticates a request by looking for the <code>delegation</code>
* query-string parameter and verifying it is a valid token. If there is no
* <code>delegation</code> query-string parameter, it delegates the
* authentication to the {@link KerberosAuthenticationHandler} unless it is
* disabled.
*
* @param request the HTTP client request.
* @param response the HTTP client response.
*
* @return the authentication token for the authenticated request.
* @throws IOException thrown if an IO error occurred.
* @throws AuthenticationException thrown if the authentication failed.
*/
@Override
public AuthenticationToken authenticate(HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token;
String delegationParam =
request.getParameter(HttpFSKerberosAuthenticator.DELEGATION_PARAM);
if (delegationParam != null) {
try {
Token<DelegationTokenIdentifier> dt =
new Token<DelegationTokenIdentifier>();
dt.decodeFromUrlString(delegationParam);
DelegationTokenManager tokenManager =
HttpFSServerWebApp.get().get(DelegationTokenManager.class);
UserGroupInformation ugi = tokenManager.verifyToken(dt);
final String shortName = ugi.getShortUserName();
// creating an ephemeral token
token = new AuthenticationToken(shortName, ugi.getUserName(),
getType());
token.setExpires(0);
} catch (Throwable ex) {
throw new AuthenticationException("Could not verify DelegationToken, " +
ex.toString(), ex);
}
} else {
token = super.authenticate(request, response);
}
return token;
}
}

View File

@ -1,78 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
/**
* Service interface to manage HttpFS delegation tokens.
*/
@InterfaceAudience.Private
public interface DelegationTokenManager {
/**
* Creates a delegation token.
*
* @param ugi UGI creating the token.
* @param renewer token renewer.
* @return new delegation token.
* @throws DelegationTokenManagerException thrown if the token could not be
* created.
*/
public Token<DelegationTokenIdentifier> createToken(UserGroupInformation ugi,
String renewer)
throws DelegationTokenManagerException;
/**
* Renews a delegation token.
*
* @param token delegation token to renew.
* @param renewer token renewer.
* @return epoch expiration time.
* @throws DelegationTokenManagerException thrown if the token could not be
* renewed.
*/
public long renewToken(Token<DelegationTokenIdentifier> token, String renewer)
throws DelegationTokenManagerException;
/**
* Cancels a delegation token.
*
* @param token delegation token to cancel.
* @param canceler token canceler.
* @throws DelegationTokenManagerException thrown if the token could not be
* canceled.
*/
public void cancelToken(Token<DelegationTokenIdentifier> token,
String canceler)
throws DelegationTokenManagerException;
/**
* Verifies a delegation token.
*
* @param token delegation token to verify.
* @return the UGI for the token.
* @throws DelegationTokenManagerException thrown if the token could not be
* verified.
*/
public UserGroupInformation verifyToken(Token<DelegationTokenIdentifier> token)
throws DelegationTokenManagerException;
}
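A minimal sketch of the lifecycle the interface above describes, matching the way the project's own tests exercise it; the manager is obtained from the running HttpFS server and the renewer name is a placeholder.
// Hedged sketch of the create / verify / renew / cancel lifecycle defined above.
public static void delegationTokenLifecycle()
    throws DelegationTokenManagerException, IOException {
  DelegationTokenManager tm =
      HttpFSServerWebApp.get().get(DelegationTokenManager.class);
  Token<DelegationTokenIdentifier> token =
      tm.createToken(UserGroupInformation.getCurrentUser(), "renewer");
  tm.verifyToken(token);                              // resolves the owning UGI
  long expiration = tm.renewToken(token, "renewer");  // epoch expiration time
  tm.cancelToken(token, "renewer");                   // further verifyToken() calls fail
}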

View File

@ -1,51 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.lib.lang.XException;
/**
* Exception thrown by the {@link DelegationTokenManager} service implementation.
*/
@InterfaceAudience.Private
public class DelegationTokenManagerException extends XException {
public enum ERROR implements XException.ERROR {
DT01("Could not verify delegation token, {0}"),
DT02("Could not renew delegation token, {0}"),
DT03("Could not cancel delegation token, {0}"),
DT04("Could not create delegation token, {0}");
private String template;
ERROR(String template) {
this.template = template;
}
@Override
public String getTemplate() {
return template;
}
}
public DelegationTokenManagerException(ERROR error, Object... params) {
super(error, params);
}
}

View File

@ -1,242 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.security;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.lib.server.BaseService;
import org.apache.hadoop.lib.server.ServerException;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
import org.apache.hadoop.lib.service.DelegationTokenManager;
import org.apache.hadoop.lib.service.DelegationTokenManagerException;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
/**
* DelegationTokenManager service implementation.
*/
@InterfaceAudience.Private
public class DelegationTokenManagerService extends BaseService
implements DelegationTokenManager {
private static final String PREFIX = "delegation.token.manager";
private static final String UPDATE_INTERVAL = "update.interval";
private static final String MAX_LIFETIME = "max.lifetime";
private static final String RENEW_INTERVAL = "renew.interval";
private static final long HOUR = 60 * 60 * 1000;
private static final long DAY = 24 * HOUR;
DelegationTokenSecretManager secretManager = null;
private Text tokenKind;
public DelegationTokenManagerService() {
super(PREFIX);
}
/**
* Initializes the service.
*
* @throws ServiceException thrown if the service could not be initialized.
*/
@Override
protected void init() throws ServiceException {
long updateInterval = getServiceConfig().getLong(UPDATE_INTERVAL, DAY);
long maxLifetime = getServiceConfig().getLong(MAX_LIFETIME, 7 * DAY);
long renewInterval = getServiceConfig().getLong(RENEW_INTERVAL, DAY);
tokenKind = (HttpFSServerWebApp.get().isSslEnabled())
? SWebHdfsFileSystem.TOKEN_KIND : WebHdfsFileSystem.TOKEN_KIND;
secretManager = new DelegationTokenSecretManager(tokenKind, updateInterval,
maxLifetime,
renewInterval, HOUR);
try {
secretManager.startThreads();
} catch (IOException ex) {
throw new ServiceException(ServiceException.ERROR.S12,
DelegationTokenManager.class.getSimpleName(),
ex.toString(), ex);
}
}
/**
* Destroys the service.
*/
@Override
public void destroy() {
secretManager.stopThreads();
super.destroy();
}
/**
* Returns the service interface.
*
* @return the service interface.
*/
@Override
public Class getInterface() {
return DelegationTokenManager.class;
}
/**
* Creates a delegation token.
*
* @param ugi UGI creating the token.
* @param renewer token renewer.
* @return new delegation token.
* @throws DelegationTokenManagerException thrown if the token could not be
* created.
*/
@Override
public Token<DelegationTokenIdentifier> createToken(UserGroupInformation ugi,
String renewer)
throws DelegationTokenManagerException {
renewer = (renewer == null) ? ugi.getShortUserName() : renewer;
String user = ugi.getUserName();
Text owner = new Text(user);
Text realUser = null;
if (ugi.getRealUser() != null) {
realUser = new Text(ugi.getRealUser().getUserName());
}
DelegationTokenIdentifier tokenIdentifier =
new DelegationTokenIdentifier(tokenKind, owner, new Text(renewer), realUser);
Token<DelegationTokenIdentifier> token =
new Token<DelegationTokenIdentifier>(tokenIdentifier, secretManager);
try {
SecurityUtil.setTokenService(token,
HttpFSServerWebApp.get().getAuthority());
} catch (ServerException ex) {
throw new DelegationTokenManagerException(
DelegationTokenManagerException.ERROR.DT04, ex.toString(), ex);
}
return token;
}
/**
* Renews a delegation token.
*
* @param token delegation token to renew.
* @param renewer token renewer.
* @return epoch expiration time.
* @throws DelegationTokenManagerException thrown if the token could not be
* renewed.
*/
@Override
public long renewToken(Token<DelegationTokenIdentifier> token, String renewer)
throws DelegationTokenManagerException {
try {
return secretManager.renewToken(token, renewer);
} catch (IOException ex) {
throw new DelegationTokenManagerException(
DelegationTokenManagerException.ERROR.DT02, ex.toString(), ex);
}
}
/**
* Cancels a delegation token.
*
* @param token delegation token to cancel.
* @param canceler token canceler.
* @throws DelegationTokenManagerException thrown if the token could not be
* canceled.
*/
@Override
public void cancelToken(Token<DelegationTokenIdentifier> token,
String canceler)
throws DelegationTokenManagerException {
try {
secretManager.cancelToken(token, canceler);
} catch (IOException ex) {
throw new DelegationTokenManagerException(
DelegationTokenManagerException.ERROR.DT03, ex.toString(), ex);
}
}
/**
* Verifies a delegation token.
*
* @param token delegation token to verify.
* @return the UGI for the token.
* @throws DelegationTokenManagerException thrown if the token could not be
* verified.
*/
@Override
public UserGroupInformation verifyToken(Token<DelegationTokenIdentifier> token)
throws DelegationTokenManagerException {
ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
DataInputStream dis = new DataInputStream(buf);
DelegationTokenIdentifier id = new DelegationTokenIdentifier(tokenKind);
try {
id.readFields(dis);
dis.close();
secretManager.verifyToken(id, token.getPassword());
} catch (Exception ex) {
throw new DelegationTokenManagerException(
DelegationTokenManagerException.ERROR.DT01, ex.toString(), ex);
}
return id.getUser();
}
private static class DelegationTokenSecretManager
extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
private Text tokenKind;
/**
* Create a secret manager
*
* @param delegationKeyUpdateInterval the number of seconds for rolling new
* secret keys.
* @param delegationTokenMaxLifetime the maximum lifetime of the delegation
* tokens
* @param delegationTokenRenewInterval how often the tokens must be renewed
* @param delegationTokenRemoverScanInterval how often the tokens are
* scanned
* for expired tokens
*/
public DelegationTokenSecretManager(Text tokenKind, long delegationKeyUpdateInterval,
long delegationTokenMaxLifetime,
long delegationTokenRenewInterval,
long delegationTokenRemoverScanInterval) {
super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
this.tokenKind = tokenKind;
}
@Override
public DelegationTokenIdentifier createIdentifier() {
return new DelegationTokenIdentifier(tokenKind);
}
}
}

View File

@ -35,7 +35,6 @@
org.apache.hadoop.lib.service.scheduler.SchedulerService, org.apache.hadoop.lib.service.scheduler.SchedulerService,
org.apache.hadoop.lib.service.security.GroupsService, org.apache.hadoop.lib.service.security.GroupsService,
org.apache.hadoop.lib.service.security.ProxyUserService, org.apache.hadoop.lib.service.security.ProxyUserService,
org.apache.hadoop.lib.service.security.DelegationTokenManagerService,
org.apache.hadoop.lib.service.hadoop.FileSystemAccessService org.apache.hadoop.lib.service.hadoop.FileSystemAccessService
</value> </value>
<description> <description>
@ -226,12 +225,4 @@
</description> </description>
</property> </property>
<property>
<name>httpfs.user.provider.user.pattern</name>
<value>^[A-Za-z_][A-Za-z0-9._-]*[$]?$</value>
<description>
Valid pattern for user and group names; it must be a valid Java regex.
</description>
</property>
</configuration> </configuration>
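If a deployment still needs the pattern that the hunk above removes from httpfs-default.xml, it can be supplied through the server configuration instead. Below is a hedged sketch reusing the removed default regex, mirroring what the (also removed) TestHttpFSCustomUserName test later in this change does with a different value.
// Hedged sketch: restores the user/group name pattern dropped above by setting
// UserProvider.USER_PATTERN_KEY on the HttpFS server configuration.
Configuration httpfsConf = new Configuration(false);
httpfsConf.set(UserProvider.USER_PATTERN_KEY, "^[A-Za-z_][A-Za-z0-9._-]*[$]?$");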

View File

@ -17,15 +17,19 @@
*/ */
package org.apache.hadoop.fs.http.server; package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
import javax.servlet.ServletException; import javax.servlet.ServletException;
import java.util.Properties; import java.util.Properties;
public class HttpFSKerberosAuthenticationHandlerForTesting public class HttpFSKerberosAuthenticationHandlerForTesting
extends HttpFSKerberosAuthenticationHandler { extends KerberosDelegationTokenAuthenticationHandler {
@Override @Override
public void init(Properties config) throws ServletException { public void init(Properties config) throws ServletException {
//NOP overwrite to avoid Kerberos initialization //NOP overwrite to avoid Kerberos initialization
config.setProperty(TOKEN_KIND, "t");
initTokenManager(config);
} }
@Override @Override

View File

@ -1,94 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
import org.apache.hadoop.lib.server.Service;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.Groups;
import org.apache.hadoop.lib.wsrs.UserProvider;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.util.Signer;
import org.apache.hadoop.test.HFSTestCase;
import org.apache.hadoop.test.HadoopUsersConfTestHelper;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.test.TestHdfs;
import org.apache.hadoop.test.TestHdfsHelper;
import org.apache.hadoop.test.TestJetty;
import org.apache.hadoop.test.TestJettyHelper;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.junit.Assert;
import org.junit.Test;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.webapp.WebAppContext;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.Writer;
import java.net.HttpURLConnection;
import java.net.URL;
import java.text.MessageFormat;
import java.util.Arrays;
import java.util.List;
public class TestHttpFSCustomUserName extends HFSTestCase {
@Test
@TestDir
@TestJetty
public void defaultUserName() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration httpfsConf = new Configuration(false);
HttpFSServerWebApp server =
new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
server.init();
Assert.assertEquals(UserProvider.USER_PATTERN_DEFAULT,
UserProvider.getUserPattern().pattern());
server.destroy();
}
@Test
@TestDir
@TestJetty
public void customUserName() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration httpfsConf = new Configuration(false);
httpfsConf.set(UserProvider.USER_PATTERN_KEY, "1");
HttpFSServerWebApp server =
new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
server.init();
Assert.assertEquals("1", UserProvider.getUserPattern().pattern());
server.destroy();
}
}

View File

@ -1,316 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator.DelegationTokenOperation;
import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
import org.apache.hadoop.lib.service.DelegationTokenManager;
import org.apache.hadoop.lib.service.DelegationTokenManagerException;
import org.apache.hadoop.lib.servlet.ServerWebApp;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.HFSTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.net.InetAddress;
import java.net.InetSocketAddress;
public class TestHttpFSKerberosAuthenticationHandler extends HFSTestCase {
@Test
@TestDir
public void testManagementOperationsWebHdfsFileSystem() throws Exception {
testManagementOperations(WebHdfsFileSystem.TOKEN_KIND);
}
@Test
@TestDir
public void testManagementOperationsSWebHdfsFileSystem() throws Exception {
try {
System.setProperty(HttpFSServerWebApp.NAME +
ServerWebApp.SSL_ENABLED, "true");
testManagementOperations(SWebHdfsFileSystem.TOKEN_KIND);
} finally {
System.getProperties().remove(HttpFSServerWebApp.NAME +
ServerWebApp.SSL_ENABLED);
}
}
private void testManagementOperations(Text expectedTokenKind) throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration httpfsConf = new Configuration(false);
HttpFSServerWebApp server =
new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
server.setAuthority(new InetSocketAddress(InetAddress.getLocalHost(),
14000));
AuthenticationHandler handler =
new HttpFSKerberosAuthenticationHandlerForTesting();
try {
server.init();
handler.init(null);
testNonManagementOperation(handler);
testManagementOperationErrors(handler);
testGetToken(handler, null, expectedTokenKind);
testGetToken(handler, "foo", expectedTokenKind);
testCancelToken(handler);
testRenewToken(handler);
} finally {
if (handler != null) {
handler.destroy();
}
server.destroy();
}
}
private void testNonManagementOperation(AuthenticationHandler handler)
throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
thenReturn(null);
Assert.assertTrue(handler.managementOperation(null, request, null));
Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
thenReturn(HttpFSFileSystem.Operation.CREATE.toString());
Assert.assertTrue(handler.managementOperation(null, request, null));
}
private void testManagementOperationErrors(AuthenticationHandler handler)
throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
thenReturn(DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
Mockito.when(request.getMethod()).thenReturn("FOO");
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.startsWith("Wrong HTTP method"));
Mockito.reset(response);
Mockito.when(request.getMethod()).
thenReturn(DelegationTokenOperation.GETDELEGATIONTOKEN.getHttpMethod());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED),
Mockito.contains("requires SPNEGO"));
}
private void testGetToken(AuthenticationHandler handler, String renewer,
Text expectedTokenKind) throws Exception {
DelegationTokenOperation op = DelegationTokenOperation.GETDELEGATIONTOKEN;
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
thenReturn(op.toString());
Mockito.when(request.getMethod()).
thenReturn(op.getHttpMethod());
AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
Mockito.when(token.getUserName()).thenReturn("user");
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.RENEWER_PARAM)).
thenReturn(renewer);
Mockito.reset(response);
StringWriter writer = new StringWriter();
PrintWriter pwriter = new PrintWriter(writer);
Mockito.when(response.getWriter()).thenReturn(pwriter);
Assert.assertFalse(handler.managementOperation(token, request, response));
if (renewer == null) {
Mockito.verify(token).getUserName();
} else {
Mockito.verify(token, Mockito.never()).getUserName();
}
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
Mockito.verify(response).setContentType(MediaType.APPLICATION_JSON);
pwriter.close();
String responseOutput = writer.toString();
String tokenLabel = HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON;
Assert.assertTrue(responseOutput.contains(tokenLabel));
Assert.assertTrue(responseOutput.contains(
HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON));
JSONObject json = (JSONObject) new JSONParser().parse(responseOutput);
json = (JSONObject) json.get(tokenLabel);
String tokenStr;
tokenStr = (String)
json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
Token<DelegationTokenIdentifier> dt = new Token<DelegationTokenIdentifier>();
dt.decodeFromUrlString(tokenStr);
HttpFSServerWebApp.get().get(DelegationTokenManager.class).verifyToken(dt);
Assert.assertEquals(expectedTokenKind, dt.getKind());
}
private void testCancelToken(AuthenticationHandler handler)
throws Exception {
DelegationTokenOperation op =
DelegationTokenOperation.CANCELDELEGATIONTOKEN;
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
thenReturn(op.toString());
Mockito.when(request.getMethod()).
thenReturn(op.getHttpMethod());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.contains("requires the parameter [token]"));
Mockito.reset(response);
Token<DelegationTokenIdentifier> token =
HttpFSServerWebApp.get().get(DelegationTokenManager.class).createToken(
UserGroupInformation.getCurrentUser(), "foo");
Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.TOKEN_PARAM)).
thenReturn(token.encodeToUrlString());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
try {
HttpFSServerWebApp.get().get(DelegationTokenManager.class).verifyToken(token);
Assert.fail();
}
catch (DelegationTokenManagerException ex) {
Assert.assertTrue(ex.toString().contains("DT01"));
}
}
private void testRenewToken(AuthenticationHandler handler)
throws Exception {
DelegationTokenOperation op =
DelegationTokenOperation.RENEWDELEGATIONTOKEN;
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
thenReturn(op.toString());
Mockito.when(request.getMethod()).
thenReturn(op.getHttpMethod());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED),
Mockito.contains("equires SPNEGO authentication established"));
Mockito.reset(response);
AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
Mockito.when(token.getUserName()).thenReturn("user");
Assert.assertFalse(handler.managementOperation(token, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.contains("requires the parameter [token]"));
Mockito.reset(response);
StringWriter writer = new StringWriter();
PrintWriter pwriter = new PrintWriter(writer);
Mockito.when(response.getWriter()).thenReturn(pwriter);
Token<DelegationTokenIdentifier> dToken =
HttpFSServerWebApp.get().get(DelegationTokenManager.class).createToken(
UserGroupInformation.getCurrentUser(), "user");
Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.TOKEN_PARAM)).
thenReturn(dToken.encodeToUrlString());
Assert.assertFalse(handler.managementOperation(token, request, response));
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
pwriter.close();
Assert.assertTrue(writer.toString().contains("long"));
HttpFSServerWebApp.get().get(DelegationTokenManager.class).verifyToken(dToken);
}
@Test
@TestDir
public void testAuthenticate() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration httpfsConf = new Configuration(false);
HttpFSServerWebApp server =
new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
server.setAuthority(new InetSocketAddress(InetAddress.getLocalHost(),
14000));
AuthenticationHandler handler =
new HttpFSKerberosAuthenticationHandlerForTesting();
try {
server.init();
handler.init(null);
testValidDelegationToken(handler);
testInvalidDelegationToken(handler);
} finally {
if (handler != null) {
handler.destroy();
}
server.destroy();
}
}
private void testValidDelegationToken(AuthenticationHandler handler)
throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Token<DelegationTokenIdentifier> dToken =
HttpFSServerWebApp.get().get(DelegationTokenManager.class).createToken(
UserGroupInformation.getCurrentUser(), "user");
Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.DELEGATION_PARAM)).
thenReturn(dToken.encodeToUrlString());
AuthenticationToken token = handler.authenticate(request, response);
Assert.assertEquals(UserGroupInformation.getCurrentUser().getShortUserName(),
token.getUserName());
Assert.assertEquals(0, token.getExpires());
Assert.assertEquals(HttpFSKerberosAuthenticationHandler.TYPE,
token.getType());
Assert.assertTrue(token.isExpired());
}
private void testInvalidDelegationToken(AuthenticationHandler handler)
throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.DELEGATION_PARAM)).
thenReturn("invalid");
try {
handler.authenticate(request, response);
Assert.fail();
} catch (AuthenticationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail();
}
}
}

View File

@ -18,6 +18,8 @@
package org.apache.hadoop.fs.http.server; package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
import org.json.simple.JSONArray; import org.json.simple.JSONArray;
import org.junit.Assert; import org.junit.Assert;
@ -43,7 +45,6 @@
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrCodec; import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
import org.apache.hadoop.lib.server.Service; import org.apache.hadoop.lib.server.Service;
import org.apache.hadoop.lib.server.ServiceException; import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.Groups; import org.apache.hadoop.lib.service.Groups;
@ -682,7 +683,7 @@ public void testDelegationTokenOperations() throws Exception {
AuthenticationToken token = AuthenticationToken token =
new AuthenticationToken("u", "p", new AuthenticationToken("u", "p",
HttpFSKerberosAuthenticationHandlerForTesting.TYPE); new KerberosDelegationTokenAuthenticationHandler().getType());
token.setExpires(System.currentTimeMillis() + 100000000); token.setExpires(System.currentTimeMillis() + 100000000);
Signer signer = new Signer(new StringSignerSecretProvider("secret")); Signer signer = new Signer(new StringSignerSecretProvider("secret"));
String tokenSigned = signer.sign(token.toString()); String tokenSigned = signer.sign(token.toString());
@ -706,9 +707,9 @@ public void testDelegationTokenOperations() throws Exception {
JSONObject json = (JSONObject) JSONObject json = (JSONObject)
new JSONParser().parse(new InputStreamReader(conn.getInputStream())); new JSONParser().parse(new InputStreamReader(conn.getInputStream()));
json = (JSONObject) json = (JSONObject)
json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON); json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
String tokenStr = (String) String tokenStr = (String)
json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON); json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
url = new URL(TestJettyHelper.getJettyURL(), url = new URL(TestJettyHelper.getJettyURL(),
"/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr); "/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);

View File

@ -23,11 +23,11 @@
import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem; import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL; import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
import org.apache.hadoop.test.HFSTestCase; import org.apache.hadoop.test.HFSTestCase;
import org.apache.hadoop.test.KerberosTestUtils; import org.apache.hadoop.test.KerberosTestUtils;
import org.apache.hadoop.test.TestDir; import org.apache.hadoop.test.TestDir;
@ -166,9 +166,9 @@ public Void call() throws Exception {
.parse(new InputStreamReader(conn.getInputStream())); .parse(new InputStreamReader(conn.getInputStream()));
json = json =
(JSONObject) json (JSONObject) json
.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON); .get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
String tokenStr = (String) json String tokenStr = (String) json
.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON); .get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
//access httpfs using the delegation token //access httpfs using the delegation token
url = new URL(TestJettyHelper.getJettyURL(), url = new URL(TestJettyHelper.getJettyURL(),

View File

@ -1,89 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.security;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.service.DelegationTokenManager;
import org.apache.hadoop.lib.service.DelegationTokenManagerException;
import org.apache.hadoop.lib.service.hadoop.FileSystemAccessService;
import org.apache.hadoop.lib.service.instrumentation.InstrumentationService;
import org.apache.hadoop.lib.service.scheduler.SchedulerService;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.HTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.util.StringUtils;
import org.junit.Assert;
import org.junit.Test;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.Arrays;
public class TestDelegationTokenManagerService extends HTestCase {
@Test
@TestDir
public void service() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("httpfs.services", StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName(),
DelegationTokenManagerService.class.getName())));
Server server = new HttpFSServerWebApp(dir, dir, dir, dir, conf);
server.init();
DelegationTokenManager tm = server.get(DelegationTokenManager.class);
Assert.assertNotNull(tm);
server.destroy();
}
@Test
@TestDir
@SuppressWarnings("unchecked")
public void tokens() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",",
Arrays.asList(DelegationTokenManagerService.class.getName())));
HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir, conf);
server.setAuthority(new InetSocketAddress(InetAddress.getLocalHost(), 14000));
server.init();
DelegationTokenManager tm = server.get(DelegationTokenManager.class);
Token token = tm.createToken(UserGroupInformation.getCurrentUser(), "foo");
Assert.assertNotNull(token);
tm.verifyToken(token);
Assert.assertTrue(tm.renewToken(token, "foo") > System.currentTimeMillis());
tm.cancelToken(token, "foo");
try {
tm.verifyToken(token);
Assert.fail();
} catch (DelegationTokenManagerException ex) {
//NOP
} catch (Exception ex) {
Assert.fail();
}
server.destroy();
}
}

View File

@ -724,6 +724,10 @@ READ3Response read(XDR xdr, SecurityHandler securityHandler,
FSDataInputStream fis = clientCache.getDfsInputStream(userName, FSDataInputStream fis = clientCache.getDfsInputStream(userName,
Nfs3Utils.getFileIdPath(handle)); Nfs3Utils.getFileIdPath(handle));
if (fis == null) {
return new READ3Response(Nfs3Status.NFS3ERR_ACCES);
}
try { try {
readCount = fis.read(offset, readbuffer, 0, count); readCount = fis.read(offset, readbuffer, 0, count);
} catch (IOException e) { } catch (IOException e) {

View File

@ -278,13 +278,11 @@ public void testRead() throws Exception {
readReq.serialize(xdr_req); readReq.serialize(xdr_req);
// Attempt by an unpriviledged user should fail. // Attempt by an unpriviledged user should fail.
/* Hits HDFS-6582. It needs to be fixed first.
READ3Response response1 = nfsd.read(xdr_req.asReadOnlyWrap(), READ3Response response1 = nfsd.read(xdr_req.asReadOnlyWrap(),
securityHandlerUnpriviledged, securityHandlerUnpriviledged,
new InetSocketAddress("localhost", 1234)); new InetSocketAddress("localhost", 1234));
assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES, assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
response1.getStatus()); response1.getStatus());
*/
// Attempt by a priviledged user should pass. // Attempt by a priviledged user should pass.
READ3Response response2 = nfsd.read(xdr_req.asReadOnlyWrap(), READ3Response response2 = nfsd.read(xdr_req.asReadOnlyWrap(),

View File

@ -370,12 +370,38 @@ Release 2.6.0 - UNRELEASED
HDFS-6781. Separate HDFS commands from CommandsManual.apt.vm. (Akira HDFS-6781. Separate HDFS commands from CommandsManual.apt.vm. (Akira
Ajisaka via Arpit Agarwal) Ajisaka via Arpit Agarwal)
HDFS-6728. Dynamically add new volumes to DataStorage, formatted if
necessary. (Lei Xu via atm)
HDFS-6740. Make FSDataset support adding data volumes dynamically. (Lei
Xu via atm)
HDFS-6722. Display readable last contact time for dead nodes on NN webUI.
(Ming Ma via wheat9)
HDFS-6772. Get DN storages out of blockContentsStale state faster after
NN restarts. (Ming Ma via Arpit Agarwal)
HDFS-573. Porting libhdfs to Windows. (cnauroth)
HDFS-6828. Separate block replica dispatching from Balancer. (szetszwo via
jing9)
HDFS-6837. Code cleanup for Balancer and Dispatcher. (szetszwo via
jing9)
HDFS-6838. Code cleanup for unnecessary INode replacement.
(Jing Zhao via wheat9)
OPTIMIZATIONS OPTIMIZATIONS
HDFS-6690. Deduplicate xattr names in memory. (wang) HDFS-6690. Deduplicate xattr names in memory. (wang)
BUG FIXES BUG FIXES
HDFS-6823. dfs.web.authentication.kerberos.principal shows up in logs for
insecure HDFS (Allen Wittenauer via raviprak)
HDFS-6617. Flake TestDFSZKFailoverController.testManualFailoverWithDFSHAAdmin HDFS-6617. Flake TestDFSZKFailoverController.testManualFailoverWithDFSHAAdmin
due to a long edit log sync op. (Liang Xie via cnauroth) due to a long edit log sync op. (Liang Xie via cnauroth)
@ -462,6 +488,9 @@ Release 2.6.0 - UNRELEASED
HDFS-6791. A block could remain under replicated if all of its replicas are on HDFS-6791. A block could remain under replicated if all of its replicas are on
decommissioned nodes. (Ming Ma via jing9) decommissioned nodes. (Ming Ma via jing9)
HDFS-6582. Missing null check in RpcProgramNfs3#read(XDR, SecurityHandler)
(Abhiraj Butala via brandonli)
Release 2.5.0 - UNRELEASED Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@ -361,16 +361,97 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<profiles> <profiles>
<profile> <profile>
<id>windows</id> <id>native-win</id>
<activation> <activation>
<activeByDefault>false</activeByDefault> <activeByDefault>false</activeByDefault>
<os> <os>
<family>windows</family> <family>windows</family>
</os> </os>
</activation> </activation>
<properties> <build>
<windows.build>true</windows.build> <plugins>
</properties> <plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
<executions>
<execution>
<id>enforce-os</id>
<goals>
<goal>enforce</goal>
</goals>
<configuration>
<rules>
<requireOS>
<family>windows</family>
<message>native-win build only supported on Windows</message>
</requireOS>
</rules>
<fail>true</fail>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<executions>
<execution>
<id>make</id>
<phase>compile</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<mkdir dir="${project.build.directory}/native"/>
<exec executable="cmake" dir="${project.build.directory}/native"
failonerror="true">
<arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_LIBWEBHDFS=${require.libwebhdfs} -DREQUIRE_FUSE=${require.fuse} -G 'Visual Studio 10 Win64'"/>
</exec>
<exec executable="msbuild" dir="${project.build.directory}/native"
failonerror="true">
<arg line="ALL_BUILD.vcxproj /nologo /p:Configuration=Release"/>
</exec>
<!-- Copy for inclusion in distribution. -->
<copy todir="${project.build.directory}/bin">
<fileset dir="${project.build.directory}/native/target/bin/Release"/>
</copy>
</target>
</configuration>
</execution>
<execution>
<id>native_tests</id>
<phase>test</phase>
<goals><goal>run</goal></goals>
<configuration>
<skip>${skipTests}</skip>
<target>
<property name="compile_classpath" refid="maven.compile.classpath"/>
<property name="test_classpath" refid="maven.test.classpath"/>
<macrodef name="run-test">
<attribute name="test"/>
<sequential>
<echo message="Running @{test}"/>
<exec executable="${project.build.directory}/native/Release/@{test}" failonerror="true" dir="${project.build.directory}/native/">
<env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
<!-- HADOOP_HOME required to find winutils. -->
<env key="HADOOP_HOME" value="${hadoop.common.build.dir}"/>
<!-- Make sure hadoop.dll and jvm.dll are on PATH. -->
<env key="PATH" value="${env.PATH};${hadoop.common.build.dir}/bin;${java.home}/jre/bin/server;${java.home}/bin/server"/>
</exec>
<echo message="Finished @{test}"/>
</sequential>
</macrodef>
<run-test test="test_libhdfs_threaded"/>
<echo message="Skipping test_libhdfs_zerocopy"/>
<run-test test="test_native_mini_dfs"/>
</target>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile> </profile>
<profile> <profile>
<id>native</id> <id>native</id>
@ -408,21 +489,25 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<phase>test</phase> <phase>test</phase>
<goals><goal>run</goal></goals> <goals><goal>run</goal></goals>
<configuration> <configuration>
<skip>${skipTests}</skip>
<target> <target>
<property name="compile_classpath" refid="maven.compile.classpath"/> <property name="compile_classpath" refid="maven.compile.classpath"/>
<property name="test_classpath" refid="maven.test.classpath"/> <property name="test_classpath" refid="maven.test.classpath"/>
<exec executable="sh" failonerror="true" dir="${project.build.directory}/native/"> <macrodef name="run-test">
<arg value="-c"/> <attribute name="test"/>
<arg value="[ x$SKIPTESTS = xtrue ] || ${project.build.directory}/native/test_libhdfs_threaded"/> <sequential>
<env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/> <echo message="Running @{test}"/>
<env key="SKIPTESTS" value="${skipTests}"/> <exec executable="${project.build.directory}/native/@{test}" failonerror="true" dir="${project.build.directory}/native/">
</exec> <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
<exec executable="sh" failonerror="true" dir="${project.build.directory}/native/"> <!-- Make sure libhadoop.so is on LD_LIBRARY_PATH. -->
<arg value="-c"/> <env key="LD_LIBRARY_PATH" value="${env.LD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib"/>
<arg value="[ x$SKIPTESTS = xtrue ] || ${project.build.directory}/native/test_native_mini_dfs"/> </exec>
<env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/> <echo message="Finished @{test}"/>
<env key="SKIPTESTS" value="${skipTests}"/> </sequential>
</exec> </macrodef>
<run-test test="test_libhdfs_threaded"/>
<run-test test="test_libhdfs_zerocopy"/>
<run-test test="test_native_mini_dfs"/>
</target> </target>
</configuration> </configuration>
</execution> </execution>

View File

@ -76,9 +76,39 @@ if (NOT GENERATED_JAVAH)
MESSAGE(FATAL_ERROR "You must set the CMake variable GENERATED_JAVAH") MESSAGE(FATAL_ERROR "You must set the CMake variable GENERATED_JAVAH")
endif (NOT GENERATED_JAVAH) endif (NOT GENERATED_JAVAH)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2") if (WIN32)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_GNU_SOURCE") set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /O2")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64")
# Set warning level 4.
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W4")
# Skip "unreferenced formal parameter".
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4100")
# Skip "conditional expression is constant".
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4127")
# Skip deprecated POSIX function warnings.
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_CRT_NONSTDC_NO_DEPRECATE")
# Skip CRT non-secure function warnings. If we can convert usage of
# strerror, getenv and ctime to their secure CRT equivalents, then we can
# re-enable the CRT non-secure function warnings.
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_CRT_SECURE_NO_WARNINGS")
# Omit unneeded headers.
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DWIN32_LEAN_AND_MEAN")
set(OS_DIR main/native/libhdfs/os/windows)
set(OUT_DIR target/bin)
else (WIN32)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_GNU_SOURCE")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64")
set(OS_DIR main/native/libhdfs/os/posix)
set(OS_LINK_LIBRARIES pthread)
set(OUT_DIR target/usr/local/lib)
endif (WIN32)
include_directories( include_directories(
${GENERATED_JAVAH} ${GENERATED_JAVAH}
@ -87,6 +117,7 @@ include_directories(
${JNI_INCLUDE_DIRS} ${JNI_INCLUDE_DIRS}
main/native main/native
main/native/libhdfs main/native/libhdfs
${OS_DIR}
) )
set(_FUSE_DFS_VERSION 0.1.0) set(_FUSE_DFS_VERSION 0.1.0)
@ -96,6 +127,9 @@ add_dual_library(hdfs
main/native/libhdfs/exception.c main/native/libhdfs/exception.c
main/native/libhdfs/jni_helper.c main/native/libhdfs/jni_helper.c
main/native/libhdfs/hdfs.c main/native/libhdfs/hdfs.c
main/native/libhdfs/common/htable.c
${OS_DIR}/mutexes.c
${OS_DIR}/thread_local_storage.c
) )
if (NEED_LINK_DL) if (NEED_LINK_DL)
set(LIB_DL dl) set(LIB_DL dl)
@ -104,17 +138,14 @@ endif(NEED_LINK_DL)
target_link_dual_libraries(hdfs target_link_dual_libraries(hdfs
${JAVA_JVM_LIBRARY} ${JAVA_JVM_LIBRARY}
${LIB_DL} ${LIB_DL}
pthread ${OS_LINK_LIBRARIES}
) )
dual_output_directory(hdfs target/usr/local/lib)
dual_output_directory(hdfs ${OUT_DIR})
set(LIBHDFS_VERSION "0.0.0") set(LIBHDFS_VERSION "0.0.0")
set_target_properties(hdfs PROPERTIES set_target_properties(hdfs PROPERTIES
SOVERSION ${LIBHDFS_VERSION}) SOVERSION ${LIBHDFS_VERSION})
add_library(posix_util
main/native/util/posix_util.c
)
add_executable(test_libhdfs_ops add_executable(test_libhdfs_ops
main/native/libhdfs/test/test_libhdfs_ops.c main/native/libhdfs/test/test_libhdfs_ops.c
) )
@ -156,11 +187,12 @@ target_link_libraries(test_native_mini_dfs
add_executable(test_libhdfs_threaded add_executable(test_libhdfs_threaded
main/native/libhdfs/expect.c main/native/libhdfs/expect.c
main/native/libhdfs/test_libhdfs_threaded.c main/native/libhdfs/test_libhdfs_threaded.c
${OS_DIR}/thread.c
) )
target_link_libraries(test_libhdfs_threaded target_link_libraries(test_libhdfs_threaded
hdfs hdfs
native_mini_dfs native_mini_dfs
pthread ${OS_LINK_LIBRARIES}
) )
add_executable(test_libhdfs_zerocopy add_executable(test_libhdfs_zerocopy
@ -170,17 +202,21 @@ add_executable(test_libhdfs_zerocopy
target_link_libraries(test_libhdfs_zerocopy target_link_libraries(test_libhdfs_zerocopy
hdfs hdfs
native_mini_dfs native_mini_dfs
pthread ${OS_LINK_LIBRARIES}
) )
add_executable(test_libhdfs_vecsum # Skip vecsum on Windows. This could be made to work in the future by
main/native/libhdfs/test/vecsum.c # introducing an abstraction layer over the sys/mman.h functions.
) if (NOT WIN32)
target_link_libraries(test_libhdfs_vecsum add_executable(test_libhdfs_vecsum
hdfs main/native/libhdfs/test/vecsum.c
pthread )
rt target_link_libraries(test_libhdfs_vecsum
) hdfs
pthread
rt
)
endif(NOT WIN32)
IF(REQUIRE_LIBWEBHDFS) IF(REQUIRE_LIBWEBHDFS)
add_subdirectory(contrib/libwebhdfs) add_subdirectory(contrib/libwebhdfs)

View File

@ -1671,9 +1671,11 @@ public static HttpServer2.Builder httpServerTemplateForNNAndJN(
.setKeytabConfKey(getSpnegoKeytabKey(conf, spnegoKeytabFileKey)); .setKeytabConfKey(getSpnegoKeytabKey(conf, spnegoKeytabFileKey));
// initialize the webserver for uploading/downloading files. // initialize the webserver for uploading/downloading files.
LOG.info("Starting web server as: " if (UserGroupInformation.isSecurityEnabled()) {
+ SecurityUtil.getServerPrincipal(conf.get(spnegoUserNameKey), LOG.info("Starting web server as: "
httpAddr.getHostName())); + SecurityUtil.getServerPrincipal(conf.get(spnegoUserNameKey),
httpAddr.getHostName()));
}
if (policy.isHttpEnabled()) { if (policy.isHttpEnabled()) {
if (httpAddr.getPort() == 0) { if (httpAddr.getPort() == 0) {

View File

@ -1,4 +1,4 @@
/* /**
* Licensed to the Apache Software Foundation (ASF) under one * Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file * or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information * distributed with this work for additional information
@ -15,25 +15,30 @@
* See the License for the specific language governing permissions and * See the License for the specific language governing permissions and
* limitations under the License. * limitations under the License.
*/ */
package org.apache.hadoop.hdfs.server.balancer;
package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event; /**
 * Exit status - The value associated with each exit status maps directly
 * to the process's exit code on the command line.
*/
public enum ExitStatus {
SUCCESS(0),
IN_PROGRESS(1),
ALREADY_RUNNING(-1),
NO_MOVE_BLOCK(-2),
NO_MOVE_PROGRESS(-3),
IO_EXCEPTION(-4),
ILLEGAL_ARGUMENTS(-5),
INTERRUPTED(-6);
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; private final int code;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
public class RMAppAttemptNewSavedEvent extends RMAppAttemptEvent { private ExitStatus(int code) {
this.code = code;
final Exception storedException;
public RMAppAttemptNewSavedEvent(ApplicationAttemptId appAttemptId,
Exception storedException) {
super(appAttemptId, RMAppAttemptEventType.ATTEMPT_NEW_SAVED);
this.storedException = storedException;
} }
public Exception getStoredException() { /** @return the command line exit code. */
return storedException; public int getExitCode() {
return code;
} }
}
}
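
A small sketch of how a command-line entry point might honor the enum's contract that each value maps to the process exit code; run() here is a made-up stand-in for the tool's main logic:

    public static void main(String[] args) {
      ExitStatus status;
      try {
        status = run(args);                       // e.g. SUCCESS, NO_MOVE_BLOCK, ...
      } catch (IOException e) {
        status = ExitStatus.IO_EXCEPTION;
      } catch (InterruptedException e) {
        status = ExitStatus.INTERRUPTED;
      }
      System.exit(status.getExitCode());
    }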

View File

@ -34,6 +34,10 @@
import org.apache.hadoop.hdfs.NameNodeProxies; import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
@ -90,14 +94,16 @@ public String getBlockpoolID() {
return blockpoolID; return blockpoolID;
} }
/** @return the namenode proxy. */ /** @return blocks with locations. */
public NamenodeProtocol getNamenode() { public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
return namenode; throws IOException {
return namenode.getBlocks(datanode, size);
} }
/** @return the client proxy. */ /** @return live datanode storage reports. */
public ClientProtocol getClient() { public DatanodeStorageReport[] getLiveDatanodeStorageReport()
return client; throws IOException {
return client.getDatanodeStorageReport(DatanodeReportType.LIVE);
} }
/** @return the key manager */ /** @return the key manager */
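
Instead of handing out the raw NamenodeProtocol/ClientProtocol proxies, the connector now exposes just the two calls the dispatcher needs. A sketch of a consumer, where nnc is an assumed NameNodeConnector reference and the loop body and 2 GB fetch size are chosen only for illustration:

    DatanodeStorageReport[] reports = nnc.getLiveDatanodeStorageReport();
    for (DatanodeStorageReport report : reports) {
      DatanodeInfo dn = report.getDatanodeInfo();
      // ask the NN for up to ~2GB worth of block locations on this datanode
      BlocksWithLocations blocks = nnc.getBlocks(dn, 2L * 1024 * 1024 * 1024);
      // ... decide which replicas to move based on the returned blocks ...
    }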

View File

@ -135,7 +135,10 @@ public class DatanodeManager {
/** The number of stale DataNodes */ /** The number of stale DataNodes */
private volatile int numStaleNodes; private volatile int numStaleNodes;
/** The number of stale storages */
private volatile int numStaleStorages;
/** /**
* Whether or not this cluster has ever consisted of more than 1 rack, * Whether or not this cluster has ever consisted of more than 1 rack,
* according to the NetworkTopology. * according to the NetworkTopology.
@ -1142,6 +1145,22 @@ public int getNumStaleNodes() {
return this.numStaleNodes; return this.numStaleNodes;
} }
/**
* Get the number of content stale storages.
*/
public int getNumStaleStorages() {
return numStaleStorages;
}
/**
* Set the number of content stale storages.
*
* @param numStaleStorages The number of content stale storages.
*/
void setNumStaleStorages(int numStaleStorages) {
this.numStaleStorages = numStaleStorages;
}
/** Fetch live and dead datanodes. */ /** Fetch live and dead datanodes. */
public void fetchDatanodes(final List<DatanodeDescriptor> live, public void fetchDatanodes(final List<DatanodeDescriptor> live,
final List<DatanodeDescriptor> dead, final boolean removeDecommissionNode) { final List<DatanodeDescriptor> dead, final boolean removeDecommissionNode) {

View File

@ -256,6 +256,7 @@ void heartbeatCheck() {
DatanodeID dead = null; DatanodeID dead = null;
// check the number of stale nodes // check the number of stale nodes
int numOfStaleNodes = 0; int numOfStaleNodes = 0;
int numOfStaleStorages = 0;
synchronized(this) { synchronized(this) {
for (DatanodeDescriptor d : datanodes) { for (DatanodeDescriptor d : datanodes) {
if (dead == null && dm.isDatanodeDead(d)) { if (dead == null && dm.isDatanodeDead(d)) {
@ -265,10 +266,17 @@ void heartbeatCheck() {
if (d.isStale(dm.getStaleInterval())) { if (d.isStale(dm.getStaleInterval())) {
numOfStaleNodes++; numOfStaleNodes++;
} }
DatanodeStorageInfo[] storageInfos = d.getStorageInfos();
for(DatanodeStorageInfo storageInfo : storageInfos) {
if (storageInfo.areBlockContentsStale()) {
numOfStaleStorages++;
}
}
} }
// Set the number of stale nodes in the DatanodeManager // Set the number of stale nodes in the DatanodeManager
dm.setNumStaleNodes(numOfStaleNodes); dm.setNumStaleNodes(numOfStaleNodes);
dm.setNumStaleStorages(numOfStaleStorages);
} }
allAlive = dead == null; allAlive = dead == null;

View File

@ -601,7 +601,7 @@ boolean processCommandFromActor(DatanodeCommand cmd,
LOG.info("DatanodeCommand action : DNA_REGISTER from " + actor.nnAddr LOG.info("DatanodeCommand action : DNA_REGISTER from " + actor.nnAddr
+ " with " + actor.state + " state"); + " with " + actor.state + " state");
actor.reRegister(); actor.reRegister();
return true; return false;
} }
writeLock(); writeLock();
try { try {
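
Switching the DNA_REGISTER case from return true to return false matters because the actor stops processing the rest of the commands in that heartbeat once a handler returns false; the DN re-registers first and picks up fresh commands afterwards. A simplified sketch of that calling loop (names condensed from BPServiceActor):

    private boolean processCommand(DatanodeCommand[] cmds) {
      if (cmds != null) {
        for (DatanodeCommand cmd : cmds) {
          if (!bpos.processCommandFromActor(cmd, this)) {
            return false;   // e.g. DNA_REGISTER: re-register, drop the rest of the batch
          }
        }
      }
      return true;
    }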

View File

@ -222,7 +222,19 @@ private void connectToNNAndHandshake() throws IOException {
// Second phase of the handshake with the NN. // Second phase of the handshake with the NN.
register(); register();
} }
// This is useful to make sure NN gets Heartbeat before Blockreport
// upon NN restart while the DN keeps retrying. Otherwise,
// 1. NN restarts.
// 2. Heartbeat RPC will retry and succeed. NN asks DN to reregister.
// 3. After reregistration completes, DN will send Blockreport first.
// 4. Given NN receives Blockreport after Heartbeat, it won't mark
// DatanodeStorageInfo#blockContentsStale to false until the next
// Blockreport.
void scheduleHeartbeat() {
lastHeartbeat = 0;
}
/** /**
* This methods arranges for the data node to send the block report at * This methods arranges for the data node to send the block report at
* the next heartbeat. * the next heartbeat.
@ -902,6 +914,7 @@ void reRegister() throws IOException {
retrieveNamespaceInfo(); retrieveNamespaceInfo();
// and re-register // and re-register
register(); register();
scheduleHeartbeat();
} }
} }
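
Zeroing lastHeartbeat makes the heartbeat branch of the actor's main loop fire on the very next iteration, so after a re-registration the NN sees a heartbeat before the block report, as the comment above describes. A condensed sketch of that ordering (field and method names simplified from offerService()):

    // inside the actor's main while loop, right after reRegister():
    long now = monotonicNow();
    if (now - lastHeartbeat >= dnConf.heartBeatInterval) {   // true right away: lastHeartbeat == 0
      lastHeartbeat = now;
      HeartbeatResponse resp = sendHeartBeat();              // NN handles the heartbeat...
      processCommand(resp.getCommands());
    }
    DatanodeCommand reply = blockReport();                   // ...before it receives the block report
    processCommand(new DatanodeCommand[]{ reply });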

View File

@ -36,8 +36,10 @@
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collection; import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator; import java.util.Iterator;
import java.util.Properties; import java.util.Properties;
import java.util.Set;
import java.util.regex.Matcher; import java.util.regex.Matcher;
import java.util.regex.Pattern; import java.util.regex.Pattern;
@ -106,13 +108,22 @@ private BlockPoolSliceStorage() {
void recoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo, void recoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo,
Collection<File> dataDirs, StartupOption startOpt) throws IOException { Collection<File> dataDirs, StartupOption startOpt) throws IOException {
LOG.info("Analyzing storage directories for bpid " + nsInfo.getBlockPoolID()); LOG.info("Analyzing storage directories for bpid " + nsInfo.getBlockPoolID());
Set<String> existingStorageDirs = new HashSet<String>();
for (int i = 0; i < getNumStorageDirs(); i++) {
existingStorageDirs.add(getStorageDir(i).getRoot().getAbsolutePath());
}
// 1. For each BP data directory analyze the state and // 1. For each BP data directory analyze the state and
// check whether all is consistent before transitioning. // check whether all is consistent before transitioning.
this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
ArrayList<StorageState> dataDirStates = new ArrayList<StorageState>( ArrayList<StorageState> dataDirStates = new ArrayList<StorageState>(
dataDirs.size()); dataDirs.size());
for (Iterator<File> it = dataDirs.iterator(); it.hasNext();) { for (Iterator<File> it = dataDirs.iterator(); it.hasNext();) {
File dataDir = it.next(); File dataDir = it.next();
if (existingStorageDirs.contains(dataDir.getAbsolutePath())) {
LOG.info("Storage directory " + dataDir + " has already been used.");
it.remove();
continue;
}
StorageDirectory sd = new StorageDirectory(dataDir, null, true); StorageDirectory sd = new StorageDirectory(dataDir, null, true);
StorageState curState; StorageState curState;
try { try {

View File

@ -55,6 +55,7 @@
import java.util.Collection; import java.util.Collection;
import java.util.Collections; import java.util.Collections;
import java.util.HashMap; import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator; import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
@ -172,43 +173,99 @@ public String getTrashDirectoryForBlockFile(String bpid, File blockFile) {
} }
/** /**
* Analyze storage directories. * {{@inheritDoc org.apache.hadoop.hdfs.server.common.Storage#writeAll()}}
* Recover from previous transitions if required. */
* Perform fs state transition if necessary depending on the namespace info. private void writeAll(Collection<StorageDirectory> dirs) throws IOException {
* Read storage info. this.layoutVersion = getServiceLayoutVersion();
* <br> for (StorageDirectory dir : dirs) {
* This method should be synchronized between multiple DN threads. Only the writeProperties(dir);
* first DN thread does DN level storage dir recoverTransitionRead. }
* }
/**
* Add a list of volumes to be managed by DataStorage. If the volume is empty,
* format it, otherwise recover it from previous transitions if required.
*
* @param datanode the reference to DataNode.
* @param nsInfo namespace information * @param nsInfo namespace information
* @param dataDirs array of data storage directories * @param dataDirs array of data storage directories
* @param startOpt startup option * @param startOpt startup option
* @throws IOException * @throws IOException
*/ */
synchronized void recoverTransitionRead(DataNode datanode, synchronized void addStorageLocations(DataNode datanode,
NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs, NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs,
StartupOption startOpt) StartupOption startOpt)
throws IOException { throws IOException {
if (initialized) { // Similar to recoverTransitionRead, it first ensures the datanode level
// DN storage has been initialized, no need to do anything // format is completed.
return; List<StorageLocation> tmpDataDirs =
new ArrayList<StorageLocation>(dataDirs);
addStorageLocations(datanode, nsInfo, tmpDataDirs, startOpt, false, true);
Collection<File> bpDataDirs = new ArrayList<File>();
String bpid = nsInfo.getBlockPoolID();
for (StorageLocation dir : dataDirs) {
File dnRoot = dir.getFile();
File bpRoot = BlockPoolSliceStorage.getBpRoot(bpid, new File(dnRoot,
STORAGE_DIR_CURRENT));
bpDataDirs.add(bpRoot);
} }
LOG.info("Data-node version: " + HdfsConstants.DATANODE_LAYOUT_VERSION // mkdir for the list of BlockPoolStorage
+ " and name-node layout version: " + nsInfo.getLayoutVersion()); makeBlockPoolDataDir(bpDataDirs, null);
BlockPoolSliceStorage bpStorage = this.bpStorageMap.get(bpid);
// 1. For each data directory calculate its state and if (bpStorage == null) {
// check whether all is consistent before transitioning. bpStorage = new BlockPoolSliceStorage(
// Format and recover. nsInfo.getNamespaceID(), bpid, nsInfo.getCTime(),
this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size()); nsInfo.getClusterID());
ArrayList<StorageState> dataDirStates = new ArrayList<StorageState>(dataDirs.size()); }
bpStorage.recoverTransitionRead(datanode, nsInfo, bpDataDirs, startOpt);
addBlockPoolStorage(bpid, bpStorage);
}
/**
* Add a list of volumes to be managed by this DataStorage. If the volume is
* empty, it formats the volume, otherwise it recovers it from previous
* transitions if required.
*
* If isInitialize is false, only the directories that have finished the
* doTransition() process will be added into DataStorage.
*
* @param datanode the reference to DataNode.
* @param nsInfo namespace information
* @param dataDirs array of data storage directories
* @param startOpt startup option
* @param isInitialize whether it is called when DataNode starts up.
* @throws IOException
*/
private synchronized void addStorageLocations(DataNode datanode,
NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs,
StartupOption startOpt, boolean isInitialize, boolean ignoreExistingDirs)
throws IOException {
Set<String> existingStorageDirs = new HashSet<String>();
for (int i = 0; i < getNumStorageDirs(); i++) {
existingStorageDirs.add(getStorageDir(i).getRoot().getAbsolutePath());
}
// 1. For each data directory calculate its state and check whether all is
// consistent before transitioning. Format and recover.
ArrayList<StorageState> dataDirStates =
new ArrayList<StorageState>(dataDirs.size());
List<StorageDirectory> addedStorageDirectories =
new ArrayList<StorageDirectory>();
for(Iterator<StorageLocation> it = dataDirs.iterator(); it.hasNext();) { for(Iterator<StorageLocation> it = dataDirs.iterator(); it.hasNext();) {
File dataDir = it.next().getFile(); File dataDir = it.next().getFile();
if (existingStorageDirs.contains(dataDir.getAbsolutePath())) {
LOG.info("Storage directory " + dataDir + " has already been used.");
it.remove();
continue;
}
StorageDirectory sd = new StorageDirectory(dataDir); StorageDirectory sd = new StorageDirectory(dataDir);
StorageState curState; StorageState curState;
try { try {
curState = sd.analyzeStorage(startOpt, this); curState = sd.analyzeStorage(startOpt, this);
// sd is locked but not opened // sd is locked but not opened
switch(curState) { switch (curState) {
case NORMAL: case NORMAL:
break; break;
case NON_EXISTENT: case NON_EXISTENT:
@ -217,7 +274,8 @@ synchronized void recoverTransitionRead(DataNode datanode,
it.remove(); it.remove();
continue; continue;
case NOT_FORMATTED: // format case NOT_FORMATTED: // format
LOG.info("Storage directory " + dataDir + " is not formatted"); LOG.info("Storage directory " + dataDir + " is not formatted for "
+ nsInfo.getBlockPoolID());
LOG.info("Formatting ..."); LOG.info("Formatting ...");
format(sd, nsInfo, datanode.getDatanodeUuid()); format(sd, nsInfo, datanode.getDatanodeUuid());
break; break;
@ -231,33 +289,82 @@ synchronized void recoverTransitionRead(DataNode datanode,
//continue with other good dirs //continue with other good dirs
continue; continue;
} }
// add to the storage list if (isInitialize) {
addStorageDir(sd); addStorageDir(sd);
}
addedStorageDirectories.add(sd);
dataDirStates.add(curState); dataDirStates.add(curState);
} }
if (dataDirs.size() == 0 || dataDirStates.size() == 0) // none of the data dirs exist if (dataDirs.size() == 0 || dataDirStates.size() == 0) {
// none of the data dirs exist
if (ignoreExistingDirs) {
return;
}
throw new IOException( throw new IOException(
"All specified directories are not accessible or do not exist."); "All specified directories are not accessible or do not exist.");
}
// 2. Do transitions // 2. Do transitions
// Each storage directory is treated individually. // Each storage directory is treated individually.
// During startup some of them can upgrade or rollback // During startup some of them can upgrade or rollback
// while others could be uptodate for the regular startup. // while others could be up-to-date for the regular startup.
try { for (Iterator<StorageDirectory> it = addedStorageDirectories.iterator();
for (int idx = 0; idx < getNumStorageDirs(); idx++) { it.hasNext(); ) {
doTransition(datanode, getStorageDir(idx), nsInfo, startOpt); StorageDirectory sd = it.next();
createStorageID(getStorageDir(idx)); try {
doTransition(datanode, sd, nsInfo, startOpt);
createStorageID(sd);
} catch (IOException e) {
if (!isInitialize) {
sd.unlock();
it.remove();
continue;
}
unlockAll();
throw e;
} }
} catch (IOException e) {
unlockAll();
throw e;
} }
// 3. Update all storages. Some of them might have just been formatted. // 3. Update all successfully loaded storages. Some of them might have just
this.writeAll(); // been formatted.
this.writeAll(addedStorageDirectories);
// 4. Make newly loaded storage directories visible for service.
if (!isInitialize) {
this.storageDirs.addAll(addedStorageDirectories);
}
}
/**
* Analyze storage directories.
* Recover from previous transitions if required.
* Perform fs state transition if necessary depending on the namespace info.
* Read storage info.
* <br>
* This method should be synchronized between multiple DN threads. Only the
* first DN thread does DN level storage dir recoverTransitionRead.
*
* @param nsInfo namespace information
* @param dataDirs array of data storage directories
* @param startOpt startup option
* @throws IOException
*/
synchronized void recoverTransitionRead(DataNode datanode,
NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs,
StartupOption startOpt)
throws IOException {
if (initialized) {
// DN storage has been initialized, no need to do anything
return;
}
LOG.info("DataNode version: " + HdfsConstants.DATANODE_LAYOUT_VERSION
+ " and NameNode layout version: " + nsInfo.getLayoutVersion());
this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
addStorageLocations(datanode, nsInfo, dataDirs, startOpt, true, false);
// 4. mark DN storage is initialized // mark DN storage is initialized
this.initialized = true; this.initialized = true;
} }
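
recoverTransitionRead() is now a thin wrapper around the private addStorageLocations() with isInitialize=true, and the package-level addStorageLocations() overload is what later allows volumes to be added while the DN is running. A sketch of what a hot-add call site could look like; the directory string is made up, and the final call refers to the FsDatasetSpi#addVolumes() method added further down:

    StorageLocation newDir = StorageLocation.parse("[DISK]/data/new-disk");  // hypothetical path
    Collection<StorageLocation> dirs = Arrays.asList(newDir);
    // Format the directory if it is empty, otherwise recover it, then track it:
    dataStorage.addStorageLocations(datanode, nsInfo, dirs, StartupOption.REGULAR);
    // Make the new volume visible to the block dataset as well:
    datanode.getFSDataset().addVolumes(dirs);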

View File

@ -78,7 +78,7 @@ public File getFile() {
* @return A StorageLocation object if successfully parsed, null otherwise. * @return A StorageLocation object if successfully parsed, null otherwise.
* Does not throw any exceptions. * Does not throw any exceptions.
*/ */
static StorageLocation parse(String rawLocation) public static StorageLocation parse(String rawLocation)
throws IOException, SecurityException { throws IOException, SecurityException {
Matcher matcher = regex.matcher(rawLocation); Matcher matcher = regex.matcher(rawLocation);
StorageType storageType = StorageType.DEFAULT; StorageType storageType = StorageType.DEFAULT;
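
parse() turning public lets other components and tests build StorageLocations from dfs.datanode.data.dir-style strings. A brief usage sketch with made-up paths:

    StorageLocation plain = StorageLocation.parse("/grid/0/dn");        // type defaults to StorageType.DEFAULT
    StorageLocation ssd   = StorageLocation.parse("[SSD]/grid/1/dn");   // bracketed prefix selects the type
    // ssd.getStorageType() == StorageType.SSD; ssd.getFile() points at /grid/1/dn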

View File

@ -22,6 +22,7 @@
import java.io.FileDescriptor; import java.io.FileDescriptor;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream; import java.io.InputStream;
import java.util.Collection;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
@ -39,6 +40,7 @@
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica; import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.apache.hadoop.hdfs.server.datanode.Replica; import org.apache.hadoop.hdfs.server.datanode.Replica;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface; import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory;
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean; import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
@ -91,6 +93,10 @@ public RollingLogs createRollingLogs(String bpid, String prefix
/** @return a list of volumes. */ /** @return a list of volumes. */
public List<V> getVolumes(); public List<V> getVolumes();
/** Add an array of StorageLocation to FsDataset. */
public void addVolumes(Collection<StorageLocation> volumes)
throws IOException;
/** @return a storage with the given storage ID */ /** @return a storage with the given storage ID */
public DatanodeStorage getStorage(final String storageUuid); public DatanodeStorage getStorage(final String storageUuid);

View File

@ -61,6 +61,7 @@ class FsDatasetAsyncDiskService {
private static final long THREADS_KEEP_ALIVE_SECONDS = 60; private static final long THREADS_KEEP_ALIVE_SECONDS = 60;
private final DataNode datanode; private final DataNode datanode;
private final ThreadGroup threadGroup;
private Map<File, ThreadPoolExecutor> executors private Map<File, ThreadPoolExecutor> executors
= new HashMap<File, ThreadPoolExecutor>(); = new HashMap<File, ThreadPoolExecutor>();
@ -70,42 +71,52 @@ class FsDatasetAsyncDiskService {
* *
* The AsyncDiskServices uses one ThreadPool per volume to do the async * The AsyncDiskServices uses one ThreadPool per volume to do the async
* disk operations. * disk operations.
*
* @param volumes The roots of the data volumes.
*/ */
FsDatasetAsyncDiskService(DataNode datanode, File[] volumes) { FsDatasetAsyncDiskService(DataNode datanode) {
this.datanode = datanode; this.datanode = datanode;
this.threadGroup = new ThreadGroup(getClass().getSimpleName());
}
final ThreadGroup threadGroup = new ThreadGroup(getClass().getSimpleName()); private void addExecutorForVolume(final File volume) {
// Create one ThreadPool per volume ThreadFactory threadFactory = new ThreadFactory() {
for (int v = 0 ; v < volumes.length; v++) { int counter = 0;
final File vol = volumes[v];
ThreadFactory threadFactory = new ThreadFactory() {
int counter = 0;
@Override @Override
public Thread newThread(Runnable r) { public Thread newThread(Runnable r) {
int thisIndex; int thisIndex;
synchronized (this) { synchronized (this) {
thisIndex = counter++; thisIndex = counter++;
} }
Thread t = new Thread(threadGroup, r); Thread t = new Thread(threadGroup, r);
t.setName("Async disk worker #" + thisIndex + t.setName("Async disk worker #" + thisIndex +
" for volume " + vol); " for volume " + volume);
return t; return t;
} }
}; };
ThreadPoolExecutor executor = new ThreadPoolExecutor( ThreadPoolExecutor executor = new ThreadPoolExecutor(
CORE_THREADS_PER_VOLUME, MAXIMUM_THREADS_PER_VOLUME, CORE_THREADS_PER_VOLUME, MAXIMUM_THREADS_PER_VOLUME,
THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS, THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>(), threadFactory); new LinkedBlockingQueue<Runnable>(), threadFactory);
// This can reduce the number of running threads // This can reduce the number of running threads
executor.allowCoreThreadTimeOut(true); executor.allowCoreThreadTimeOut(true);
executors.put(vol, executor); executors.put(volume, executor);
}
/**
* Starts AsyncDiskService for a new volume
* @param volume the root of the new data volume.
*/
synchronized void addVolume(File volume) {
if (executors == null) {
throw new RuntimeException("AsyncDiskService is already shutdown");
} }
ThreadPoolExecutor executor = executors.get(volume);
if (executor != null) {
throw new RuntimeException("Volume " + volume + " is already existed.");
}
addExecutorForVolume(volume);
} }
synchronized long countPendingDeletions() { synchronized long countPendingDeletions() {
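
The service now starts empty and grows one executor per volume as addVolume() is called, which is what makes hot-added volumes possible. Task submission stays keyed by the volume root, so a slow disk only backs up its own queue; a simplified sketch of that dispatch helper (the class's execute() method, abridged here):

    synchronized void execute(File volumeRoot, Runnable task) {
      if (executors == null) {
        throw new RuntimeException("AsyncDiskService is already shutdown");
      }
      ThreadPoolExecutor executor = executors.get(volumeRoot);
      if (executor == null) {
        throw new RuntimeException("Cannot find volume " + volumeRoot);
      }
      executor.execute(task);
    }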

View File

@ -202,6 +202,7 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
final Map<String, DatanodeStorage> storageMap; final Map<String, DatanodeStorage> storageMap;
final FsDatasetAsyncDiskService asyncDiskService; final FsDatasetAsyncDiskService asyncDiskService;
final FsDatasetCache cacheManager; final FsDatasetCache cacheManager;
private final Configuration conf;
private final int validVolsRequired; private final int validVolsRequired;
final ReplicaMap volumeMap; final ReplicaMap volumeMap;
@ -216,6 +217,7 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
) throws IOException { ) throws IOException {
this.datanode = datanode; this.datanode = datanode;
this.dataStorage = storage; this.dataStorage = storage;
this.conf = conf;
// The number of volumes required for operation is the total number // The number of volumes required for operation is the total number
// of volumes minus the number of failed volumes we can tolerate. // of volumes minus the number of failed volumes we can tolerate.
final int volFailuresTolerated = final int volFailuresTolerated =
@ -242,38 +244,76 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
} }
storageMap = new HashMap<String, DatanodeStorage>(); storageMap = new HashMap<String, DatanodeStorage>();
final List<FsVolumeImpl> volArray = new ArrayList<FsVolumeImpl>(
storage.getNumStorageDirs());
for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
Storage.StorageDirectory sd = storage.getStorageDir(idx);
final File dir = sd.getCurrentDir();
final StorageType storageType = getStorageTypeFromLocations(dataLocations, sd.getRoot());
volArray.add(new FsVolumeImpl(this, sd.getStorageUuid(), dir, conf,
storageType));
LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
storageMap.put(sd.getStorageUuid(),
new DatanodeStorage(sd.getStorageUuid(), DatanodeStorage.State.NORMAL, storageType));
}
volumeMap = new ReplicaMap(this); volumeMap = new ReplicaMap(this);
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
final VolumeChoosingPolicy<FsVolumeImpl> blockChooserImpl = final VolumeChoosingPolicy<FsVolumeImpl> blockChooserImpl =
ReflectionUtils.newInstance(conf.getClass( ReflectionUtils.newInstance(conf.getClass(
DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY, DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY,
RoundRobinVolumeChoosingPolicy.class, RoundRobinVolumeChoosingPolicy.class,
VolumeChoosingPolicy.class), conf); VolumeChoosingPolicy.class), conf);
volumes = new FsVolumeList(volArray, volsFailed, blockChooserImpl); volumes = new FsVolumeList(volsFailed, blockChooserImpl);
volumes.initializeReplicaMaps(volumeMap); asyncDiskService = new FsDatasetAsyncDiskService(datanode);
File[] roots = new File[storage.getNumStorageDirs()];
for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) { for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
roots[idx] = storage.getStorageDir(idx).getCurrentDir(); addVolume(dataLocations, storage.getStorageDir(idx));
} }
asyncDiskService = new FsDatasetAsyncDiskService(datanode, roots);
cacheManager = new FsDatasetCache(this); cacheManager = new FsDatasetCache(this);
registerMBean(datanode.getDatanodeUuid()); registerMBean(datanode.getDatanodeUuid());
} }
private void addVolume(Collection<StorageLocation> dataLocations,
Storage.StorageDirectory sd) throws IOException {
final File dir = sd.getCurrentDir();
final StorageType storageType =
getStorageTypeFromLocations(dataLocations, sd.getRoot());
// If an IOException is raised from FsVolumeImpl() or getVolumeMap(), nothing
// needs to be rolled back to keep the various data structures, e.g.,
// storageMap and asyncDiskService, consistent.
FsVolumeImpl fsVolume = new FsVolumeImpl(
this, sd.getStorageUuid(), dir, this.conf, storageType);
fsVolume.getVolumeMap(volumeMap);
volumes.addVolume(fsVolume);
storageMap.put(sd.getStorageUuid(),
new DatanodeStorage(sd.getStorageUuid(),
DatanodeStorage.State.NORMAL,
storageType));
asyncDiskService.addVolume(sd.getCurrentDir());
LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
}
/**
* Add an array of StorageLocation to FsDataset.
*
* @pre dataStorage must have these volumes.
* @param volumes
* @throws IOException
*/
@Override
public synchronized void addVolumes(Collection<StorageLocation> volumes)
throws IOException {
final Collection<StorageLocation> dataLocations =
DataNode.getStorageLocations(this.conf);
Map<String, Storage.StorageDirectory> allStorageDirs =
new HashMap<String, Storage.StorageDirectory>();
for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
allStorageDirs.put(sd.getRoot().getAbsolutePath(), sd);
}
for (StorageLocation vol : volumes) {
String key = vol.getFile().getAbsolutePath();
if (!allStorageDirs.containsKey(key)) {
LOG.warn("Attempt to add an invalid volume: " + vol.getFile());
} else {
addVolume(dataLocations, allStorageDirs.get(key));
}
}
}
private StorageType getStorageTypeFromLocations( private StorageType getStorageTypeFromLocations(
Collection<StorageLocation> dataLocations, File dir) { Collection<StorageLocation> dataLocations, File dir) {
for (StorageLocation dataLocation : dataLocations) { for (StorageLocation dataLocation : dataLocations) {

View File

@ -40,9 +40,8 @@ class FsVolumeList {
private final VolumeChoosingPolicy<FsVolumeImpl> blockChooser; private final VolumeChoosingPolicy<FsVolumeImpl> blockChooser;
private volatile int numFailedVolumes; private volatile int numFailedVolumes;
FsVolumeList(List<FsVolumeImpl> volumes, int failedVols, FsVolumeList(int failedVols,
VolumeChoosingPolicy<FsVolumeImpl> blockChooser) { VolumeChoosingPolicy<FsVolumeImpl> blockChooser) {
this.volumes = Collections.unmodifiableList(volumes);
this.blockChooser = blockChooser; this.blockChooser = blockChooser;
this.numFailedVolumes = failedVols; this.numFailedVolumes = failedVols;
} }
@ -101,12 +100,6 @@ long getRemaining() throws IOException {
} }
return remaining; return remaining;
} }
void initializeReplicaMaps(ReplicaMap globalReplicaMap) throws IOException {
for (FsVolumeImpl v : volumes) {
v.getVolumeMap(globalReplicaMap);
}
}
void getAllVolumesMap(final String bpid, final ReplicaMap volumeMap) throws IOException { void getAllVolumesMap(final String bpid, final ReplicaMap volumeMap) throws IOException {
long totalStartTime = Time.monotonicNow(); long totalStartTime = Time.monotonicNow();
@ -205,6 +198,19 @@ public String toString() {
return volumes.toString(); return volumes.toString();
} }
/**
* Dynamically add new volumes to the existing volumes that this DN manages.
* @param newVolume the instance of new FsVolumeImpl.
*/
synchronized void addVolume(FsVolumeImpl newVolume) {
// Make a copy of volumes to add new volumes.
final List<FsVolumeImpl> volumeList = volumes == null ?
new ArrayList<FsVolumeImpl>() :
new ArrayList<FsVolumeImpl>(volumes);
volumeList.add(newVolume);
volumes = Collections.unmodifiableList(volumeList);
FsDatasetImpl.LOG.info("Added new volume: " + newVolume.toString());
}
void addBlockPool(final String bpid, final Configuration conf) throws IOException { void addBlockPool(final String bpid, final Configuration conf) throws IOException {
long totalStartTime = Time.monotonicNow(); long totalStartTime = Time.monotonicNow();
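
volumes remains a volatile, unmodifiable list, so readers never take a lock; addVolume() builds a copy and publishes it atomically. The same copy-on-write idiom in isolation (a generic sketch, not the class's exact fields):

    private volatile List<FsVolumeImpl> volumes = Collections.emptyList();

    synchronized void addVolume(FsVolumeImpl newVolume) {
      List<FsVolumeImpl> copy = new ArrayList<FsVolumeImpl>(volumes);
      copy.add(newVolume);
      // Readers see either the old list or the new one, never a half-built list.
      volumes = Collections.unmodifiableList(copy);
    }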

View File

@ -786,8 +786,6 @@ private static void validateRenameSource(String src, INodesInPath srcIIP)
checkSnapshot(srcInode, null); checkSnapshot(srcInode, null);
} }
private class RenameOperation { private class RenameOperation {
private final INodesInPath srcIIP; private final INodesInPath srcIIP;
private final INodesInPath dstIIP; private final INodesInPath dstIIP;
@ -820,7 +818,7 @@ private RenameOperation(String src, String dst, INodesInPath srcIIP, INodesInPat
// snapshot is taken on the dst tree, changes will be recorded in the latest // snapshot is taken on the dst tree, changes will be recorded in the latest
// snapshot of the src tree. // snapshot of the src tree.
if (isSrcInSnapshot) { if (isSrcInSnapshot) {
srcChild = srcChild.recordModification(srcIIP.getLatestSnapshotId()); srcChild.recordModification(srcIIP.getLatestSnapshotId());
} }
// check srcChild for reference // check srcChild for reference
@ -950,8 +948,7 @@ Block[] unprotectedSetReplication(String src, short replication,
updateCount(iip, 0, dsDelta, true); updateCount(iip, 0, dsDelta, true);
} }
file = file.setFileReplication(replication, iip.getLatestSnapshotId(), file.setFileReplication(replication, iip.getLatestSnapshotId());
inodeMap);
final short newBR = file.getBlockReplication(); final short newBR = file.getBlockReplication();
// check newBR < oldBR case. // check newBR < oldBR case.
@ -1234,8 +1231,7 @@ long unprotectedDelete(INodesInPath iip, BlocksMapUpdateInfo collectedBlocks,
// record modification // record modification
final int latestSnapshot = iip.getLatestSnapshotId(); final int latestSnapshot = iip.getLatestSnapshotId();
targetNode = targetNode.recordModification(latestSnapshot); targetNode.recordModification(latestSnapshot);
iip.setLastINode(targetNode);
// Remove the node from the namespace // Remove the node from the namespace
long removed = removeLastINode(iip); long removed = removeLastINode(iip);
@ -2161,7 +2157,7 @@ INodeDirectory unprotectedSetQuota(String src, long nsQuota, long dsQuota)
} }
final int latest = iip.getLatestSnapshotId(); final int latest = iip.getLatestSnapshotId();
dirNode = dirNode.recordModification(latest); dirNode.recordModification(latest);
dirNode.setQuota(nsQuota, dsQuota); dirNode.setQuota(nsQuota, dsQuota);
return dirNode; return dirNode;
} }

View File

@ -2733,7 +2733,7 @@ LocatedBlock prepareFileForWrite(String src, INodeFile file,
boolean writeToEditLog, boolean writeToEditLog,
int latestSnapshot, boolean logRetryCache) int latestSnapshot, boolean logRetryCache)
throws IOException { throws IOException {
file = file.recordModification(latestSnapshot); file.recordModification(latestSnapshot);
final INodeFile cons = file.toUnderConstruction(leaseHolder, clientMachine); final INodeFile cons = file.toUnderConstruction(leaseHolder, clientMachine);
leaseManager.addLease(cons.getFileUnderConstructionFeature() leaseManager.addLease(cons.getFileUnderConstructionFeature()
@ -4441,7 +4441,7 @@ private void finalizeINodeFileUnderConstruction(String src,
Preconditions.checkArgument(uc != null); Preconditions.checkArgument(uc != null);
leaseManager.removeLease(uc.getClientName(), src); leaseManager.removeLease(uc.getClientName(), src);
pendingFile = pendingFile.recordModification(latestSnapshot); pendingFile.recordModification(latestSnapshot);
// The file is no longer pending. // The file is no longer pending.
// Create permanent INode, update blocks. No need to replace the inode here // Create permanent INode, update blocks. No need to replace the inode here
@ -6341,7 +6341,6 @@ void shutdown() {
blockManager.shutdown(); blockManager.shutdown();
} }
} }
@Override // FSNamesystemMBean @Override // FSNamesystemMBean
public int getNumLiveDataNodes() { public int getNumLiveDataNodes() {
@ -6388,6 +6387,15 @@ public int getNumStaleDataNodes() {
return getBlockManager().getDatanodeManager().getNumStaleNodes(); return getBlockManager().getDatanodeManager().getNumStaleNodes();
} }
/**
 * Storages are marked as "content stale" after the NN restarts or fails over,
 * and before the NN receives the first Heartbeat followed by the first Blockreport.
*/
@Override // FSNamesystemMBean
public int getNumStaleStorages() {
return getBlockManager().getDatanodeManager().getNumStaleStorages();
}
/** /**
* Sets the current generation stamp for legacy blocks * Sets the current generation stamp for legacy blocks
*/ */
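
Because the counter is part of FSNamesystemMBean, it is readable over JMX alongside the other FSNamesystemState attributes. A sketch of reading it in-process; the MBean object name follows the usual Hadoop naming convention and is an assumption here, not taken from this patch:

    // assumes: import java.lang.management.ManagementFactory;
    //          import javax.management.MBeanServer;
    //          import javax.management.ObjectName;
    static int readNumStaleStorages() throws Exception {
      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
      ObjectName fsState =
          new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
      return ((Number) mbs.getAttribute(fsState, "NumStaleStorages")).intValue();
    }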

View File

@ -97,9 +97,9 @@ public final String getUserName() {
/** Set user */ /** Set user */
final INode setUser(String user, int latestSnapshotId) final INode setUser(String user, int latestSnapshotId)
throws QuotaExceededException { throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latestSnapshotId); recordModification(latestSnapshotId);
nodeToUpdate.setUser(user); setUser(user);
return nodeToUpdate; return this;
} }
/** /**
* @param snapshotId * @param snapshotId
@ -122,9 +122,9 @@ public final String getGroupName() {
/** Set group */ /** Set group */
final INode setGroup(String group, int latestSnapshotId) final INode setGroup(String group, int latestSnapshotId)
throws QuotaExceededException { throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latestSnapshotId); recordModification(latestSnapshotId);
nodeToUpdate.setGroup(group); setGroup(group);
return nodeToUpdate; return this;
} }
/** /**
@ -148,9 +148,9 @@ public final FsPermission getFsPermission() {
/** Set the {@link FsPermission} of this {@link INode} */ /** Set the {@link FsPermission} of this {@link INode} */
INode setPermission(FsPermission permission, int latestSnapshotId) INode setPermission(FsPermission permission, int latestSnapshotId)
throws QuotaExceededException { throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latestSnapshotId); recordModification(latestSnapshotId);
nodeToUpdate.setPermission(permission); setPermission(permission);
return nodeToUpdate; return this;
} }
abstract AclFeature getAclFeature(int snapshotId); abstract AclFeature getAclFeature(int snapshotId);
@ -164,18 +164,18 @@ public final AclFeature getAclFeature() {
final INode addAclFeature(AclFeature aclFeature, int latestSnapshotId) final INode addAclFeature(AclFeature aclFeature, int latestSnapshotId)
throws QuotaExceededException { throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latestSnapshotId); recordModification(latestSnapshotId);
nodeToUpdate.addAclFeature(aclFeature); addAclFeature(aclFeature);
return nodeToUpdate; return this;
} }
abstract void removeAclFeature(); abstract void removeAclFeature();
final INode removeAclFeature(int latestSnapshotId) final INode removeAclFeature(int latestSnapshotId)
throws QuotaExceededException { throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latestSnapshotId); recordModification(latestSnapshotId);
nodeToUpdate.removeAclFeature(); removeAclFeature();
return nodeToUpdate; return this;
} }
/** /**
@ -199,9 +199,9 @@ public final XAttrFeature getXAttrFeature() {
final INode addXAttrFeature(XAttrFeature xAttrFeature, int latestSnapshotId) final INode addXAttrFeature(XAttrFeature xAttrFeature, int latestSnapshotId)
throws QuotaExceededException { throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latestSnapshotId); recordModification(latestSnapshotId);
nodeToUpdate.addXAttrFeature(xAttrFeature); addXAttrFeature(xAttrFeature);
return nodeToUpdate; return this;
} }
/** /**
@ -211,9 +211,9 @@ final INode addXAttrFeature(XAttrFeature xAttrFeature, int latestSnapshotId)
final INode removeXAttrFeature(int lastestSnapshotId) final INode removeXAttrFeature(int lastestSnapshotId)
throws QuotaExceededException { throws QuotaExceededException {
final INode nodeToUpdate = recordModification(lastestSnapshotId); recordModification(lastestSnapshotId);
nodeToUpdate.removeXAttrFeature(); removeXAttrFeature();
return nodeToUpdate; return this;
} }
/** /**
@ -298,11 +298,8 @@ public final boolean shouldRecordInSrcSnapshot(final int latestInDst) {
* @param latestSnapshotId The id of the latest snapshot that has been taken. * @param latestSnapshotId The id of the latest snapshot that has been taken.
* Note that it is {@link Snapshot#CURRENT_STATE_ID} * Note that it is {@link Snapshot#CURRENT_STATE_ID}
* if no snapshots have been taken. * if no snapshots have been taken.
* @return The current inode, which usually is the same object of this inode.
* However, in some cases, this inode may be replaced with a new inode
* for maintaining snapshots. The current inode is then the new inode.
*/ */
abstract INode recordModification(final int latestSnapshotId) abstract void recordModification(final int latestSnapshotId)
throws QuotaExceededException; throws QuotaExceededException;
/** Check whether it's a reference. */ /** Check whether it's a reference. */
@ -652,9 +649,9 @@ public abstract INode updateModificationTime(long mtime, int latestSnapshotId)
/** Set the last modification time of inode. */ /** Set the last modification time of inode. */
public final INode setModificationTime(long modificationTime, public final INode setModificationTime(long modificationTime,
int latestSnapshotId) throws QuotaExceededException { int latestSnapshotId) throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latestSnapshotId); recordModification(latestSnapshotId);
nodeToUpdate.setModificationTime(modificationTime); setModificationTime(modificationTime);
return nodeToUpdate; return this;
} }
/** /**
@ -682,9 +679,9 @@ public final long getAccessTime() {
*/ */
public final INode setAccessTime(long accessTime, int latestSnapshotId) public final INode setAccessTime(long accessTime, int latestSnapshotId)
throws QuotaExceededException { throws QuotaExceededException {
final INode nodeToUpdate = recordModification(latestSnapshotId); recordModification(latestSnapshotId);
nodeToUpdate.setAccessTime(accessTime); setAccessTime(accessTime);
return nodeToUpdate; return this;
} }
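
Since the snapshot bookkeeping no longer replaces the inode object, recordModification() returns void and every setter simply mutates this (HDFS-6838). The calling pattern changes accordingly; a before/after sketch using setFileReplication() as the example:

    // Before: callers had to use the returned inode, which might be a replacement.
    INodeFile toUpdate = file.recordModification(latestSnapshotId);
    toUpdate.setFileReplication(replication);

    // After: record the modification in place, then mutate this inode directly.
    file.recordModification(latestSnapshotId);
    file.setFileReplication(replication);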

View File

@ -318,7 +318,7 @@ INodeReference.WithName replaceChild4ReferenceWithName(INode oldChild,
} }
@Override @Override
public INodeDirectory recordModification(int latestSnapshotId) public void recordModification(int latestSnapshotId)
throws QuotaExceededException { throws QuotaExceededException {
if (isInLatestSnapshot(latestSnapshotId) if (isInLatestSnapshot(latestSnapshotId)
&& !shouldRecordInSrcSnapshot(latestSnapshotId)) { && !shouldRecordInSrcSnapshot(latestSnapshotId)) {
@ -330,7 +330,6 @@ public INodeDirectory recordModification(int latestSnapshotId)
// record self in the diff list if necessary // record self in the diff list if necessary
sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null); sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null);
} }
return this;
} }
/** /**

View File

@ -284,7 +284,7 @@ public INodeFileAttributes getSnapshotINode(final int snapshotId) {
} }
@Override @Override
public INodeFile recordModification(final int latestSnapshotId) public void recordModification(final int latestSnapshotId)
throws QuotaExceededException { throws QuotaExceededException {
if (isInLatestSnapshot(latestSnapshotId) if (isInLatestSnapshot(latestSnapshotId)
&& !shouldRecordInSrcSnapshot(latestSnapshotId)) { && !shouldRecordInSrcSnapshot(latestSnapshotId)) {
@ -296,7 +296,6 @@ public INodeFile recordModification(final int latestSnapshotId)
// record self in the diff list if necessary // record self in the diff list if necessary
sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null); sf.getDiffs().saveSelf2Snapshot(latestSnapshotId, this, null);
} }
return this;
} }
public FileDiffList getDiffs() { public FileDiffList getDiffs() {
@ -344,11 +343,10 @@ public final void setFileReplication(short replication) {
/** Set the replication factor of this file. */ /** Set the replication factor of this file. */
public final INodeFile setFileReplication(short replication, public final INodeFile setFileReplication(short replication,
int latestSnapshotId, final INodeMap inodeMap) int latestSnapshotId) throws QuotaExceededException {
throws QuotaExceededException { recordModification(latestSnapshotId);
final INodeFile nodeToUpdate = recordModification(latestSnapshotId); setFileReplication(replication);
nodeToUpdate.setFileReplication(replication); return this;
return nodeToUpdate;
} }
/** @return preferred block size (in bytes) of the file. */ /** @return preferred block size (in bytes) of the file. */

View File

@ -93,9 +93,8 @@ public INode get(long id) {
"", "", new FsPermission((short) 0)), 0, 0) { "", "", new FsPermission((short) 0)), 0, 0) {
@Override @Override
INode recordModification(int latestSnapshotId) void recordModification(int latestSnapshotId)
throws QuotaExceededException { throws QuotaExceededException {
return null;
} }
@Override @Override


@ -287,11 +287,9 @@ public final void setAccessTime(long accessTime) {
} }
@Override @Override
final INode recordModification(int latestSnapshotId) final void recordModification(int latestSnapshotId)
throws QuotaExceededException { throws QuotaExceededException {
referred.recordModification(latestSnapshotId); referred.recordModification(latestSnapshotId);
// reference is never replaced
return this;
} }
@Override // used by WithCount @Override // used by WithCount


@ -47,12 +47,11 @@ public class INodeSymlink extends INodeWithAdditionalFields {
} }
@Override @Override
INode recordModification(int latestSnapshotId) throws QuotaExceededException { void recordModification(int latestSnapshotId) throws QuotaExceededException {
if (isInLatestSnapshot(latestSnapshotId)) { if (isInLatestSnapshot(latestSnapshotId)) {
INodeDirectory parent = getParent(); INodeDirectory parent = getParent();
parent.saveChild2Snapshot(this, latestSnapshotId, new INodeSymlink(this)); parent.saveChild2Snapshot(this, latestSnapshotId, new INodeSymlink(this));
} }
return this;
} }
/** @return true unconditionally. */ /** @return true unconditionally. */


@ -151,4 +151,11 @@ public interface FSNamesystemMBean {
* @return number of blocks pending deletion * @return number of blocks pending deletion
*/ */
long getPendingDeletionBlocks(); long getPendingDeletionBlocks();
/**
* Number of content-stale storages.
* @return number of content-stale storages
*/
public int getNumStaleStorages();
} }


@ -22,6 +22,9 @@
/** /**
* A BlockCommand is an instruction to a datanode to register with the namenode. * A BlockCommand is an instruction to a datanode to register with the namenode.
* This command can't be combined with other commands in the same response.
* This is because after the datanode processes RegisterCommand, it will skip
* the rest of the DatanodeCommands in the same HeartbeatResponse.
*/ */
@InterfaceAudience.Private @InterfaceAudience.Private
@InterfaceStability.Evolving @InterfaceStability.Evolving


@ -37,6 +37,10 @@ ELSE (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
ENDIF (${CMAKE_SYSTEM_NAME} MATCHES "Linux") ENDIF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
IF(FUSE_FOUND) IF(FUSE_FOUND)
add_library(posix_util
../util/posix_util.c
)
add_executable(fuse_dfs add_executable(fuse_dfs
fuse_dfs.c fuse_dfs.c
fuse_options.c fuse_options.c


@ -0,0 +1,271 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/htable.h"
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
struct htable_pair {
void *key;
void *val;
};
/**
* A hash table which uses linear probing.
*/
struct htable {
uint32_t capacity;
uint32_t used;
htable_hash_fn_t hash_fun;
htable_eq_fn_t eq_fun;
struct htable_pair *elem;
};
/**
* An internal function for inserting a value into the hash table.
*
* Note: this function assumes that you have made enough space in the table.
*
* @param nelem The new element to insert.
* @param capacity The capacity of the hash table.
* @param hash_fun The hash function to use.
* @param key The key to insert.
* @param val The value to insert.
*/
static void htable_insert_internal(struct htable_pair *nelem,
uint32_t capacity, htable_hash_fn_t hash_fun, void *key,
void *val)
{
uint32_t i;
i = hash_fun(key, capacity);
while (1) {
if (!nelem[i].key) {
nelem[i].key = key;
nelem[i].val = val;
return;
}
i++;
if (i == capacity) {
i = 0;
}
}
}
static int htable_realloc(struct htable *htable, uint32_t new_capacity)
{
struct htable_pair *nelem;
uint32_t i, old_capacity = htable->capacity;
htable_hash_fn_t hash_fun = htable->hash_fun;
nelem = calloc(new_capacity, sizeof(struct htable_pair));
if (!nelem) {
return ENOMEM;
}
for (i = 0; i < old_capacity; i++) {
struct htable_pair *pair = htable->elem + i;
htable_insert_internal(nelem, new_capacity, hash_fun,
pair->key, pair->val);
}
free(htable->elem);
htable->elem = nelem;
htable->capacity = new_capacity;
return 0;
}
struct htable *htable_alloc(uint32_t size,
htable_hash_fn_t hash_fun, htable_eq_fn_t eq_fun)
{
struct htable *htable;
htable = calloc(1, sizeof(*htable));
if (!htable) {
return NULL;
}
size = (size + 1) >> 1;
size = size << 1;
if (size < HTABLE_MIN_SIZE) {
size = HTABLE_MIN_SIZE;
}
htable->hash_fun = hash_fun;
htable->eq_fun = eq_fun;
htable->used = 0;
if (htable_realloc(htable, size)) {
free(htable);
return NULL;
}
return htable;
}
void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx)
{
uint32_t i;
for (i = 0; i != htable->capacity; ++i) {
struct htable_pair *elem = htable->elem + i;
if (elem->key) {
fun(ctx, elem->key, elem->val);
}
}
}
void htable_free(struct htable *htable)
{
if (htable) {
free(htable->elem);
free(htable);
}
}
int htable_put(struct htable *htable, void *key, void *val)
{
int ret;
uint32_t nused;
// NULL is not a valid key value.
// This helps us implement htable_get_internal efficiently, since we know
// that we can stop when we encounter the first NULL key.
if (!key) {
return EINVAL;
}
// NULL is not a valid value. Otherwise the results of htable_get would
// be confusing (does a NULL return mean entry not found, or that the
// entry was found and was NULL?)
if (!val) {
return EINVAL;
}
// Re-hash if we have used more than half of the hash table
nused = htable->used + 1;
if (nused >= (htable->capacity / 2)) {
ret = htable_realloc(htable, htable->capacity * 2);
if (ret)
return ret;
}
htable_insert_internal(htable->elem, htable->capacity,
htable->hash_fun, key, val);
htable->used++;
return 0;
}
static int htable_get_internal(const struct htable *htable,
const void *key, uint32_t *out)
{
uint32_t start_idx, idx;
start_idx = htable->hash_fun(key, htable->capacity);
idx = start_idx;
while (1) {
struct htable_pair *pair = htable->elem + idx;
if (!pair->key) {
// We always maintain the invariant that the entries corresponding
// to a given key are stored in a contiguous block, not separated
// by any NULLs. So if we encounter a NULL, our search is over.
return ENOENT;
} else if (htable->eq_fun(pair->key, key)) {
*out = idx;
return 0;
}
idx++;
if (idx == htable->capacity) {
idx = 0;
}
if (idx == start_idx) {
return ENOENT;
}
}
}
void *htable_get(const struct htable *htable, const void *key)
{
uint32_t idx;
if (htable_get_internal(htable, key, &idx)) {
return NULL;
}
return htable->elem[idx].val;
}
void htable_pop(struct htable *htable, const void *key,
void **found_key, void **found_val)
{
uint32_t hole, i;
const void *nkey;
if (htable_get_internal(htable, key, &hole)) {
*found_key = NULL;
*found_val = NULL;
return;
}
i = hole;
htable->used--;
// We need to maintain the compactness invariant used in
// htable_get_internal. This invariant specifies that the entries for any
// given key are never separated by NULLs (although they may be separated
// by entries for other keys.)
while (1) {
i++;
if (i == htable->capacity) {
i = 0;
}
nkey = htable->elem[i].key;
if (!nkey) {
*found_key = htable->elem[hole].key;
*found_val = htable->elem[hole].val;
htable->elem[hole].key = NULL;
htable->elem[hole].val = NULL;
return;
} else if (htable->eq_fun(key, nkey)) {
htable->elem[hole].key = htable->elem[i].key;
htable->elem[hole].val = htable->elem[i].val;
hole = i;
}
}
}
uint32_t htable_used(const struct htable *htable)
{
return htable->used;
}
uint32_t htable_capacity(const struct htable *htable)
{
return htable->capacity;
}
uint32_t ht_hash_string(const void *str, uint32_t max)
{
const char *s = str;
uint32_t hash = 0;
while (*s) {
hash = (hash * 31) + *s;
s++;
}
return hash % max;
}
int ht_compare_string(const void *a, const void *b)
{
return strcmp(a, b) == 0;
}
// vim: ts=4:sw=4:tw=79:et
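
To make the calling convention of this new hash table concrete, here is a minimal usage sketch written against the functions above; the main() harness and the literal keys and values are illustrative and are not part of the patch.

#include <stdio.h>
#include "common/htable.h"

int main(void)
{
    void *found_key, *found_val;
    // Allocate a small string-keyed table using the helpers from htable.c.
    struct htable *ht = htable_alloc(16, ht_hash_string, ht_compare_string);
    if (!ht) {
        fprintf(stderr, "htable_alloc failed\n");
        return 1;
    }
    // Keys and values must be non-NULL; the table stores the raw pointers.
    if (htable_put(ht, (void *)"alpha", (void *)"first") ||
            htable_put(ht, (void *)"beta", (void *)"second")) {
        fprintf(stderr, "htable_put failed\n");
        htable_free(ht);
        return 1;
    }
    printf("alpha -> %s, used=%u of %u\n",
           (const char *)htable_get(ht, "alpha"),
           (unsigned)htable_used(ht), (unsigned)htable_capacity(ht));
    // htable_pop hands back the stored pointers so the caller can free them;
    // string literals are used here, so there is nothing to release.
    htable_pop(ht, "beta", &found_key, &found_val);
    printf("popped %s -> %s\n", (const char *)found_key,
           (const char *)found_val);
    htable_free(ht);
    return 0;
}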


@ -0,0 +1,161 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef HADOOP_CORE_COMMON_HASH_TABLE
#define HADOOP_CORE_COMMON_HASH_TABLE
#include <inttypes.h>
#include <stdio.h>
#include <stdint.h>
#define HTABLE_MIN_SIZE 4
struct htable;
/**
* An HTable hash function.
*
* @param key The key.
* @param capacity The total capacity.
*
* @return The hash slot. Must be less than the capacity.
*/
typedef uint32_t (*htable_hash_fn_t)(const void *key, uint32_t capacity);
/**
* An HTable equality function. Compares two keys.
*
* @param a First key.
* @param b Second key.
*
* @return nonzero if the keys are equal.
*/
typedef int (*htable_eq_fn_t)(const void *a, const void *b);
/**
* Allocate a new hash table.
*
* @param capacity The minimum suggested starting capacity.
* @param hash_fun The hash function to use in this hash table.
* @param eq_fun The equals function to use in this hash table.
*
* @return The new hash table on success; NULL on OOM.
*/
struct htable *htable_alloc(uint32_t capacity, htable_hash_fn_t hash_fun,
htable_eq_fn_t eq_fun);
typedef void (*visitor_fn_t)(void *ctx, void *key, void *val);
/**
* Visit all of the entries in the hash table.
*
* @param htable The hash table.
* @param fun The callback function to invoke on each key and value.
* @param ctx Context pointer to pass to the callback.
*/
void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx);
/**
* Free the hash table.
*
* It is up to the calling code to ensure that the keys and values inside the
* table are de-allocated, if that is necessary.
*
* @param htable The hash table.
*/
void htable_free(struct htable *htable);
/**
* Add an entry to the hash table.
*
* @param htable The hash table.
* @param key The key to add. This cannot be NULL.
* @param val The value to add. This cannot be NULL.
*
* @return 0 on success;
* EINVAL if the key or the value is NULL;
* ENOMEM if there is not enough memory to add the element.
*/
int htable_put(struct htable *htable, void *key, void *val);
/**
* Get an entry from the hash table.
*
* @param htable The hash table.
* @param key The key to find.
*
* @return NULL if there is no such entry; the entry otherwise.
*/
void *htable_get(const struct htable *htable, const void *key);
/**
* Get an entry from the hash table and remove it.
*
* @param htable The hash table.
* @param key The key for the entry find and remove.
* @param found_key (out param) NULL if the entry was not found; the found key
* otherwise.
* @param found_val (out param) NULL if the entry was not found; the found
* value otherwise.
*/
void htable_pop(struct htable *htable, const void *key,
void **found_key, void **found_val);
/**
* Get the number of entries used in the hash table.
*
* @param htable The hash table.
*
* @return The number of entries used in the hash table.
*/
uint32_t htable_used(const struct htable *htable);
/**
* Get the capacity of the hash table.
*
* @param htable The hash table.
*
* @return The capacity of the hash table.
*/
uint32_t htable_capacity(const struct htable *htable);
/**
* Hash a string.
*
* @param str The string.
* @param max Maximum hash value
*
* @return A number less than max.
*/
uint32_t ht_hash_string(const void *str, uint32_t max);
/**
* Compare two strings.
*
* @param a The first string.
* @param b The second string.
*
* @return 1 if the strings are identical; 0 otherwise.
*/
int ht_compare_string(const void *a, const void *b);
#endif
// vim: ts=4:sw=4:tw=79:et
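
The hash and equality typedefs above are the only extension points of this header; as a sketch, a caller with pointer-encoded integer keys could supply its own pair of functions. The names below are illustrative and do not appear in the patch.

#include <stdint.h>
#include "common/htable.h"

// Hash an integer id that has been stuffed into the key pointer.
// htable_hash_fn_t requires the result to be strictly less than capacity.
static uint32_t hash_id(const void *key, uint32_t capacity)
{
    return (uint32_t)((uintptr_t)key % capacity);
}

// Two keys are equal when they encode the same id.
static int eq_id(const void *a, const void *b)
{
    return a == b;
}

// Would be wired up as: htable_alloc(32, hash_id, eq_id);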


@ -19,8 +19,8 @@
#include "exception.h" #include "exception.h"
#include "hdfs.h" #include "hdfs.h"
#include "jni_helper.h" #include "jni_helper.h"
#include "platform.h"
#include <inttypes.h>
#include <stdio.h> #include <stdio.h>
#include <stdlib.h> #include <stdlib.h>
#include <string.h> #include <string.h>
@ -35,54 +35,54 @@ struct ExceptionInfo {
static const struct ExceptionInfo gExceptionInfo[] = { static const struct ExceptionInfo gExceptionInfo[] = {
{ {
.name = "java.io.FileNotFoundException", "java.io.FileNotFoundException",
.noPrintFlag = NOPRINT_EXC_FILE_NOT_FOUND, NOPRINT_EXC_FILE_NOT_FOUND,
.excErrno = ENOENT, ENOENT,
}, },
{ {
.name = "org.apache.hadoop.security.AccessControlException", "org.apache.hadoop.security.AccessControlException",
.noPrintFlag = NOPRINT_EXC_ACCESS_CONTROL, NOPRINT_EXC_ACCESS_CONTROL,
.excErrno = EACCES, EACCES,
}, },
{ {
.name = "org.apache.hadoop.fs.UnresolvedLinkException", "org.apache.hadoop.fs.UnresolvedLinkException",
.noPrintFlag = NOPRINT_EXC_UNRESOLVED_LINK, NOPRINT_EXC_UNRESOLVED_LINK,
.excErrno = ENOLINK, ENOLINK,
}, },
{ {
.name = "org.apache.hadoop.fs.ParentNotDirectoryException", "org.apache.hadoop.fs.ParentNotDirectoryException",
.noPrintFlag = NOPRINT_EXC_PARENT_NOT_DIRECTORY, NOPRINT_EXC_PARENT_NOT_DIRECTORY,
.excErrno = ENOTDIR, ENOTDIR,
}, },
{ {
.name = "java.lang.IllegalArgumentException", "java.lang.IllegalArgumentException",
.noPrintFlag = NOPRINT_EXC_ILLEGAL_ARGUMENT, NOPRINT_EXC_ILLEGAL_ARGUMENT,
.excErrno = EINVAL, EINVAL,
}, },
{ {
.name = "java.lang.OutOfMemoryError", "java.lang.OutOfMemoryError",
.noPrintFlag = 0, 0,
.excErrno = ENOMEM, ENOMEM,
}, },
{ {
.name = "org.apache.hadoop.hdfs.server.namenode.SafeModeException", "org.apache.hadoop.hdfs.server.namenode.SafeModeException",
.noPrintFlag = 0, 0,
.excErrno = EROFS, EROFS,
}, },
{ {
.name = "org.apache.hadoop.fs.FileAlreadyExistsException", "org.apache.hadoop.fs.FileAlreadyExistsException",
.noPrintFlag = 0, 0,
.excErrno = EEXIST, EEXIST,
}, },
{ {
.name = "org.apache.hadoop.hdfs.protocol.QuotaExceededException", "org.apache.hadoop.hdfs.protocol.QuotaExceededException",
.noPrintFlag = 0, 0,
.excErrno = EDQUOT, EDQUOT,
}, },
{ {
.name = "org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException", "org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException",
.noPrintFlag = 0, 0,
.excErrno = ESTALE, ESTALE,
}, },
}; };
@ -113,6 +113,7 @@ int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
jstring jStr = NULL; jstring jStr = NULL;
jvalue jVal; jvalue jVal;
jthrowable jthr; jthrowable jthr;
const char *stackTrace;
jthr = classNameOfObject(exc, env, &className); jthr = classNameOfObject(exc, env, &className);
if (jthr) { if (jthr) {
@ -148,7 +149,7 @@ int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
destroyLocalReference(env, jthr); destroyLocalReference(env, jthr);
} else { } else {
jStr = jVal.l; jStr = jVal.l;
const char *stackTrace = (*env)->GetStringUTFChars(env, jStr, NULL); stackTrace = (*env)->GetStringUTFChars(env, jStr, NULL);
if (!stackTrace) { if (!stackTrace) {
fprintf(stderr, "(unable to get stack trace for %s exception: " fprintf(stderr, "(unable to get stack trace for %s exception: "
"GetStringUTFChars error.)\n", className); "GetStringUTFChars error.)\n", className);


@ -34,13 +34,14 @@
* usually not what you want.) * usually not what you want.)
*/ */
#include "platform.h"
#include <jni.h> #include <jni.h>
#include <stdio.h> #include <stdio.h>
#include <stdlib.h> #include <stdlib.h>
#include <stdarg.h> #include <stdarg.h>
#include <search.h> #include <search.h>
#include <pthread.h>
#include <errno.h> #include <errno.h>
/** /**
@ -109,7 +110,7 @@ int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
* object. * object.
*/ */
int printExceptionAndFree(JNIEnv *env, jthrowable exc, int noPrintFlags, int printExceptionAndFree(JNIEnv *env, jthrowable exc, int noPrintFlags,
const char *fmt, ...) __attribute__((format(printf, 4, 5))); const char *fmt, ...) TYPE_CHECKED_PRINTF_FORMAT(4, 5);
/** /**
* Print out information about the pending exception and free it. * Print out information about the pending exception and free it.
@ -124,7 +125,7 @@ int printExceptionAndFree(JNIEnv *env, jthrowable exc, int noPrintFlags,
* object. * object.
*/ */
int printPendingExceptionAndFree(JNIEnv *env, int noPrintFlags, int printPendingExceptionAndFree(JNIEnv *env, int noPrintFlags,
const char *fmt, ...) __attribute__((format(printf, 3, 4))); const char *fmt, ...) TYPE_CHECKED_PRINTF_FORMAT(3, 4);
/** /**
* Get a local reference to the pending exception and clear it. * Get a local reference to the pending exception and clear it.
@ -150,6 +151,7 @@ jthrowable getPendingExceptionAndClear(JNIEnv *env);
* @return A local reference to a RuntimeError * @return A local reference to a RuntimeError
*/ */
jthrowable newRuntimeError(JNIEnv *env, const char *fmt, ...) jthrowable newRuntimeError(JNIEnv *env, const char *fmt, ...)
__attribute__((format(printf, 2, 3))); TYPE_CHECKED_PRINTF_FORMAT(2, 3);
#undef TYPE_CHECKED_PRINTF_FORMAT
#endif #endif
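
TYPE_CHECKED_PRINTF_FORMAT comes from the new platform.h (the POSIX version appears later in this diff); the idea is that each platform header maps it onto whatever printf-format-checking attribute its compiler offers. A minimal sketch of the pattern follows; the fallback branch and the logError declaration are illustrative assumptions, not code from this patch.

#ifdef __GNUC__
/* gcc/clang can verify the varargs against the format string. */
#define TYPE_CHECKED_PRINTF_FORMAT(formatArg, varArgs) \
    __attribute__((format(printf, formatArg, varArgs)))
#else
/* Compilers without the attribute simply get no extra checking. */
#define TYPE_CHECKED_PRINTF_FORMAT(formatArg, varArgs)
#endif

/* Declaration site: a mismatched call such as logError(1, "%s", 42)
 * now produces a compile-time warning on gcc/clang. */
void logError(int code, const char *fmt, ...) TYPE_CHECKED_PRINTF_FORMAT(2, 3);

/* The header #undefs the macro after use so the name does not leak. */
#undef TYPE_CHECKED_PRINTF_FORMAT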


@ -49,18 +49,18 @@ int expectFileStats(hdfsFile file,
stats->totalShortCircuitBytesRead, stats->totalShortCircuitBytesRead,
stats->totalZeroCopyBytesRead); stats->totalZeroCopyBytesRead);
if (expectedTotalBytesRead != UINT64_MAX) { if (expectedTotalBytesRead != UINT64_MAX) {
EXPECT_INT64_EQ(expectedTotalBytesRead, stats->totalBytesRead); EXPECT_UINT64_EQ(expectedTotalBytesRead, stats->totalBytesRead);
} }
if (expectedTotalLocalBytesRead != UINT64_MAX) { if (expectedTotalLocalBytesRead != UINT64_MAX) {
EXPECT_INT64_EQ(expectedTotalLocalBytesRead, EXPECT_UINT64_EQ(expectedTotalLocalBytesRead,
stats->totalLocalBytesRead); stats->totalLocalBytesRead);
} }
if (expectedTotalShortCircuitBytesRead != UINT64_MAX) { if (expectedTotalShortCircuitBytesRead != UINT64_MAX) {
EXPECT_INT64_EQ(expectedTotalShortCircuitBytesRead, EXPECT_UINT64_EQ(expectedTotalShortCircuitBytesRead,
stats->totalShortCircuitBytesRead); stats->totalShortCircuitBytesRead);
} }
if (expectedTotalZeroCopyBytesRead != UINT64_MAX) { if (expectedTotalZeroCopyBytesRead != UINT64_MAX) {
EXPECT_INT64_EQ(expectedTotalZeroCopyBytesRead, EXPECT_UINT64_EQ(expectedTotalZeroCopyBytesRead,
stats->totalZeroCopyBytesRead); stats->totalZeroCopyBytesRead);
} }
hdfsFileFreeReadStatistics(stats); hdfsFileFreeReadStatistics(stats);


@ -126,6 +126,18 @@ struct hdfsFile_internal;
} \ } \
} while (0); } while (0);
#define EXPECT_UINT64_EQ(x, y) \
do { \
uint64_t __my_ret__ = y; \
int __my_errno__ = errno; \
if (__my_ret__ != (x)) { \
fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
"value %"PRIu64" (errno: %d): expected %"PRIu64"\n", \
__FILE__, __LINE__, __my_ret__, __my_errno__, (x)); \
return -1; \
} \
} while (0);
#define RETRY_ON_EINTR_GET_ERRNO(ret, expr) do { \ #define RETRY_ON_EINTR_GET_ERRNO(ret, expr) do { \
ret = expr; \ ret = expr; \
if (!ret) \ if (!ret) \


@ -19,20 +19,18 @@
#include "config.h" #include "config.h"
#include "exception.h" #include "exception.h"
#include "jni_helper.h" #include "jni_helper.h"
#include "platform.h"
#include "common/htable.h"
#include "os/mutexes.h"
#include "os/thread_local_storage.h"
#include <stdio.h> #include <stdio.h>
#include <string.h> #include <string.h>
static pthread_mutex_t hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER; static struct htable *gClassRefHTable = NULL;
static pthread_mutex_t jvmMutex = PTHREAD_MUTEX_INITIALIZER;
static volatile int hashTableInited = 0;
#define LOCK_HASH_TABLE() pthread_mutex_lock(&hdfsHashMutex)
#define UNLOCK_HASH_TABLE() pthread_mutex_unlock(&hdfsHashMutex)
/** The Native return types that methods could return */ /** The Native return types that methods could return */
#define VOID 'V' #define JVOID 'V'
#define JOBJECT 'L' #define JOBJECT 'L'
#define JARRAYOBJECT '[' #define JARRAYOBJECT '['
#define JBOOLEAN 'Z' #define JBOOLEAN 'Z'
@ -51,40 +49,10 @@ static volatile int hashTableInited = 0;
*/ */
#define MAX_HASH_TABLE_ELEM 4096 #define MAX_HASH_TABLE_ELEM 4096
/** Key that allows us to retrieve thread-local storage */
static pthread_key_t gTlsKey;
/** nonzero if we succeeded in initializing gTlsKey. Protected by the jvmMutex */
static int gTlsKeyInitialized = 0;
/** Pthreads thread-local storage for each library thread. */
struct hdfsTls {
JNIEnv *env;
};
/** /**
* The function that is called whenever a thread with libhdfs thread local data * Length of buffer for retrieving created JVMs. (We only ever create one.)
* is destroyed.
*
* @param v The thread-local data
*/ */
static void hdfsThreadDestructor(void *v) #define VM_BUF_LENGTH 1
{
struct hdfsTls *tls = v;
JavaVM *vm;
JNIEnv *env = tls->env;
jint ret;
ret = (*env)->GetJavaVM(env, &vm);
if (ret) {
fprintf(stderr, "hdfsThreadDestructor: GetJavaVM failed with "
"error %d\n", ret);
(*env)->ExceptionDescribe(env);
} else {
(*vm)->DetachCurrentThread(vm);
}
free(tls);
}
void destroyLocalReference(JNIEnv *env, jobject jObject) void destroyLocalReference(JNIEnv *env, jobject jObject)
{ {
@ -138,67 +106,6 @@ jthrowable newCStr(JNIEnv *env, jstring jstr, char **out)
return NULL; return NULL;
} }
static int hashTableInit(void)
{
if (!hashTableInited) {
LOCK_HASH_TABLE();
if (!hashTableInited) {
if (hcreate(MAX_HASH_TABLE_ELEM) == 0) {
fprintf(stderr, "error creating hashtable, <%d>: %s\n",
errno, strerror(errno));
UNLOCK_HASH_TABLE();
return 0;
}
hashTableInited = 1;
}
UNLOCK_HASH_TABLE();
}
return 1;
}
static int insertEntryIntoTable(const char *key, void *data)
{
ENTRY e, *ep;
if (key == NULL || data == NULL) {
return 0;
}
if (! hashTableInit()) {
return -1;
}
e.data = data;
e.key = (char*)key;
LOCK_HASH_TABLE();
ep = hsearch(e, ENTER);
UNLOCK_HASH_TABLE();
if (ep == NULL) {
fprintf(stderr, "warn adding key (%s) to hash table, <%d>: %s\n",
key, errno, strerror(errno));
}
return 0;
}
static void* searchEntryFromTable(const char *key)
{
ENTRY e,*ep;
if (key == NULL) {
return NULL;
}
hashTableInit();
e.key = (char*)key;
LOCK_HASH_TABLE();
ep = hsearch(e, FIND);
UNLOCK_HASH_TABLE();
if (ep != NULL) {
return ep->data;
}
return NULL;
}
jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType, jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
jobject instObj, const char *className, jobject instObj, const char *className,
const char *methName, const char *methSignature, ...) const char *methName, const char *methSignature, ...)
@ -235,7 +142,7 @@ jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
} }
retval->l = jobj; retval->l = jobj;
} }
else if (returnType == VOID) { else if (returnType == JVOID) {
if (methType == STATIC) { if (methType == STATIC) {
(*env)->CallStaticVoidMethodV(env, cls, mid, args); (*env)->CallStaticVoidMethodV(env, cls, mid, args);
} }
@ -325,11 +232,11 @@ jthrowable methodIdFromClass(const char *className, const char *methName,
{ {
jclass cls; jclass cls;
jthrowable jthr; jthrowable jthr;
jmethodID mid = 0;
jthr = globalClassReference(className, env, &cls); jthr = globalClassReference(className, env, &cls);
if (jthr) if (jthr)
return jthr; return jthr;
jmethodID mid = 0;
jthr = validateMethodType(env, methType); jthr = validateMethodType(env, methType);
if (jthr) if (jthr)
return jthr; return jthr;
@ -350,25 +257,50 @@ jthrowable methodIdFromClass(const char *className, const char *methName,
jthrowable globalClassReference(const char *className, JNIEnv *env, jclass *out) jthrowable globalClassReference(const char *className, JNIEnv *env, jclass *out)
{ {
jclass clsLocalRef; jthrowable jthr = NULL;
jclass cls = searchEntryFromTable(className); jclass local_clazz = NULL;
if (cls) { jclass clazz = NULL;
*out = cls; int ret;
return NULL;
mutexLock(&hdfsHashMutex);
if (!gClassRefHTable) {
gClassRefHTable = htable_alloc(MAX_HASH_TABLE_ELEM, ht_hash_string,
ht_compare_string);
if (!gClassRefHTable) {
jthr = newRuntimeError(env, "htable_alloc failed\n");
goto done;
}
} }
clsLocalRef = (*env)->FindClass(env,className); clazz = htable_get(gClassRefHTable, className);
if (clsLocalRef == NULL) { if (clazz) {
return getPendingExceptionAndClear(env); *out = clazz;
goto done;
} }
cls = (*env)->NewGlobalRef(env, clsLocalRef); local_clazz = (*env)->FindClass(env,className);
if (cls == NULL) { if (!local_clazz) {
(*env)->DeleteLocalRef(env, clsLocalRef); jthr = getPendingExceptionAndClear(env);
return getPendingExceptionAndClear(env); goto done;
} }
(*env)->DeleteLocalRef(env, clsLocalRef); clazz = (*env)->NewGlobalRef(env, local_clazz);
insertEntryIntoTable(className, cls); if (!clazz) {
*out = cls; jthr = getPendingExceptionAndClear(env);
return NULL; goto done;
}
ret = htable_put(gClassRefHTable, (void*)className, clazz);
if (ret) {
jthr = newRuntimeError(env, "htable_put failed with error "
"code %d\n", ret);
goto done;
}
*out = clazz;
jthr = NULL;
done:
mutexUnlock(&hdfsHashMutex);
(*env)->DeleteLocalRef(env, local_clazz);
if (jthr && clazz) {
(*env)->DeleteGlobalRef(env, clazz);
}
return jthr;
} }
jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name) jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name)
@ -436,14 +368,24 @@ done:
*/ */
static JNIEnv* getGlobalJNIEnv(void) static JNIEnv* getGlobalJNIEnv(void)
{ {
const jsize vmBufLength = 1; JavaVM* vmBuf[VM_BUF_LENGTH];
JavaVM* vmBuf[vmBufLength];
JNIEnv *env; JNIEnv *env;
jint rv = 0; jint rv = 0;
jint noVMs = 0; jint noVMs = 0;
jthrowable jthr; jthrowable jthr;
char *hadoopClassPath;
const char *hadoopClassPathVMArg = "-Djava.class.path=";
size_t optHadoopClassPathLen;
char *optHadoopClassPath;
int noArgs = 1;
char *hadoopJvmArgs;
char jvmArgDelims[] = " ";
char *str, *token, *savePtr;
JavaVMInitArgs vm_args;
JavaVM *vm;
JavaVMOption *options;
rv = JNI_GetCreatedJavaVMs(&(vmBuf[0]), vmBufLength, &noVMs); rv = JNI_GetCreatedJavaVMs(&(vmBuf[0]), VM_BUF_LENGTH, &noVMs);
if (rv != 0) { if (rv != 0) {
fprintf(stderr, "JNI_GetCreatedJavaVMs failed with error: %d\n", rv); fprintf(stderr, "JNI_GetCreatedJavaVMs failed with error: %d\n", rv);
return NULL; return NULL;
@ -451,23 +393,19 @@ static JNIEnv* getGlobalJNIEnv(void)
if (noVMs == 0) { if (noVMs == 0) {
//Get the environment variables for initializing the JVM //Get the environment variables for initializing the JVM
char *hadoopClassPath = getenv("CLASSPATH"); hadoopClassPath = getenv("CLASSPATH");
if (hadoopClassPath == NULL) { if (hadoopClassPath == NULL) {
fprintf(stderr, "Environment variable CLASSPATH not set!\n"); fprintf(stderr, "Environment variable CLASSPATH not set!\n");
return NULL; return NULL;
} }
char *hadoopClassPathVMArg = "-Djava.class.path="; optHadoopClassPathLen = strlen(hadoopClassPath) +
size_t optHadoopClassPathLen = strlen(hadoopClassPath) +
strlen(hadoopClassPathVMArg) + 1; strlen(hadoopClassPathVMArg) + 1;
char *optHadoopClassPath = malloc(sizeof(char)*optHadoopClassPathLen); optHadoopClassPath = malloc(sizeof(char)*optHadoopClassPathLen);
snprintf(optHadoopClassPath, optHadoopClassPathLen, snprintf(optHadoopClassPath, optHadoopClassPathLen,
"%s%s", hadoopClassPathVMArg, hadoopClassPath); "%s%s", hadoopClassPathVMArg, hadoopClassPath);
// Determine the # of LIBHDFS_OPTS args // Determine the # of LIBHDFS_OPTS args
int noArgs = 1; hadoopJvmArgs = getenv("LIBHDFS_OPTS");
char *hadoopJvmArgs = getenv("LIBHDFS_OPTS");
char jvmArgDelims[] = " ";
char *str, *token, *savePtr;
if (hadoopJvmArgs != NULL) { if (hadoopJvmArgs != NULL) {
hadoopJvmArgs = strdup(hadoopJvmArgs); hadoopJvmArgs = strdup(hadoopJvmArgs);
for (noArgs = 1, str = hadoopJvmArgs; ; noArgs++, str = NULL) { for (noArgs = 1, str = hadoopJvmArgs; ; noArgs++, str = NULL) {
@ -480,7 +418,12 @@ static JNIEnv* getGlobalJNIEnv(void)
} }
// Now that we know the # args, populate the options array // Now that we know the # args, populate the options array
JavaVMOption options[noArgs]; options = calloc(noArgs, sizeof(JavaVMOption));
if (!options) {
fputs("Call to calloc failed\n", stderr);
free(optHadoopClassPath);
return NULL;
}
options[0].optionString = optHadoopClassPath; options[0].optionString = optHadoopClassPath;
hadoopJvmArgs = getenv("LIBHDFS_OPTS"); hadoopJvmArgs = getenv("LIBHDFS_OPTS");
if (hadoopJvmArgs != NULL) { if (hadoopJvmArgs != NULL) {
@ -495,8 +438,6 @@ static JNIEnv* getGlobalJNIEnv(void)
} }
//Create the VM //Create the VM
JavaVMInitArgs vm_args;
JavaVM *vm;
vm_args.version = JNI_VERSION_1_2; vm_args.version = JNI_VERSION_1_2;
vm_args.options = options; vm_args.options = options;
vm_args.nOptions = noArgs; vm_args.nOptions = noArgs;
@ -508,6 +449,7 @@ static JNIEnv* getGlobalJNIEnv(void)
free(hadoopJvmArgs); free(hadoopJvmArgs);
} }
free(optHadoopClassPath); free(optHadoopClassPath);
free(options);
if (rv != 0) { if (rv != 0) {
fprintf(stderr, "Call to JNI_CreateJavaVM failed " fprintf(stderr, "Call to JNI_CreateJavaVM failed "
@ -523,7 +465,7 @@ static JNIEnv* getGlobalJNIEnv(void)
} }
else { else {
//Attach this thread to the VM //Attach this thread to the VM
JavaVM* vm = vmBuf[0]; vm = vmBuf[0];
rv = (*vm)->AttachCurrentThread(vm, (void*)&env, 0); rv = (*vm)->AttachCurrentThread(vm, (void*)&env, 0);
if (rv != 0) { if (rv != 0) {
fprintf(stderr, "Call to AttachCurrentThread " fprintf(stderr, "Call to AttachCurrentThread "
@ -557,54 +499,27 @@ static JNIEnv* getGlobalJNIEnv(void)
JNIEnv* getJNIEnv(void) JNIEnv* getJNIEnv(void)
{ {
JNIEnv *env; JNIEnv *env;
struct hdfsTls *tls; THREAD_LOCAL_STORAGE_GET_QUICK();
int ret; mutexLock(&jvmMutex);
if (threadLocalStorageGet(&env)) {
#ifdef HAVE_BETTER_TLS mutexUnlock(&jvmMutex);
static __thread struct hdfsTls *quickTls = NULL; return NULL;
if (quickTls)
return quickTls->env;
#endif
pthread_mutex_lock(&jvmMutex);
if (!gTlsKeyInitialized) {
ret = pthread_key_create(&gTlsKey, hdfsThreadDestructor);
if (ret) {
pthread_mutex_unlock(&jvmMutex);
fprintf(stderr, "getJNIEnv: pthread_key_create failed with "
"error %d\n", ret);
return NULL;
}
gTlsKeyInitialized = 1;
} }
tls = pthread_getspecific(gTlsKey); if (env) {
if (tls) { mutexUnlock(&jvmMutex);
pthread_mutex_unlock(&jvmMutex); return env;
return tls->env;
} }
env = getGlobalJNIEnv(); env = getGlobalJNIEnv();
pthread_mutex_unlock(&jvmMutex); mutexUnlock(&jvmMutex);
if (!env) { if (!env) {
fprintf(stderr, "getJNIEnv: getGlobalJNIEnv failed\n"); fprintf(stderr, "getJNIEnv: getGlobalJNIEnv failed\n");
return NULL; return NULL;
} }
tls = calloc(1, sizeof(struct hdfsTls)); if (threadLocalStorageSet(env)) {
if (!tls) { return NULL;
fprintf(stderr, "getJNIEnv: OOM allocating %zd bytes\n",
sizeof(struct hdfsTls));
return NULL;
} }
tls->env = env; THREAD_LOCAL_STORAGE_SET_QUICK(env);
ret = pthread_setspecific(gTlsKey, tls);
if (ret) {
fprintf(stderr, "getJNIEnv: pthread_setspecific failed with "
"error code %d\n", ret);
hdfsThreadDestructor(tls);
return NULL;
}
#ifdef HAVE_BETTER_TLS
quickTls = tls;
#endif
return env; return env;
} }


@ -24,8 +24,6 @@
#include <stdlib.h> #include <stdlib.h>
#include <stdarg.h> #include <stdarg.h>
#include <search.h>
#include <pthread.h>
#include <errno.h> #include <errno.h>
#define PATH_SEPARATOR ':' #define PATH_SEPARATOR ':'


@ -21,6 +21,7 @@
#include "hdfs_test.h" #include "hdfs_test.h"
#include "jni_helper.h" #include "jni_helper.h"
#include "native_mini_dfs.h" #include "native_mini_dfs.h"
#include "platform.h"
#include <errno.h> #include <errno.h>
#include <jni.h> #include <jni.h>
@ -347,10 +348,11 @@ error_dlr_nn:
int nmdConfigureHdfsBuilder(struct NativeMiniDfsCluster *cl, int nmdConfigureHdfsBuilder(struct NativeMiniDfsCluster *cl,
struct hdfsBuilder *bld) struct hdfsBuilder *bld)
{ {
int port, ret; int ret;
tPort port;
hdfsBuilderSetNameNode(bld, "localhost"); hdfsBuilderSetNameNode(bld, "localhost");
port = nmdGetNameNodePort(cl); port = (tPort)nmdGetNameNodePort(cl);
if (port < 0) { if (port < 0) {
fprintf(stderr, "nmdGetNameNodePort failed with error %d\n", -port); fprintf(stderr, "nmdGetNameNodePort failed with error %d\n", -port);
return EIO; return EIO;


@ -0,0 +1,55 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBHDFS_MUTEXES_H
#define LIBHDFS_MUTEXES_H
/*
* Defines abstraction over platform-specific mutexes. libhdfs has no formal
* initialization function that users would call from a single-threaded context
* to initialize the library. This creates a challenge for bootstrapping the
* mutexes. To address this, all required mutexes are pre-defined here with
* external storage. Platform-specific implementations must guarantee that the
* mutexes are initialized via static initialization.
*/
#include "platform.h"
/** Mutex protecting the class reference hash table. */
extern mutex hdfsHashMutex;
/** Mutex protecting singleton JVM instance. */
extern mutex jvmMutex;
/**
* Locks a mutex.
*
* @param m mutex
* @return 0 if successful, non-zero otherwise
*/
int mutexLock(mutex *m);
/**
* Unlocks a mutex.
*
* @param m mutex
* @return 0 if successful, non-zero otherwise
*/
int mutexUnlock(mutex *m);
#endif
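
Callers such as jni_helper.c simply bracket their critical sections with these wrappers and propagate the return code; a small sketch of that pattern follows, with the counter standing in for the real shared state (illustrative only).

#include "os/mutexes.h"

static int gRefCount; /* illustrative shared state, not from the patch */

static int bumpRefCount(void)
{
    int val;
    /* mutexLock/mutexUnlock already log to stderr on failure and return
     * non-zero, so callers only need to check and bail out. */
    if (mutexLock(&jvmMutex)) {
        return -1;
    }
    val = ++gRefCount;
    mutexUnlock(&jvmMutex);
    return val;
}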


@ -16,23 +16,28 @@
* limitations under the License. * limitations under the License.
*/ */
(removed file: RMAppAttemptUpdateSavedEvent.java)
package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
public class RMAppAttemptUpdateSavedEvent extends RMAppAttemptEvent {
final Exception updatedException;
public RMAppAttemptUpdateSavedEvent(ApplicationAttemptId appAttemptId,
Exception updatedException) {
super(appAttemptId, RMAppAttemptEventType.ATTEMPT_UPDATE_SAVED);
this.updatedException = updatedException;
}
public Exception getUpdatedException() {
return updatedException;
}
}
(added file: POSIX implementation of os/mutexes.h)
#include "os/mutexes.h"
#include <pthread.h>
#include <stdio.h>
mutex hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
mutex jvmMutex = PTHREAD_MUTEX_INITIALIZER;
int mutexLock(mutex *m) {
int ret = pthread_mutex_lock(m);
if (ret) {
fprintf(stderr, "mutexLock: pthread_mutex_lock failed with error %d\n",
ret);
}
return ret;
}
int mutexUnlock(mutex *m) {
int ret = pthread_mutex_unlock(m);
if (ret) {
fprintf(stderr, "mutexUnlock: pthread_mutex_unlock failed with error %d\n",
ret);
}
return ret;
}


@ -16,21 +16,19 @@
* limitations under the License. * limitations under the License.
*/ */
(removed file: RMAppNewSavedEvent.java)
package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
import org.apache.hadoop.yarn.api.records.ApplicationId;
public class RMAppNewSavedEvent extends RMAppEvent {
private final Exception storedException;
public RMAppNewSavedEvent(ApplicationId appId, Exception storedException) {
super(appId, RMAppEventType.APP_NEW_SAVED);
this.storedException = storedException;
}
public Exception getStoredException() {
return storedException;
}
}
(added file: POSIX platform.h for libhdfs)
#ifndef LIBHDFS_PLATFORM_H
#define LIBHDFS_PLATFORM_H
#include <pthread.h>
/* Use gcc type-checked format arguments. */
#define TYPE_CHECKED_PRINTF_FORMAT(formatArg, varArgs) \
__attribute__((format(printf, formatArg, varArgs)))
/*
* Mutex and thread data types defined by pthreads.
*/
typedef pthread_mutex_t mutex;
typedef pthread_t threadId;
#endif


@ -0,0 +1,52 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "os/thread.h"
#include <pthread.h>
#include <stdio.h>
/**
* Defines a helper function that adapts the function pointer provided by the
* caller to the type required by pthread_create.
*
* @param toRun thread to run
* @return void* result of running thread (always NULL)
*/
static void* runThread(void *toRun) {
const thread *t = toRun;
t->start(t->arg);
return NULL;
}
int threadCreate(thread *t) {
int ret;
ret = pthread_create(&t->id, NULL, runThread, t);
if (ret) {
fprintf(stderr, "threadCreate: pthread_create failed with error %d\n", ret);
}
return ret;
}
int threadJoin(const thread *t) {
int ret = pthread_join(t->id, NULL);
if (ret) {
fprintf(stderr, "threadJoin: pthread_join failed with error %d\n", ret);
}
return ret;
}
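
A usage sketch for these wrappers, assuming the thread struct declared in os/thread.h (not shown in this excerpt) carries the start, arg and id members that runThread and threadCreate rely on; the worker function and main() are illustrative.

#include <stdio.h>
#include "os/thread.h"

/* Worker body: takes the opaque arg and returns nothing, matching t->start. */
static void printGreeting(void *arg)
{
    printf("hello from %s\n", (const char *)arg);
}

int main(void)
{
    thread t;
    t.start = printGreeting;
    t.arg = (void *)"worker-1";
    if (threadCreate(&t)) {
        return 1;
    }
    return threadJoin(&t);
}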


@ -0,0 +1,80 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "os/thread_local_storage.h"
#include <jni.h>
#include <pthread.h>
#include <stdio.h>
/** Key that allows us to retrieve thread-local storage */
static pthread_key_t gTlsKey;
/** nonzero if we succeeded in initializing gTlsKey. Protected by the jvmMutex */
static int gTlsKeyInitialized = 0;
/**
* The function that is called whenever a thread with libhdfs thread local data
* is destroyed.
*
* @param v The thread-local data
*/
static void hdfsThreadDestructor(void *v)
{
JavaVM *vm;
JNIEnv *env = v;
jint ret;
ret = (*env)->GetJavaVM(env, &vm);
if (ret) {
fprintf(stderr, "hdfsThreadDestructor: GetJavaVM failed with error %d\n",
ret);
(*env)->ExceptionDescribe(env);
} else {
(*vm)->DetachCurrentThread(vm);
}
}
int threadLocalStorageGet(JNIEnv **env)
{
int ret = 0;
if (!gTlsKeyInitialized) {
ret = pthread_key_create(&gTlsKey, hdfsThreadDestructor);
if (ret) {
fprintf(stderr,
"threadLocalStorageGet: pthread_key_create failed with error %d\n",
ret);
return ret;
}
gTlsKeyInitialized = 1;
}
*env = pthread_getspecific(gTlsKey);
return ret;
}
int threadLocalStorageSet(JNIEnv *env)
{
int ret = pthread_setspecific(gTlsKey, env);
if (ret) {
fprintf(stderr,
"threadLocalStorageSet: pthread_setspecific failed with error %d\n",
ret);
hdfsThreadDestructor(env);
}
return ret;
}

Some files were not shown because too many files have changed in this diff.