Merging r1616428 through r1616893 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-6584@1616897 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 2014-08-08 22:21:35 +00:00
commit 9d5f8fa68d
147 changed files with 8330 additions and 3545 deletions

View File

@ -189,6 +189,7 @@ Requirements:
* Maven 3.0 or later
* Findbugs 1.3.9 (if running findbugs)
* ProtocolBuffer 2.5.0
* CMake 2.6 or newer
* Windows SDK or Visual Studio 2010 Professional
* Unix command-line tools from GnuWin32 or Cygwin: sh, mkdir, rm, cp, tar, gzip
* zlib headers (if building native code bindings for zlib)

View File

@ -144,6 +144,15 @@
<artifactId>maven-jar-plugin</artifactId>
<executions>
<execution>
<id>prepare-jar</id>
<phase>prepare-package</phase>
<goals>
<goal>jar</goal>
</goals>
</execution>
<execution>
<id>prepare-test-jar</id>
<phase>prepare-package</phase>
<goals>
<goal>test-jar</goal>
</goals>

View File

@ -120,32 +120,6 @@ public String toString() {
return token;
}
/**
* Return the hashcode for the token.
*
* @return the hashcode for the token.
*/
@Override
public int hashCode() {
return (token != null) ? token.hashCode() : 0;
}
/**
* Return if two token instances are equal.
*
* @param o the other token instance.
*
* @return if this instance and the other instance are equal.
*/
@Override
public boolean equals(Object o) {
boolean eq = false;
if (o instanceof Token) {
Token other = (Token) o;
eq = (token == null && other.token == null) || (token != null && this.token.equals(other.token));
}
return eq;
}
}
private static Class<? extends Authenticator> DEFAULT_AUTHENTICATOR = KerberosAuthenticator.class;
@ -208,6 +182,16 @@ public AuthenticatedURL(Authenticator authenticator,
this.authenticator.setConnectionConfigurator(connConfigurator);
}
/**
* Returns the {@link Authenticator} instance used by the
* <code>AuthenticatedURL</code>.
*
* @return the {@link Authenticator} instance
*/
protected Authenticator getAuthenticator() {
return authenticator;
}
/**
* Returns an authenticated {@link HttpURLConnection}.
*
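
The hunk above removes the Token equals/hashCode overrides and adds a protected getAuthenticator() accessor. A minimal sketch of how a subclass might use the new accessor, assuming only the hadoop-auth client classes shown in this diff; the subclass name and the logging are illustrative, not part of the patch:

import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.Authenticator;

public class TracingAuthenticatedURL extends AuthenticatedURL {

  public TracingAuthenticatedURL(Authenticator authenticator) {
    super(authenticator);
  }

  @Override
  public HttpURLConnection openConnection(URL url, Token token)
      throws IOException, AuthenticationException {
    // getAuthenticator() is the accessor introduced by this hunk; it exposes
    // the Authenticator the URL was constructed with to subclasses.
    System.out.println("authenticating via "
        + getAuthenticator().getClass().getSimpleName());
    return super.openConnection(url, token);
  }
}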

View File

@ -142,11 +142,30 @@ public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
*/
public static final String NAME_RULES = TYPE + ".name.rules";
private String type;
private String keytab;
private GSSManager gssManager;
private Subject serverSubject = new Subject();
private List<LoginContext> loginContexts = new ArrayList<LoginContext>();
/**
* Creates a Kerberos SPNEGO authentication handler with the default
* auth-token type, <code>kerberos</code>.
*/
public KerberosAuthenticationHandler() {
this(TYPE);
}
/**
* Creates a Kerberos SPNEGO authentication handler with a custom auth-token
* type.
*
* @param type auth-token type.
*/
public KerberosAuthenticationHandler(String type) {
this.type = type;
}
/**
* Initializes the authentication handler instance.
* <p/>
@ -249,7 +268,7 @@ public void destroy() {
*/
@Override
public String getType() {
return TYPE;
return type;
}
/**
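
The new constructor makes the auth-token type pluggable. As a sketch (class name and type string are hypothetical), a handler variant with its own type can now be declared like this; it is the same pattern the delegation-token handlers later in this commit use:

import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;

public class CustomTypeKerberosAuthenticationHandler
    extends KerberosAuthenticationHandler {

  public CustomTypeKerberosAuthenticationHandler() {
    // getType() now reports "kerberos-custom" instead of the default
    // "kerberos", while the SPNEGO behaviour is unchanged.
    super("kerberos-custom");
  }
}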

View File

@ -55,6 +55,25 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler {
private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
private boolean acceptAnonymous;
private String type;
/**
* Creates a Hadoop pseudo authentication handler with the default auth-token
* type, <code>simple</code>.
*/
public PseudoAuthenticationHandler() {
this(TYPE);
}
/**
* Creates a Hadoop pseudo authentication handler with a custom auth-token
* type.
*
* @param type auth-token type.
*/
public PseudoAuthenticationHandler(String type) {
this.type = type;
}
/**
* Initializes the authentication handler instance.
@ -96,7 +115,7 @@ public void destroy() {
*/
@Override
public String getType() {
return TYPE;
return type;
}
/**
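
The same pattern applies to the pseudo handler; a hypothetical variant with a custom auth-token type:

import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;

public class CustomTypePseudoAuthenticationHandler
    extends PseudoAuthenticationHandler {

  public CustomTypePseudoAuthenticationHandler() {
    // getType() reports "simple-custom"; anonymous/user.name handling is unchanged.
    super("simple-custom");
  }
}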

View File

@ -33,36 +33,6 @@ public void testToken() throws Exception {
token = new AuthenticatedURL.Token("foo");
Assert.assertTrue(token.isSet());
Assert.assertEquals("foo", token.toString());
AuthenticatedURL.Token token1 = new AuthenticatedURL.Token();
AuthenticatedURL.Token token2 = new AuthenticatedURL.Token();
Assert.assertEquals(token1.hashCode(), token2.hashCode());
Assert.assertTrue(token1.equals(token2));
token1 = new AuthenticatedURL.Token();
token2 = new AuthenticatedURL.Token("foo");
Assert.assertNotSame(token1.hashCode(), token2.hashCode());
Assert.assertFalse(token1.equals(token2));
token1 = new AuthenticatedURL.Token("foo");
token2 = new AuthenticatedURL.Token();
Assert.assertNotSame(token1.hashCode(), token2.hashCode());
Assert.assertFalse(token1.equals(token2));
token1 = new AuthenticatedURL.Token("foo");
token2 = new AuthenticatedURL.Token("foo");
Assert.assertEquals(token1.hashCode(), token2.hashCode());
Assert.assertTrue(token1.equals(token2));
token1 = new AuthenticatedURL.Token("bar");
token2 = new AuthenticatedURL.Token("foo");
Assert.assertNotSame(token1.hashCode(), token2.hashCode());
Assert.assertFalse(token1.equals(token2));
token1 = new AuthenticatedURL.Token("foo");
token2 = new AuthenticatedURL.Token("bar");
Assert.assertNotSame(token1.hashCode(), token2.hashCode());
Assert.assertFalse(token1.equals(token2));
}
@Test
@ -137,4 +107,12 @@ public void testConnectionConfigurator() throws Exception {
Mockito.verify(connConf).configure(Mockito.<HttpURLConnection>any());
}
@Test
public void testGetAuthenticator() throws Exception {
Authenticator authenticator = Mockito.mock(Authenticator.class);
AuthenticatedURL aURL = new AuthenticatedURL(authenticator);
Assert.assertEquals(authenticator, aURL.getAuthenticator());
}
}

View File

@ -490,6 +490,8 @@ Release 2.6.0 - UNRELEASED
HADOOP-10791. AuthenticationFilter should support externalizing the
secret for signing and provide rotation support. (rkanter via tucu)
HADOOP-10771. Refactor HTTP delegation support out of httpfs to common, PART 1. (tucu)
OPTIMIZATIONS
BUG FIXES
@ -521,9 +523,6 @@ Release 2.6.0 - UNRELEASED
HADOOP-10830. Missing lock in JavaKeyStoreProvider.createCredentialEntry.
(Benoy Antony via umamahesh)
HADOOP-10876. The constructor of Path should not take an empty URL as a
parameter. (Zhihai Xu via wang)
HADOOP-10928. Incorrect usage on `hadoop credential list`.
(Josh Elser via wang)
@ -543,6 +542,11 @@ Release 2.6.0 - UNRELEASED
HADOOP-10905. LdapGroupsMapping Should use configuration.getPassword for SSL
and LDAP Passwords. (lmccay via brandonli)
HADOOP-10931 compile error on tools/hadoop-openstack (xukun via stevel)
HADOOP-10929. Typo in Configuration.getPasswordFromCredentialProviders
(lmccay via brandonli)
Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -203,6 +203,17 @@
<artifactId>hadoop-auth</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-auth</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minikdc</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.jcraft</groupId>
<artifactId>jsch</artifactId>

View File

@ -1781,7 +1781,7 @@ public void setStrings(String name, String... values) {
public char[] getPassword(String name) throws IOException {
char[] pass = null;
pass = getPasswordFromCredenitalProviders(name);
pass = getPasswordFromCredentialProviders(name);
if (pass == null) {
pass = getPasswordFromConfig(name);
@ -1797,7 +1797,7 @@ public char[] getPassword(String name) throws IOException {
* @return password or null if not found
* @throws IOException
*/
protected char[] getPasswordFromCredenitalProviders(String name)
protected char[] getPasswordFromCredentialProviders(String name)
throws IOException {
char[] pass = null;
try {
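
The hunk only corrects the spelling of getPasswordFromCredentialProviders; the lookup order of Configuration.getPassword() is unchanged. A small usage sketch, with a placeholder property name:

import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;

public class PasswordLookup {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Tries the configured credential providers first and falls back to the
    // plain config property; "my.keystore.password" is a placeholder key.
    char[] password = conf.getPassword("my.keystore.password");
    if (password != null) {
      try {
        System.out.println("password resolved, length=" + password.length);
      } finally {
        Arrays.fill(password, '\0'); // do not keep the secret in memory
      }
    }
  }
}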

View File

@ -128,20 +128,7 @@ private void checkPathArg( String path ) throws IllegalArgumentException {
"Can not create a Path from an empty string");
}
}
/** check URI parameter of Path constructor. */
private void checkPathArg(URI aUri) throws IllegalArgumentException {
// disallow construction of a Path from an empty URI
if (aUri == null) {
throw new IllegalArgumentException(
"Can not create a Path from a null URI");
}
if (aUri.toString().isEmpty()) {
throw new IllegalArgumentException(
"Can not create a Path from an empty URI");
}
}
/** Construct a path from a String. Path strings are URIs, but with
* unescaped elements and some additional normalization. */
public Path(String pathString) throws IllegalArgumentException {
@ -189,7 +176,6 @@ public Path(String pathString) throws IllegalArgumentException {
* Construct a path from a URI
*/
public Path(URI aUri) {
checkPathArg(aUri);
uri = aUri.normalize();
}
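
With the empty-URI check gone (reverting HADOOP-10876, per the CHANGES.txt hunk above), Path(URI) again only normalizes its argument. A tiny illustration; the URI is made up:

import java.net.URI;
import org.apache.hadoop.fs.Path;

public class PathFromUri {
  public static void main(String[] args) {
    // The constructor now just calls URI.normalize(); "data/../logs" collapses to "logs".
    Path p = new Path(URI.create("hdfs://namenode:8020/data/../logs"));
    System.out.println(p); // hdfs://namenode:8020/logs
  }
}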

View File

@ -21,14 +21,18 @@
import java.util.Iterator;
import java.util.List;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsFilter;
import static org.apache.hadoop.metrics2.lib.Interns.*;
class MetricsCollectorImpl implements MetricsCollector,
@InterfaceAudience.Private
@VisibleForTesting
public class MetricsCollectorImpl implements MetricsCollector,
Iterable<MetricsRecordBuilderImpl> {
private final List<MetricsRecordBuilderImpl> rbs = Lists.newArrayList();

View File

@ -89,6 +89,14 @@ public MutableStat(String name, String description,
this(name, description, sampleName, valueName, false);
}
/**
* Set whether to display the extended stats (stdev, min/max etc.) or not
* @param extended enable/disable displaying extended stats
*/
public synchronized void setExtended(boolean extended) {
this.extended = extended;
}
/**
* Add a number of samples and their sum to the running stat
* @param numSamples number of samples
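
A sketch of the new setter, using the four-argument constructor visible in the hunk; the metric names are made up:

import org.apache.hadoop.metrics2.lib.MutableStat;

public class ExtendedStatExample {
  public static void main(String[] args) {
    // The 4-arg constructor defaults to non-extended output.
    MutableStat readLatency =
        new MutableStat("ReadLatency", "Latency of read ops", "Ops", "Millis");
    // New in this hunk: extended stats (stdev, min/max, ...) can be switched
    // on after construction instead of only via the 5-arg constructor.
    readLatency.setExtended(true);
    readLatency.add(10, 250); // 10 samples with a total of 250 ms
  }
}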

View File

@ -0,0 +1,330 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
/**
* The <code>DelegationTokenAuthenticatedURL</code> is a
* {@link AuthenticatedURL} sub-class with built-in Hadoop Delegation Token
* functionality.
* <p/>
* The authentication mechanisms supported by default are Hadoop Simple
* authentication (also known as pseudo authentication) and Kerberos SPNEGO
* authentication.
* <p/>
* Additional authentication mechanisms can be supported via {@link
* DelegationTokenAuthenticator} implementations.
* <p/>
* The default {@link DelegationTokenAuthenticator} is the {@link
* KerberosDelegationTokenAuthenticator} class which supports
* automatic fallback from Kerberos SPNEGO to Hadoop Simple authentication via
* the {@link PseudoDelegationTokenAuthenticator} class.
* <p/>
* <code>AuthenticatedURL</code> instances are not thread-safe.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public class DelegationTokenAuthenticatedURL extends AuthenticatedURL {
/**
* Client side authentication token that handles Delegation Tokens.
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
public static class Token extends AuthenticatedURL.Token {
private
org.apache.hadoop.security.token.Token<AbstractDelegationTokenIdentifier>
delegationToken;
org.apache.hadoop.security.token.Token<AbstractDelegationTokenIdentifier>
getDelegationToken() {
return delegationToken;
}
}
private static Class<? extends DelegationTokenAuthenticator>
DEFAULT_AUTHENTICATOR = KerberosDelegationTokenAuthenticator.class;
/**
* Sets the default {@link DelegationTokenAuthenticator} class to use when an
* {@link DelegationTokenAuthenticatedURL} instance is created without
* specifying one.
*
* The default class is {@link KerberosDelegationTokenAuthenticator}
*
* @param authenticator the authenticator class to use as default.
*/
public static void setDefaultDelegationTokenAuthenticator(
Class<? extends DelegationTokenAuthenticator> authenticator) {
DEFAULT_AUTHENTICATOR = authenticator;
}
/**
* Returns the default {@link DelegationTokenAuthenticator} class to use when
* an {@link DelegationTokenAuthenticatedURL} instance is created without
* specifying one.
* <p/>
* The default class is {@link KerberosDelegationTokenAuthenticator}
*
* @return the delegation token authenticator class to use as default.
*/
public static Class<? extends DelegationTokenAuthenticator>
getDefaultDelegationTokenAuthenticator() {
return DEFAULT_AUTHENTICATOR;
}
private static DelegationTokenAuthenticator
obtainDelegationTokenAuthenticator(DelegationTokenAuthenticator dta) {
try {
return (dta != null) ? dta : DEFAULT_AUTHENTICATOR.newInstance();
} catch (Exception ex) {
throw new IllegalArgumentException(ex);
}
}
/**
* Creates an <code>DelegationTokenAuthenticatedURL</code>.
* <p/>
* An instance of the default {@link DelegationTokenAuthenticator} will be
* used.
*/
public DelegationTokenAuthenticatedURL() {
this(null, null);
}
/**
* Creates an <code>DelegationTokenAuthenticatedURL</code>.
*
* @param authenticator the {@link DelegationTokenAuthenticator} instance to
* use, if <code>null</code> the default one will be used.
*/
public DelegationTokenAuthenticatedURL(
DelegationTokenAuthenticator authenticator) {
this(authenticator, null);
}
/**
* Creates an <code>DelegationTokenAuthenticatedURL</code> using the default
* {@link DelegationTokenAuthenticator} class.
*
* @param connConfigurator a connection configurator.
*/
public DelegationTokenAuthenticatedURL(
ConnectionConfigurator connConfigurator) {
this(null, connConfigurator);
}
/**
* Creates an <code>DelegationTokenAuthenticatedURL</code>.
*
* @param authenticator the {@link DelegationTokenAuthenticator} instance to
* use, if <code>null</code> the default one will be used.
* @param connConfigurator a connection configurator.
*/
public DelegationTokenAuthenticatedURL(
DelegationTokenAuthenticator authenticator,
ConnectionConfigurator connConfigurator) {
super(obtainDelegationTokenAuthenticator(authenticator), connConfigurator);
}
/**
* Returns an authenticated {@link HttpURLConnection}, it uses a Delegation
* Token only if the given auth token is an instance of {@link Token} and
* it contains a Delegation Token, otherwise use the configured
* {@link DelegationTokenAuthenticator} to authenticate the connection.
*
* @param url the URL to connect to. Only HTTP/S URLs are supported.
* @param token the authentication token being used for the user.
* @return an authenticated {@link HttpURLConnection}.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
@Override
public HttpURLConnection openConnection(URL url, AuthenticatedURL.Token token)
throws IOException, AuthenticationException {
return (token instanceof Token) ? openConnection(url, (Token) token)
: super.openConnection(url ,token);
}
/**
* Returns an authenticated {@link HttpURLConnection}. If the Delegation
* Token is present, it will be used taking precedence over the configured
* <code>Authenticator</code>.
*
* @param url the URL to connect to. Only HTTP/S URLs are supported.
* @param token the authentication token being used for the user.
* @return an authenticated {@link HttpURLConnection}.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public HttpURLConnection openConnection(URL url, Token token)
throws IOException, AuthenticationException {
return openConnection(url, token, null);
}
private URL augmentURL(URL url, Map<String, String> params)
throws IOException {
if (params != null && params.size() > 0) {
String urlStr = url.toExternalForm();
StringBuilder sb = new StringBuilder(urlStr);
String separator = (urlStr.contains("?")) ? "&" : "?";
for (Map.Entry<String, String> param : params.entrySet()) {
sb.append(separator).append(param.getKey()).append("=").append(
param.getValue());
separator = "&";
}
url = new URL(sb.toString());
}
return url;
}
/**
* Returns an authenticated {@link HttpURLConnection}. If the Delegation
* Token is present, it will be used taking precedence over the configured
* <code>Authenticator</code>. If the <code>doAs</code> parameter is not NULL,
* the request will be done on behalf of the specified <code>doAs</code> user.
*
* @param url the URL to connect to. Only HTTP/S URLs are supported.
* @param token the authentication token being used for the user.
* @param doAs user to do the request on behalf of, if NULL the request is
* as self.
* @return an authenticated {@link HttpURLConnection}.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public HttpURLConnection openConnection(URL url, Token token, String doAs)
throws IOException, AuthenticationException {
Preconditions.checkNotNull(url, "url");
Preconditions.checkNotNull(token, "token");
Map<String, String> extraParams = new HashMap<String, String>();
// delegation token
Credentials creds = UserGroupInformation.getCurrentUser().getCredentials();
if (!creds.getAllTokens().isEmpty()) {
InetSocketAddress serviceAddr = new InetSocketAddress(url.getHost(),
url.getPort());
Text service = SecurityUtil.buildTokenService(serviceAddr);
org.apache.hadoop.security.token.Token<? extends TokenIdentifier> dt =
creds.getToken(service);
if (dt != null) {
extraParams.put(KerberosDelegationTokenAuthenticator.DELEGATION_PARAM,
dt.encodeToUrlString());
}
}
url = augmentURL(url, extraParams);
return super.openConnection(url, token);
}
/**
* Requests a delegation token using the configured <code>Authenticator</code>
* for authentication.
*
* @param url the URL to get the delegation token from. Only HTTP/S URLs are
* supported.
* @param token the authentication token being used for the user where the
* Delegation token will be stored.
* @return a delegation token.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public org.apache.hadoop.security.token.Token<AbstractDelegationTokenIdentifier>
getDelegationToken(URL url, Token token, String renewer)
throws IOException, AuthenticationException {
Preconditions.checkNotNull(url, "url");
Preconditions.checkNotNull(token, "token");
try {
token.delegationToken =
((KerberosDelegationTokenAuthenticator) getAuthenticator()).
getDelegationToken(url, token, renewer);
return token.delegationToken;
} catch (IOException ex) {
token.delegationToken = null;
throw ex;
}
}
/**
* Renews a delegation token from the server end-point using the
* configured <code>Authenticator</code> for authentication.
*
* @param url the URL to renew the delegation token from. Only HTTP/S URLs are
* supported.
* @param token the authentication token with the Delegation Token to renew.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public long renewDelegationToken(URL url, Token token)
throws IOException, AuthenticationException {
Preconditions.checkNotNull(url, "url");
Preconditions.checkNotNull(token, "token");
Preconditions.checkNotNull(token.delegationToken,
"No delegation token available");
try {
return ((KerberosDelegationTokenAuthenticator) getAuthenticator()).
renewDelegationToken(url, token, token.delegationToken);
} catch (IOException ex) {
token.delegationToken = null;
throw ex;
}
}
/**
* Cancels a delegation token from the server end-point. It does not require
* being authenticated by the configured <code>Authenticator</code>.
*
* @param url the URL to cancel the delegation token from. Only HTTP/S URLs
* are supported.
* @param token the authentication token with the Delegation Token to cancel.
* @throws IOException if an IO error occurred.
*/
public void cancelDelegationToken(URL url, Token token)
throws IOException {
Preconditions.checkNotNull(url, "url");
Preconditions.checkNotNull(token, "token");
Preconditions.checkNotNull(token.delegationToken,
"No delegation token available");
try {
((KerberosDelegationTokenAuthenticator) getAuthenticator()).
cancelDelegationToken(url, token, token.delegationToken);
} finally {
token.delegationToken = null;
}
}
}
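
A client-side sketch of the class added above: fetch a delegation token, open a connection, then renew and cancel. The endpoint URL and renewer name are placeholders, and a working run would also need a server configured with delegation-token support:

import java.net.HttpURLConnection;
import java.net.URL;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;

public class DelegationTokenClient {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://service-host:14000/endpoint"); // placeholder
    DelegationTokenAuthenticatedURL.Token token =
        new DelegationTokenAuthenticatedURL.Token();
    DelegationTokenAuthenticatedURL aUrl = new DelegationTokenAuthenticatedURL();

    // Authenticates (SPNEGO, with fallback to simple auth) and stores the
    // delegation token inside the Token instance.
    aUrl.getDelegationToken(url, token, "renewer-user");

    // If the current user's credentials hold a matching delegation token it is
    // sent as the 'delegation' query parameter; otherwise the configured
    // authenticator is used.
    HttpURLConnection conn = aUrl.openConnection(url, token);
    System.out.println("HTTP status: " + conn.getResponseCode());

    aUrl.renewDelegationToken(url, token);
    aUrl.cancelDelegationToken(url, token);
  }
}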

View File

@ -0,0 +1,102 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import javax.servlet.FilterConfig;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import java.util.Properties;
/**
* The <code>DelegationTokenAuthenticationFilter</code> filter is a
* {@link AuthenticationFilter} with Hadoop Delegation Token support.
* <p/>
* By default it uses its own instance of the {@link
* AbstractDelegationTokenSecretManager}. For situations where an external
* <code>AbstractDelegationTokenSecretManager</code> is required (i.e. one that
* shares the secret with <code>AbstractDelegationTokenSecretManager</code>
* instance running in other services), the external
* <code>AbstractDelegationTokenSecretManager</code> must be set as an
* attribute in the {@link ServletContext} of the web application using the
* {@link #DELEGATION_TOKEN_SECRET_MANAGER_ATTR} attribute name (
* 'hadoop.http.delegation-token-secret-manager').
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DelegationTokenAuthenticationFilter
extends AuthenticationFilter {
/**
* Sets an external <code>DelegationTokenSecretManager</code> instance to
* manage creation and verification of Delegation Tokens.
* <p/>
* This is useful for use cases where secrets must be shared across multiple
* services.
*/
public static final String DELEGATION_TOKEN_SECRET_MANAGER_ATTR =
"hadoop.http.delegation-token-secret-manager";
/**
* It delegates to
* {@link AuthenticationFilter#getConfiguration(String, FilterConfig)} and
* then overrides the {@link AuthenticationHandler} to use if authentication
* type is set to <code>simple</code> or <code>kerberos</code> in order to use
* the corresponding implementation with delegation token support.
*
* @param configPrefix parameter not used.
* @param filterConfig parameter not used.
* @return hadoop-auth de-prefixed configuration for the filter and handler.
*/
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) throws ServletException {
Properties props = super.getConfiguration(configPrefix, filterConfig);
String authType = props.getProperty(AUTH_TYPE);
if (authType.equals(PseudoAuthenticationHandler.TYPE)) {
props.setProperty(AUTH_TYPE,
PseudoDelegationTokenAuthenticationHandler.class.getName());
} else if (authType.equals(KerberosAuthenticationHandler.TYPE)) {
props.setProperty(AUTH_TYPE,
KerberosDelegationTokenAuthenticationHandler.class.getName());
}
return props;
}
@Override
public void init(FilterConfig filterConfig) throws ServletException {
super.init(filterConfig);
AbstractDelegationTokenSecretManager dtSecretManager =
(AbstractDelegationTokenSecretManager) filterConfig.getServletContext().
getAttribute(DELEGATION_TOKEN_SECRET_MANAGER_ATTR);
if (dtSecretManager != null && getAuthenticationHandler()
instanceof DelegationTokenAuthenticationHandler) {
DelegationTokenAuthenticationHandler handler =
(DelegationTokenAuthenticationHandler) getAuthenticationHandler();
handler.setExternalDelegationTokenSecretManager(dtSecretManager);
}
}
}
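
A sketch of the secret-manager wiring that the init() method above expects; how the ServletContext is obtained depends on the hosting web server and is assumed here:

import javax.servlet.ServletContext;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter;

public class SecretManagerWiring {
  /** Publish a shared secret manager before the filter is initialized. */
  public static void shareSecretManager(ServletContext ctx,
      AbstractDelegationTokenSecretManager<?> secretManager) {
    // DelegationTokenAuthenticationFilter.init() reads this attribute and
    // hands the manager to its delegation-token-aware AuthenticationHandler.
    ctx.setAttribute(
        DelegationTokenAuthenticationFilter.DELEGATION_TOKEN_SECRET_MANAGER_ATTR,
        secretManager);
  }
}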

View File

@ -0,0 +1,355 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.codehaus.jackson.map.ObjectMapper;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
import java.io.IOException;
import java.io.Writer;
import java.text.MessageFormat;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
/**
* An {@link AuthenticationHandler} that implements Kerberos SPNEGO mechanism
* for HTTP and supports Delegation Token functionality.
* <p/>
* In addition to the wrapped {@link AuthenticationHandler} configuration
* properties, this handler supports the following properties prefixed
* with the type of the wrapped <code>AuthenticationHandler</code>:
* <ul>
* <li>delegation-token.token-kind: the token kind for generated tokens
* (no default, required property).</li>
* <li>delegation-token.update-interval.sec: secret manager master key
* update interval in seconds (default 1 day).</li>
* <li>delegation-token.max-lifetime.sec: maximum life of a delegation
* token in seconds (default 7 days).</li>
* <li>delegation-token.renewal-interval.sec: renewal interval for
* delegation tokens in seconds (default 1 day).</li>
* <li>delegation-token.removal-scan-interval.sec: delegation tokens
* removal scan interval in seconds (default 1 hour).</li>
* </ul>
*
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public abstract class DelegationTokenAuthenticationHandler
implements AuthenticationHandler {
protected static final String TYPE_POSTFIX = "-dt";
public static final String PREFIX = "delegation-token.";
public static final String TOKEN_KIND = PREFIX + "token-kind.sec";
public static final String UPDATE_INTERVAL = PREFIX + "update-interval.sec";
public static final long UPDATE_INTERVAL_DEFAULT = 24 * 60 * 60;
public static final String MAX_LIFETIME = PREFIX + "max-lifetime.sec";
public static final long MAX_LIFETIME_DEFAULT = 7 * 24 * 60 * 60;
public static final String RENEW_INTERVAL = PREFIX + "renew-interval.sec";
public static final long RENEW_INTERVAL_DEFAULT = 24 * 60 * 60;
public static final String REMOVAL_SCAN_INTERVAL = PREFIX +
"removal-scan-interval.sec";
public static final long REMOVAL_SCAN_INTERVAL_DEFAULT = 60 * 60;
private static final Set<String> DELEGATION_TOKEN_OPS = new HashSet<String>();
static {
DELEGATION_TOKEN_OPS.add(KerberosDelegationTokenAuthenticator.
DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
DELEGATION_TOKEN_OPS.add(KerberosDelegationTokenAuthenticator.
DelegationTokenOperation.RENEWDELEGATIONTOKEN.toString());
DELEGATION_TOKEN_OPS.add(KerberosDelegationTokenAuthenticator.
DelegationTokenOperation.CANCELDELEGATIONTOKEN.toString());
}
private AuthenticationHandler authHandler;
private DelegationTokenManager tokenManager;
private String authType;
public DelegationTokenAuthenticationHandler(AuthenticationHandler handler) {
authHandler = handler;
authType = handler.getType();
}
@VisibleForTesting
DelegationTokenManager getTokenManager() {
return tokenManager;
}
@Override
public void init(Properties config) throws ServletException {
authHandler.init(config);
initTokenManager(config);
}
/**
* Sets an external <code>DelegationTokenSecretManager</code> instance to
* manage creation and verification of Delegation Tokens.
* <p/>
* This is useful for use cases where secrets must be shared across multiple
* services.
*
* @param secretManager a <code>DelegationTokenSecretManager</code> instance
*/
public void setExternalDelegationTokenSecretManager(
AbstractDelegationTokenSecretManager secretManager) {
tokenManager.setExternalDelegationTokenSecretManager(secretManager);
}
@VisibleForTesting
@SuppressWarnings("unchecked")
public void initTokenManager(Properties config) {
String configPrefix = authHandler.getType() + ".";
Configuration conf = new Configuration(false);
for (Map.Entry entry : config.entrySet()) {
conf.set((String) entry.getKey(), (String) entry.getValue());
}
String tokenKind = conf.get(TOKEN_KIND);
if (tokenKind == null) {
throw new IllegalArgumentException(
"The configuration does not define the token kind");
}
tokenKind = tokenKind.trim();
long updateInterval = conf.getLong(configPrefix + UPDATE_INTERVAL,
UPDATE_INTERVAL_DEFAULT);
long maxLifeTime = conf.getLong(configPrefix + MAX_LIFETIME,
MAX_LIFETIME_DEFAULT);
long renewInterval = conf.getLong(configPrefix + RENEW_INTERVAL,
RENEW_INTERVAL_DEFAULT);
long removalScanInterval = conf.getLong(
configPrefix + REMOVAL_SCAN_INTERVAL, REMOVAL_SCAN_INTERVAL_DEFAULT);
tokenManager = new DelegationTokenManager(new Text(tokenKind),
updateInterval * 1000, maxLifeTime * 1000, renewInterval * 1000,
removalScanInterval * 1000);
tokenManager.init();
}
@Override
public void destroy() {
tokenManager.destroy();
authHandler.destroy();
}
@Override
public String getType() {
return authType;
}
private static final String ENTER = System.getProperty("line.separator");
@Override
@SuppressWarnings("unchecked")
public boolean managementOperation(AuthenticationToken token,
HttpServletRequest request, HttpServletResponse response)
throws IOException, AuthenticationException {
boolean requestContinues = true;
String op = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.OP_PARAM);
op = (op != null) ? op.toUpperCase() : null;
if (DELEGATION_TOKEN_OPS.contains(op) &&
!request.getMethod().equals("OPTIONS")) {
KerberosDelegationTokenAuthenticator.DelegationTokenOperation dtOp =
KerberosDelegationTokenAuthenticator.
DelegationTokenOperation.valueOf(op);
if (dtOp.getHttpMethod().equals(request.getMethod())) {
boolean doManagement;
if (dtOp.requiresKerberosCredentials() && token == null) {
token = authenticate(request, response);
if (token == null) {
requestContinues = false;
doManagement = false;
} else {
doManagement = true;
}
} else {
doManagement = true;
}
if (doManagement) {
UserGroupInformation requestUgi = (token != null)
? UserGroupInformation.createRemoteUser(token.getUserName())
: null;
Map map = null;
switch (dtOp) {
case GETDELEGATIONTOKEN:
if (requestUgi == null) {
throw new IllegalStateException("request UGI cannot be NULL");
}
String renewer = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.RENEWER_PARAM);
try {
Token<?> dToken = tokenManager.createToken(requestUgi, renewer);
map = delegationTokenToJSON(dToken);
} catch (IOException ex) {
throw new AuthenticationException(ex.toString(), ex);
}
break;
case RENEWDELEGATIONTOKEN:
if (requestUgi == null) {
throw new IllegalStateException("request UGI cannot be NULL");
}
String tokenToRenew = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.TOKEN_PARAM);
if (tokenToRenew == null) {
response.sendError(HttpServletResponse.SC_BAD_REQUEST,
MessageFormat.format(
"Operation [{0}] requires the parameter [{1}]", dtOp,
KerberosDelegationTokenAuthenticator.TOKEN_PARAM)
);
requestContinues = false;
} else {
Token<DelegationTokenIdentifier> dt =
new Token<DelegationTokenIdentifier>();
try {
dt.decodeFromUrlString(tokenToRenew);
long expirationTime = tokenManager.renewToken(dt,
requestUgi.getShortUserName());
map = new HashMap();
map.put("long", expirationTime);
} catch (IOException ex) {
throw new AuthenticationException(ex.toString(), ex);
}
}
break;
case CANCELDELEGATIONTOKEN:
String tokenToCancel = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.TOKEN_PARAM);
if (tokenToCancel == null) {
response.sendError(HttpServletResponse.SC_BAD_REQUEST,
MessageFormat.format(
"Operation [{0}] requires the parameter [{1}]", dtOp,
KerberosDelegationTokenAuthenticator.TOKEN_PARAM)
);
requestContinues = false;
} else {
Token<DelegationTokenIdentifier> dt =
new Token<DelegationTokenIdentifier>();
try {
dt.decodeFromUrlString(tokenToCancel);
tokenManager.cancelToken(dt, (requestUgi != null)
? requestUgi.getShortUserName() : null);
} catch (IOException ex) {
response.sendError(HttpServletResponse.SC_NOT_FOUND,
"Invalid delegation token, cannot cancel");
requestContinues = false;
}
}
break;
}
if (requestContinues) {
response.setStatus(HttpServletResponse.SC_OK);
if (map != null) {
response.setContentType(MediaType.APPLICATION_JSON);
Writer writer = response.getWriter();
ObjectMapper jsonMapper = new ObjectMapper();
jsonMapper.writeValue(writer, map);
writer.write(ENTER);
writer.flush();
}
requestContinues = false;
}
}
} else {
response.sendError(HttpServletResponse.SC_BAD_REQUEST,
MessageFormat.format(
"Wrong HTTP method [{0}] for operation [{1}], it should be " +
"[{2}]", request.getMethod(), dtOp, dtOp.getHttpMethod()));
requestContinues = false;
}
}
return requestContinues;
}
@SuppressWarnings("unchecked")
private static Map delegationTokenToJSON(Token token) throws IOException {
Map json = new LinkedHashMap();
json.put(
KerberosDelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON,
token.encodeToUrlString());
Map response = new LinkedHashMap();
response.put(KerberosDelegationTokenAuthenticator.DELEGATION_TOKEN_JSON,
json);
return response;
}
/**
* Authenticates a request looking for the <code>delegation</code>
* query-string parameter and verifying it is a valid token. If there is no
* <code>delegation</code> query-string parameter, it delegates the
* authentication to the {@link KerberosAuthenticationHandler} unless it is
* disabled.
*
* @param request the HTTP client request.
* @param response the HTTP client response.
* @return the authentication token for the authenticated request.
* @throws IOException thrown if an IO error occurred.
* @throws AuthenticationException thrown if the authentication failed.
*/
@Override
public AuthenticationToken authenticate(HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token;
String delegationParam = ServletUtils.getParameter(request,
KerberosDelegationTokenAuthenticator.DELEGATION_PARAM);
if (delegationParam != null) {
try {
Token<DelegationTokenIdentifier> dt =
new Token<DelegationTokenIdentifier>();
dt.decodeFromUrlString(delegationParam);
UserGroupInformation ugi = tokenManager.verifyToken(dt);
final String shortName = ugi.getShortUserName();
// creating an ephemeral token
token = new AuthenticationToken(shortName, ugi.getUserName(),
getType());
token.setExpires(0);
} catch (Throwable ex) {
throw new AuthenticationException("Could not verify DelegationToken, " +
ex.toString(), ex);
}
} else {
token = authHandler.authenticate(request, response);
}
return token;
}
}
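
A configuration sketch for the handler above, using its public constants rather than literal keys. Note that initTokenManager() reads the token kind without a prefix while the interval keys take the wrapped handler's type as prefix; the values are placeholders, and only the token manager is initialized, so no Kerberos principal/keytab is needed:

import java.util.Properties;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;

public class HandlerConfigSketch {
  public static void main(String[] args) {
    KerberosDelegationTokenAuthenticationHandler handler =
        new KerberosDelegationTokenAuthenticationHandler();

    Properties props = new Properties();
    // Required: the kind of the delegation tokens to issue (placeholder value).
    props.setProperty(DelegationTokenAuthenticationHandler.TOKEN_KIND,
        "EXAMPLE-DT");
    // Optional: master key update interval in seconds, read with the wrapped
    // handler's auth type ("kerberos-dt" here) as prefix.
    props.setProperty(handler.getType() + "."
        + DelegationTokenAuthenticationHandler.UPDATE_INTERVAL, "86400");

    // initTokenManager() is @VisibleForTesting public; a full init(props)
    // would additionally initialize the wrapped Kerberos SPNEGO handler.
    handler.initTokenManager(props);
  }
}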

View File

@ -0,0 +1,250 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.Authenticator;
import org.apache.hadoop.security.authentication.client.ConnectionConfigurator;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.codehaus.jackson.map.ObjectMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URL;
import java.net.URLEncoder;
import java.util.HashMap;
import java.util.Map;
/**
* {@link Authenticator} wrapper that enhances an {@link Authenticator} with
* Delegation Token support.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public abstract class DelegationTokenAuthenticator implements Authenticator {
private static Logger LOG =
LoggerFactory.getLogger(DelegationTokenAuthenticator.class);
private static final String CONTENT_TYPE = "Content-Type";
private static final String APPLICATION_JSON_MIME = "application/json";
private static final String HTTP_GET = "GET";
private static final String HTTP_PUT = "PUT";
public static final String OP_PARAM = "op";
public static final String DELEGATION_PARAM = "delegation";
public static final String TOKEN_PARAM = "token";
public static final String RENEWER_PARAM = "renewer";
public static final String DELEGATION_TOKEN_JSON = "Token";
public static final String DELEGATION_TOKEN_URL_STRING_JSON = "urlString";
public static final String RENEW_DELEGATION_TOKEN_JSON = "long";
/**
* DelegationToken operations.
*/
@InterfaceAudience.Private
public static enum DelegationTokenOperation {
GETDELEGATIONTOKEN(HTTP_GET, true),
RENEWDELEGATIONTOKEN(HTTP_PUT, true),
CANCELDELEGATIONTOKEN(HTTP_PUT, false);
private String httpMethod;
private boolean requiresKerberosCredentials;
private DelegationTokenOperation(String httpMethod,
boolean requiresKerberosCredentials) {
this.httpMethod = httpMethod;
this.requiresKerberosCredentials = requiresKerberosCredentials;
}
public String getHttpMethod() {
return httpMethod;
}
public boolean requiresKerberosCredentials() {
return requiresKerberosCredentials;
}
}
private Authenticator authenticator;
public DelegationTokenAuthenticator(Authenticator authenticator) {
this.authenticator = authenticator;
}
@Override
public void setConnectionConfigurator(ConnectionConfigurator configurator) {
authenticator.setConnectionConfigurator(configurator);
}
private boolean hasDelegationToken(URL url) {
String queryStr = url.getQuery();
return (queryStr != null) && queryStr.contains(DELEGATION_PARAM + "=");
}
@Override
public void authenticate(URL url, AuthenticatedURL.Token token)
throws IOException, AuthenticationException {
if (!hasDelegationToken(url)) {
authenticator.authenticate(url, token);
}
}
/**
* Requests a delegation token using the configured <code>Authenticator</code>
* for authentication.
*
* @param url the URL to get the delegation token from. Only HTTP/S URLs are
* supported.
* @param token the authentication token being used for the user where the
* Delegation token will be stored.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public Token<AbstractDelegationTokenIdentifier> getDelegationToken(URL url,
AuthenticatedURL.Token token, String renewer)
throws IOException, AuthenticationException {
Map json = doDelegationTokenOperation(url, token,
DelegationTokenOperation.GETDELEGATIONTOKEN, renewer, null, true);
json = (Map) json.get(DELEGATION_TOKEN_JSON);
String tokenStr = (String) json.get(DELEGATION_TOKEN_URL_STRING_JSON);
Token<AbstractDelegationTokenIdentifier> dToken =
new Token<AbstractDelegationTokenIdentifier>();
dToken.decodeFromUrlString(tokenStr);
InetSocketAddress service = new InetSocketAddress(url.getHost(),
url.getPort());
SecurityUtil.setTokenService(dToken, service);
return dToken;
}
/**
* Renews a delegation token from the server end-point using the
* configured <code>Authenticator</code> for authentication.
*
* @param url the URL to renew the delegation token from. Only HTTP/S URLs are
* supported.
* @param token the authentication token with the Delegation Token to renew.
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication exception occurred.
*/
public long renewDelegationToken(URL url,
AuthenticatedURL.Token token,
Token<AbstractDelegationTokenIdentifier> dToken)
throws IOException, AuthenticationException {
Map json = doDelegationTokenOperation(url, token,
DelegationTokenOperation.RENEWDELEGATIONTOKEN, null, dToken, true);
return (Long) json.get(RENEW_DELEGATION_TOKEN_JSON);
}
/**
* Cancels a delegation token from the server end-point. It does not require
* being authenticated by the configured <code>Authenticator</code>.
*
* @param url the URL to cancel the delegation token from. Only HTTP/S URLs
* are supported.
* @param token the authentication token with the Delegation Token to cancel.
* @throws IOException if an IO error occurred.
*/
public void cancelDelegationToken(URL url,
AuthenticatedURL.Token token,
Token<AbstractDelegationTokenIdentifier> dToken)
throws IOException {
try {
doDelegationTokenOperation(url, token,
DelegationTokenOperation.CANCELDELEGATIONTOKEN, null, dToken, false);
} catch (AuthenticationException ex) {
throw new IOException("This should not happen: " + ex.getMessage(), ex);
}
}
private Map doDelegationTokenOperation(URL url,
AuthenticatedURL.Token token, DelegationTokenOperation operation,
String renewer, Token<?> dToken, boolean hasResponse)
throws IOException, AuthenticationException {
Map ret = null;
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, operation.toString());
if (renewer != null) {
params.put(RENEWER_PARAM, renewer);
}
if (dToken != null) {
params.put(TOKEN_PARAM, dToken.encodeToUrlString());
}
String urlStr = url.toExternalForm();
StringBuilder sb = new StringBuilder(urlStr);
String separator = (urlStr.contains("?")) ? "&" : "?";
for (Map.Entry<String, String> entry : params.entrySet()) {
sb.append(separator).append(entry.getKey()).append("=").
append(URLEncoder.encode(entry.getValue(), "UTF8"));
separator = "&";
}
url = new URL(sb.toString());
AuthenticatedURL aUrl = new AuthenticatedURL(this);
HttpURLConnection conn = aUrl.openConnection(url, token);
conn.setRequestMethod(operation.getHttpMethod());
validateResponse(conn, HttpURLConnection.HTTP_OK);
if (hasResponse) {
String contentType = conn.getHeaderField(CONTENT_TYPE);
contentType = (contentType != null) ? contentType.toLowerCase()
: null;
if (contentType != null &&
contentType.contains(APPLICATION_JSON_MIME)) {
try {
ObjectMapper mapper = new ObjectMapper();
ret = mapper.readValue(conn.getInputStream(), Map.class);
} catch (Exception ex) {
throw new AuthenticationException(String.format(
"'%s' did not handle the '%s' delegation token operation: %s",
url.getAuthority(), operation, ex.getMessage()), ex);
}
} else {
throw new AuthenticationException(String.format("'%s' did not " +
"respond with JSON to the '%s' delegation token operation",
url.getAuthority(), operation));
}
}
return ret;
}
@SuppressWarnings("unchecked")
private static void validateResponse(HttpURLConnection conn, int expected)
throws IOException {
int status = conn.getResponseCode();
if (status != expected) {
try {
conn.getInputStream().close();
} catch (IOException ex) {
//NOP
}
String msg = String.format("HTTP status, expected [%d], got [%d]: %s",
expected, status, conn.getResponseMessage());
LOG.debug(msg);
throw new IOException(msg);
}
}
}
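
For completeness, the same three operations driven through the authenticator directly rather than through DelegationTokenAuthenticatedURL; the URL and renewer are placeholders:

import java.net.URL;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;

public class RawDelegationTokenOps {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://service-host:14000/endpoint"); // placeholder
    AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
    KerberosDelegationTokenAuthenticator authenticator =
        new KerberosDelegationTokenAuthenticator();

    // Each call issues an HTTP request carrying the corresponding op=...
    // parameter and, for GET/RENEW, parses the JSON response shown above.
    Token<AbstractDelegationTokenIdentifier> dt =
        authenticator.getDelegationToken(url, authToken, "renewer-user");
    long nextExpiration =
        authenticator.renewDelegationToken(url, authToken, dt);
    System.out.println("renewed until " + nextExpiration);
    authenticator.cancelDelegationToken(url, authToken, dt);
  }
}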

View File

@ -15,21 +15,24 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service;
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
/**
* HttpFS <code>DelegationTokenIdentifier</code> implementation.
* Concrete delegation token identifier used by {@link DelegationTokenManager},
* {@link KerberosDelegationTokenAuthenticationHandler} and
* {@link DelegationTokenAuthenticationFilter}.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class DelegationTokenIdentifier
extends AbstractDelegationTokenIdentifier {
extends AbstractDelegationTokenIdentifier {
private Text kind = WebHdfsFileSystem.TOKEN_KIND;
private Text kind;
public DelegationTokenIdentifier(Text kind) {
this.kind = kind;
@ -50,8 +53,8 @@ public DelegationTokenIdentifier(Text kind, Text owner, Text renewer,
}
/**
* Returns the kind, <code>TOKEN_KIND</code>.
* @return returns <code>TOKEN_KIND</code>.
* Return the delegation token kind
* @return returns the delegation token kind
*/
@Override
public Text getKind() {

View File

@ -0,0 +1,153 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
/**
* Delegation Token Manager used by the
* {@link KerberosDelegationTokenAuthenticationHandler}.
*
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
class DelegationTokenManager {
private static class DelegationTokenSecretManager
extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
private Text tokenKind;
public DelegationTokenSecretManager(Text tokenKind,
long delegationKeyUpdateInterval,
long delegationTokenMaxLifetime,
long delegationTokenRenewInterval,
long delegationTokenRemoverScanInterval) {
super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
this.tokenKind = tokenKind;
}
@Override
public DelegationTokenIdentifier createIdentifier() {
return new DelegationTokenIdentifier(tokenKind);
}
}
private AbstractDelegationTokenSecretManager secretManager = null;
private boolean managedSecretManager;
private Text tokenKind;
public DelegationTokenManager(Text tokenKind,
long delegationKeyUpdateInterval,
long delegationTokenMaxLifetime,
long delegationTokenRenewInterval,
long delegationTokenRemoverScanInterval) {
this.secretManager = new DelegationTokenSecretManager(tokenKind,
delegationKeyUpdateInterval, delegationTokenMaxLifetime,
delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
this.tokenKind = tokenKind;
managedSecretManager = true;
}
/**
* Sets an external <code>DelegationTokenSecretManager</code> instance to
* manage creation and verification of Delegation Tokens.
* <p/>
* This is useful for use cases where secrets must be shared across multiple
* services.
*
* @param secretManager a <code>DelegationTokenSecretManager</code> instance
*/
public void setExternalDelegationTokenSecretManager(
AbstractDelegationTokenSecretManager secretManager) {
this.secretManager.stopThreads();
this.secretManager = secretManager;
this.tokenKind = secretManager.createIdentifier().getKind();
managedSecretManager = false;
}
public void init() {
if (managedSecretManager) {
try {
secretManager.startThreads();
} catch (IOException ex) {
throw new RuntimeException("Could not start " +
secretManager.getClass() + ": " + ex.toString(), ex);
}
}
}
public void destroy() {
if (managedSecretManager) {
secretManager.stopThreads();
}
}
@SuppressWarnings("unchecked")
public Token<DelegationTokenIdentifier> createToken(UserGroupInformation ugi,
String renewer) {
renewer = (renewer == null) ? ugi.getShortUserName() : renewer;
String user = ugi.getUserName();
Text owner = new Text(user);
Text realUser = null;
if (ugi.getRealUser() != null) {
realUser = new Text(ugi.getRealUser().getUserName());
}
DelegationTokenIdentifier tokenIdentifier = new DelegationTokenIdentifier(
tokenKind, owner, new Text(renewer), realUser);
return new Token<DelegationTokenIdentifier>(tokenIdentifier, secretManager);
}
@SuppressWarnings("unchecked")
public long renewToken(Token<DelegationTokenIdentifier> token, String renewer)
throws IOException {
return secretManager.renewToken(token, renewer);
}
@SuppressWarnings("unchecked")
public void cancelToken(Token<DelegationTokenIdentifier> token,
String canceler) throws IOException {
canceler = (canceler != null) ? canceler :
verifyToken(token).getShortUserName();
secretManager.cancelToken(token, canceler);
}
@SuppressWarnings("unchecked")
public UserGroupInformation verifyToken(Token<DelegationTokenIdentifier>
token) throws IOException {
ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
DataInputStream dis = new DataInputStream(buf);
DelegationTokenIdentifier id = new DelegationTokenIdentifier(tokenKind);
id.readFields(dis);
dis.close();
secretManager.verifyToken(id, token.getPassword());
return id.getUser();
}
}
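
A round-trip sketch of the manager added above. The class is package-private, so this assumes the code sits in the same package, as the handler and its tests do; the token kind and intervals are placeholders:

package org.apache.hadoop.security.token.delegation.web;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public class TokenManagerRoundTrip {
  public static void main(String[] args) throws Exception {
    DelegationTokenManager mgr = new DelegationTokenManager(
        new Text("EXAMPLE-DT"),      // token kind (placeholder)
        24 * 60 * 60 * 1000L,        // master key update interval, ms
        7 * 24 * 60 * 60 * 1000L,    // max token lifetime, ms
        24 * 60 * 60 * 1000L,        // renew interval, ms
        60 * 60 * 1000L);            // removal scan interval, ms
    mgr.init(); // starts the internal secret manager threads
    try {
      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
      Token<DelegationTokenIdentifier> token = mgr.createToken(ugi, null);
      UserGroupInformation owner = mgr.verifyToken(token);
      System.out.println("token issued to " + owner.getShortUserName());
      mgr.cancelToken(token, null);
    } finally {
      mgr.destroy();
    }
  }
}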

View File

@ -0,0 +1,54 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
/**
* An {@link AuthenticationHandler} that implements Kerberos SPNEGO mechanism
* for HTTP and supports Delegation Token functionality.
* <p/>
* In addition to the {@link KerberosAuthenticationHandler} configuration
* properties, this handler supports:
* <ul>
* <li>kerberos.delegation-token.token-kind: the token kind for generated tokens
* (no default, required property).</li>
* <li>kerberos.delegation-token.update-interval.sec: secret manager master key
* update interval in seconds (default 1 day).</li>
* <li>kerberos.delegation-token.max-lifetime.sec: maximum life of a delegation
* token in seconds (default 7 days).</li>
* <li>kerberos.delegation-token.renewal-interval.sec: renewal interval for
* delegation tokens in seconds (default 1 day).</li>
* <li>kerberos.delegation-token.removal-scan-interval.sec: delegation tokens
* removal scan interval in seconds (default 1 hour).</li>
* </ul>
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class KerberosDelegationTokenAuthenticationHandler
extends DelegationTokenAuthenticationHandler {
public KerberosDelegationTokenAuthenticationHandler() {
super(new KerberosAuthenticationHandler(KerberosAuthenticationHandler.TYPE +
TYPE_POSTFIX));
}
}

View File

@ -0,0 +1,46 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.authentication.client.Authenticator;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
/**
* The <code>KerberosDelegationTokenAuthenticator</code> provides support for
* the Kerberos SPNEGO authentication mechanism and for Hadoop Delegation
* Token operations.
* <p/>
* It falls back to the {@link PseudoDelegationTokenAuthenticator} if the HTTP
* endpoint does not trigger SPNEGO authentication.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class KerberosDelegationTokenAuthenticator
extends DelegationTokenAuthenticator {
public KerberosDelegationTokenAuthenticator() {
super(new KerberosAuthenticator() {
@Override
protected Authenticator getFallBackAuthenticator() {
return new PseudoDelegationTokenAuthenticator();
}
});
}
}
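
A hedged client-side sketch of the calls this authenticator ultimately serves; it follows the DelegationTokenAuthenticatedURL usage exercised by the tests later in this change. The endpoint URL and renewer name are placeholders, and the caller is assumed to either hold Kerberos credentials for SPNEGO or rely on the pseudo fallback described above.

import java.net.URL;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;

public class ExampleDelegationTokenClient {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:14000/webhdfs/v1/");  // placeholder endpoint
    DelegationTokenAuthenticatedURL.Token token =
        new DelegationTokenAuthenticatedURL.Token();
    DelegationTokenAuthenticatedURL aUrl = new DelegationTokenAuthenticatedURL();
    // obtain, renew and finally cancel a delegation token for the given renewer
    aUrl.getDelegationToken(url, token, "renewer");
    aUrl.renewDelegationToken(url, token);
    aUrl.cancelDelegationToken(url, token);
  }
}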

View File

@ -0,0 +1,55 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
/**
* An {@link AuthenticationHandler} that implements Hadoop's pseudo/simple
* authentication mechanism for HTTP and supports Delegation Token
* functionality.
* <p/>
* In addition to the {@link PseudoAuthenticationHandler} configuration
* properties, this handler supports:
* <ul>
* <li>simple.delegation-token.token-kind: the token kind for generated tokens
* (no default, required property).</li>
* <li>simple.delegation-token.update-interval.sec: secret manager master key
* update interval in seconds (default 1 day).</li>
* <li>simple.delegation-token.max-lifetime.sec: maximum life of a delegation
* token in seconds (default 7 days).</li>
* <li>simple.delegation-token.renewal-interval.sec: renewal interval for
* delegation tokens in seconds (default 1 day).</li>
* <li>simple.delegation-token.removal-scan-interval.sec: delegation tokens
* removal scan interval in seconds (default 1 hour).</li>
* </ul>
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class PseudoDelegationTokenAuthenticationHandler
extends DelegationTokenAuthenticationHandler {
public PseudoDelegationTokenAuthenticationHandler() {
super(new PseudoAuthenticationHandler(PseudoAuthenticationHandler.TYPE +
TYPE_POSTFIX));
}
}
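
For illustration only, a minimal filter sketch enabling this handler plus its one required property; it mirrors the PseudoDTAFilter used in the tests later in this change, with a placeholder token kind.

import java.util.Properties;
import javax.servlet.FilterConfig;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationHandler;
import org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticationHandler;

public class ExamplePseudoDTFilter extends DelegationTokenAuthenticationFilter {
  @Override
  protected Properties getConfiguration(String configPrefix,
      FilterConfig filterConfig) {
    Properties conf = new Properties();
    // simple (pseudo) authentication plus delegation token support
    conf.setProperty(AUTH_TYPE,
        PseudoDelegationTokenAuthenticationHandler.class.getName());
    // required: the kind of the delegation tokens issued by this endpoint (placeholder)
    conf.setProperty(DelegationTokenAuthenticationHandler.TOKEN_KIND,
        "EXAMPLE_TOKEN_KIND");
    return conf;
  }
}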

View File

@ -15,33 +15,40 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.client;
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.PseudoAuthenticator;
import java.io.IOException;
/**
* A <code>PseudoAuthenticator</code> subclass that uses FileSystemAccess's
* <code>UserGroupInformation</code> to obtain the client user name (the UGI's login user).
* The <code>PseudoDelegationTokenAuthenticator</code> provides support for
* Hadoop's pseudo authentication mechanism, which accepts the user name
* specified as a query string parameter, and for Hadoop Delegation Token
* operations.
* <p/>
* This mimics the model of Hadoop Simple authentication trusting the
* {@link UserGroupInformation#getCurrentUser()} value.
*/
@InterfaceAudience.Private
public class HttpFSPseudoAuthenticator extends PseudoAuthenticator {
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class PseudoDelegationTokenAuthenticator
extends DelegationTokenAuthenticator {
/**
* Return the client user name.
*
* @return the client user name.
*/
@Override
protected String getUserName() {
try {
return UserGroupInformation.getLoginUser().getUserName();
} catch (IOException ex) {
throw new SecurityException("Could not obtain current user, " + ex.getMessage(), ex);
}
public PseudoDelegationTokenAuthenticator() {
super(new PseudoAuthenticator() {
@Override
protected String getUserName() {
try {
return UserGroupInformation.getCurrentUser().getShortUserName();
} catch (IOException ex) {
throw new RuntimeException(ex);
}
}
});
}
}
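
A hedged sketch of the trust model described above: the identity sent to the server is whatever UserGroupInformation.getCurrentUser() reports, so wrapping the call in a doAs changes the authenticated user. The endpoint URL and user name are placeholders; the pattern mirrors the tests later in this change.

import java.net.HttpURLConnection;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;

public class ExamplePseudoAuthClient {
  public static void main(String[] args) throws Exception {
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        URL url = new URL("http://localhost:14000/foo/bar");  // placeholder endpoint
        DelegationTokenAuthenticatedURL.Token token =
            new DelegationTokenAuthenticatedURL.Token();
        DelegationTokenAuthenticatedURL aUrl =
            new DelegationTokenAuthenticatedURL();
        // the request is authenticated as "alice", the current UGI user
        HttpURLConnection conn = aUrl.openConnection(url, token);
        System.out.println("HTTP status: " + conn.getResponseCode());
        return null;
      }
    });
  }
}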

View File

@ -0,0 +1,59 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.List;
/**
* Servlet utility methods.
*/
@InterfaceAudience.Private
class ServletUtils {
private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
/**
* Extracts a query string parameter without triggering HTTP parameter
* processing by the servlet container.
*
* @param request the request.
* @param name the name of the parameter whose value to get.
* @return the parameter value, or <code>null</code> if the parameter is not
* defined.
* @throws IOException thrown if there was an error parsing the query string.
*/
public static String getParameter(HttpServletRequest request, String name)
throws IOException {
List<NameValuePair> list = URLEncodedUtils.parse(request.getQueryString(),
UTF8_CHARSET);
if (list != null) {
for (NameValuePair nv : list) {
if (name.equals(nv.getName())) {
return nv.getValue();
}
}
}
return null;
}
}
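
A hedged usage sketch: reading an operation parameter straight from the query string so the container never parses request parameters (and therefore never consumes a POST body). The helper class and the "op" parameter name are illustrative only; since ServletUtils is package-private, such a caller is assumed to live in the same package.

package org.apache.hadoop.security.token.delegation.web;

import java.io.IOException;
import javax.servlet.http.HttpServletRequest;

class ExampleServletUtilsCaller {
  // returns the requested operation, or null when the query string has no "op" parameter
  static String requestedOperation(HttpServletRequest request) throws IOException {
    // deliberately avoids request.getParameter(), which would trigger full
    // parameter parsing by the servlet container
    return ServletUtils.getParameter(request, "op");
  }
}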

View File

@ -114,57 +114,18 @@ User Commands
* <<<fs>>>
Usage: <<<hadoop fs [GENERIC_OPTIONS] [COMMAND_OPTIONS]>>>
Deprecated, use <<<hdfs dfs>>> instead.
Runs a generic filesystem user client.
The various COMMAND_OPTIONS can be found at File System Shell Guide.
Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#dfs}<<<hdfs dfs>>>}}
instead.
* <<<fsck>>>
Runs an HDFS filesystem checking utility.
See {{{../hadoop-hdfs/HdfsUserGuide.html#fsck}fsck}} for more info.
Usage: <<<hadoop fsck [GENERIC_OPTIONS] <path> [-move | -delete | -openforwrite] [-files [-blocks [-locations | -racks]]] [-showprogress]>>>
*------------------+---------------------------------------------+
|| COMMAND_OPTION || Description
*------------------+---------------------------------------------+
| <path> | Start checking from this path.
*------------------+---------------------------------------------+
| -move | Move corrupted files to /lost+found
*------------------+---------------------------------------------+
| -delete | Delete corrupted files.
*------------------+---------------------------------------------+
| -openforwrite | Print out files opened for write.
*------------------+---------------------------------------------+
| -files | Print out files being checked.
*------------------+---------------------------------------------+
| -blocks | Print out block report.
*------------------+---------------------------------------------+
| -locations | Print out locations for every block.
*------------------+---------------------------------------------+
| -racks | Print out network topology for data-node locations.
*------------------+---------------------------------------------+
| -showprogress | Print out progress in output. Default is OFF (no progress).
*------------------+---------------------------------------------+
Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#fsck}<<<hdfs fsck>>>}}
instead.
* <<<fetchdt>>>
Gets Delegation Token from a NameNode.
See {{{../hadoop-hdfs/HdfsUserGuide.html#fetchdt}fetchdt}} for more info.
Usage: <<<hadoop fetchdt [GENERIC_OPTIONS] [--webservice <namenode_http_addr>] <path> >>>
*------------------------------+---------------------------------------------+
|| COMMAND_OPTION || Description
*------------------------------+---------------------------------------------+
| <fileName> | File name to store the token into.
*------------------------------+---------------------------------------------+
| --webservice <https_address> | Use HTTP protocol instead of RPC.
*------------------------------+---------------------------------------------+
Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#fetchdt}
<<<hdfs fetchdt>>>}} instead.
* <<<jar>>>
@ -321,23 +282,8 @@ Administration Commands
* <<<balancer>>>
Runs a cluster balancing utility. An administrator can simply press Ctrl-C
to stop the rebalancing process. See
{{{../hadoop-hdfs/HdfsUserGuide.html#Balancer}Balancer}} for more details.
Usage: <<<hadoop balancer [-threshold <threshold>] [-policy <policy>]>>>
*------------------------+-----------------------------------------------------------+
|| COMMAND_OPTION | Description
*------------------------+-----------------------------------------------------------+
| -threshold <threshold> | Percentage of disk capacity. This overwrites the
| default threshold.
*------------------------+-----------------------------------------------------------+
| -policy <policy> | <<<datanode>>> (default): Cluster is balanced if each datanode is balanced. \
| <<<blockpool>>>: Cluster is balanced if each block pool in each datanode is balanced.
*------------------------+-----------------------------------------------------------+
Note that the <<<blockpool>>> policy is more strict than the <<<datanode>>> policy.
Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#balancer}
<<<hdfs balancer>>>}} instead.
* <<<daemonlog>>>
@ -360,84 +306,13 @@ Administration Commands
* <<<datanode>>>
Runs an HDFS datanode.
Usage: <<<hadoop datanode [-rollback]>>>
*-----------------+-----------------------------------------------------------+
|| COMMAND_OPTION || Description
*-----------------+-----------------------------------------------------------+
| -rollback | Rolls back the datanode to the previous version. This should
| be used after stopping the datanode and distributing the old
| hadoop version.
*-----------------+-----------------------------------------------------------+
Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#datanode}
<<<hdfs datanode>>>}} instead.
* <<<dfsadmin>>>
Runs an HDFS dfsadmin client.
Usage: <<<hadoop dfsadmin [GENERIC_OPTIONS] [-report] [-safemode enter | leave | get | wait] [-refreshNodes] [-finalizeUpgrade] [-upgradeProgress status | details | force] [-metasave filename] [-setQuota <quota> <dirname>...<dirname>] [-clrQuota <dirname>...<dirname>] [-restoreFailedStorage true|false|check] [-help [cmd]]>>>
*-----------------+-----------------------------------------------------------+
|| COMMAND_OPTION || Description
*-----------------+-----------------------------------------------------------+
| -report | Reports basic filesystem information and statistics.
*-----------------+-----------------------------------------------------------+
| -safemode enter / leave / get / wait | Safe mode maintenance command. Safe
| mode is a Namenode state in which it \
| 1. does not accept changes to the name space (read-only) \
| 2. does not replicate or delete blocks. \
| Safe mode is entered automatically at Namenode startup, and
| leaves safe mode automatically when the configured minimum
| percentage of blocks satisfies the minimum replication
| condition. Safe mode can also be entered manually, but then
| it can only be turned off manually as well.
*-----------------+-----------------------------------------------------------+
| -refreshNodes | Re-read the hosts and exclude files to update the set of
| Datanodes that are allowed to connect to the Namenode and
| those that should be decommissioned or recommissioned.
*-----------------+-----------------------------------------------------------+
| -finalizeUpgrade| Finalize upgrade of HDFS. Datanodes delete their previous
| version working directories, followed by Namenode doing the
| same. This completes the upgrade process.
*-----------------+-----------------------------------------------------------+
| -upgradeProgress status / details / force | Request current distributed
| upgrade status, a detailed status or force the upgrade to
| proceed.
*-----------------+-----------------------------------------------------------+
| -metasave filename | Save Namenode's primary data structures to <filename> in
| the directory specified by hadoop.log.dir property.
| <filename> is overwritten if it exists.
| <filename> will contain one line for each of the following\
| 1. Datanodes heart beating with Namenode\
| 2. Blocks waiting to be replicated\
| 3. Blocks currently being replicated\
| 4. Blocks waiting to be deleted\
*-----------------+-----------------------------------------------------------+
| -setQuota <quota> <dirname>...<dirname> | Set the quota <quota> for each
| directory <dirname>. The directory quota is a long integer
| that puts a hard limit on the number of names in the
| directory tree. Best effort for the directory, with faults
| reported if \
| 1. N is not a positive integer, or \
| 2. user is not an administrator, or \
| 3. the directory does not exist or is a file, or \
| 4. the directory would immediately exceed the new quota. \
*-----------------+-----------------------------------------------------------+
| -clrQuota <dirname>...<dirname> | Clear the quota for each directory
| <dirname>. Best effort for the directory, with faults
| reported if \
| 1. the directory does not exist or is a file, or \
| 2. user is not an administrator. It does not fault if the
| directory has no quota.
*-----------------+-----------------------------------------------------------+
| -restoreFailedStorage true / false / check | This option will turn on/off automatic attempt to restore failed storage replicas.
| If a failed storage becomes available again the system will attempt to restore
| edits and/or fsimage during checkpoint. 'check' option will return current setting.
*-----------------+-----------------------------------------------------------+
| -help [cmd] | Displays help for the given command or all commands if none
| is specified.
*-----------------+-----------------------------------------------------------+
Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#dfsadmin}
<<<hdfs dfsadmin>>>}} instead.
* <<<mradmin>>>
@ -470,51 +345,13 @@ Administration Commands
* <<<namenode>>>
Runs the namenode. More info about the upgrade, rollback and finalize is
at {{{../hadoop-hdfs/HdfsUserGuide.html#Upgrade_and_Rollback}Upgrade Rollback}}.
Usage: <<<hadoop namenode [-format] | [-upgrade] | [-rollback] | [-finalize] | [-importCheckpoint]>>>
*--------------------+-----------------------------------------------------------+
|| COMMAND_OPTION || Description
*--------------------+-----------------------------------------------------------+
| -format | Formats the namenode. It starts the namenode, formats
| it and then shuts it down.
*--------------------+-----------------------------------------------------------+
| -upgrade | Namenode should be started with upgrade option after
| the distribution of new hadoop version.
*--------------------+-----------------------------------------------------------+
| -rollback | Rolls back the namenode to the previous version. This
| should be used after stopping the cluster and
| distributing the old hadoop version.
*--------------------+-----------------------------------------------------------+
| -finalize | Finalize will remove the previous state of the file
| system. Recent upgrade will become permanent. Rollback
| option will not be available anymore. After finalization
| it shuts the namenode down.
*--------------------+-----------------------------------------------------------+
| -importCheckpoint | Loads image from a checkpoint directory and saves it
| into the current one. Checkpoint dir is read from
| property fs.checkpoint.dir
*--------------------+-----------------------------------------------------------+
Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#namenode}
<<<hdfs namenode>>>}} instead.
* <<<secondarynamenode>>>
Runs the HDFS secondary namenode.
See {{{../hadoop-hdfs/HdfsUserGuide.html#Secondary_NameNode}Secondary Namenode}}
for more info.
Usage: <<<hadoop secondarynamenode [-checkpoint [force]] | [-geteditsize]>>>
*----------------------+-----------------------------------------------------------+
|| COMMAND_OPTION || Description
*----------------------+-----------------------------------------------------------+
| -checkpoint [-force] | Checkpoints the Secondary namenode if EditLog size
| >= fs.checkpoint.size. If <<<-force>>> is used,
| checkpoint irrespective of EditLog size.
*----------------------+-----------------------------------------------------------+
| -geteditsize | Prints the EditLog size.
*----------------------+-----------------------------------------------------------+
Deprecated, use {{{../hadoop-hdfs/HDFSCommands.html#secondarynamenode}
<<<hdfs secondarynamenode>>>}} instead.
* <<<tasktracker>>>

View File

@ -26,13 +26,11 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.AvroTestUtil;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Shell;
import com.google.common.base.Joiner;
import junit.framework.TestCase;
import static org.junit.Assert.fail;
public class TestPath extends TestCase {
/**
@ -307,28 +305,6 @@ public void testURI() throws URISyntaxException, IOException {
// if the child uri is absolute path
assertEquals("foo://bar/fud#boo", new Path(new Path(new URI(
"foo://bar/baz#bud")), new Path(new URI("/fud#boo"))).toString());
// empty URI
URI uri3 = new URI("");
assertEquals("", uri3.toString());
try {
path = new Path(uri3);
fail("Expected exception for empty URI");
} catch (IllegalArgumentException e) {
// expect to receive an IllegalArgumentException
GenericTestUtils.assertExceptionContains("Can not create a Path"
+ " from an empty URI", e);
}
// null URI
uri3 = null;
try {
path = new Path(uri3);
fail("Expected exception for null URI");
} catch (IllegalArgumentException e) {
// expect to receive an IllegalArgumentException
GenericTestUtils.assertExceptionContains("Can not create a Path"
+ " from a null URI", e);
}
}
/** Test URIs created from Path objects */

View File

@ -0,0 +1,326 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.Token;
import org.codehaus.jackson.map.ObjectMapper;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
import java.io.IOException;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.Map;
import java.util.Properties;
public class TestDelegationTokenAuthenticationHandlerWithMocks {
public static class MockDelegationTokenAuthenticationHandler
extends DelegationTokenAuthenticationHandler {
public MockDelegationTokenAuthenticationHandler() {
super(new AuthenticationHandler() {
@Override
public String getType() {
return "T";
}
@Override
public void init(Properties config) throws ServletException {
}
@Override
public void destroy() {
}
@Override
public boolean managementOperation(AuthenticationToken token,
HttpServletRequest request, HttpServletResponse response)
throws IOException, AuthenticationException {
return false;
}
@Override
public AuthenticationToken authenticate(HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, "mock");
return null;
}
});
}
}
private DelegationTokenAuthenticationHandler handler;
@Before
public void setUp() throws Exception {
Properties conf = new Properties();
conf.put(KerberosDelegationTokenAuthenticationHandler.TOKEN_KIND, "foo");
handler = new MockDelegationTokenAuthenticationHandler();
handler.initTokenManager(conf);
}
@After
public void cleanUp() {
handler.destroy();
}
@Test
public void testManagementOperations() throws Exception {
testNonManagementOperation();
testManagementOperationErrors();
testGetToken(null, new Text("foo"));
testGetToken("bar", new Text("foo"));
testCancelToken();
testRenewToken();
}
private void testNonManagementOperation() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getParameter(
DelegationTokenAuthenticator.OP_PARAM)).thenReturn(null);
Assert.assertTrue(handler.managementOperation(null, request, null));
Mockito.when(request.getParameter(
DelegationTokenAuthenticator.OP_PARAM)).thenReturn("CREATE");
Assert.assertTrue(handler.managementOperation(null, request, null));
}
private void testManagementOperationErrors() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).thenReturn(
DelegationTokenAuthenticator.OP_PARAM + "=" +
DelegationTokenAuthenticator.DelegationTokenOperation.
GETDELEGATIONTOKEN.toString()
);
Mockito.when(request.getMethod()).thenReturn("FOO");
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.startsWith("Wrong HTTP method"));
Mockito.reset(response);
Mockito.when(request.getMethod()).thenReturn(
DelegationTokenAuthenticator.DelegationTokenOperation.
GETDELEGATIONTOKEN.getHttpMethod()
);
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).setStatus(
Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED));
Mockito.verify(response).setHeader(
Mockito.eq(KerberosAuthenticator.WWW_AUTHENTICATE),
Mockito.eq("mock"));
}
private void testGetToken(String renewer, Text expectedTokenKind)
throws Exception {
DelegationTokenAuthenticator.DelegationTokenOperation op =
DelegationTokenAuthenticator.DelegationTokenOperation.
GETDELEGATIONTOKEN;
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).
thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString());
Mockito.when(request.getMethod()).thenReturn(op.getHttpMethod());
AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
Mockito.when(token.getUserName()).thenReturn("user");
Mockito.when(response.getWriter()).thenReturn(new PrintWriter(
new StringWriter()));
Assert.assertFalse(handler.managementOperation(token, request, response));
Mockito.when(request.getQueryString()).
thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString() +
"&" + DelegationTokenAuthenticator.RENEWER_PARAM + "=" + renewer);
Mockito.reset(response);
Mockito.reset(token);
Mockito.when(token.getUserName()).thenReturn("user");
StringWriter writer = new StringWriter();
PrintWriter pwriter = new PrintWriter(writer);
Mockito.when(response.getWriter()).thenReturn(pwriter);
Assert.assertFalse(handler.managementOperation(token, request, response));
if (renewer == null) {
Mockito.verify(token).getUserName();
} else {
Mockito.verify(token).getUserName();
}
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
Mockito.verify(response).setContentType(MediaType.APPLICATION_JSON);
pwriter.close();
String responseOutput = writer.toString();
String tokenLabel = DelegationTokenAuthenticator.
DELEGATION_TOKEN_JSON;
Assert.assertTrue(responseOutput.contains(tokenLabel));
Assert.assertTrue(responseOutput.contains(
DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON));
ObjectMapper jsonMapper = new ObjectMapper();
Map json = jsonMapper.readValue(responseOutput, Map.class);
json = (Map) json.get(tokenLabel);
String tokenStr;
tokenStr = (String) json.get(DelegationTokenAuthenticator.
DELEGATION_TOKEN_URL_STRING_JSON);
Token<DelegationTokenIdentifier> dt = new Token<DelegationTokenIdentifier>();
dt.decodeFromUrlString(tokenStr);
handler.getTokenManager().verifyToken(dt);
Assert.assertEquals(expectedTokenKind, dt.getKind());
}
private void testCancelToken() throws Exception {
DelegationTokenAuthenticator.DelegationTokenOperation op =
DelegationTokenAuthenticator.DelegationTokenOperation.
CANCELDELEGATIONTOKEN;
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).thenReturn(
DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString());
Mockito.when(request.getMethod()).
thenReturn(op.getHttpMethod());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.contains("requires the parameter [token]"));
Mockito.reset(response);
Token<DelegationTokenIdentifier> token =
handler.getTokenManager().createToken(
UserGroupInformation.getCurrentUser(), "foo");
Mockito.when(request.getQueryString()).thenReturn(
DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString() + "&" +
DelegationTokenAuthenticator.TOKEN_PARAM + "=" +
token.encodeToUrlString());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
try {
handler.getTokenManager().verifyToken(token);
Assert.fail();
} catch (SecretManager.InvalidToken ex) {
//NOP
} catch (Throwable ex) {
Assert.fail();
}
}
private void testRenewToken() throws Exception {
DelegationTokenAuthenticator.DelegationTokenOperation op =
DelegationTokenAuthenticator.DelegationTokenOperation.
RENEWDELEGATIONTOKEN;
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).
thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString());
Mockito.when(request.getMethod()).
thenReturn(op.getHttpMethod());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).setStatus(
Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED));
Mockito.verify(response).setHeader(Mockito.eq(
KerberosAuthenticator.WWW_AUTHENTICATE),
Mockito.eq("mock")
);
Mockito.reset(response);
AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
Mockito.when(token.getUserName()).thenReturn("user");
Assert.assertFalse(handler.managementOperation(token, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.contains("requires the parameter [token]"));
Mockito.reset(response);
StringWriter writer = new StringWriter();
PrintWriter pwriter = new PrintWriter(writer);
Mockito.when(response.getWriter()).thenReturn(pwriter);
Token<DelegationTokenIdentifier> dToken =
handler.getTokenManager().createToken(
UserGroupInformation.getCurrentUser(), "user");
Mockito.when(request.getQueryString()).
thenReturn(DelegationTokenAuthenticator.OP_PARAM + "=" + op.toString() +
"&" + DelegationTokenAuthenticator.TOKEN_PARAM + "=" +
dToken.encodeToUrlString());
Assert.assertFalse(handler.managementOperation(token, request, response));
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
pwriter.close();
Assert.assertTrue(writer.toString().contains("long"));
handler.getTokenManager().verifyToken(dToken);
}
@Test
public void testAuthenticate() throws Exception {
testValidDelegationToken();
testInvalidDelegationToken();
}
private void testValidDelegationToken() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Token<DelegationTokenIdentifier> dToken =
handler.getTokenManager().createToken(
UserGroupInformation.getCurrentUser(), "user");
Mockito.when(request.getQueryString()).thenReturn(
DelegationTokenAuthenticator.DELEGATION_PARAM + "=" +
dToken.encodeToUrlString());
AuthenticationToken token = handler.authenticate(request, response);
Assert.assertEquals(UserGroupInformation.getCurrentUser().
getShortUserName(), token.getUserName());
Assert.assertEquals(0, token.getExpires());
Assert.assertEquals(handler.getType(),
token.getType());
Assert.assertTrue(token.isExpired());
}
private void testInvalidDelegationToken() throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getQueryString()).thenReturn(
DelegationTokenAuthenticator.DELEGATION_PARAM + "=invalid");
try {
handler.authenticate(request, response);
Assert.fail();
} catch (AuthenticationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail();
}
}
}

View File

@ -0,0 +1,59 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.StringUtils;
import org.junit.Assert;
import org.junit.Test;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.Arrays;
public class TestDelegationTokenManager {
private static final long DAY_IN_SECS = 86400;
@Test
public void testDTManager() throws Exception {
DelegationTokenManager tm = new DelegationTokenManager(new Text("foo"),
DAY_IN_SECS, DAY_IN_SECS, DAY_IN_SECS, DAY_IN_SECS);
tm.init();
Token<DelegationTokenIdentifier> token =
tm.createToken(UserGroupInformation.getCurrentUser(), "foo");
Assert.assertNotNull(token);
tm.verifyToken(token);
Assert.assertTrue(tm.renewToken(token, "foo") > System.currentTimeMillis());
tm.cancelToken(token, "foo");
try {
tm.verifyToken(token);
Assert.fail();
} catch (IOException ex) {
//NOP
} catch (Exception ex) {
Assert.fail();
}
tm.destroy();
}
}

View File

@ -0,0 +1,727 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.security.token.delegation.web;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
import org.apache.hadoop.security.authentication.util.KerberosUtil;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import org.codehaus.jackson.map.ObjectMapper;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mortbay.jetty.Connector;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.servlet.Context;
import org.mortbay.jetty.servlet.FilterHolder;
import org.mortbay.jetty.servlet.ServletHolder;
import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import javax.security.auth.login.LoginContext;
import javax.servlet.Filter;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.File;
import java.io.IOException;
import java.io.Writer;
import java.net.HttpURLConnection;
import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.URL;
import java.security.Principal;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Callable;
public class TestWebDelegationToken {
private Server jetty;
public static class DummyAuthenticationHandler
implements AuthenticationHandler {
@Override
public String getType() {
return "dummy";
}
@Override
public void init(Properties config) throws ServletException {
}
@Override
public void destroy() {
}
@Override
public boolean managementOperation(AuthenticationToken token,
HttpServletRequest request, HttpServletResponse response)
throws IOException, AuthenticationException {
return false;
}
@Override
public AuthenticationToken authenticate(HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token = null;
if (request.getParameter("authenticated") != null) {
token = new AuthenticationToken(request.getParameter("authenticated"),
"U", "test");
} else {
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, "dummy");
}
return token;
}
}
public static class DummyDelegationTokenAuthenticationHandler extends
DelegationTokenAuthenticationHandler {
public DummyDelegationTokenAuthenticationHandler() {
super(new DummyAuthenticationHandler());
}
@Override
public void init(Properties config) throws ServletException {
Properties conf = new Properties(config);
conf.setProperty(TOKEN_KIND, "token-kind");
initTokenManager(conf);
}
}
public static class AFilter extends DelegationTokenAuthenticationFilter {
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) {
Properties conf = new Properties();
conf.setProperty(AUTH_TYPE,
DummyDelegationTokenAuthenticationHandler.class.getName());
return conf;
}
}
public static class PingServlet extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
resp.setStatus(HttpServletResponse.SC_OK);
resp.getWriter().write("ping");
}
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
Writer writer = resp.getWriter();
writer.write("ping: ");
IOUtils.copy(req.getReader(), writer);
resp.setStatus(HttpServletResponse.SC_OK);
}
}
protected Server createJettyServer() {
try {
InetAddress localhost = InetAddress.getLocalHost();
ServerSocket ss = new ServerSocket(0, 50, localhost);
int port = ss.getLocalPort();
ss.close();
jetty = new Server(0);
jetty.getConnectors()[0].setHost("localhost");
jetty.getConnectors()[0].setPort(port);
return jetty;
} catch (Exception ex) {
throw new RuntimeException("Could not setup Jetty: " + ex.getMessage(),
ex);
}
}
protected String getJettyURL() {
Connector c = jetty.getConnectors()[0];
return "http://" + c.getHost() + ":" + c.getPort();
}
@Before
public void setUp() throws Exception {
// resetting hadoop security to simple
org.apache.hadoop.conf.Configuration conf =
new org.apache.hadoop.conf.Configuration();
UserGroupInformation.setConfiguration(conf);
jetty = createJettyServer();
}
@After
public void cleanUp() throws Exception {
jetty.stop();
// resetting hadoop security to simple
org.apache.hadoop.conf.Configuration conf =
new org.apache.hadoop.conf.Configuration();
UserGroupInformation.setConfiguration(conf);
}
protected Server getJetty() {
return jetty;
}
@Test
public void testRawHttpCalls() throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(AFilter.class), "/*", 0);
context.addServlet(new ServletHolder(PingServlet.class), "/bar");
try {
jetty.start();
URL nonAuthURL = new URL(getJettyURL() + "/foo/bar");
URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
// unauthenticated access to URL
HttpURLConnection conn = (HttpURLConnection) nonAuthURL.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
conn.getResponseCode());
// authenticated access to URL
conn = (HttpURLConnection) authURL.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
// unauthenticated access to get delegation token
URL url = new URL(nonAuthURL.toExternalForm() + "?op=GETDELEGATIONTOKEN");
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
conn.getResponseCode());
// authenticated access to get delegation token
url = new URL(authURL.toExternalForm() +
"&op=GETDELEGATIONTOKEN&renewer=foo");
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
ObjectMapper mapper = new ObjectMapper();
Map map = mapper.readValue(conn.getInputStream(), Map.class);
String dt = (String) ((Map) map.get("Token")).get("urlString");
Assert.assertNotNull(dt);
// delegation token access to URL
url = new URL(nonAuthURL.toExternalForm() + "?delegation=" + dt);
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
// delegation token and authenticated access to URL
url = new URL(authURL.toExternalForm() + "&delegation=" + dt);
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
// renew delegation token, unauthenticated access to URL
url = new URL(nonAuthURL.toExternalForm() +
"?op=RENEWDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED,
conn.getResponseCode());
// renew delegation token, authenticated access to URL
url = new URL(authURL.toExternalForm() +
"&op=RENEWDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
// renew delegation token, authenticated access to URL, not renewer
url = new URL(getJettyURL() +
"/foo/bar?authenticated=bar&op=RENEWDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
conn.getResponseCode());
// cancel delegation token, nonauthenticated access to URL
url = new URL(nonAuthURL.toExternalForm() +
"?op=CANCELDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
// cancel canceled delegation token, nonauthenticated access to URL
url = new URL(nonAuthURL.toExternalForm() +
"?op=CANCELDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_NOT_FOUND,
conn.getResponseCode());
// get new delegation token
url = new URL(authURL.toExternalForm() +
"&op=GETDELEGATIONTOKEN&renewer=foo");
conn = (HttpURLConnection) url.openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
mapper = new ObjectMapper();
map = mapper.readValue(conn.getInputStream(), Map.class);
dt = (String) ((Map) map.get("Token")).get("urlString");
Assert.assertNotNull(dt);
// cancel delegation token, authenticated access to URL
url = new URL(authURL.toExternalForm() +
"&op=CANCELDELEGATIONTOKEN&token=" + dt);
conn = (HttpURLConnection) url.openConnection();
conn.setRequestMethod("PUT");
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
} finally {
jetty.stop();
}
}
@Test
public void testDelegationTokenAuthenticatorCalls() throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(AFilter.class), "/*", 0);
context.addServlet(new ServletHolder(PingServlet.class), "/bar");
try {
jetty.start();
URL nonAuthURL = new URL(getJettyURL() + "/foo/bar");
URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
URL authURL2 = new URL(getJettyURL() + "/foo/bar?authenticated=bar");
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
try {
aUrl.getDelegationToken(nonAuthURL, token, "foo");
Assert.fail();
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage().contains("401"));
}
aUrl.getDelegationToken(authURL, token, "foo");
Assert.assertNotNull(token.getDelegationToken());
Assert.assertEquals(new Text("token-kind"),
token.getDelegationToken().getKind());
aUrl.renewDelegationToken(authURL, token);
try {
aUrl.renewDelegationToken(nonAuthURL, token);
Assert.fail();
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage().contains("401"));
}
aUrl.getDelegationToken(authURL, token, "foo");
try {
aUrl.renewDelegationToken(authURL2, token);
Assert.fail();
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage().contains("403"));
}
aUrl.getDelegationToken(authURL, token, "foo");
aUrl.cancelDelegationToken(authURL, token);
aUrl.getDelegationToken(authURL, token, "foo");
aUrl.cancelDelegationToken(nonAuthURL, token);
aUrl.getDelegationToken(authURL, token, "foo");
try {
aUrl.renewDelegationToken(nonAuthURL, token);
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage().contains("401"));
}
} finally {
jetty.stop();
}
}
private static class DummyDelegationTokenSecretManager
extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
public DummyDelegationTokenSecretManager() {
super(10000, 10000, 10000, 10000);
}
@Override
public DelegationTokenIdentifier createIdentifier() {
return new DelegationTokenIdentifier(new Text("fooKind"));
}
}
@Test
public void testExternalDelegationTokenSecretManager() throws Exception {
DummyDelegationTokenSecretManager secretMgr
= new DummyDelegationTokenSecretManager();
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(AFilter.class), "/*", 0);
context.addServlet(new ServletHolder(PingServlet.class), "/bar");
try {
secretMgr.startThreads();
context.setAttribute(DelegationTokenAuthenticationFilter.
DELEGATION_TOKEN_SECRET_MANAGER_ATTR, secretMgr);
jetty.start();
URL authURL = new URL(getJettyURL() + "/foo/bar?authenticated=foo");
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
aUrl.getDelegationToken(authURL, token, "foo");
Assert.assertNotNull(token.getDelegationToken());
Assert.assertEquals(new Text("fooKind"),
token.getDelegationToken().getKind());
} finally {
jetty.stop();
secretMgr.stopThreads();
}
}
public static class NoDTFilter extends AuthenticationFilter {
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) {
Properties conf = new Properties();
conf.setProperty(AUTH_TYPE, PseudoAuthenticationHandler.TYPE);
return conf;
}
}
public static class NoDTHandlerDTAFilter
extends DelegationTokenAuthenticationFilter {
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) {
Properties conf = new Properties();
conf.setProperty(AUTH_TYPE, PseudoAuthenticationHandler.TYPE);
return conf;
}
}
public static class UserServlet extends HttpServlet {
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
resp.setStatus(HttpServletResponse.SC_OK);
resp.getWriter().write(req.getUserPrincipal().getName());
}
}
@Test
public void testDelegationTokenAuthenticationURLWithNoDTFilter()
throws Exception {
testDelegationTokenAuthenticatedURLWithNoDT(NoDTFilter.class);
}
@Test
public void testDelegationTokenAuthenticationURLWithNoDTHandler()
throws Exception {
testDelegationTokenAuthenticatedURLWithNoDT(NoDTHandlerDTAFilter.class);
}
// we are also implicitly testing the KerberosDelegationTokenAuthenticator
// fallback here
private void testDelegationTokenAuthenticatedURLWithNoDT(
Class<? extends Filter> filterClass) throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(filterClass), "/*", 0);
context.addServlet(new ServletHolder(UserServlet.class), "/bar");
try {
jetty.start();
final URL url = new URL(getJettyURL() + "/foo/bar");
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("foo");
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
HttpURLConnection conn = aUrl.openConnection(url, token);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
List<String> ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals("foo", ret.get(0));
try {
aUrl.getDelegationToken(url, token, "foo");
Assert.fail();
} catch (AuthenticationException ex) {
Assert.assertTrue(ex.getMessage().contains(
"delegation token operation"));
}
return null;
}
});
} finally {
jetty.stop();
}
}
public static class PseudoDTAFilter
extends DelegationTokenAuthenticationFilter {
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) {
Properties conf = new Properties();
conf.setProperty(AUTH_TYPE,
PseudoDelegationTokenAuthenticationHandler.class.getName());
conf.setProperty(DelegationTokenAuthenticationHandler.TOKEN_KIND,
"token-kind");
return conf;
}
}
@Test
public void testFallbackToPseudoDelegationTokenAuthenticator()
throws Exception {
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(PseudoDTAFilter.class), "/*", 0);
context.addServlet(new ServletHolder(UserServlet.class), "/bar");
try {
jetty.start();
final URL url = new URL(getJettyURL() + "/foo/bar");
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("foo");
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
HttpURLConnection conn = aUrl.openConnection(url, token);
Assert.assertEquals(HttpURLConnection.HTTP_OK,
conn.getResponseCode());
List<String> ret = IOUtils.readLines(conn.getInputStream());
Assert.assertEquals(1, ret.size());
Assert.assertEquals("foo", ret.get(0));
aUrl.getDelegationToken(url, token, "foo");
Assert.assertNotNull(token.getDelegationToken());
Assert.assertEquals(new Text("token-kind"),
token.getDelegationToken().getKind());
return null;
}
});
} finally {
jetty.stop();
}
}
public static class KDTAFilter extends DelegationTokenAuthenticationFilter {
static String keytabFile;
@Override
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) {
Properties conf = new Properties();
conf.setProperty(AUTH_TYPE,
KerberosDelegationTokenAuthenticationHandler.class.getName());
conf.setProperty(KerberosAuthenticationHandler.KEYTAB, keytabFile);
conf.setProperty(KerberosAuthenticationHandler.PRINCIPAL,
"HTTP/localhost");
conf.setProperty(KerberosDelegationTokenAuthenticationHandler.TOKEN_KIND,
"token-kind");
return conf;
}
}
private static class KerberosConfiguration extends Configuration {
private String principal;
private String keytab;
public KerberosConfiguration(String principal, String keytab) {
this.principal = principal;
this.keytab = keytab;
}
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
Map<String, String> options = new HashMap<String, String>();
options.put("principal", principal);
options.put("keyTab", keytab);
options.put("useKeyTab", "true");
options.put("storeKey", "true");
options.put("doNotPrompt", "true");
options.put("useTicketCache", "true");
options.put("renewTGT", "true");
options.put("refreshKrb5Config", "true");
options.put("isInitiator", "true");
String ticketCache = System.getenv("KRB5CCNAME");
if (ticketCache != null) {
options.put("ticketCache", ticketCache);
}
options.put("debug", "true");
return new AppConfigurationEntry[]{
new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
options),};
}
}
public static <T> T doAsKerberosUser(String principal, String keytab,
final Callable<T> callable) throws Exception {
LoginContext loginContext = null;
try {
Set<Principal> principals = new HashSet<Principal>();
principals.add(new KerberosPrincipal(principal));
Subject subject = new Subject(false, principals, new HashSet<Object>(),
new HashSet<Object>());
loginContext = new LoginContext("", subject, null,
new KerberosConfiguration(principal, keytab));
loginContext.login();
subject = loginContext.getSubject();
return Subject.doAs(subject, new PrivilegedExceptionAction<T>() {
@Override
public T run() throws Exception {
return callable.call();
}
});
} catch (PrivilegedActionException ex) {
throw ex.getException();
} finally {
if (loginContext != null) {
loginContext.logout();
}
}
}
@Test
public void testKerberosDelegationTokenAuthenticator() throws Exception {
// setting hadoop security to kerberos
org.apache.hadoop.conf.Configuration conf =
new org.apache.hadoop.conf.Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
File testDir = new File("target/" + UUID.randomUUID().toString());
Assert.assertTrue(testDir.mkdirs());
MiniKdc kdc = new MiniKdc(MiniKdc.createConf(), testDir);
final Server jetty = createJettyServer();
Context context = new Context();
context.setContextPath("/foo");
jetty.setHandler(context);
context.addFilter(new FilterHolder(KDTAFilter.class), "/*", 0);
context.addServlet(new ServletHolder(UserServlet.class), "/bar");
try {
kdc.start();
File keytabFile = new File(testDir, "test.keytab");
kdc.createPrincipal(keytabFile, "client", "HTTP/localhost");
KDTAFilter.keytabFile = keytabFile.getAbsolutePath();
jetty.start();
final DelegationTokenAuthenticatedURL.Token token =
new DelegationTokenAuthenticatedURL.Token();
final DelegationTokenAuthenticatedURL aUrl =
new DelegationTokenAuthenticatedURL();
final URL url = new URL(getJettyURL() + "/foo/bar");
try {
aUrl.getDelegationToken(url, token, "foo");
Assert.fail();
} catch (AuthenticationException ex) {
Assert.assertTrue(ex.getMessage().contains("GSSException"));
}
doAsKerberosUser("client", keytabFile.getAbsolutePath(),
new Callable<Void>() {
@Override
public Void call() throws Exception {
aUrl.getDelegationToken(url, token, "client");
Assert.assertNotNull(token.getDelegationToken());
aUrl.renewDelegationToken(url, token);
Assert.assertNotNull(token.getDelegationToken());
aUrl.getDelegationToken(url, token, "foo");
Assert.assertNotNull(token.getDelegationToken());
try {
aUrl.renewDelegationToken(url, token);
Assert.fail();
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage().contains("403"));
}
aUrl.getDelegationToken(url, token, "foo");
aUrl.cancelDelegationToken(url, token);
Assert.assertNull(token.getDelegationToken());
return null;
}
});
} finally {
jetty.stop();
kdc.stop();
}
}
}

View File

@ -39,12 +39,14 @@
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.lib.wsrs.EnumSetParam;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.Authenticator;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticatedURL;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticator;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
@ -67,7 +69,6 @@
import java.io.InputStream;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
@ -75,7 +76,6 @@
import java.text.MessageFormat;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Callable;
/**
* HttpFSServer implementation of the FileSystemAccess FileSystem.
@ -217,34 +217,15 @@ public String getMethod() {
}
private AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
private DelegationTokenAuthenticatedURL authURL;
private DelegationTokenAuthenticatedURL.Token authToken =
new DelegationTokenAuthenticatedURL.Token();
private URI uri;
private InetSocketAddress httpFSAddr;
private Path workingDir;
private UserGroupInformation realUser;
private String doAs;
private Token<?> delegationToken;
//This method enables handling UGI doAs with SPNEGO; we have to
//fall back to the real user who logged in with Kerberos credentials
private <T> T doAsRealUserIfNecessary(final Callable<T> callable)
throws IOException {
try {
if (realUser.getShortUserName().equals(doAs)) {
return callable.call();
} else {
return realUser.doAs(new PrivilegedExceptionAction<T>() {
@Override
public T run() throws Exception {
return callable.call();
}
});
}
} catch (Exception ex) {
throw new IOException(ex.toString(), ex);
}
}
/**
* Convenience method that creates a <code>HttpURLConnection</code> for the
@ -291,20 +272,26 @@ private HttpURLConnection getConnection(final String method,
private HttpURLConnection getConnection(final String method,
Map<String, String> params, Map<String, List<String>> multiValuedParams,
Path path, boolean makeQualified) throws IOException {
if (!realUser.getShortUserName().equals(doAs)) {
params.put(DO_AS_PARAM, doAs);
}
HttpFSKerberosAuthenticator.injectDelegationToken(params, delegationToken);
if (makeQualified) {
path = makeQualified(path);
}
final URL url = HttpFSUtils.createURL(path, params, multiValuedParams);
return doAsRealUserIfNecessary(new Callable<HttpURLConnection>() {
@Override
public HttpURLConnection call() throws Exception {
return getConnection(url, method);
try {
return UserGroupInformation.getCurrentUser().doAs(
new PrivilegedExceptionAction<HttpURLConnection>() {
@Override
public HttpURLConnection run() throws Exception {
return getConnection(url, method);
}
}
);
} catch (Exception ex) {
if (ex instanceof IOException) {
throw (IOException) ex;
} else {
throw new IOException(ex);
}
});
}
}
/**
@ -321,12 +308,8 @@ public HttpURLConnection call() throws Exception {
* @throws IOException thrown if an IO error occurs.
*/
private HttpURLConnection getConnection(URL url, String method) throws IOException {
Class<? extends Authenticator> klass =
getConf().getClass("httpfs.authenticator.class",
HttpFSKerberosAuthenticator.class, Authenticator.class);
Authenticator authenticator = ReflectionUtils.newInstance(klass, getConf());
try {
HttpURLConnection conn = new AuthenticatedURL(authenticator).openConnection(url, authToken);
HttpURLConnection conn = authURL.openConnection(url, authToken);
conn.setRequestMethod(method);
if (method.equals(HTTP_POST) || method.equals(HTTP_PUT)) {
conn.setDoOutput(true);
@ -357,10 +340,17 @@ public void initialize(URI name, Configuration conf) throws IOException {
super.initialize(name, conf);
try {
uri = new URI(name.getScheme() + "://" + name.getAuthority());
httpFSAddr = NetUtils.createSocketAddr(getCanonicalUri().toString());
} catch (URISyntaxException ex) {
throw new IOException(ex);
}
Class<? extends DelegationTokenAuthenticator> klass =
getConf().getClass("httpfs.authenticator.class",
KerberosDelegationTokenAuthenticator.class,
DelegationTokenAuthenticator.class);
DelegationTokenAuthenticator authenticator =
ReflectionUtils.newInstance(klass, getConf());
authURL = new DelegationTokenAuthenticatedURL(authenticator);
}
@Override
@ -1059,38 +1049,57 @@ public void readFields(DataInput in) throws IOException {
@Override
public Token<?> getDelegationToken(final String renewer)
throws IOException {
return doAsRealUserIfNecessary(new Callable<Token<?>>() {
@Override
public Token<?> call() throws Exception {
return HttpFSKerberosAuthenticator.
getDelegationToken(uri, httpFSAddr, authToken, renewer);
try {
return UserGroupInformation.getCurrentUser().doAs(
new PrivilegedExceptionAction<Token<?>>() {
@Override
public Token<?> run() throws Exception {
return authURL.getDelegationToken(uri.toURL(), authToken,
renewer);
}
}
);
} catch (Exception ex) {
if (ex instanceof IOException) {
throw (IOException) ex;
} else {
throw new IOException(ex);
}
});
}
}
public long renewDelegationToken(final Token<?> token) throws IOException {
return doAsRealUserIfNecessary(new Callable<Long>() {
@Override
public Long call() throws Exception {
return HttpFSKerberosAuthenticator.
renewDelegationToken(uri, authToken, token);
try {
return UserGroupInformation.getCurrentUser().doAs(
new PrivilegedExceptionAction<Long>() {
@Override
public Long run() throws Exception {
return authURL.renewDelegationToken(uri.toURL(), authToken);
}
}
);
} catch (Exception ex) {
if (ex instanceof IOException) {
throw (IOException) ex;
} else {
throw new IOException(ex);
}
});
}
}
public void cancelDelegationToken(final Token<?> token) throws IOException {
HttpFSKerberosAuthenticator.
cancelDelegationToken(uri, authToken, token);
authURL.cancelDelegationToken(uri.toURL(), authToken);
}
@Override
public Token<?> getRenewToken() {
return delegationToken;
return null; //TODO : for renewer
}
@Override
@SuppressWarnings("unchecked")
public <T extends TokenIdentifier> void setDelegationToken(Token<T> token) {
delegationToken = token;
//TODO : for renewer
}
@Override

View File

@ -1,188 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.client;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.client.Authenticator;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
import org.json.simple.JSONObject;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
import java.util.HashMap;
import java.util.Map;
/**
* A <code>KerberosAuthenticator</code> subclass that falls back to
* {@link HttpFSPseudoAuthenticator}.
*/
@InterfaceAudience.Private
public class HttpFSKerberosAuthenticator extends KerberosAuthenticator {
/**
* Returns the fallback authenticator if the server does not use
* Kerberos SPNEGO HTTP authentication.
*
* @return a {@link HttpFSPseudoAuthenticator} instance.
*/
@Override
protected Authenticator getFallBackAuthenticator() {
return new HttpFSPseudoAuthenticator();
}
private static final String HTTP_GET = "GET";
private static final String HTTP_PUT = "PUT";
public static final String DELEGATION_PARAM = "delegation";
public static final String TOKEN_PARAM = "token";
public static final String RENEWER_PARAM = "renewer";
public static final String DELEGATION_TOKEN_JSON = "Token";
public static final String DELEGATION_TOKEN_URL_STRING_JSON = "urlString";
public static final String RENEW_DELEGATION_TOKEN_JSON = "long";
/**
* DelegationToken operations.
*/
@InterfaceAudience.Private
public static enum DelegationTokenOperation {
GETDELEGATIONTOKEN(HTTP_GET, true),
RENEWDELEGATIONTOKEN(HTTP_PUT, true),
CANCELDELEGATIONTOKEN(HTTP_PUT, false);
private String httpMethod;
private boolean requiresKerberosCredentials;
private DelegationTokenOperation(String httpMethod,
boolean requiresKerberosCredentials) {
this.httpMethod = httpMethod;
this.requiresKerberosCredentials = requiresKerberosCredentials;
}
public String getHttpMethod() {
return httpMethod;
}
public boolean requiresKerberosCredentials() {
return requiresKerberosCredentials;
}
}
public static void injectDelegationToken(Map<String, String> params,
Token<?> dtToken)
throws IOException {
if (dtToken != null) {
params.put(DELEGATION_PARAM, dtToken.encodeToUrlString());
}
}
private boolean hasDelegationToken(URL url) {
return url.getQuery().contains(DELEGATION_PARAM + "=");
}
@Override
public void authenticate(URL url, AuthenticatedURL.Token token)
throws IOException, AuthenticationException {
if (!hasDelegationToken(url)) {
super.authenticate(url, token);
}
}
public static final String OP_PARAM = "op";
public static Token<?> getDelegationToken(URI fsURI,
InetSocketAddress httpFSAddr, AuthenticatedURL.Token token,
String renewer) throws IOException {
DelegationTokenOperation op =
DelegationTokenOperation.GETDELEGATIONTOKEN;
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM, op.toString());
params.put(RENEWER_PARAM,renewer);
URL url = HttpFSUtils.createURL(new Path(fsURI), params);
AuthenticatedURL aUrl =
new AuthenticatedURL(new HttpFSKerberosAuthenticator());
try {
HttpURLConnection conn = aUrl.openConnection(url, token);
conn.setRequestMethod(op.getHttpMethod());
HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) ((JSONObject)
HttpFSUtils.jsonParse(conn)).get(DELEGATION_TOKEN_JSON);
String tokenStr = (String)
json.get(DELEGATION_TOKEN_URL_STRING_JSON);
Token<AbstractDelegationTokenIdentifier> dToken =
new Token<AbstractDelegationTokenIdentifier>();
dToken.decodeFromUrlString(tokenStr);
SecurityUtil.setTokenService(dToken, httpFSAddr);
return dToken;
} catch (AuthenticationException ex) {
throw new IOException(ex.toString(), ex);
}
}
public static long renewDelegationToken(URI fsURI,
AuthenticatedURL.Token token, Token<?> dToken) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM,
DelegationTokenOperation.RENEWDELEGATIONTOKEN.toString());
params.put(TOKEN_PARAM, dToken.encodeToUrlString());
URL url = HttpFSUtils.createURL(new Path(fsURI), params);
AuthenticatedURL aUrl =
new AuthenticatedURL(new HttpFSKerberosAuthenticator());
try {
HttpURLConnection conn = aUrl.openConnection(url, token);
conn.setRequestMethod(
DelegationTokenOperation.RENEWDELEGATIONTOKEN.getHttpMethod());
HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) ((JSONObject)
HttpFSUtils.jsonParse(conn)).get(DELEGATION_TOKEN_JSON);
return (Long)(json.get(RENEW_DELEGATION_TOKEN_JSON));
} catch (AuthenticationException ex) {
throw new IOException(ex.toString(), ex);
}
}
public static void cancelDelegationToken(URI fsURI,
AuthenticatedURL.Token token, Token<?> dToken) throws IOException {
Map<String, String> params = new HashMap<String, String>();
params.put(OP_PARAM,
DelegationTokenOperation.CANCELDELEGATIONTOKEN.toString());
params.put(TOKEN_PARAM, dToken.encodeToUrlString());
URL url = HttpFSUtils.createURL(new Path(fsURI), params);
AuthenticatedURL aUrl =
new AuthenticatedURL(new HttpFSKerberosAuthenticator());
try {
HttpURLConnection conn = aUrl.openConnection(url, token);
conn.setRequestMethod(
DelegationTokenOperation.CANCELDELEGATIONTOKEN.getHttpMethod());
HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
} catch (AuthenticationException ex) {
throw new IOException(ex.toString(), ex);
}
}
}

View File

@ -20,7 +20,10 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticationFilter;
import javax.servlet.FilterConfig;
import javax.servlet.ServletException;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
@ -32,7 +35,9 @@
* from HttpFSServer's server configuration.
*/
@InterfaceAudience.Private
public class HttpFSAuthenticationFilter extends AuthenticationFilter {
public class HttpFSAuthenticationFilter
extends DelegationTokenAuthenticationFilter {
private static final String CONF_PREFIX = "httpfs.authentication.";
private static final String SIGNATURE_SECRET_FILE = SIGNATURE_SECRET + ".file";
@ -50,7 +55,8 @@ public class HttpFSAuthenticationFilter extends AuthenticationFilter {
* @return hadoop-auth configuration read from HttpFSServer's configuration.
*/
@Override
protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig) {
protected Properties getConfiguration(String configPrefix,
FilterConfig filterConfig) throws ServletException{
Properties props = new Properties();
Configuration conf = HttpFSServerWebApp.get().getConfig();
@ -64,11 +70,6 @@ protected Properties getConfiguration(String configPrefix, FilterConfig filterCo
}
}
if (props.getProperty(AUTH_TYPE).equals("kerberos")) {
props.setProperty(AUTH_TYPE,
HttpFSKerberosAuthenticationHandler.class.getName());
}
String signatureSecretFile = props.getProperty(SIGNATURE_SECRET_FILE, null);
if (signatureSecretFile == null) {
throw new RuntimeException("Undefined property: " + SIGNATURE_SECRET_FILE);

View File

@ -1,230 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator.DelegationTokenOperation;
import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
import org.apache.hadoop.lib.service.DelegationTokenManager;
import org.apache.hadoop.lib.service.DelegationTokenManagerException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
import org.apache.hadoop.security.token.Token;
import org.json.simple.JSONObject;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
import java.io.IOException;
import java.io.Writer;
import java.text.MessageFormat;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Set;
/**
* Server side <code>AuthenticationHandler</code> that authenticates requests
* using the incoming delegation token as a 'delegation' query string parameter.
* <p/>
* If no delegation token is present in the request, it delegates to the
* {@link KerberosAuthenticationHandler}
*/
@InterfaceAudience.Private
public class HttpFSKerberosAuthenticationHandler
extends KerberosAuthenticationHandler {
static final Set<String> DELEGATION_TOKEN_OPS =
new HashSet<String>();
static {
DELEGATION_TOKEN_OPS.add(
DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
DELEGATION_TOKEN_OPS.add(
DelegationTokenOperation.RENEWDELEGATIONTOKEN.toString());
DELEGATION_TOKEN_OPS.add(
DelegationTokenOperation.CANCELDELEGATIONTOKEN.toString());
}
public static final String TYPE = "kerberos-dt";
/**
* Returns the authentication type of the handler.
*
* @return <code>kerberos-dt</code>
*/
@Override
public String getType() {
return TYPE;
}
private static final String ENTER = System.getProperty("line.separator");
@Override
@SuppressWarnings("unchecked")
public boolean managementOperation(AuthenticationToken token,
HttpServletRequest request, HttpServletResponse response)
throws IOException, AuthenticationException {
boolean requestContinues = true;
String op = request.getParameter(HttpFSFileSystem.OP_PARAM);
op = (op != null) ? op.toUpperCase() : null;
if (DELEGATION_TOKEN_OPS.contains(op) &&
!request.getMethod().equals("OPTIONS")) {
DelegationTokenOperation dtOp =
DelegationTokenOperation.valueOf(op);
if (dtOp.getHttpMethod().equals(request.getMethod())) {
if (dtOp.requiresKerberosCredentials() && token == null) {
response.sendError(HttpServletResponse.SC_UNAUTHORIZED,
MessageFormat.format(
"Operation [{0}] requires SPNEGO authentication established",
dtOp));
requestContinues = false;
} else {
DelegationTokenManager tokenManager =
HttpFSServerWebApp.get().get(DelegationTokenManager.class);
try {
Map map = null;
switch (dtOp) {
case GETDELEGATIONTOKEN:
String renewerParam =
request.getParameter(HttpFSKerberosAuthenticator.RENEWER_PARAM);
if (renewerParam == null) {
renewerParam = token.getUserName();
}
Token<?> dToken = tokenManager.createToken(
UserGroupInformation.getCurrentUser(), renewerParam);
map = delegationTokenToJSON(dToken);
break;
case RENEWDELEGATIONTOKEN:
case CANCELDELEGATIONTOKEN:
String tokenParam =
request.getParameter(HttpFSKerberosAuthenticator.TOKEN_PARAM);
if (tokenParam == null) {
response.sendError(HttpServletResponse.SC_BAD_REQUEST,
MessageFormat.format(
"Operation [{0}] requires the parameter [{1}]",
dtOp, HttpFSKerberosAuthenticator.TOKEN_PARAM));
requestContinues = false;
} else {
if (dtOp == DelegationTokenOperation.CANCELDELEGATIONTOKEN) {
Token<DelegationTokenIdentifier> dt =
new Token<DelegationTokenIdentifier>();
dt.decodeFromUrlString(tokenParam);
tokenManager.cancelToken(dt,
UserGroupInformation.getCurrentUser().getUserName());
} else {
Token<DelegationTokenIdentifier> dt =
new Token<DelegationTokenIdentifier>();
dt.decodeFromUrlString(tokenParam);
long expirationTime =
tokenManager.renewToken(dt, token.getUserName());
map = new HashMap();
map.put("long", expirationTime);
}
}
break;
}
if (requestContinues) {
response.setStatus(HttpServletResponse.SC_OK);
if (map != null) {
response.setContentType(MediaType.APPLICATION_JSON);
Writer writer = response.getWriter();
JSONObject.writeJSONString(map, writer);
writer.write(ENTER);
writer.flush();
}
requestContinues = false;
}
} catch (DelegationTokenManagerException ex) {
throw new AuthenticationException(ex.toString(), ex);
}
}
} else {
response.sendError(HttpServletResponse.SC_BAD_REQUEST,
MessageFormat.format(
"Wrong HTTP method [{0}] for operation [{1}], it should be [{2}]",
request.getMethod(), dtOp, dtOp.getHttpMethod()));
requestContinues = false;
}
}
return requestContinues;
}
@SuppressWarnings("unchecked")
private static Map delegationTokenToJSON(Token token) throws IOException {
Map json = new LinkedHashMap();
json.put(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON,
token.encodeToUrlString());
Map response = new LinkedHashMap();
response.put(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON, json);
return response;
}
/**
* Authenticates a request looking for the <code>delegation</code>
* query-string parameter and verifying it is a valid token. If there is no
* <code>delegation</code> query-string parameter, it delegates the
* authentication to the {@link KerberosAuthenticationHandler} unless it is
* disabled.
*
* @param request the HTTP client request.
* @param response the HTTP client response.
*
* @return the authentication token for the authenticated request.
* @throws IOException thrown if an IO error occurred.
* @throws AuthenticationException thrown if the authentication failed.
*/
@Override
public AuthenticationToken authenticate(HttpServletRequest request,
HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token;
String delegationParam =
request.getParameter(HttpFSKerberosAuthenticator.DELEGATION_PARAM);
if (delegationParam != null) {
try {
Token<DelegationTokenIdentifier> dt =
new Token<DelegationTokenIdentifier>();
dt.decodeFromUrlString(delegationParam);
DelegationTokenManager tokenManager =
HttpFSServerWebApp.get().get(DelegationTokenManager.class);
UserGroupInformation ugi = tokenManager.verifyToken(dt);
final String shortName = ugi.getShortUserName();
// creating an ephemeral token
token = new AuthenticationToken(shortName, ugi.getUserName(),
getType());
token.setExpires(0);
} catch (Throwable ex) {
throw new AuthenticationException("Could not verify DelegationToken, " +
ex.toString(), ex);
}
} else {
token = super.authenticate(request, response);
}
return token;
}
}
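For illustration only: once a delegation token URL-string has been obtained through the GETDELEGATIONTOKEN operation handled above, later requests can skip SPNEGO entirely by passing it in the 'delegation' query-string parameter, which is exactly what this handler looks for. The class name, host, and port below are hypothetical.

import java.net.HttpURLConnection;
import java.net.URL;

public class DelegationParamSketch {
  public static void main(String[] args) throws Exception {
    // A previously obtained delegation token in URL-string form, passed in
    // as an argument only to keep the sketch self-contained.
    String tokenUrlString = args[0];
    URL url = new URL("http://localhost:14000/webhdfs/v1/?op=GETHOMEDIRECTORY"
        + "&delegation=" + tokenUrlString);
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    // No SPNEGO round trip happens here; the server validates the token instead.
    System.out.println("status: " + conn.getResponseCode());
  }
}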

View File

@ -1,78 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
/**
* Service interface to manage HttpFS delegation tokens.
*/
@InterfaceAudience.Private
public interface DelegationTokenManager {
/**
* Creates a delegation token.
*
* @param ugi UGI creating the token.
* @param renewer token renewer.
* @return new delegation token.
* @throws DelegationTokenManagerException thrown if the token could not be
* created.
*/
public Token<DelegationTokenIdentifier> createToken(UserGroupInformation ugi,
String renewer)
throws DelegationTokenManagerException;
/**
* Renews a delegation token.
*
* @param token delegation token to renew.
* @param renewer token renewer.
* @return epoch expiration time.
* @throws DelegationTokenManagerException thrown if the token could not be
* renewed.
*/
public long renewToken(Token<DelegationTokenIdentifier> token, String renewer)
throws DelegationTokenManagerException;
/**
* Cancels a delegation token.
*
* @param token delegation token to cancel.
* @param canceler token canceler.
* @throws DelegationTokenManagerException thrown if the token could not be
* canceled.
*/
public void cancelToken(Token<DelegationTokenIdentifier> token,
String canceler)
throws DelegationTokenManagerException;
/**
* Verifies a delegation token.
*
* @param token delegation token to verify.
* @return the UGI for the token.
* @throws DelegationTokenManagerException thrown if the token could not be
* verified.
*/
public UserGroupInformation verifyToken(Token<DelegationTokenIdentifier> token)
throws DelegationTokenManagerException;
}
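A sketch of the call sequence this (now removed) interface supported: the wrapper class, the renewer name, and the way the manager and UGI are obtained are assumptions for illustration.

import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
import org.apache.hadoop.lib.service.DelegationTokenManager;
import org.apache.hadoop.lib.service.DelegationTokenManagerException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

public class DelegationTokenLifecycleSketch {
  static void lifecycle(DelegationTokenManager tm, UserGroupInformation ugi)
      throws DelegationTokenManagerException {
    Token<DelegationTokenIdentifier> token = tm.createToken(ugi, "renewer");
    tm.verifyToken(token);                            // resolves the owning UGI
    long expiresAt = tm.renewToken(token, "renewer"); // epoch expiration time
    tm.cancelToken(token, "renewer");                 // verifyToken would now fail
  }
}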

View File

@ -1,51 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.lib.lang.XException;
/**
* Exception thrown by the {@link DelegationTokenManager} service implementation.
*/
@InterfaceAudience.Private
public class DelegationTokenManagerException extends XException {
public enum ERROR implements XException.ERROR {
DT01("Could not verify delegation token, {0}"),
DT02("Could not renew delegation token, {0}"),
DT03("Could not cancel delegation token, {0}"),
DT04("Could not create delegation token, {0}");
private String template;
ERROR(String template) {
this.template = template;
}
@Override
public String getTemplate() {
return template;
}
}
public DelegationTokenManagerException(ERROR error, Object... params) {
super(error, params);
}
}

View File

@ -1,242 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.security;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.lib.server.BaseService;
import org.apache.hadoop.lib.server.ServerException;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
import org.apache.hadoop.lib.service.DelegationTokenManager;
import org.apache.hadoop.lib.service.DelegationTokenManagerException;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
/**
* DelegationTokenManager service implementation.
*/
@InterfaceAudience.Private
public class DelegationTokenManagerService extends BaseService
implements DelegationTokenManager {
private static final String PREFIX = "delegation.token.manager";
private static final String UPDATE_INTERVAL = "update.interval";
private static final String MAX_LIFETIME = "max.lifetime";
private static final String RENEW_INTERVAL = "renew.interval";
private static final long HOUR = 60 * 60 * 1000;
private static final long DAY = 24 * HOUR;
DelegationTokenSecretManager secretManager = null;
private Text tokenKind;
public DelegationTokenManagerService() {
super(PREFIX);
}
/**
* Initializes the service.
*
* @throws ServiceException thrown if the service could not be initialized.
*/
@Override
protected void init() throws ServiceException {
long updateInterval = getServiceConfig().getLong(UPDATE_INTERVAL, DAY);
long maxLifetime = getServiceConfig().getLong(MAX_LIFETIME, 7 * DAY);
long renewInterval = getServiceConfig().getLong(RENEW_INTERVAL, DAY);
tokenKind = (HttpFSServerWebApp.get().isSslEnabled())
? SWebHdfsFileSystem.TOKEN_KIND : WebHdfsFileSystem.TOKEN_KIND;
secretManager = new DelegationTokenSecretManager(tokenKind, updateInterval,
maxLifetime,
renewInterval, HOUR);
try {
secretManager.startThreads();
} catch (IOException ex) {
throw new ServiceException(ServiceException.ERROR.S12,
DelegationTokenManager.class.getSimpleName(),
ex.toString(), ex);
}
}
/**
* Destroys the service.
*/
@Override
public void destroy() {
secretManager.stopThreads();
super.destroy();
}
/**
* Returns the service interface.
*
* @return the service interface.
*/
@Override
public Class getInterface() {
return DelegationTokenManager.class;
}
/**
* Creates a delegation token.
*
* @param ugi UGI creating the token.
* @param renewer token renewer.
* @return new delegation token.
* @throws DelegationTokenManagerException thrown if the token could not be
* created.
*/
@Override
public Token<DelegationTokenIdentifier> createToken(UserGroupInformation ugi,
String renewer)
throws DelegationTokenManagerException {
renewer = (renewer == null) ? ugi.getShortUserName() : renewer;
String user = ugi.getUserName();
Text owner = new Text(user);
Text realUser = null;
if (ugi.getRealUser() != null) {
realUser = new Text(ugi.getRealUser().getUserName());
}
DelegationTokenIdentifier tokenIdentifier =
new DelegationTokenIdentifier(tokenKind, owner, new Text(renewer), realUser);
Token<DelegationTokenIdentifier> token =
new Token<DelegationTokenIdentifier>(tokenIdentifier, secretManager);
try {
SecurityUtil.setTokenService(token,
HttpFSServerWebApp.get().getAuthority());
} catch (ServerException ex) {
throw new DelegationTokenManagerException(
DelegationTokenManagerException.ERROR.DT04, ex.toString(), ex);
}
return token;
}
/**
* Renews a delegation token.
*
* @param token delegation token to renew.
* @param renewer token renewer.
* @return epoch expiration time.
* @throws DelegationTokenManagerException thrown if the token could not be
* renewed.
*/
@Override
public long renewToken(Token<DelegationTokenIdentifier> token, String renewer)
throws DelegationTokenManagerException {
try {
return secretManager.renewToken(token, renewer);
} catch (IOException ex) {
throw new DelegationTokenManagerException(
DelegationTokenManagerException.ERROR.DT02, ex.toString(), ex);
}
}
/**
* Cancels a delegation token.
*
* @param token delegation token to cancel.
* @param canceler token canceler.
* @throws DelegationTokenManagerException thrown if the token could not be
* canceled.
*/
@Override
public void cancelToken(Token<DelegationTokenIdentifier> token,
String canceler)
throws DelegationTokenManagerException {
try {
secretManager.cancelToken(token, canceler);
} catch (IOException ex) {
throw new DelegationTokenManagerException(
DelegationTokenManagerException.ERROR.DT03, ex.toString(), ex);
}
}
/**
* Verifies a delegation token.
*
* @param token delegation token to verify.
* @return the UGI for the token.
* @throws DelegationTokenManagerException thrown if the token could not be
* verified.
*/
@Override
public UserGroupInformation verifyToken(Token<DelegationTokenIdentifier> token)
throws DelegationTokenManagerException {
ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
DataInputStream dis = new DataInputStream(buf);
DelegationTokenIdentifier id = new DelegationTokenIdentifier(tokenKind);
try {
id.readFields(dis);
dis.close();
secretManager.verifyToken(id, token.getPassword());
} catch (Exception ex) {
throw new DelegationTokenManagerException(
DelegationTokenManagerException.ERROR.DT01, ex.toString(), ex);
}
return id.getUser();
}
private static class DelegationTokenSecretManager
extends AbstractDelegationTokenSecretManager<DelegationTokenIdentifier> {
private Text tokenKind;
/**
* Create a secret manager
*
* @param delegationKeyUpdateInterval the number of milliseconds for rolling new
* secret keys.
* @param delegationTokenMaxLifetime the maximum lifetime of the delegation
* tokens
* @param delegationTokenRenewInterval how often the tokens must be renewed
* @param delegationTokenRemoverScanInterval how often the tokens are
* scanned for expired tokens
*/
public DelegationTokenSecretManager(Text tokenKind, long delegationKeyUpdateInterval,
long delegationTokenMaxLifetime,
long delegationTokenRenewInterval,
long delegationTokenRemoverScanInterval) {
super(delegationKeyUpdateInterval, delegationTokenMaxLifetime,
delegationTokenRenewInterval, delegationTokenRemoverScanInterval);
this.tokenKind = tokenKind;
}
@Override
public DelegationTokenIdentifier createIdentifier() {
return new DelegationTokenIdentifier(tokenKind);
}
}
}

View File

@ -35,7 +35,6 @@
org.apache.hadoop.lib.service.scheduler.SchedulerService,
org.apache.hadoop.lib.service.security.GroupsService,
org.apache.hadoop.lib.service.security.ProxyUserService,
org.apache.hadoop.lib.service.security.DelegationTokenManagerService,
org.apache.hadoop.lib.service.hadoop.FileSystemAccessService
</value>
<description>
@ -226,12 +225,4 @@
</description>
</property>
<property>
<name>httpfs.user.provider.user.pattern</name>
<value>^[A-Za-z_][A-Za-z0-9._-]*[$]?$</value>
<description>
Valid pattern for user and group names; it must be a valid Java regex.
</description>
</property>
</configuration>

View File

@ -17,15 +17,19 @@
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
import javax.servlet.ServletException;
import java.util.Properties;
public class HttpFSKerberosAuthenticationHandlerForTesting
extends HttpFSKerberosAuthenticationHandler {
extends KerberosDelegationTokenAuthenticationHandler {
@Override
public void init(Properties config) throws ServletException {
//NOP overwrite to avoid Kerberos initialization
config.setProperty(TOKEN_KIND, "t");
initTokenManager(config);
}
@Override

View File

@ -1,94 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
import org.apache.hadoop.lib.server.Service;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.Groups;
import org.apache.hadoop.lib.wsrs.UserProvider;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.util.Signer;
import org.apache.hadoop.test.HFSTestCase;
import org.apache.hadoop.test.HadoopUsersConfTestHelper;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.test.TestHdfs;
import org.apache.hadoop.test.TestHdfsHelper;
import org.apache.hadoop.test.TestJetty;
import org.apache.hadoop.test.TestJettyHelper;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.junit.Assert;
import org.junit.Test;
import org.mortbay.jetty.Server;
import org.mortbay.jetty.webapp.WebAppContext;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.Writer;
import java.net.HttpURLConnection;
import java.net.URL;
import java.text.MessageFormat;
import java.util.Arrays;
import java.util.List;
public class TestHttpFSCustomUserName extends HFSTestCase {
@Test
@TestDir
@TestJetty
public void defaultUserName() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration httpfsConf = new Configuration(false);
HttpFSServerWebApp server =
new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
server.init();
Assert.assertEquals(UserProvider.USER_PATTERN_DEFAULT,
UserProvider.getUserPattern().pattern());
server.destroy();
}
@Test
@TestDir
@TestJetty
public void customUserName() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration httpfsConf = new Configuration(false);
httpfsConf.set(UserProvider.USER_PATTERN_KEY, "1");
HttpFSServerWebApp server =
new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
server.init();
Assert.assertEquals("1", UserProvider.getUserPattern().pattern());
server.destroy();
}
}

View File

@ -1,316 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator.DelegationTokenOperation;
import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.lib.service.DelegationTokenIdentifier;
import org.apache.hadoop.lib.service.DelegationTokenManager;
import org.apache.hadoop.lib.service.DelegationTokenManagerException;
import org.apache.hadoop.lib.servlet.ServerWebApp;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.server.AuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.HFSTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.MediaType;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.net.InetAddress;
import java.net.InetSocketAddress;
public class TestHttpFSKerberosAuthenticationHandler extends HFSTestCase {
@Test
@TestDir
public void testManagementOperationsWebHdfsFileSystem() throws Exception {
testManagementOperations(WebHdfsFileSystem.TOKEN_KIND);
}
@Test
@TestDir
public void testManagementOperationsSWebHdfsFileSystem() throws Exception {
try {
System.setProperty(HttpFSServerWebApp.NAME +
ServerWebApp.SSL_ENABLED, "true");
testManagementOperations(SWebHdfsFileSystem.TOKEN_KIND);
} finally {
System.getProperties().remove(HttpFSServerWebApp.NAME +
ServerWebApp.SSL_ENABLED);
}
}
private void testManagementOperations(Text expectedTokenKind) throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration httpfsConf = new Configuration(false);
HttpFSServerWebApp server =
new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
server.setAuthority(new InetSocketAddress(InetAddress.getLocalHost(),
14000));
AuthenticationHandler handler =
new HttpFSKerberosAuthenticationHandlerForTesting();
try {
server.init();
handler.init(null);
testNonManagementOperation(handler);
testManagementOperationErrors(handler);
testGetToken(handler, null, expectedTokenKind);
testGetToken(handler, "foo", expectedTokenKind);
testCancelToken(handler);
testRenewToken(handler);
} finally {
if (handler != null) {
handler.destroy();
}
server.destroy();
}
}
private void testNonManagementOperation(AuthenticationHandler handler)
throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
thenReturn(null);
Assert.assertTrue(handler.managementOperation(null, request, null));
Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
thenReturn(HttpFSFileSystem.Operation.CREATE.toString());
Assert.assertTrue(handler.managementOperation(null, request, null));
}
private void testManagementOperationErrors(AuthenticationHandler handler)
throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
thenReturn(DelegationTokenOperation.GETDELEGATIONTOKEN.toString());
Mockito.when(request.getMethod()).thenReturn("FOO");
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.startsWith("Wrong HTTP method"));
Mockito.reset(response);
Mockito.when(request.getMethod()).
thenReturn(DelegationTokenOperation.GETDELEGATIONTOKEN.getHttpMethod());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED),
Mockito.contains("requires SPNEGO"));
}
private void testGetToken(AuthenticationHandler handler, String renewer,
Text expectedTokenKind) throws Exception {
DelegationTokenOperation op = DelegationTokenOperation.GETDELEGATIONTOKEN;
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
thenReturn(op.toString());
Mockito.when(request.getMethod()).
thenReturn(op.getHttpMethod());
AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
Mockito.when(token.getUserName()).thenReturn("user");
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.RENEWER_PARAM)).
thenReturn(renewer);
Mockito.reset(response);
StringWriter writer = new StringWriter();
PrintWriter pwriter = new PrintWriter(writer);
Mockito.when(response.getWriter()).thenReturn(pwriter);
Assert.assertFalse(handler.managementOperation(token, request, response));
if (renewer == null) {
Mockito.verify(token).getUserName();
} else {
Mockito.verify(token, Mockito.never()).getUserName();
}
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
Mockito.verify(response).setContentType(MediaType.APPLICATION_JSON);
pwriter.close();
String responseOutput = writer.toString();
String tokenLabel = HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON;
Assert.assertTrue(responseOutput.contains(tokenLabel));
Assert.assertTrue(responseOutput.contains(
HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON));
JSONObject json = (JSONObject) new JSONParser().parse(responseOutput);
json = (JSONObject) json.get(tokenLabel);
String tokenStr;
tokenStr = (String)
json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
Token<DelegationTokenIdentifier> dt = new Token<DelegationTokenIdentifier>();
dt.decodeFromUrlString(tokenStr);
HttpFSServerWebApp.get().get(DelegationTokenManager.class).verifyToken(dt);
Assert.assertEquals(expectedTokenKind, dt.getKind());
}
private void testCancelToken(AuthenticationHandler handler)
throws Exception {
DelegationTokenOperation op =
DelegationTokenOperation.CANCELDELEGATIONTOKEN;
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
thenReturn(op.toString());
Mockito.when(request.getMethod()).
thenReturn(op.getHttpMethod());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.contains("requires the parameter [token]"));
Mockito.reset(response);
Token<DelegationTokenIdentifier> token =
HttpFSServerWebApp.get().get(DelegationTokenManager.class).createToken(
UserGroupInformation.getCurrentUser(), "foo");
Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.TOKEN_PARAM)).
thenReturn(token.encodeToUrlString());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
try {
HttpFSServerWebApp.get().get(DelegationTokenManager.class).verifyToken(token);
Assert.fail();
}
catch (DelegationTokenManagerException ex) {
Assert.assertTrue(ex.toString().contains("DT01"));
}
}
private void testRenewToken(AuthenticationHandler handler)
throws Exception {
DelegationTokenOperation op =
DelegationTokenOperation.RENEWDELEGATIONTOKEN;
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).
thenReturn(op.toString());
Mockito.when(request.getMethod()).
thenReturn(op.getHttpMethod());
Assert.assertFalse(handler.managementOperation(null, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED),
Mockito.contains("equires SPNEGO authentication established"));
Mockito.reset(response);
AuthenticationToken token = Mockito.mock(AuthenticationToken.class);
Mockito.when(token.getUserName()).thenReturn("user");
Assert.assertFalse(handler.managementOperation(token, request, response));
Mockito.verify(response).sendError(
Mockito.eq(HttpServletResponse.SC_BAD_REQUEST),
Mockito.contains("requires the parameter [token]"));
Mockito.reset(response);
StringWriter writer = new StringWriter();
PrintWriter pwriter = new PrintWriter(writer);
Mockito.when(response.getWriter()).thenReturn(pwriter);
Token<DelegationTokenIdentifier> dToken =
HttpFSServerWebApp.get().get(DelegationTokenManager.class).createToken(
UserGroupInformation.getCurrentUser(), "user");
Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.TOKEN_PARAM)).
thenReturn(dToken.encodeToUrlString());
Assert.assertFalse(handler.managementOperation(token, request, response));
Mockito.verify(response).setStatus(HttpServletResponse.SC_OK);
pwriter.close();
Assert.assertTrue(writer.toString().contains("long"));
HttpFSServerWebApp.get().get(DelegationTokenManager.class).verifyToken(dToken);
}
@Test
@TestDir
public void testAuthenticate() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration httpfsConf = new Configuration(false);
HttpFSServerWebApp server =
new HttpFSServerWebApp(dir, dir, dir, dir, httpfsConf);
server.setAuthority(new InetSocketAddress(InetAddress.getLocalHost(),
14000));
AuthenticationHandler handler =
new HttpFSKerberosAuthenticationHandlerForTesting();
try {
server.init();
handler.init(null);
testValidDelegationToken(handler);
testInvalidDelegationToken(handler);
} finally {
if (handler != null) {
handler.destroy();
}
server.destroy();
}
}
private void testValidDelegationToken(AuthenticationHandler handler)
throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Token<DelegationTokenIdentifier> dToken =
HttpFSServerWebApp.get().get(DelegationTokenManager.class).createToken(
UserGroupInformation.getCurrentUser(), "user");
Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.DELEGATION_PARAM)).
thenReturn(dToken.encodeToUrlString());
AuthenticationToken token = handler.authenticate(request, response);
Assert.assertEquals(UserGroupInformation.getCurrentUser().getShortUserName(),
token.getUserName());
Assert.assertEquals(0, token.getExpires());
Assert.assertEquals(HttpFSKerberosAuthenticationHandler.TYPE,
token.getType());
Assert.assertTrue(token.isExpired());
}
private void testInvalidDelegationToken(AuthenticationHandler handler)
throws Exception {
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(request.getParameter(HttpFSKerberosAuthenticator.DELEGATION_PARAM)).
thenReturn("invalid");
try {
handler.authenticate(request, response);
Assert.fail();
} catch (AuthenticationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail();
}
}
}

View File

@ -18,6 +18,8 @@
package org.apache.hadoop.fs.http.server;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
import org.json.simple.JSONArray;
import org.junit.Assert;
@ -43,7 +45,6 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
import org.apache.hadoop.lib.server.Service;
import org.apache.hadoop.lib.server.ServiceException;
import org.apache.hadoop.lib.service.Groups;
@ -682,7 +683,7 @@ public void testDelegationTokenOperations() throws Exception {
AuthenticationToken token =
new AuthenticationToken("u", "p",
HttpFSKerberosAuthenticationHandlerForTesting.TYPE);
new KerberosDelegationTokenAuthenticationHandler().getType());
token.setExpires(System.currentTimeMillis() + 100000000);
Signer signer = new Signer(new StringSignerSecretProvider("secret"));
String tokenSigned = signer.sign(token.toString());
@ -706,9 +707,9 @@ public void testDelegationTokenOperations() throws Exception {
JSONObject json = (JSONObject)
new JSONParser().parse(new InputStreamReader(conn.getInputStream()));
json = (JSONObject)
json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON);
json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
String tokenStr = (String)
json.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
json.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
url = new URL(TestJettyHelper.getJettyURL(),
"/webhdfs/v1/?op=GETHOMEDIRECTORY&delegation=" + tokenStr);

View File

@ -23,11 +23,11 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
import org.apache.hadoop.fs.http.client.HttpFSKerberosAuthenticator;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
import org.apache.hadoop.test.HFSTestCase;
import org.apache.hadoop.test.KerberosTestUtils;
import org.apache.hadoop.test.TestDir;
@ -166,9 +166,9 @@ public Void call() throws Exception {
.parse(new InputStreamReader(conn.getInputStream()));
json =
(JSONObject) json
.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_JSON);
.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_JSON);
String tokenStr = (String) json
.get(HttpFSKerberosAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
.get(DelegationTokenAuthenticator.DELEGATION_TOKEN_URL_STRING_JSON);
//access httpfs using the delegation token
url = new URL(TestJettyHelper.getJettyURL(),

View File

@ -1,89 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.lib.service.security;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
import org.apache.hadoop.lib.server.Server;
import org.apache.hadoop.lib.service.DelegationTokenManager;
import org.apache.hadoop.lib.service.DelegationTokenManagerException;
import org.apache.hadoop.lib.service.hadoop.FileSystemAccessService;
import org.apache.hadoop.lib.service.instrumentation.InstrumentationService;
import org.apache.hadoop.lib.service.scheduler.SchedulerService;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.HTestCase;
import org.apache.hadoop.test.TestDir;
import org.apache.hadoop.test.TestDirHelper;
import org.apache.hadoop.util.StringUtils;
import org.junit.Assert;
import org.junit.Test;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.Arrays;
public class TestDelegationTokenManagerService extends HTestCase {
@Test
@TestDir
public void service() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("httpfs.services", StringUtils.join(",",
Arrays.asList(InstrumentationService.class.getName(),
SchedulerService.class.getName(),
FileSystemAccessService.class.getName(),
DelegationTokenManagerService.class.getName())));
Server server = new HttpFSServerWebApp(dir, dir, dir, dir, conf);
server.init();
DelegationTokenManager tm = server.get(DelegationTokenManager.class);
Assert.assertNotNull(tm);
server.destroy();
}
@Test
@TestDir
@SuppressWarnings("unchecked")
public void tokens() throws Exception {
String dir = TestDirHelper.getTestDir().getAbsolutePath();
Configuration conf = new Configuration(false);
conf.set("server.services", StringUtils.join(",",
Arrays.asList(DelegationTokenManagerService.class.getName())));
HttpFSServerWebApp server = new HttpFSServerWebApp(dir, dir, dir, dir, conf);
server.setAuthority(new InetSocketAddress(InetAddress.getLocalHost(), 14000));
server.init();
DelegationTokenManager tm = server.get(DelegationTokenManager.class);
Token token = tm.createToken(UserGroupInformation.getCurrentUser(), "foo");
Assert.assertNotNull(token);
tm.verifyToken(token);
Assert.assertTrue(tm.renewToken(token, "foo") > System.currentTimeMillis());
tm.cancelToken(token, "foo");
try {
tm.verifyToken(token);
Assert.fail();
} catch (DelegationTokenManagerException ex) {
//NOP
} catch (Exception ex) {
Assert.fail();
}
server.destroy();
}
}

View File

@ -387,12 +387,35 @@ Release 2.6.0 - UNRELEASED
HDFS-6812. Remove addBlock and replaceBlock from DatanodeDescriptor.
(szetszwo)
HDFS-6781. Separate HDFS commands from CommandsManual.apt.vm. (Akira
Ajisaka via Arpit Agarwal)
HDFS-6728. Dynamically add new volumes to DataStorage, formatted if
necessary. (Lei Xu via atm)
HDFS-6740. Make FSDataset support adding data volumes dynamically. (Lei
Xu via atm)
HDFS-6722. Display readable last contact time for dead nodes on NN webUI.
(Ming Ma via wheat9)
HDFS-6772. Get DN storages out of blockContentsStale state faster after
NN restarts. (Ming Ma via Arpit Agarwal)
HDFS-573. Porting libhdfs to Windows. (cnauroth)
HDFS-6828. Separate block replica dispatching from Balancer. (szetszwo via
jing9)
OPTIMIZATIONS
HDFS-6690. Deduplicate xattr names in memory. (wang)
BUG FIXES
HDFS-6823. dfs.web.authentication.kerberos.principal shows up in logs for
insecure HDFS (Allen Wittenauer via raviprak)
HDFS-6617. Flake TestDFSZKFailoverController.testManualFailoverWithDFSHAAdmin
due to a long edit log sync op. (Liang Xie via cnauroth)

View File

@@ -360,16 +360,97 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<profiles>
<profile>
<id>windows</id>
<id>native-win</id>
<activation>
<activeByDefault>false</activeByDefault>
<os>
<family>windows</family>
</os>
</activation>
<properties>
<windows.build>true</windows.build>
</properties>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
<executions>
<execution>
<id>enforce-os</id>
<goals>
<goal>enforce</goal>
</goals>
<configuration>
<rules>
<requireOS>
<family>windows</family>
<message>native-win build only supported on Windows</message>
</requireOS>
</rules>
<fail>true</fail>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-antrun-plugin</artifactId>
<executions>
<execution>
<id>make</id>
<phase>compile</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<mkdir dir="${project.build.directory}/native"/>
<exec executable="cmake" dir="${project.build.directory}/native"
failonerror="true">
<arg line="${basedir}/src/ -DGENERATED_JAVAH=${project.build.directory}/native/javah -DJVM_ARCH_DATA_MODEL=${sun.arch.data.model} -DREQUIRE_LIBWEBHDFS=${require.libwebhdfs} -DREQUIRE_FUSE=${require.fuse} -G 'Visual Studio 10 Win64'"/>
</exec>
<exec executable="msbuild" dir="${project.build.directory}/native"
failonerror="true">
<arg line="ALL_BUILD.vcxproj /nologo /p:Configuration=Release"/>
</exec>
<!-- Copy for inclusion in distribution. -->
<copy todir="${project.build.directory}/bin">
<fileset dir="${project.build.directory}/native/target/bin/Release"/>
</copy>
</target>
</configuration>
</execution>
<execution>
<id>native_tests</id>
<phase>test</phase>
<goals><goal>run</goal></goals>
<configuration>
<skip>${skipTests}</skip>
<target>
<property name="compile_classpath" refid="maven.compile.classpath"/>
<property name="test_classpath" refid="maven.test.classpath"/>
<macrodef name="run-test">
<attribute name="test"/>
<sequential>
<echo message="Running @{test}"/>
<exec executable="${project.build.directory}/native/Release/@{test}" failonerror="true" dir="${project.build.directory}/native/">
<env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
<!-- HADOOP_HOME required to find winutils. -->
<env key="HADOOP_HOME" value="${hadoop.common.build.dir}"/>
<!-- Make sure hadoop.dll and jvm.dll are on PATH. -->
<env key="PATH" value="${env.PATH};${hadoop.common.build.dir}/bin;${java.home}/jre/bin/server;${java.home}/bin/server"/>
</exec>
<echo message="Finished @{test}"/>
</sequential>
</macrodef>
<run-test test="test_libhdfs_threaded"/>
<echo message="Skipping test_libhdfs_zerocopy"/>
<run-test test="test_native_mini_dfs"/>
</target>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>native</id>
@@ -407,21 +488,25 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<phase>test</phase>
<goals><goal>run</goal></goals>
<configuration>
<skip>${skipTests}</skip>
<target>
<property name="compile_classpath" refid="maven.compile.classpath"/>
<property name="test_classpath" refid="maven.test.classpath"/>
<exec executable="sh" failonerror="true" dir="${project.build.directory}/native/">
<arg value="-c"/>
<arg value="[ x$SKIPTESTS = xtrue ] || ${project.build.directory}/native/test_libhdfs_threaded"/>
<env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
<env key="SKIPTESTS" value="${skipTests}"/>
</exec>
<exec executable="sh" failonerror="true" dir="${project.build.directory}/native/">
<arg value="-c"/>
<arg value="[ x$SKIPTESTS = xtrue ] || ${project.build.directory}/native/test_native_mini_dfs"/>
<env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
<env key="SKIPTESTS" value="${skipTests}"/>
</exec>
<macrodef name="run-test">
<attribute name="test"/>
<sequential>
<echo message="Running @{test}"/>
<exec executable="${project.build.directory}/native/@{test}" failonerror="true" dir="${project.build.directory}/native/">
<env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
<!-- Make sure libhadoop.so is on LD_LIBRARY_PATH. -->
<env key="LD_LIBRARY_PATH" value="${env.LD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib"/>
</exec>
<echo message="Finished @{test}"/>
</sequential>
</macrodef>
<run-test test="test_libhdfs_threaded"/>
<run-test test="test_libhdfs_zerocopy"/>
<run-test test="test_native_mini_dfs"/>
</target>
</configuration>
</execution>

View File

@@ -76,9 +76,39 @@ if (NOT GENERATED_JAVAH)
MESSAGE(FATAL_ERROR "You must set the CMake variable GENERATED_JAVAH")
endif (NOT GENERATED_JAVAH)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_GNU_SOURCE")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64")
if (WIN32)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /O2")
# Set warning level 4.
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W4")
# Skip "unreferenced formal parameter".
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4100")
# Skip "conditional expression is constant".
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /wd4127")
# Skip deprecated POSIX function warnings.
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_CRT_NONSTDC_NO_DEPRECATE")
# Skip CRT non-secure function warnings. If we can convert usage of
# strerror, getenv and ctime to their secure CRT equivalents, then we can
# re-enable the CRT non-secure function warnings.
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_CRT_SECURE_NO_WARNINGS")
# Omit unneeded headers.
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DWIN32_LEAN_AND_MEAN")
set(OS_DIR main/native/libhdfs/os/windows)
set(OUT_DIR target/bin)
else (WIN32)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -Wall -O2")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_REENTRANT -D_GNU_SOURCE")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_LARGEFILE_SOURCE -D_FILE_OFFSET_BITS=64")
set(OS_DIR main/native/libhdfs/os/posix)
set(OS_LINK_LIBRARIES pthread)
set(OUT_DIR target/usr/local/lib)
endif (WIN32)
include_directories(
${GENERATED_JAVAH}
@@ -87,6 +117,7 @@ include_directories(
${JNI_INCLUDE_DIRS}
main/native
main/native/libhdfs
${OS_DIR}
)
set(_FUSE_DFS_VERSION 0.1.0)
@@ -96,6 +127,9 @@ add_dual_library(hdfs
main/native/libhdfs/exception.c
main/native/libhdfs/jni_helper.c
main/native/libhdfs/hdfs.c
main/native/libhdfs/common/htable.c
${OS_DIR}/mutexes.c
${OS_DIR}/thread_local_storage.c
)
if (NEED_LINK_DL)
set(LIB_DL dl)
@@ -104,17 +138,14 @@ endif(NEED_LINK_DL)
target_link_dual_libraries(hdfs
${JAVA_JVM_LIBRARY}
${LIB_DL}
pthread
${OS_LINK_LIBRARIES}
)
dual_output_directory(hdfs target/usr/local/lib)
dual_output_directory(hdfs ${OUT_DIR})
set(LIBHDFS_VERSION "0.0.0")
set_target_properties(hdfs PROPERTIES
SOVERSION ${LIBHDFS_VERSION})
add_library(posix_util
main/native/util/posix_util.c
)
add_executable(test_libhdfs_ops
main/native/libhdfs/test/test_libhdfs_ops.c
)
@@ -156,11 +187,12 @@ target_link_libraries(test_native_mini_dfs
add_executable(test_libhdfs_threaded
main/native/libhdfs/expect.c
main/native/libhdfs/test_libhdfs_threaded.c
${OS_DIR}/thread.c
)
target_link_libraries(test_libhdfs_threaded
hdfs
native_mini_dfs
pthread
${OS_LINK_LIBRARIES}
)
add_executable(test_libhdfs_zerocopy
@@ -170,17 +202,21 @@ add_executable(test_libhdfs_zerocopy
target_link_libraries(test_libhdfs_zerocopy
hdfs
native_mini_dfs
pthread
${OS_LINK_LIBRARIES}
)
add_executable(test_libhdfs_vecsum
main/native/libhdfs/test/vecsum.c
)
target_link_libraries(test_libhdfs_vecsum
hdfs
pthread
rt
)
# Skip vecsum on Windows. This could be made to work in the future by
# introducing an abstraction layer over the sys/mman.h functions.
if (NOT WIN32)
add_executable(test_libhdfs_vecsum
main/native/libhdfs/test/vecsum.c
)
target_link_libraries(test_libhdfs_vecsum
hdfs
pthread
rt
)
endif(NOT WIN32)
IF(REQUIRE_LIBWEBHDFS)
add_subdirectory(contrib/libwebhdfs)

View File

@@ -1668,9 +1668,11 @@ public static HttpServer2.Builder httpServerTemplateForNNAndJN(
.setKeytabConfKey(getSpnegoKeytabKey(conf, spnegoKeytabFileKey));
// initialize the webserver for uploading/downloading files.
LOG.info("Starting web server as: "
+ SecurityUtil.getServerPrincipal(conf.get(spnegoUserNameKey),
httpAddr.getHostName()));
if (UserGroupInformation.isSecurityEnabled()) {
LOG.info("Starting web server as: "
+ SecurityUtil.getServerPrincipal(conf.get(spnegoUserNameKey),
httpAddr.getHostName()));
}
if (policy.isHttpEnabled()) {
if (httpAddr.getPort() == 0) {

View File

@@ -34,6 +34,10 @@
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.io.IOUtils;
@@ -90,14 +94,16 @@ public String getBlockpoolID() {
return blockpoolID;
}
/** @return the namenode proxy. */
public NamenodeProtocol getNamenode() {
return namenode;
/** @return blocks with locations. */
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
throws IOException {
return namenode.getBlocks(datanode, size);
}
/** @return the client proxy. */
public ClientProtocol getClient() {
return client;
/** @return live datanode storage reports. */
public DatanodeStorageReport[] getLiveDatanodeStorageReport()
throws IOException {
return client.getDatanodeStorageReport(DatanodeReportType.LIVE);
}
/** @return the key manager */

View File

@@ -135,7 +135,10 @@ public class DatanodeManager {
/** The number of stale DataNodes */
private volatile int numStaleNodes;
/** The number of stale storages */
private volatile int numStaleStorages;
/**
* Whether or not this cluster has ever consisted of more than 1 rack,
* according to the NetworkTopology.
@@ -1142,6 +1145,22 @@ public int getNumStaleNodes() {
return this.numStaleNodes;
}
/**
* Get the number of content stale storages.
*/
public int getNumStaleStorages() {
return numStaleStorages;
}
/**
* Set the number of content stale storages.
*
* @param numStaleStorages The number of content stale storages.
*/
void setNumStaleStorages(int numStaleStorages) {
this.numStaleStorages = numStaleStorages;
}
/** Fetch live and dead datanodes. */
public void fetchDatanodes(final List<DatanodeDescriptor> live,
final List<DatanodeDescriptor> dead, final boolean removeDecommissionNode) {

View File

@@ -256,6 +256,7 @@ void heartbeatCheck() {
DatanodeID dead = null;
// check the number of stale nodes
int numOfStaleNodes = 0;
int numOfStaleStorages = 0;
synchronized(this) {
for (DatanodeDescriptor d : datanodes) {
if (dead == null && dm.isDatanodeDead(d)) {
@@ -265,10 +266,17 @@ void heartbeatCheck() {
if (d.isStale(dm.getStaleInterval())) {
numOfStaleNodes++;
}
DatanodeStorageInfo[] storageInfos = d.getStorageInfos();
for(DatanodeStorageInfo storageInfo : storageInfos) {
if (storageInfo.areBlockContentsStale()) {
numOfStaleStorages++;
}
}
}
// Set the number of stale nodes in the DatanodeManager
dm.setNumStaleNodes(numOfStaleNodes);
dm.setNumStaleStorages(numOfStaleStorages);
}
allAlive = dead == null;

View File

@@ -601,7 +601,7 @@ boolean processCommandFromActor(DatanodeCommand cmd,
LOG.info("DatanodeCommand action : DNA_REGISTER from " + actor.nnAddr
+ " with " + actor.state + " state");
actor.reRegister();
return true;
return false;
}
writeLock();
try {

View File

@@ -222,7 +222,19 @@ private void connectToNNAndHandshake() throws IOException {
// Second phase of the handshake with the NN.
register();
}
// This is useful to make sure NN gets Heartbeat before Blockreport
// upon NN restart while DN keeps retrying. Otherwise,
// 1. NN restarts.
// 2. Heartbeat RPC will retry and succeed. NN asks DN to reregister.
// 3. After reregistration completes, DN will send Blockreport first.
// 4. Given NN receives Blockreport after Heartbeat, it won't mark
// DatanodeStorageInfo#blockContentsStale to false until the next
// Blockreport.
void scheduleHeartbeat() {
lastHeartbeat = 0;
}
/**
* This methods arranges for the data node to send the block report at
* the next heartbeat.
@@ -902,6 +914,7 @@ void reRegister() throws IOException {
retrieveNamespaceInfo();
// and re-register
register();
scheduleHeartbeat();
}
}

View File

@@ -36,8 +36,10 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Properties;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -106,13 +108,22 @@ private BlockPoolSliceStorage() {
void recoverTransitionRead(DataNode datanode, NamespaceInfo nsInfo,
Collection<File> dataDirs, StartupOption startOpt) throws IOException {
LOG.info("Analyzing storage directories for bpid " + nsInfo.getBlockPoolID());
Set<String> existingStorageDirs = new HashSet<String>();
for (int i = 0; i < getNumStorageDirs(); i++) {
existingStorageDirs.add(getStorageDir(i).getRoot().getAbsolutePath());
}
// 1. For each BP data directory analyze the state and
// check whether all is consistent before transitioning.
this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
ArrayList<StorageState> dataDirStates = new ArrayList<StorageState>(
dataDirs.size());
for (Iterator<File> it = dataDirs.iterator(); it.hasNext();) {
File dataDir = it.next();
if (existingStorageDirs.contains(dataDir.getAbsolutePath())) {
LOG.info("Storage directory " + dataDir + " has already been used.");
it.remove();
continue;
}
StorageDirectory sd = new StorageDirectory(dataDir, null, true);
StorageState curState;
try {

View File

@@ -55,6 +55,7 @@
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -172,43 +173,99 @@ public String getTrashDirectoryForBlockFile(String bpid, File blockFile) {
}
/**
* Analyze storage directories.
* Recover from previous transitions if required.
* Perform fs state transition if necessary depending on the namespace info.
* Read storage info.
* <br>
* This method should be synchronized between multiple DN threads. Only the
* first DN thread does DN level storage dir recoverTransitionRead.
*
* {{@inheritDoc org.apache.hadoop.hdfs.server.common.Storage#writeAll()}}
*/
private void writeAll(Collection<StorageDirectory> dirs) throws IOException {
this.layoutVersion = getServiceLayoutVersion();
for (StorageDirectory dir : dirs) {
writeProperties(dir);
}
}
/**
* Add a list of volumes to be managed by DataStorage. If the volume is empty,
* format it, otherwise recover it from previous transitions if required.
*
* @param datanode the reference to DataNode.
* @param nsInfo namespace information
* @param dataDirs array of data storage directories
* @param startOpt startup option
* @throws IOException
*/
synchronized void recoverTransitionRead(DataNode datanode,
synchronized void addStorageLocations(DataNode datanode,
NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs,
StartupOption startOpt)
throws IOException {
if (initialized) {
// DN storage has been initialized, no need to do anything
return;
// Similar to recoverTransitionRead, it first ensures the datanode level
// format is completed.
List<StorageLocation> tmpDataDirs =
new ArrayList<StorageLocation>(dataDirs);
addStorageLocations(datanode, nsInfo, tmpDataDirs, startOpt, false, true);
Collection<File> bpDataDirs = new ArrayList<File>();
String bpid = nsInfo.getBlockPoolID();
for (StorageLocation dir : dataDirs) {
File dnRoot = dir.getFile();
File bpRoot = BlockPoolSliceStorage.getBpRoot(bpid, new File(dnRoot,
STORAGE_DIR_CURRENT));
bpDataDirs.add(bpRoot);
}
LOG.info("Data-node version: " + HdfsConstants.DATANODE_LAYOUT_VERSION
+ " and name-node layout version: " + nsInfo.getLayoutVersion());
// 1. For each data directory calculate its state and
// check whether all is consistent before transitioning.
// Format and recover.
this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
ArrayList<StorageState> dataDirStates = new ArrayList<StorageState>(dataDirs.size());
// mkdir for the list of BlockPoolStorage
makeBlockPoolDataDir(bpDataDirs, null);
BlockPoolSliceStorage bpStorage = this.bpStorageMap.get(bpid);
if (bpStorage == null) {
bpStorage = new BlockPoolSliceStorage(
nsInfo.getNamespaceID(), bpid, nsInfo.getCTime(),
nsInfo.getClusterID());
}
bpStorage.recoverTransitionRead(datanode, nsInfo, bpDataDirs, startOpt);
addBlockPoolStorage(bpid, bpStorage);
}
/**
* Add a list of volumes to be managed by this DataStorage. If the volume is
* empty, it formats the volume, otherwise it recovers it from previous
* transitions if required.
*
* If isInitialize is false, only the directories that have finished the
* doTransition() process will be added into DataStorage.
*
* @param datanode the reference to DataNode.
* @param nsInfo namespace information
* @param dataDirs array of data storage directories
* @param startOpt startup option
* @param isInitialize whether it is called when DataNode starts up.
* @throws IOException
*/
private synchronized void addStorageLocations(DataNode datanode,
NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs,
StartupOption startOpt, boolean isInitialize, boolean ignoreExistingDirs)
throws IOException {
Set<String> existingStorageDirs = new HashSet<String>();
for (int i = 0; i < getNumStorageDirs(); i++) {
existingStorageDirs.add(getStorageDir(i).getRoot().getAbsolutePath());
}
// 1. For each data directory calculate its state and check whether all is
// consistent before transitioning. Format and recover.
ArrayList<StorageState> dataDirStates =
new ArrayList<StorageState>(dataDirs.size());
List<StorageDirectory> addedStorageDirectories =
new ArrayList<StorageDirectory>();
for(Iterator<StorageLocation> it = dataDirs.iterator(); it.hasNext();) {
File dataDir = it.next().getFile();
if (existingStorageDirs.contains(dataDir.getAbsolutePath())) {
LOG.info("Storage directory " + dataDir + " has already been used.");
it.remove();
continue;
}
StorageDirectory sd = new StorageDirectory(dataDir);
StorageState curState;
try {
curState = sd.analyzeStorage(startOpt, this);
// sd is locked but not opened
switch(curState) {
switch (curState) {
case NORMAL:
break;
case NON_EXISTENT:
@@ -217,7 +274,8 @@ synchronized void recoverTransitionRead(DataNode datanode,
it.remove();
continue;
case NOT_FORMATTED: // format
LOG.info("Storage directory " + dataDir + " is not formatted");
LOG.info("Storage directory " + dataDir + " is not formatted for "
+ nsInfo.getBlockPoolID());
LOG.info("Formatting ...");
format(sd, nsInfo, datanode.getDatanodeUuid());
break;
@@ -231,33 +289,82 @@ synchronized void recoverTransitionRead(DataNode datanode,
//continue with other good dirs
continue;
}
// add to the storage list
addStorageDir(sd);
if (isInitialize) {
addStorageDir(sd);
}
addedStorageDirectories.add(sd);
dataDirStates.add(curState);
}
if (dataDirs.size() == 0 || dataDirStates.size() == 0) // none of the data dirs exist
if (dataDirs.size() == 0 || dataDirStates.size() == 0) {
// none of the data dirs exist
if (ignoreExistingDirs) {
return;
}
throw new IOException(
"All specified directories are not accessible or do not exist.");
}
// 2. Do transitions
// Each storage directory is treated individually.
// During startup some of them can upgrade or rollback
// while others could be uptodate for the regular startup.
try {
for (int idx = 0; idx < getNumStorageDirs(); idx++) {
doTransition(datanode, getStorageDir(idx), nsInfo, startOpt);
createStorageID(getStorageDir(idx));
// During startup some of them can upgrade or rollback
// while others could be up-to-date for the regular startup.
for (Iterator<StorageDirectory> it = addedStorageDirectories.iterator();
it.hasNext(); ) {
StorageDirectory sd = it.next();
try {
doTransition(datanode, sd, nsInfo, startOpt);
createStorageID(sd);
} catch (IOException e) {
if (!isInitialize) {
sd.unlock();
it.remove();
continue;
}
unlockAll();
throw e;
}
} catch (IOException e) {
unlockAll();
throw e;
}
// 3. Update all storages. Some of them might have just been formatted.
this.writeAll();
// 3. Update all successfully loaded storages. Some of them might have just
// been formatted.
this.writeAll(addedStorageDirectories);
// 4. Make newly loaded storage directories visible for service.
if (!isInitialize) {
this.storageDirs.addAll(addedStorageDirectories);
}
}
/**
* Analyze storage directories.
* Recover from previous transitions if required.
* Perform fs state transition if necessary depending on the namespace info.
* Read storage info.
* <br>
* This method should be synchronized between multiple DN threads. Only the
* first DN thread does DN level storage dir recoverTransitionRead.
*
* @param nsInfo namespace information
* @param dataDirs array of data storage directories
* @param startOpt startup option
* @throws IOException
*/
synchronized void recoverTransitionRead(DataNode datanode,
NamespaceInfo nsInfo, Collection<StorageLocation> dataDirs,
StartupOption startOpt)
throws IOException {
if (initialized) {
// DN storage has been initialized, no need to do anything
return;
}
LOG.info("DataNode version: " + HdfsConstants.DATANODE_LAYOUT_VERSION
+ " and NameNode layout version: " + nsInfo.getLayoutVersion());
this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
addStorageLocations(datanode, nsInfo, dataDirs, startOpt, true, false);
// 4. mark DN storage is initialized
// mark DN storage is initialized
this.initialized = true;
}

View File

@@ -78,7 +78,7 @@ public File getFile() {
* @return A StorageLocation object if successfully parsed, null otherwise.
* Does not throw any exceptions.
*/
static StorageLocation parse(String rawLocation)
public static StorageLocation parse(String rawLocation)
throws IOException, SecurityException {
Matcher matcher = regex.matcher(rawLocation);
StorageType storageType = StorageType.DEFAULT;

View File

@@ -22,6 +22,7 @@
import java.io.FileDescriptor;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collection;
import java.util.List;
import java.util.Map;
@@ -39,6 +40,7 @@
import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
import org.apache.hadoop.hdfs.server.datanode.Replica;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetFactory;
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
@@ -91,6 +93,10 @@ public RollingLogs createRollingLogs(String bpid, String prefix
/** @return a list of volumes. */
public List<V> getVolumes();
/** Add an array of StorageLocation to FsDataset. */
public void addVolumes(Collection<StorageLocation> volumes)
throws IOException;
/** @return a storage with the given storage ID */
public DatanodeStorage getStorage(final String storageUuid);

View File

@@ -61,6 +61,7 @@ class FsDatasetAsyncDiskService {
private static final long THREADS_KEEP_ALIVE_SECONDS = 60;
private final DataNode datanode;
private final ThreadGroup threadGroup;
private Map<File, ThreadPoolExecutor> executors
= new HashMap<File, ThreadPoolExecutor>();
@@ -70,42 +71,52 @@ class FsDatasetAsyncDiskService {
*
* The AsyncDiskServices uses one ThreadPool per volume to do the async
* disk operations.
*
* @param volumes The roots of the data volumes.
*/
FsDatasetAsyncDiskService(DataNode datanode, File[] volumes) {
FsDatasetAsyncDiskService(DataNode datanode) {
this.datanode = datanode;
this.threadGroup = new ThreadGroup(getClass().getSimpleName());
}
final ThreadGroup threadGroup = new ThreadGroup(getClass().getSimpleName());
// Create one ThreadPool per volume
for (int v = 0 ; v < volumes.length; v++) {
final File vol = volumes[v];
ThreadFactory threadFactory = new ThreadFactory() {
int counter = 0;
private void addExecutorForVolume(final File volume) {
ThreadFactory threadFactory = new ThreadFactory() {
int counter = 0;
@Override
public Thread newThread(Runnable r) {
int thisIndex;
synchronized (this) {
thisIndex = counter++;
}
Thread t = new Thread(threadGroup, r);
t.setName("Async disk worker #" + thisIndex +
" for volume " + vol);
return t;
}
};
@Override
public Thread newThread(Runnable r) {
int thisIndex;
synchronized (this) {
thisIndex = counter++;
}
Thread t = new Thread(threadGroup, r);
t.setName("Async disk worker #" + thisIndex +
" for volume " + volume);
return t;
}
};
ThreadPoolExecutor executor = new ThreadPoolExecutor(
CORE_THREADS_PER_VOLUME, MAXIMUM_THREADS_PER_VOLUME,
THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>(), threadFactory);
ThreadPoolExecutor executor = new ThreadPoolExecutor(
CORE_THREADS_PER_VOLUME, MAXIMUM_THREADS_PER_VOLUME,
THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS,
new LinkedBlockingQueue<Runnable>(), threadFactory);
// This can reduce the number of running threads
executor.allowCoreThreadTimeOut(true);
executors.put(vol, executor);
// This can reduce the number of running threads
executor.allowCoreThreadTimeOut(true);
executors.put(volume, executor);
}
/**
* Starts AsyncDiskService for a new volume
* @param volume the root of the new data volume.
*/
synchronized void addVolume(File volume) {
if (executors == null) {
throw new RuntimeException("AsyncDiskService is already shutdown");
}
ThreadPoolExecutor executor = executors.get(volume);
if (executor != null) {
throw new RuntimeException("Volume " + volume + " is already existed.");
}
addExecutorForVolume(volume);
}
synchronized long countPendingDeletions() {

View File

@@ -202,6 +202,7 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
final Map<String, DatanodeStorage> storageMap;
final FsDatasetAsyncDiskService asyncDiskService;
final FsDatasetCache cacheManager;
private final Configuration conf;
private final int validVolsRequired;
final ReplicaMap volumeMap;
@@ -216,6 +217,7 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
) throws IOException {
this.datanode = datanode;
this.dataStorage = storage;
this.conf = conf;
// The number of volumes required for operation is the total number
// of volumes minus the number of failed volumes we can tolerate.
final int volFailuresTolerated =
@@ -242,38 +244,76 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
}
storageMap = new HashMap<String, DatanodeStorage>();
final List<FsVolumeImpl> volArray = new ArrayList<FsVolumeImpl>(
storage.getNumStorageDirs());
for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
Storage.StorageDirectory sd = storage.getStorageDir(idx);
final File dir = sd.getCurrentDir();
final StorageType storageType = getStorageTypeFromLocations(dataLocations, sd.getRoot());
volArray.add(new FsVolumeImpl(this, sd.getStorageUuid(), dir, conf,
storageType));
LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
storageMap.put(sd.getStorageUuid(),
new DatanodeStorage(sd.getStorageUuid(), DatanodeStorage.State.NORMAL, storageType));
}
volumeMap = new ReplicaMap(this);
@SuppressWarnings("unchecked")
final VolumeChoosingPolicy<FsVolumeImpl> blockChooserImpl =
ReflectionUtils.newInstance(conf.getClass(
DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY,
RoundRobinVolumeChoosingPolicy.class,
VolumeChoosingPolicy.class), conf);
volumes = new FsVolumeList(volArray, volsFailed, blockChooserImpl);
volumes.initializeReplicaMaps(volumeMap);
volumes = new FsVolumeList(volsFailed, blockChooserImpl);
asyncDiskService = new FsDatasetAsyncDiskService(datanode);
File[] roots = new File[storage.getNumStorageDirs()];
for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
roots[idx] = storage.getStorageDir(idx).getCurrentDir();
addVolume(dataLocations, storage.getStorageDir(idx));
}
asyncDiskService = new FsDatasetAsyncDiskService(datanode, roots);
cacheManager = new FsDatasetCache(this);
registerMBean(datanode.getDatanodeUuid());
}
private void addVolume(Collection<StorageLocation> dataLocations,
Storage.StorageDirectory sd) throws IOException {
final File dir = sd.getCurrentDir();
final StorageType storageType =
getStorageTypeFromLocations(dataLocations, sd.getRoot());
// If IOException raises from FsVolumeImpl() or getVolumeMap(), there is
// nothing needed to be rolled back to make various data structures, e.g.,
// storageMap and asyncDiskService, consistent.
FsVolumeImpl fsVolume = new FsVolumeImpl(
this, sd.getStorageUuid(), dir, this.conf, storageType);
fsVolume.getVolumeMap(volumeMap);
volumes.addVolume(fsVolume);
storageMap.put(sd.getStorageUuid(),
new DatanodeStorage(sd.getStorageUuid(),
DatanodeStorage.State.NORMAL,
storageType));
asyncDiskService.addVolume(sd.getCurrentDir());
LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
}
/**
* Add an array of StorageLocation to FsDataset.
*
* @pre dataStorage must have these volumes.
* @param volumes
* @throws IOException
*/
@Override
public synchronized void addVolumes(Collection<StorageLocation> volumes)
throws IOException {
final Collection<StorageLocation> dataLocations =
DataNode.getStorageLocations(this.conf);
Map<String, Storage.StorageDirectory> allStorageDirs =
new HashMap<String, Storage.StorageDirectory>();
for (int idx = 0; idx < dataStorage.getNumStorageDirs(); idx++) {
Storage.StorageDirectory sd = dataStorage.getStorageDir(idx);
allStorageDirs.put(sd.getRoot().getAbsolutePath(), sd);
}
for (StorageLocation vol : volumes) {
String key = vol.getFile().getAbsolutePath();
if (!allStorageDirs.containsKey(key)) {
LOG.warn("Attempt to add an invalid volume: " + vol.getFile());
} else {
addVolume(dataLocations, allStorageDirs.get(key));
}
}
}
private StorageType getStorageTypeFromLocations(
Collection<StorageLocation> dataLocations, File dir) {
for (StorageLocation dataLocation : dataLocations) {

View File

@@ -40,9 +40,8 @@ class FsVolumeList {
private final VolumeChoosingPolicy<FsVolumeImpl> blockChooser;
private volatile int numFailedVolumes;
FsVolumeList(List<FsVolumeImpl> volumes, int failedVols,
FsVolumeList(int failedVols,
VolumeChoosingPolicy<FsVolumeImpl> blockChooser) {
this.volumes = Collections.unmodifiableList(volumes);
this.blockChooser = blockChooser;
this.numFailedVolumes = failedVols;
}
@@ -101,12 +100,6 @@ long getRemaining() throws IOException {
}
return remaining;
}
void initializeReplicaMaps(ReplicaMap globalReplicaMap) throws IOException {
for (FsVolumeImpl v : volumes) {
v.getVolumeMap(globalReplicaMap);
}
}
void getAllVolumesMap(final String bpid, final ReplicaMap volumeMap) throws IOException {
long totalStartTime = Time.monotonicNow();
@@ -205,6 +198,19 @@ public String toString() {
return volumes.toString();
}
/**
* Dynamically add new volumes to the existing volumes that this DN manages.
* @param newVolume the instance of new FsVolumeImpl.
*/
synchronized void addVolume(FsVolumeImpl newVolume) {
// Make a copy of volumes to add new volumes.
final List<FsVolumeImpl> volumeList = volumes == null ?
new ArrayList<FsVolumeImpl>() :
new ArrayList<FsVolumeImpl>(volumes);
volumeList.add(newVolume);
volumes = Collections.unmodifiableList(volumeList);
FsDatasetImpl.LOG.info("Added new volume: " + newVolume.toString());
}
void addBlockPool(final String bpid, final Configuration conf) throws IOException {
long totalStartTime = Time.monotonicNow();

View File

@@ -6091,7 +6091,6 @@ void shutdown() {
blockManager.shutdown();
}
}
@Override // FSNamesystemMBean
public int getNumLiveDataNodes() {
@@ -6138,6 +6137,15 @@ public int getNumStaleDataNodes() {
return getBlockManager().getDatanodeManager().getNumStaleNodes();
}
/**
* Storages are marked as "content stale" after the NN restarts or fails over, and
* before NN receives the first Heartbeat followed by the first Blockreport.
*/
@Override // FSNamesystemMBean
public int getNumStaleStorages() {
return getBlockManager().getDatanodeManager().getNumStaleStorages();
}
/**
* Sets the current generation stamp for legacy blocks
*/

View File

@@ -151,4 +151,11 @@ public interface FSNamesystemMBean {
* @return number of blocks pending deletion
*/
long getPendingDeletionBlocks();
/**
* Number of content stale storages.
* @return number of content stale storages
*/
public int getNumStaleStorages();
}

View File

@@ -22,6 +22,9 @@
/**
* A RegisterCommand is an instruction to a datanode to register with the namenode.
* This command can't be combined with other commands in the same response.
* This is because after the datanode processes RegisterCommand, it will skip
* the rest of the DatanodeCommands in the same HeartbeatResponse.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving

View File

@@ -37,6 +37,10 @@ ELSE (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
ENDIF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
IF(FUSE_FOUND)
add_library(posix_util
../util/posix_util.c
)
add_executable(fuse_dfs
fuse_dfs.c
fuse_options.c

View File

@@ -0,0 +1,271 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "common/htable.h"
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
struct htable_pair {
void *key;
void *val;
};
/**
* A hash table which uses linear probing.
*/
struct htable {
uint32_t capacity;
uint32_t used;
htable_hash_fn_t hash_fun;
htable_eq_fn_t eq_fun;
struct htable_pair *elem;
};
/**
* An internal function for inserting a value into the hash table.
*
* Note: this function assumes that you have made enough space in the table.
*
* @param nelem The new element to insert.
* @param capacity The capacity of the hash table.
* @param hash_fun The hash function to use.
* @param key The key to insert.
* @param val The value to insert.
*/
static void htable_insert_internal(struct htable_pair *nelem,
uint32_t capacity, htable_hash_fn_t hash_fun, void *key,
void *val)
{
uint32_t i;
i = hash_fun(key, capacity);
while (1) {
if (!nelem[i].key) {
nelem[i].key = key;
nelem[i].val = val;
return;
}
i++;
if (i == capacity) {
i = 0;
}
}
}
static int htable_realloc(struct htable *htable, uint32_t new_capacity)
{
struct htable_pair *nelem;
uint32_t i, old_capacity = htable->capacity;
htable_hash_fn_t hash_fun = htable->hash_fun;
nelem = calloc(new_capacity, sizeof(struct htable_pair));
if (!nelem) {
return ENOMEM;
}
for (i = 0; i < old_capacity; i++) {
struct htable_pair *pair = htable->elem + i;
htable_insert_internal(nelem, new_capacity, hash_fun,
pair->key, pair->val);
}
free(htable->elem);
htable->elem = nelem;
htable->capacity = new_capacity;
return 0;
}
struct htable *htable_alloc(uint32_t size,
htable_hash_fn_t hash_fun, htable_eq_fn_t eq_fun)
{
struct htable *htable;
htable = calloc(1, sizeof(*htable));
if (!htable) {
return NULL;
}
size = (size + 1) >> 1;
size = size << 1;
if (size < HTABLE_MIN_SIZE) {
size = HTABLE_MIN_SIZE;
}
htable->hash_fun = hash_fun;
htable->eq_fun = eq_fun;
htable->used = 0;
if (htable_realloc(htable, size)) {
free(htable);
return NULL;
}
return htable;
}
void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx)
{
uint32_t i;
for (i = 0; i != htable->capacity; ++i) {
struct htable_pair *elem = htable->elem + i;
if (elem->key) {
fun(ctx, elem->key, elem->val);
}
}
}
void htable_free(struct htable *htable)
{
if (htable) {
free(htable->elem);
free(htable);
}
}
int htable_put(struct htable *htable, void *key, void *val)
{
int ret;
uint32_t nused;
// NULL is not a valid key value.
// This helps us implement htable_get_internal efficiently, since we know
// that we can stop when we encounter the first NULL key.
if (!key) {
return EINVAL;
}
// NULL is not a valid value. Otherwise the results of htable_get would
// be confusing (does a NULL return mean entry not found, or that the
// entry was found and was NULL?)
if (!val) {
return EINVAL;
}
// Re-hash if we have used more than half of the hash table
nused = htable->used + 1;
if (nused >= (htable->capacity / 2)) {
ret = htable_realloc(htable, htable->capacity * 2);
if (ret)
return ret;
}
htable_insert_internal(htable->elem, htable->capacity,
htable->hash_fun, key, val);
htable->used++;
return 0;
}
static int htable_get_internal(const struct htable *htable,
const void *key, uint32_t *out)
{
uint32_t start_idx, idx;
start_idx = htable->hash_fun(key, htable->capacity);
idx = start_idx;
while (1) {
struct htable_pair *pair = htable->elem + idx;
if (!pair->key) {
// We always maintain the invariant that the entries corresponding
// to a given key are stored in a contiguous block, not separated
// by any NULLs. So if we encounter a NULL, our search is over.
return ENOENT;
} else if (htable->eq_fun(pair->key, key)) {
*out = idx;
return 0;
}
idx++;
if (idx == htable->capacity) {
idx = 0;
}
if (idx == start_idx) {
return ENOENT;
}
}
}
void *htable_get(const struct htable *htable, const void *key)
{
uint32_t idx;
if (htable_get_internal(htable, key, &idx)) {
return NULL;
}
return htable->elem[idx].val;
}
void htable_pop(struct htable *htable, const void *key,
void **found_key, void **found_val)
{
uint32_t hole, i;
const void *nkey;
if (htable_get_internal(htable, key, &hole)) {
*found_key = NULL;
*found_val = NULL;
return;
}
i = hole;
htable->used--;
// We need to maintain the compactness invariant used in
// htable_get_internal. This invariant specifies that the entries for any
// given key are never separated by NULLs (although they may be separated
// by entries for other keys.)
while (1) {
i++;
if (i == htable->capacity) {
i = 0;
}
nkey = htable->elem[i].key;
if (!nkey) {
*found_key = htable->elem[hole].key;
*found_val = htable->elem[hole].val;
htable->elem[hole].key = NULL;
htable->elem[hole].val = NULL;
return;
} else if (htable->eq_fun(key, nkey)) {
htable->elem[hole].key = htable->elem[i].key;
htable->elem[hole].val = htable->elem[i].val;
hole = i;
}
}
}
uint32_t htable_used(const struct htable *htable)
{
return htable->used;
}
uint32_t htable_capacity(const struct htable *htable)
{
return htable->capacity;
}
uint32_t ht_hash_string(const void *str, uint32_t max)
{
const char *s = str;
uint32_t hash = 0;
while (*s) {
hash = (hash * 31) + *s;
s++;
}
return hash % max;
}
int ht_compare_string(const void *a, const void *b)
{
return strcmp(a, b) == 0;
}
// vim: ts=4:sw=4:tw=79:et
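A quick worked example of the hash above (an editorial illustration, not part of the patch): ht_hash_string is the familiar 31-multiplier polynomial hash, essentially the same recurrence as Java's String.hashCode, reduced modulo the table capacity.

#include "common/htable.h"
#include <assert.h>

/* hash("ab", 16):
 *   'a' = 97 -> hash = 0 * 31 + 97 = 97
 *   'b' = 98 -> hash = 97 * 31 + 98 = 3105
 *   3105 % 16 = 1, so "ab" starts probing at slot 1 of a 16-slot table.
 */
int main(void)
{
    assert(ht_hash_string("ab", 16) == 1);
    return 0;
}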

View File

@@ -0,0 +1,161 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef HADOOP_CORE_COMMON_HASH_TABLE
#define HADOOP_CORE_COMMON_HASH_TABLE
#include <inttypes.h>
#include <stdio.h>
#include <stdint.h>
#define HTABLE_MIN_SIZE 4
struct htable;
/**
* An HTable hash function.
*
* @param key The key.
* @param capacity The total capacity.
*
* @return The hash slot. Must be less than the capacity.
*/
typedef uint32_t (*htable_hash_fn_t)(const void *key, uint32_t capacity);
/**
* An HTable equality function. Compares two keys.
*
* @param a First key.
* @param b Second key.
*
* @return nonzero if the keys are equal.
*/
typedef int (*htable_eq_fn_t)(const void *a, const void *b);
/**
* Allocate a new hash table.
*
* @param capacity The minimum suggested starting capacity.
* @param hash_fun The hash function to use in this hash table.
* @param eq_fun The equals function to use in this hash table.
*
* @return The new hash table on success; NULL on OOM.
*/
struct htable *htable_alloc(uint32_t capacity, htable_hash_fn_t hash_fun,
htable_eq_fn_t eq_fun);
typedef void (*visitor_fn_t)(void *ctx, void *key, void *val);
/**
* Visit all of the entries in the hash table.
*
* @param htable The hash table.
* @param fun The callback function to invoke on each key and value.
* @param ctx Context pointer to pass to the callback.
*/
void htable_visit(struct htable *htable, visitor_fn_t fun, void *ctx);
/**
* Free the hash table.
*
* It is up to the calling code to ensure that the keys and values inside the
* table are de-allocated, if that is necessary.
*
* @param htable The hash table.
*/
void htable_free(struct htable *htable);
/**
* Add an entry to the hash table.
*
* @param htable The hash table.
* @param key The key to add. This cannot be NULL.
* @param val The value to add. This cannot be NULL.
*
* @return 0 on success;
* EEXIST if the value already exists in the table;
* ENOMEM if there is not enough memory to add the element.
* EFBIG if the hash table has too many entries to fit in 32
* bits.
*/
int htable_put(struct htable *htable, void *key, void *val);
/**
* Get an entry from the hash table.
*
* @param htable The hash table.
* @param key The key to find.
*
* @return NULL if there is no such entry; the associated value otherwise.
*/
void *htable_get(const struct htable *htable, const void *key);
/**
* Get an entry from the hash table and remove it.
*
* @param htable The hash table.
* @param key The key for the entry to find and remove.
* @param found_key (out param) NULL if the entry was not found; the found key
* otherwise.
* @param found_val (out param) NULL if the entry was not found; the found
* value otherwise.
*/
void htable_pop(struct htable *htable, const void *key,
void **found_key, void **found_val);
/**
* Get the number of entries used in the hash table.
*
* @param htable The hash table.
*
* @return The number of entries used in the hash table.
*/
uint32_t htable_used(const struct htable *htable);
/**
* Get the capacity of the hash table.
*
* @param htable The hash table.
*
* @return The capacity of the hash table.
*/
uint32_t htable_capacity(const struct htable *htable);
/**
* Hash a string.
*
* @param str The string.
* @param max Maximum hash value
*
* @return A number less than max.
*/
uint32_t ht_hash_string(const void *str, uint32_t max);
/**
* Compare two strings.
*
* @param a The first string.
* @param b The second string.
*
* @return 1 if the strings are identical; 0 otherwise.
*/
int ht_compare_string(const void *a, const void *b);
#endif
// vim: ts=4:sw=4:tw=79:et
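Because this header is new in the branch, a short usage sketch may help (editorial, not part of the patch). The names and signatures are taken from the declarations above, and the ownership notes follow the comments in htable.c; treat the snippet as illustrative rather than canonical.

#include "common/htable.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    /* A string-keyed table; the table doubles once it is roughly half full,
     * so linear-probe sequences stay short. */
    struct htable *ht = htable_alloc(16, ht_hash_string, ht_compare_string);
    void *found_key, *found_val;
    char *key, *val;

    if (!ht) {
        fprintf(stderr, "htable_alloc failed\n");
        return 1;
    }
    /* Keys and values are stored by pointer, never copied, and neither may
     * be NULL; the caller keeps ownership of the memory. */
    key = strdup("org.apache.hadoop.fs.FileSystem");
    val = strdup("cached global class reference");
    if (htable_put(ht, key, val)) {
        fprintf(stderr, "htable_put failed\n");
        return 1;
    }
    printf("lookup: %s\n",
           (char *)htable_get(ht, "org.apache.hadoop.fs.FileSystem"));
    printf("used=%u capacity=%u\n",
           (unsigned)htable_used(ht), (unsigned)htable_capacity(ht));

    /* htable_pop removes the entry and hands the stored pointers back so
     * the caller can free them. */
    htable_pop(ht, "org.apache.hadoop.fs.FileSystem", &found_key, &found_val);
    free(found_key);
    free(found_val);
    htable_free(ht);
    return 0;
}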

View File

@@ -19,8 +19,8 @@
#include "exception.h"
#include "hdfs.h"
#include "jni_helper.h"
#include "platform.h"
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -35,54 +35,54 @@ struct ExceptionInfo {
static const struct ExceptionInfo gExceptionInfo[] = {
{
.name = "java.io.FileNotFoundException",
.noPrintFlag = NOPRINT_EXC_FILE_NOT_FOUND,
.excErrno = ENOENT,
"java.io.FileNotFoundException",
NOPRINT_EXC_FILE_NOT_FOUND,
ENOENT,
},
{
.name = "org.apache.hadoop.security.AccessControlException",
.noPrintFlag = NOPRINT_EXC_ACCESS_CONTROL,
.excErrno = EACCES,
"org.apache.hadoop.security.AccessControlException",
NOPRINT_EXC_ACCESS_CONTROL,
EACCES,
},
{
.name = "org.apache.hadoop.fs.UnresolvedLinkException",
.noPrintFlag = NOPRINT_EXC_UNRESOLVED_LINK,
.excErrno = ENOLINK,
"org.apache.hadoop.fs.UnresolvedLinkException",
NOPRINT_EXC_UNRESOLVED_LINK,
ENOLINK,
},
{
.name = "org.apache.hadoop.fs.ParentNotDirectoryException",
.noPrintFlag = NOPRINT_EXC_PARENT_NOT_DIRECTORY,
.excErrno = ENOTDIR,
"org.apache.hadoop.fs.ParentNotDirectoryException",
NOPRINT_EXC_PARENT_NOT_DIRECTORY,
ENOTDIR,
},
{
.name = "java.lang.IllegalArgumentException",
.noPrintFlag = NOPRINT_EXC_ILLEGAL_ARGUMENT,
.excErrno = EINVAL,
"java.lang.IllegalArgumentException",
NOPRINT_EXC_ILLEGAL_ARGUMENT,
EINVAL,
},
{
.name = "java.lang.OutOfMemoryError",
.noPrintFlag = 0,
.excErrno = ENOMEM,
"java.lang.OutOfMemoryError",
0,
ENOMEM,
},
{
.name = "org.apache.hadoop.hdfs.server.namenode.SafeModeException",
.noPrintFlag = 0,
.excErrno = EROFS,
"org.apache.hadoop.hdfs.server.namenode.SafeModeException",
0,
EROFS,
},
{
.name = "org.apache.hadoop.fs.FileAlreadyExistsException",
.noPrintFlag = 0,
.excErrno = EEXIST,
"org.apache.hadoop.fs.FileAlreadyExistsException",
0,
EEXIST,
},
{
.name = "org.apache.hadoop.hdfs.protocol.QuotaExceededException",
.noPrintFlag = 0,
.excErrno = EDQUOT,
"org.apache.hadoop.hdfs.protocol.QuotaExceededException",
0,
EDQUOT,
},
{
.name = "org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException",
.noPrintFlag = 0,
.excErrno = ESTALE,
"org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException",
0,
ESTALE,
},
};
@@ -113,6 +113,7 @@ int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
jstring jStr = NULL;
jvalue jVal;
jthrowable jthr;
const char *stackTrace;
jthr = classNameOfObject(exc, env, &className);
if (jthr) {
@@ -148,7 +149,7 @@ int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
destroyLocalReference(env, jthr);
} else {
jStr = jVal.l;
const char *stackTrace = (*env)->GetStringUTFChars(env, jStr, NULL);
stackTrace = (*env)->GetStringUTFChars(env, jStr, NULL);
if (!stackTrace) {
fprintf(stderr, "(unable to get stack trace for %s exception: "
"GetStringUTFChars error.)\n", className);

View File

@@ -34,13 +34,14 @@
* usually not what you want.)
*/
#include "platform.h"
#include <jni.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <search.h>
#include <pthread.h>
#include <errno.h>
/**
@@ -109,7 +110,7 @@ int printExceptionAndFreeV(JNIEnv *env, jthrowable exc, int noPrintFlags,
* object.
*/
int printExceptionAndFree(JNIEnv *env, jthrowable exc, int noPrintFlags,
const char *fmt, ...) __attribute__((format(printf, 4, 5)));
const char *fmt, ...) TYPE_CHECKED_PRINTF_FORMAT(4, 5);
/**
* Print out information about the pending exception and free it.
@@ -124,7 +125,7 @@ int printExceptionAndFree(JNIEnv *env, jthrowable exc, int noPrintFlags,
* object.
*/
int printPendingExceptionAndFree(JNIEnv *env, int noPrintFlags,
const char *fmt, ...) __attribute__((format(printf, 3, 4)));
const char *fmt, ...) TYPE_CHECKED_PRINTF_FORMAT(3, 4);
/**
* Get a local reference to the pending exception and clear it.
@@ -150,6 +151,7 @@ jthrowable getPendingExceptionAndClear(JNIEnv *env);
* @return A local reference to a RuntimeError
*/
jthrowable newRuntimeError(JNIEnv *env, const char *fmt, ...)
__attribute__((format(printf, 2, 3)));
TYPE_CHECKED_PRINTF_FORMAT(2, 3);
#undef TYPE_CHECKED_PRINTF_FORMAT
#endif
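TYPE_CHECKED_PRINTF_FORMAT comes from the new platform.h included above; its definition is not shown in this section. A minimal sketch of what such a macro presumably looks like, assuming the usual pattern of expanding to the GCC format attribute where available and to nothing elsewhere (the real macro in the os-specific platform.h may differ):

/* Illustrative sketch only; see the os-specific platform.h for the real definition. */
#ifdef __GNUC__
#define TYPE_CHECKED_PRINTF_FORMAT(formatArg, varArgs) \
    __attribute__((format(printf, formatArg, varArgs)))
#else
#define TYPE_CHECKED_PRINTF_FORMAT(formatArg, varArgs)
#endif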

View File

@@ -49,18 +49,18 @@ int expectFileStats(hdfsFile file,
stats->totalShortCircuitBytesRead,
stats->totalZeroCopyBytesRead);
if (expectedTotalBytesRead != UINT64_MAX) {
EXPECT_INT64_EQ(expectedTotalBytesRead, stats->totalBytesRead);
EXPECT_UINT64_EQ(expectedTotalBytesRead, stats->totalBytesRead);
}
if (expectedTotalLocalBytesRead != UINT64_MAX) {
EXPECT_INT64_EQ(expectedTotalLocalBytesRead,
EXPECT_UINT64_EQ(expectedTotalLocalBytesRead,
stats->totalLocalBytesRead);
}
if (expectedTotalShortCircuitBytesRead != UINT64_MAX) {
EXPECT_INT64_EQ(expectedTotalShortCircuitBytesRead,
EXPECT_UINT64_EQ(expectedTotalShortCircuitBytesRead,
stats->totalShortCircuitBytesRead);
}
if (expectedTotalZeroCopyBytesRead != UINT64_MAX) {
EXPECT_INT64_EQ(expectedTotalZeroCopyBytesRead,
EXPECT_UINT64_EQ(expectedTotalZeroCopyBytesRead,
stats->totalZeroCopyBytesRead);
}
hdfsFileFreeReadStatistics(stats);

View File

@@ -126,6 +126,18 @@ struct hdfsFile_internal;
} \
} while (0);
#define EXPECT_UINT64_EQ(x, y) \
do { \
uint64_t __my_ret__ = y; \
int __my_errno__ = errno; \
if (__my_ret__ != (x)) { \
fprintf(stderr, "TEST_ERROR: failed on %s:%d with return " \
"value %"PRIu64" (errno: %d): expected %"PRIu64"\n", \
__FILE__, __LINE__, __my_ret__, __my_errno__, (x)); \
return -1; \
} \
} while (0);
#define RETRY_ON_EINTR_GET_ERRNO(ret, expr) do { \
ret = expr; \
if (!ret) \

View File

@@ -19,20 +19,18 @@
#include "config.h"
#include "exception.h"
#include "jni_helper.h"
#include "platform.h"
#include "common/htable.h"
#include "os/mutexes.h"
#include "os/thread_local_storage.h"
#include <stdio.h>
#include <string.h>
static pthread_mutex_t hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t jvmMutex = PTHREAD_MUTEX_INITIALIZER;
static volatile int hashTableInited = 0;
#define LOCK_HASH_TABLE() pthread_mutex_lock(&hdfsHashMutex)
#define UNLOCK_HASH_TABLE() pthread_mutex_unlock(&hdfsHashMutex)
static struct htable *gClassRefHTable = NULL;
/** The Native return types that methods could return */
#define VOID 'V'
#define JVOID 'V'
#define JOBJECT 'L'
#define JARRAYOBJECT '['
#define JBOOLEAN 'Z'
@@ -51,40 +49,10 @@ static volatile int hashTableInited = 0;
*/
#define MAX_HASH_TABLE_ELEM 4096
/** Key that allows us to retrieve thread-local storage */
static pthread_key_t gTlsKey;
/** nonzero if we succeeded in initializing gTlsKey. Protected by the jvmMutex */
static int gTlsKeyInitialized = 0;
/** Pthreads thread-local storage for each library thread. */
struct hdfsTls {
JNIEnv *env;
};
/**
* The function that is called whenever a thread with libhdfs thread local data
* is destroyed.
*
* @param v The thread-local data
* Length of buffer for retrieving created JVMs. (We only ever create one.)
*/
static void hdfsThreadDestructor(void *v)
{
struct hdfsTls *tls = v;
JavaVM *vm;
JNIEnv *env = tls->env;
jint ret;
ret = (*env)->GetJavaVM(env, &vm);
if (ret) {
fprintf(stderr, "hdfsThreadDestructor: GetJavaVM failed with "
"error %d\n", ret);
(*env)->ExceptionDescribe(env);
} else {
(*vm)->DetachCurrentThread(vm);
}
free(tls);
}
#define VM_BUF_LENGTH 1
void destroyLocalReference(JNIEnv *env, jobject jObject)
{
@@ -138,67 +106,6 @@ jthrowable newCStr(JNIEnv *env, jstring jstr, char **out)
return NULL;
}
static int hashTableInit(void)
{
if (!hashTableInited) {
LOCK_HASH_TABLE();
if (!hashTableInited) {
if (hcreate(MAX_HASH_TABLE_ELEM) == 0) {
fprintf(stderr, "error creating hashtable, <%d>: %s\n",
errno, strerror(errno));
UNLOCK_HASH_TABLE();
return 0;
}
hashTableInited = 1;
}
UNLOCK_HASH_TABLE();
}
return 1;
}
static int insertEntryIntoTable(const char *key, void *data)
{
ENTRY e, *ep;
if (key == NULL || data == NULL) {
return 0;
}
if (! hashTableInit()) {
return -1;
}
e.data = data;
e.key = (char*)key;
LOCK_HASH_TABLE();
ep = hsearch(e, ENTER);
UNLOCK_HASH_TABLE();
if (ep == NULL) {
fprintf(stderr, "warn adding key (%s) to hash table, <%d>: %s\n",
key, errno, strerror(errno));
}
return 0;
}
static void* searchEntryFromTable(const char *key)
{
ENTRY e,*ep;
if (key == NULL) {
return NULL;
}
hashTableInit();
e.key = (char*)key;
LOCK_HASH_TABLE();
ep = hsearch(e, FIND);
UNLOCK_HASH_TABLE();
if (ep != NULL) {
return ep->data;
}
return NULL;
}
jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
jobject instObj, const char *className,
const char *methName, const char *methSignature, ...)
@@ -235,7 +142,7 @@ jthrowable invokeMethod(JNIEnv *env, jvalue *retval, MethType methType,
}
retval->l = jobj;
}
else if (returnType == VOID) {
else if (returnType == JVOID) {
if (methType == STATIC) {
(*env)->CallStaticVoidMethodV(env, cls, mid, args);
}
@@ -325,11 +232,11 @@ jthrowable methodIdFromClass(const char *className, const char *methName,
{
jclass cls;
jthrowable jthr;
jmethodID mid = 0;
jthr = globalClassReference(className, env, &cls);
if (jthr)
return jthr;
jmethodID mid = 0;
jthr = validateMethodType(env, methType);
if (jthr)
return jthr;
@@ -350,25 +257,50 @@ jthrowable methodIdFromClass(const char *className, const char *methName,
jthrowable globalClassReference(const char *className, JNIEnv *env, jclass *out)
{
jclass clsLocalRef;
jclass cls = searchEntryFromTable(className);
if (cls) {
*out = cls;
return NULL;
jthrowable jthr = NULL;
jclass local_clazz = NULL;
jclass clazz = NULL;
int ret;
mutexLock(&hdfsHashMutex);
if (!gClassRefHTable) {
gClassRefHTable = htable_alloc(MAX_HASH_TABLE_ELEM, ht_hash_string,
ht_compare_string);
if (!gClassRefHTable) {
jthr = newRuntimeError(env, "htable_alloc failed\n");
goto done;
}
}
clsLocalRef = (*env)->FindClass(env,className);
if (clsLocalRef == NULL) {
return getPendingExceptionAndClear(env);
clazz = htable_get(gClassRefHTable, className);
if (clazz) {
*out = clazz;
goto done;
}
cls = (*env)->NewGlobalRef(env, clsLocalRef);
if (cls == NULL) {
(*env)->DeleteLocalRef(env, clsLocalRef);
return getPendingExceptionAndClear(env);
local_clazz = (*env)->FindClass(env,className);
if (!local_clazz) {
jthr = getPendingExceptionAndClear(env);
goto done;
}
(*env)->DeleteLocalRef(env, clsLocalRef);
insertEntryIntoTable(className, cls);
*out = cls;
return NULL;
clazz = (*env)->NewGlobalRef(env, local_clazz);
if (!clazz) {
jthr = getPendingExceptionAndClear(env);
goto done;
}
ret = htable_put(gClassRefHTable, (void*)className, clazz);
if (ret) {
jthr = newRuntimeError(env, "htable_put failed with error "
"code %d\n", ret);
goto done;
}
*out = clazz;
jthr = NULL;
done:
mutexUnlock(&hdfsHashMutex);
(*env)->DeleteLocalRef(env, local_clazz);
if (jthr && clazz) {
(*env)->DeleteGlobalRef(env, clazz);
}
return jthr;
}
jthrowable classNameOfObject(jobject jobj, JNIEnv *env, char **name)
@@ -436,14 +368,24 @@ done:
*/
static JNIEnv* getGlobalJNIEnv(void)
{
const jsize vmBufLength = 1;
JavaVM* vmBuf[vmBufLength];
JavaVM* vmBuf[VM_BUF_LENGTH];
JNIEnv *env;
jint rv = 0;
jint noVMs = 0;
jthrowable jthr;
char *hadoopClassPath;
const char *hadoopClassPathVMArg = "-Djava.class.path=";
size_t optHadoopClassPathLen;
char *optHadoopClassPath;
int noArgs = 1;
char *hadoopJvmArgs;
char jvmArgDelims[] = " ";
char *str, *token, *savePtr;
JavaVMInitArgs vm_args;
JavaVM *vm;
JavaVMOption *options;
rv = JNI_GetCreatedJavaVMs(&(vmBuf[0]), vmBufLength, &noVMs);
rv = JNI_GetCreatedJavaVMs(&(vmBuf[0]), VM_BUF_LENGTH, &noVMs);
if (rv != 0) {
fprintf(stderr, "JNI_GetCreatedJavaVMs failed with error: %d\n", rv);
return NULL;
@@ -451,23 +393,19 @@ static JNIEnv* getGlobalJNIEnv(void)
if (noVMs == 0) {
//Get the environment variables for initializing the JVM
char *hadoopClassPath = getenv("CLASSPATH");
hadoopClassPath = getenv("CLASSPATH");
if (hadoopClassPath == NULL) {
fprintf(stderr, "Environment variable CLASSPATH not set!\n");
return NULL;
}
char *hadoopClassPathVMArg = "-Djava.class.path=";
size_t optHadoopClassPathLen = strlen(hadoopClassPath) +
optHadoopClassPathLen = strlen(hadoopClassPath) +
strlen(hadoopClassPathVMArg) + 1;
char *optHadoopClassPath = malloc(sizeof(char)*optHadoopClassPathLen);
optHadoopClassPath = malloc(sizeof(char)*optHadoopClassPathLen);
snprintf(optHadoopClassPath, optHadoopClassPathLen,
"%s%s", hadoopClassPathVMArg, hadoopClassPath);
// Determine the # of LIBHDFS_OPTS args
int noArgs = 1;
char *hadoopJvmArgs = getenv("LIBHDFS_OPTS");
char jvmArgDelims[] = " ";
char *str, *token, *savePtr;
hadoopJvmArgs = getenv("LIBHDFS_OPTS");
if (hadoopJvmArgs != NULL) {
hadoopJvmArgs = strdup(hadoopJvmArgs);
for (noArgs = 1, str = hadoopJvmArgs; ; noArgs++, str = NULL) {
@@ -480,7 +418,12 @@ static JNIEnv* getGlobalJNIEnv(void)
}
// Now that we know the # args, populate the options array
JavaVMOption options[noArgs];
options = calloc(noArgs, sizeof(JavaVMOption));
if (!options) {
fputs("Call to calloc failed\n", stderr);
free(optHadoopClassPath);
return NULL;
}
options[0].optionString = optHadoopClassPath;
hadoopJvmArgs = getenv("LIBHDFS_OPTS");
if (hadoopJvmArgs != NULL) {
@ -495,8 +438,6 @@ static JNIEnv* getGlobalJNIEnv(void)
}
//Create the VM
JavaVMInitArgs vm_args;
JavaVM *vm;
vm_args.version = JNI_VERSION_1_2;
vm_args.options = options;
vm_args.nOptions = noArgs;
@ -508,6 +449,7 @@ static JNIEnv* getGlobalJNIEnv(void)
free(hadoopJvmArgs);
}
free(optHadoopClassPath);
free(options);
if (rv != 0) {
fprintf(stderr, "Call to JNI_CreateJavaVM failed "
@ -523,7 +465,7 @@ static JNIEnv* getGlobalJNIEnv(void)
}
else {
//Attach this thread to the VM
JavaVM* vm = vmBuf[0];
vm = vmBuf[0];
rv = (*vm)->AttachCurrentThread(vm, (void*)&env, 0);
if (rv != 0) {
fprintf(stderr, "Call to AttachCurrentThread "
@ -557,54 +499,27 @@ static JNIEnv* getGlobalJNIEnv(void)
JNIEnv* getJNIEnv(void)
{
JNIEnv *env;
struct hdfsTls *tls;
int ret;
#ifdef HAVE_BETTER_TLS
static __thread struct hdfsTls *quickTls = NULL;
if (quickTls)
return quickTls->env;
#endif
pthread_mutex_lock(&jvmMutex);
if (!gTlsKeyInitialized) {
ret = pthread_key_create(&gTlsKey, hdfsThreadDestructor);
if (ret) {
pthread_mutex_unlock(&jvmMutex);
fprintf(stderr, "getJNIEnv: pthread_key_create failed with "
"error %d\n", ret);
return NULL;
}
gTlsKeyInitialized = 1;
THREAD_LOCAL_STORAGE_GET_QUICK();
mutexLock(&jvmMutex);
if (threadLocalStorageGet(&env)) {
mutexUnlock(&jvmMutex);
return NULL;
}
tls = pthread_getspecific(gTlsKey);
if (tls) {
pthread_mutex_unlock(&jvmMutex);
return tls->env;
if (env) {
mutexUnlock(&jvmMutex);
return env;
}
env = getGlobalJNIEnv();
pthread_mutex_unlock(&jvmMutex);
mutexUnlock(&jvmMutex);
if (!env) {
fprintf(stderr, "getJNIEnv: getGlobalJNIEnv failed\n");
return NULL;
fprintf(stderr, "getJNIEnv: getGlobalJNIEnv failed\n");
return NULL;
}
tls = calloc(1, sizeof(struct hdfsTls));
if (!tls) {
fprintf(stderr, "getJNIEnv: OOM allocating %zd bytes\n",
sizeof(struct hdfsTls));
return NULL;
if (threadLocalStorageSet(env)) {
return NULL;
}
tls->env = env;
ret = pthread_setspecific(gTlsKey, tls);
if (ret) {
fprintf(stderr, "getJNIEnv: pthread_setspecific failed with "
"error code %d\n", ret);
hdfsThreadDestructor(tls);
return NULL;
}
#ifdef HAVE_BETTER_TLS
quickTls = tls;
#endif
THREAD_LOCAL_STORAGE_SET_QUICK(env);
return env;
}

View File

@ -24,8 +24,6 @@
#include <stdlib.h>
#include <stdarg.h>
#include <search.h>
#include <pthread.h>
#include <errno.h>
#define PATH_SEPARATOR ':'

View File

@ -21,6 +21,7 @@
#include "hdfs_test.h"
#include "jni_helper.h"
#include "native_mini_dfs.h"
#include "platform.h"
#include <errno.h>
#include <jni.h>
@ -347,10 +348,11 @@ error_dlr_nn:
int nmdConfigureHdfsBuilder(struct NativeMiniDfsCluster *cl,
struct hdfsBuilder *bld)
{
int port, ret;
int ret;
tPort port;
hdfsBuilderSetNameNode(bld, "localhost");
port = nmdGetNameNodePort(cl);
port = (tPort)nmdGetNameNodePort(cl);
if (port < 0) {
fprintf(stderr, "nmdGetNameNodePort failed with error %d\n", -port);
return EIO;

View File

@ -0,0 +1,55 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBHDFS_MUTEXES_H
#define LIBHDFS_MUTEXES_H
/*
* Defines abstraction over platform-specific mutexes. libhdfs has no formal
* initialization function that users would call from a single-threaded context
* to initialize the library. This creates a challenge for bootstrapping the
* mutexes. To address this, all required mutexes are pre-defined here with
* external storage. Platform-specific implementations must guarantee that the
* mutexes are initialized via static initialization.
*/
#include "platform.h"
/** Mutex protecting the class reference hash table. */
extern mutex hdfsHashMutex;
/** Mutex protecting singleton JVM instance. */
extern mutex jvmMutex;
/**
* Locks a mutex.
*
* @param m mutex
* @return 0 if successful, non-zero otherwise
*/
int mutexLock(mutex *m);
/**
* Unlocks a mutex.
*
* @param m mutex
* @return 0 if successful, non-zero otherwise
*/
int mutexUnlock(mutex *m);
#endif
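
A minimal usage sketch of this interface (illustrative only, not part of the change set): a caller brackets access to shared state with mutexLock/mutexUnlock and checks the documented return codes. updateSharedTable is a hypothetical name.

#include "os/mutexes.h"

static int updateSharedTable(void)
{
  if (mutexLock(&hdfsHashMutex)) {
    return -1;  /* lock failed; the implementation already printed the error */
  }
  /* ... read or modify the class reference hash table here ... */
  if (mutexUnlock(&hdfsHashMutex)) {
    return -1;
  }
  return 0;
}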

View File

@ -0,0 +1,43 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "os/mutexes.h"
#include <pthread.h>
#include <stdio.h>
mutex hdfsHashMutex = PTHREAD_MUTEX_INITIALIZER;
mutex jvmMutex = PTHREAD_MUTEX_INITIALIZER;
int mutexLock(mutex *m) {
int ret = pthread_mutex_lock(m);
if (ret) {
fprintf(stderr, "mutexLock: pthread_mutex_lock failed with error %d\n",
ret);
}
return ret;
}
int mutexUnlock(mutex *m) {
int ret = pthread_mutex_unlock(m);
if (ret) {
fprintf(stderr, "mutexUnlock: pthread_mutex_unlock failed with error %d\n",
ret);
}
return ret;
}

View File

@ -0,0 +1,34 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBHDFS_PLATFORM_H
#define LIBHDFS_PLATFORM_H
#include <pthread.h>
/* Use gcc type-checked format arguments. */
#define TYPE_CHECKED_PRINTF_FORMAT(formatArg, varArgs) \
__attribute__((format(printf, formatArg, varArgs)))
/*
* Mutex and thread data types defined by pthreads.
*/
typedef pthread_mutex_t mutex;
typedef pthread_t threadId;
#endif
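
An illustrative sketch (assumed usage, not taken from the change set): the macro above is meant to annotate a printf-style declaration so that gcc checks the variadic arguments against the format string. logError is a hypothetical helper.

#include "platform.h"
#include <stdarg.h>
#include <stdio.h>

static void logError(const char *fmt, ...) TYPE_CHECKED_PRINTF_FORMAT(1, 2);

static void logError(const char *fmt, ...)
{
  va_list ap;
  va_start(ap, fmt);
  vfprintf(stderr, fmt, ap);  /* gcc warns if callers pass mismatched arguments */
  va_end(ap);
}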

View File

@ -0,0 +1,52 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "os/thread.h"
#include <pthread.h>
#include <stdio.h>
/**
 * Defines a helper function that adapts the function pointer provided by the
 * caller to the type required by pthread_create.
*
* @param toRun thread to run
* @return void* result of running thread (always NULL)
*/
static void* runThread(void *toRun) {
const thread *t = toRun;
t->start(t->arg);
return NULL;
}
int threadCreate(thread *t) {
int ret;
ret = pthread_create(&t->id, NULL, runThread, t);
if (ret) {
fprintf(stderr, "threadCreate: pthread_create failed with error %d\n", ret);
}
return ret;
}
int threadJoin(const thread *t) {
int ret = pthread_join(t->id, NULL);
if (ret) {
fprintf(stderr, "threadJoin: pthread_join failed with error %d\n", ret);
}
return ret;
}

View File

@ -0,0 +1,80 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "os/thread_local_storage.h"
#include <jni.h>
#include <pthread.h>
#include <stdio.h>
/** Key that allows us to retrieve thread-local storage */
static pthread_key_t gTlsKey;
/** nonzero if we succeeded in initializing gTlsKey. Protected by the jvmMutex */
static int gTlsKeyInitialized = 0;
/**
* The function that is called whenever a thread with libhdfs thread local data
* is destroyed.
*
* @param v The thread-local data
*/
static void hdfsThreadDestructor(void *v)
{
JavaVM *vm;
JNIEnv *env = v;
jint ret;
ret = (*env)->GetJavaVM(env, &vm);
if (ret) {
fprintf(stderr, "hdfsThreadDestructor: GetJavaVM failed with error %d\n",
ret);
(*env)->ExceptionDescribe(env);
} else {
(*vm)->DetachCurrentThread(vm);
}
}
int threadLocalStorageGet(JNIEnv **env)
{
int ret = 0;
if (!gTlsKeyInitialized) {
ret = pthread_key_create(&gTlsKey, hdfsThreadDestructor);
if (ret) {
fprintf(stderr,
"threadLocalStorageGet: pthread_key_create failed with error %d\n",
ret);
return ret;
}
gTlsKeyInitialized = 1;
}
*env = pthread_getspecific(gTlsKey);
return ret;
}
int threadLocalStorageSet(JNIEnv *env)
{
int ret = pthread_setspecific(gTlsKey, env);
if (ret) {
fprintf(stderr,
"threadLocalStorageSet: pthread_setspecific failed with error %d\n",
ret);
hdfsThreadDestructor(env);
}
return ret;
}

View File

@ -0,0 +1,54 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBHDFS_THREAD_H
#define LIBHDFS_THREAD_H
/*
* Defines abstraction over platform-specific threads.
*/
#include "platform.h"
/** Pointer to function to run in thread. */
typedef void (*threadProcedure)(void *);
/** Structure containing a thread's ID, starting address and argument. */
typedef struct {
threadId id;
threadProcedure start;
void *arg;
} thread;
/**
* Creates and immediately starts a new thread.
*
* @param t thread to create
* @return 0 if successful, non-zero otherwise
*/
int threadCreate(thread *t);
/**
* Joins to the given thread, blocking if necessary.
*
* @param t thread to join
* @return 0 if successful, non-zero otherwise
*/
int threadJoin(const thread *t);
#endif
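
A minimal usage sketch of this interface (illustrative only, not from the change set): fill in start and arg, then call threadCreate and threadJoin. printGreeting and runWorkerOnce are hypothetical names.

#include "os/thread.h"
#include <stdio.h>

static void printGreeting(void *arg)
{
  fprintf(stderr, "hello from worker thread: %s\n", (const char *)arg);
}

static int runWorkerOnce(void)
{
  thread t;
  t.start = printGreeting;
  t.arg = "libhdfs";
  if (threadCreate(&t)) {
    return -1;  /* threadCreate already printed the error */
  }
  return threadJoin(&t);
}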

View File

@ -0,0 +1,75 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBHDFS_THREAD_LOCAL_STORAGE_H
#define LIBHDFS_THREAD_LOCAL_STORAGE_H
/*
* Defines abstraction over platform-specific thread-local storage. libhdfs
* currently only needs thread-local storage for a single piece of data: the
* thread's JNIEnv. For simplicity, this interface is defined in terms of
* JNIEnv, not general-purpose thread-local storage of any arbitrary data.
*/
#include <jni.h>
/*
* Most operating systems support the more efficient __thread construct, which
* is initialized by the linker. The following macros use this technique on the
* operating systems that support it.
*/
#ifdef HAVE_BETTER_TLS
#define THREAD_LOCAL_STORAGE_GET_QUICK() \
static __thread JNIEnv *quickTlsEnv = NULL; \
{ \
if (quickTlsEnv) { \
return quickTlsEnv; \
} \
}
#define THREAD_LOCAL_STORAGE_SET_QUICK(env) \
{ \
quickTlsEnv = (env); \
}
#else
#define THREAD_LOCAL_STORAGE_GET_QUICK()
#define THREAD_LOCAL_STORAGE_SET_QUICK(env)
#endif
/**
* Gets the JNIEnv in thread-local storage for the current thread. If the call
 * succeeds and there is a JNIEnv associated with this thread, then returns 0
 * and populates *env. If the call succeeds but there is no JNIEnv associated
 * with this thread, then returns 0 and sets *env to NULL. If the call fails,
* then returns non-zero. Only one thread at a time may execute this function.
* The caller is responsible for enforcing mutual exclusion.
*
* @param env JNIEnv out parameter
* @return 0 if successful, non-zero otherwise
*/
int threadLocalStorageGet(JNIEnv **env);
/**
* Sets the JNIEnv in thread-local storage for the current thread.
*
* @param env JNIEnv to set
* @return 0 if successful, non-zero otherwise
*/
int threadLocalStorageSet(JNIEnv *env);
#endif
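
A caller sketch showing how the quick macros combine with threadLocalStorageGet/Set, roughly mirroring the getJNIEnv changes earlier in this patch (illustrative only; attachThreadToJvm is a hypothetical helper, and jvmMutex comes from os/mutexes.h).

#include "os/mutexes.h"
#include "os/thread_local_storage.h"

JNIEnv *attachThreadToJvm(void);  /* hypothetical: creates or attaches a JNIEnv */

JNIEnv *getCachedJNIEnv(void)
{
  JNIEnv *env;
  THREAD_LOCAL_STORAGE_GET_QUICK();  /* fast path where __thread is available */
  mutexLock(&jvmMutex);              /* caller enforces mutual exclusion */
  if (threadLocalStorageGet(&env)) {
    mutexUnlock(&jvmMutex);
    return NULL;
  }
  if (env) {
    mutexUnlock(&jvmMutex);
    return env;                      /* already cached for this thread */
  }
  env = attachThreadToJvm();
  mutexUnlock(&jvmMutex);
  if (!env || threadLocalStorageSet(env)) {
    return NULL;
  }
  THREAD_LOCAL_STORAGE_SET_QUICK(env);
  return env;
}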

View File

@ -0,0 +1,28 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBHDFS_INTTYPES_H
#define LIBHDFS_INTTYPES_H
/* On Windows, inttypes.h does not exist, so manually define what we need. */
#define PRId64 "I64d"
#define PRIu64 "I64u"
typedef unsigned __int64 uint64_t;
#endif

View File

@ -0,0 +1,52 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "os/mutexes.h"
#include <windows.h>
mutex hdfsHashMutex;
mutex jvmMutex;
/**
* Unfortunately, there is no simple static initializer for a critical section.
* Instead, the API requires calling InitializeCriticalSection. Since libhdfs
* lacks an explicit initialization function, there is no obvious existing place
* for the InitializeCriticalSection calls. To work around this, we define an
* initialization function and instruct the linker to set a pointer to that
* function as a user-defined global initializer. See discussion of CRT
* Initialization:
* http://msdn.microsoft.com/en-us/library/bb918180.aspx
*/
static void __cdecl initializeMutexes(void) {
InitializeCriticalSection(&hdfsHashMutex);
InitializeCriticalSection(&jvmMutex);
}
#pragma section(".CRT$XCU", read)
__declspec(allocate(".CRT$XCU"))
const void (__cdecl *pInitialize)(void) = initializeMutexes;
int mutexLock(mutex *m) {
EnterCriticalSection(m);
return 0;
}
int mutexUnlock(mutex *m) {
LeaveCriticalSection(m);
return 0;
}

View File

@ -0,0 +1,86 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBHDFS_PLATFORM_H
#define LIBHDFS_PLATFORM_H
#include <stdio.h>
#include <windows.h>
#include <winsock.h>
/*
* O_ACCMODE defined to match Linux definition.
*/
#ifndef O_ACCMODE
#define O_ACCMODE 0x0003
#endif
/*
* Windows has a different name for its maximum path length constant.
*/
#ifndef PATH_MAX
#define PATH_MAX MAX_PATH
#endif
/*
* Windows does not define EDQUOT and ESTALE in errno.h. The closest equivalents
* are these constants from winsock.h.
*/
#ifndef EDQUOT
#define EDQUOT WSAEDQUOT
#endif
#ifndef ESTALE
#define ESTALE WSAESTALE
#endif
/*
* gcc-style type-checked format arguments are not supported on Windows, so just
* stub this macro.
*/
#define TYPE_CHECKED_PRINTF_FORMAT(formatArg, varArgs)
/*
* Define macros for various string formatting functions not defined on Windows.
* Where possible, we reroute to one of the secure CRT variants. On Windows,
* the preprocessor does support variadic macros, even though they weren't
* defined until C99.
*/
#define snprintf(str, size, format, ...) \
_snprintf_s((str), (size), _TRUNCATE, (format), __VA_ARGS__)
#define strncpy(dest, src, n) \
strncpy_s((dest), (n), (src), _TRUNCATE)
#define strtok_r(str, delim, saveptr) \
strtok_s((str), (delim), (saveptr))
#define vsnprintf(str, size, format, ...) \
vsnprintf_s((str), (size), _TRUNCATE, (format), __VA_ARGS__)
/*
* Mutex data type defined as Windows CRITICAL_SECTION. A critical section (not
* Windows mutex) is used, because libhdfs only needs synchronization of multiple
* threads within a single process, not synchronization across process
* boundaries.
*/
typedef CRITICAL_SECTION mutex;
/*
* Thread data type defined as HANDLE to a Windows thread.
*/
typedef HANDLE threadId;
#endif
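
An illustrative sketch (assumed usage, not from the change set): with the re-routing macros above, the same tokenizing and formatting code compiles against the secure CRT on Windows and against the standard functions elsewhere. printTokens is a hypothetical helper.

/* Include system headers before platform.h so its macros do not rewrite their
 * prototypes. */
#include <stdio.h>
#include <string.h>
#include "platform.h"

static void printTokens(char *line)
{
  char msg[64];
  char *token, *savePtr;
  for (token = strtok_r(line, " ", &savePtr); token;
       token = strtok_r(NULL, " ", &savePtr)) {
    snprintf(msg, sizeof(msg), "token: %s", token);  /* _snprintf_s on Windows */
    fprintf(stderr, "%s\n", msg);
  }
}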

View File

@ -0,0 +1,66 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "os/thread.h"
#include <stdio.h>
#include <windows.h>
/**
 * Defines a helper function that adapts the function pointer provided by the
 * caller to the type required by CreateThread.
*
* @param toRun thread to run
* @return DWORD result of running thread (always 0)
*/
static DWORD WINAPI runThread(LPVOID toRun) {
const thread *t = toRun;
t->start(t->arg);
return 0;
}
int threadCreate(thread *t) {
DWORD ret = 0;
HANDLE h;
h = CreateThread(NULL, 0, runThread, t, 0, NULL);
if (h) {
t->id = h;
} else {
ret = GetLastError();
fprintf(stderr, "threadCreate: CreateThread failed with error %d\n", ret);
}
return ret;
}
int threadJoin(const thread *t) {
DWORD ret = WaitForSingleObject(t->id, INFINITE);
switch (ret) {
case WAIT_OBJECT_0:
break;
case WAIT_FAILED:
ret = GetLastError();
fprintf(stderr, "threadJoin: WaitForSingleObject failed with error %d\n",
ret);
break;
default:
fprintf(stderr, "threadJoin: WaitForSingleObject unexpected error %d\n",
ret);
break;
}
return ret;
}

View File

@ -0,0 +1,164 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "os/thread_local_storage.h"
#include <jni.h>
#include <stdio.h>
#include <windows.h>
/** Key that allows us to retrieve thread-local storage */
static DWORD gTlsIndex = TLS_OUT_OF_INDEXES;
/**
* If the current thread has a JNIEnv in thread-local storage, then detaches the
* current thread from the JVM.
*/
static void detachCurrentThreadFromJvm()
{
JNIEnv *env = NULL;
JavaVM *vm;
jint ret;
if (threadLocalStorageGet(&env) || !env) {
return;
}
ret = (*env)->GetJavaVM(env, &vm);
if (ret) {
fprintf(stderr,
"detachCurrentThreadFromJvm: GetJavaVM failed with error %d\n",
ret);
(*env)->ExceptionDescribe(env);
} else {
(*vm)->DetachCurrentThread(vm);
}
}
/**
* Unlike pthreads, the Windows API does not seem to provide a convenient way to
* hook a callback onto thread shutdown. However, the Windows portable
* executable format does define a concept of thread-local storage callbacks.
* Here, we define a function and instruct the linker to set a pointer to that
* function in the segment for thread-local storage callbacks. See page 85 of
* Microsoft Portable Executable and Common Object File Format Specification:
* http://msdn.microsoft.com/en-us/gg463119.aspx
* This technique only works for implicit linking (OS loads DLL on demand), not
* for explicit linking (user code calls LoadLibrary directly). This effectively
* means that we have a known limitation: libhdfs may not work correctly if a
* Windows application attempts to use it via explicit linking.
*
* @param h module handle
* @param reason the reason for calling the callback
* @param pv reserved, unused
*/
static void NTAPI tlsCallback(PVOID h, DWORD reason, PVOID pv)
{
DWORD tlsIndex;
switch (reason) {
case DLL_THREAD_DETACH:
detachCurrentThreadFromJvm();
break;
case DLL_PROCESS_DETACH:
detachCurrentThreadFromJvm();
tlsIndex = gTlsIndex;
gTlsIndex = TLS_OUT_OF_INDEXES;
if (!TlsFree(tlsIndex)) {
fprintf(stderr, "tlsCallback: TlsFree failed with error %d\n",
GetLastError());
}
break;
default:
break;
}
}
/*
* A variable named _tls_used contains the TLS directory, which contains a list
* of pointers to callback functions. Normally, the linker won't retain this
* variable unless the executable has implicit thread-local variables, defined
* using the __declspec(thread) extended storage-class modifier. libhdfs
* doesn't use __declspec(thread), and we have no guarantee that the executable
* linked to libhdfs will use __declspec(thread). By forcing the linker to
* reference _tls_used, we guarantee that the binary retains the TLS directory.
* See Microsoft Visual Studio 10.0/VC/crt/src/tlssup.c .
*/
#pragma comment(linker, "/INCLUDE:_tls_used")
/*
* We must retain a pointer to the callback function. Force the linker to keep
* this symbol, even though it appears that nothing in our source code uses it.
*/
#pragma comment(linker, "/INCLUDE:pTlsCallback")
/*
* Define constant pointer to our callback, and tell the linker to pin it into
* the TLS directory so that it receives thread callbacks. Use external linkage
* to protect against the linker discarding the seemingly unused symbol.
*/
#pragma const_seg(".CRT$XLB")
extern const PIMAGE_TLS_CALLBACK pTlsCallback;
const PIMAGE_TLS_CALLBACK pTlsCallback = tlsCallback;
#pragma const_seg()
int threadLocalStorageGet(JNIEnv **env)
{
LPVOID tls;
DWORD ret;
if (TLS_OUT_OF_INDEXES == gTlsIndex) {
gTlsIndex = TlsAlloc();
if (TLS_OUT_OF_INDEXES == gTlsIndex) {
fprintf(stderr,
"threadLocalStorageGet: TlsAlloc failed with error %d\n",
TLS_OUT_OF_INDEXES);
return TLS_OUT_OF_INDEXES;
}
}
tls = TlsGetValue(gTlsIndex);
if (tls) {
*env = tls;
return 0;
} else {
ret = GetLastError();
if (ERROR_SUCCESS == ret) {
/* Thread-local storage contains NULL, because we haven't set it yet. */
*env = NULL;
return 0;
} else {
/*
* The API call failed. According to documentation, TlsGetValue cannot
* fail as long as the index is a valid index from a successful TlsAlloc
* call. This error handling is purely defensive.
*/
fprintf(stderr,
"threadLocalStorageGet: TlsGetValue failed with error %d\n", ret);
return ret;
}
}
}
int threadLocalStorageSet(JNIEnv *env)
{
DWORD ret = 0;
if (!TlsSetValue(gTlsIndex, (LPVOID)env)) {
ret = GetLastError();
fprintf(stderr,
"threadLocalStorageSet: TlsSetValue failed with error %d\n",
ret);
    detachCurrentThreadFromJvm();
}
return ret;
}

View File

@ -0,0 +1,29 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBHDFS_UNISTD_H
#define LIBHDFS_UNISTD_H
/* On Windows, unistd.h does not exist, so manually define what we need. */
#include <process.h> /* Declares getpid(). */
#include <windows.h>
/* Re-route sleep to Sleep, converting units from seconds to milliseconds. */
#define sleep(seconds) Sleep((seconds) * 1000)
#endif

View File

@ -18,6 +18,7 @@
#include "hdfs.h"
#include "hdfs_test.h"
#include "platform.h"
#include <inttypes.h>
#include <jni.h>
@ -28,12 +29,13 @@
#include <unistd.h>
void permission_disp(short permissions, char *rtr) {
rtr[9] = '\0';
int i;
short permissionsId;
char* perm;
rtr[9] = '\0';
for(i=2;i>=0;i--)
{
short permissionsId = permissions >> (i * 3) & (short)7;
char* perm;
permissionsId = permissions >> (i * 3) & (short)7;
switch(permissionsId) {
case 7:
perm = "rwx"; break;
@ -60,35 +62,56 @@ void permission_disp(short permissions, char *rtr) {
}
int main(int argc, char **argv) {
char buffer[32];
tSize num_written_bytes;
const char *writePath = "/tmp/testfile.txt";
const char *fileContents = "Hello, World!";
const char *readPath = "/tmp/testfile.txt";
const char *srcPath = "/tmp/testfile.txt";
const char *dstPath = "/tmp/testfile2.txt";
const char *slashTmp = "/tmp";
const char *newDirectory = "/tmp/newdir";
const char *newOwner = "root";
const char *tuser = "nobody";
const char *appendPath = "/tmp/appends";
const char *userPath = "/tmp/usertestfile.txt";
hdfsFS fs = hdfsConnectNewInstance("default", 0);
char buffer[32], buffer2[256], rdbuffer[32];
tSize num_written_bytes, num_read_bytes;
hdfsFS fs, lfs;
hdfsFile writeFile, readFile, localFile, appendFile, userFile;
tOffset currentPos, seekPos;
int exists, totalResult, result, numEntries, i, j;
const char *resp;
hdfsFileInfo *fileInfo, *fileList, *finfo;
char *buffer3;
char permissions[10];
char ***hosts;
short newPerm = 0666;
tTime newMtime, newAtime;
fs = hdfsConnectNewInstance("default", 0);
if(!fs) {
fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
exit(-1);
}
hdfsFS lfs = hdfsConnectNewInstance(NULL, 0);
lfs = hdfsConnectNewInstance(NULL, 0);
if(!lfs) {
fprintf(stderr, "Oops! Failed to connect to 'local' hdfs!\n");
exit(-1);
}
const char* writePath = "/tmp/testfile.txt";
const char* fileContents = "Hello, World!";
{
//Write tests
hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
if(!writeFile) {
fprintf(stderr, "Failed to open %s for writing!\n", writePath);
exit(-1);
}
fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
num_written_bytes =
hdfsWrite(fs, writeFile, (void*)fileContents, strlen(fileContents)+1);
hdfsWrite(fs, writeFile, (void*)fileContents,
(tSize)(strlen(fileContents)+1));
if (num_written_bytes != strlen(fileContents) + 1) {
fprintf(stderr, "Failed to write correct number of bytes - expected %d, got %d\n",
(int)(strlen(fileContents) + 1), (int)num_written_bytes);
@ -96,7 +119,7 @@ int main(int argc, char **argv) {
}
fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
tOffset currentPos = -1;
currentPos = -1;
if ((currentPos = hdfsTell(fs, writeFile)) == -1) {
fprintf(stderr,
"Failed to get current file position correctly! Got %ld!\n",
@ -123,15 +146,14 @@ int main(int argc, char **argv) {
{
//Read tests
const char* readPath = "/tmp/testfile.txt";
int exists = hdfsExists(fs, readPath);
exists = hdfsExists(fs, readPath);
if (exists) {
fprintf(stderr, "Failed to validate existence of %s\n", readPath);
exit(-1);
}
hdfsFile readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
readFile = hdfsOpenFile(fs, readPath, O_RDONLY, 0, 0, 0);
if (!readFile) {
fprintf(stderr, "Failed to open %s for reading!\n", readPath);
exit(-1);
@ -146,13 +168,13 @@ int main(int argc, char **argv) {
fprintf(stderr, "hdfsAvailable: %d\n", hdfsAvailable(fs, readFile));
tOffset seekPos = 1;
seekPos = 1;
if(hdfsSeek(fs, readFile, seekPos)) {
fprintf(stderr, "Failed to seek %s for reading!\n", readPath);
exit(-1);
}
tOffset currentPos = -1;
currentPos = -1;
if((currentPos = hdfsTell(fs, readFile)) != seekPos) {
fprintf(stderr,
"Failed to get current file position correctly! Got %ld!\n",
@ -175,7 +197,7 @@ int main(int argc, char **argv) {
exit(-1);
}
memset(buffer, 0, sizeof(buffer));
tSize num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
num_read_bytes = hdfsRead(fs, readFile, (void*)buffer,
sizeof(buffer));
if (strncmp(fileContents, buffer, strlen(fileContents)) != 0) {
fprintf(stderr, "Failed to read (direct). Expected %s but got %s (%d bytes)\n",
@ -208,14 +230,14 @@ int main(int argc, char **argv) {
hdfsCloseFile(fs, readFile);
// Test correct behaviour for unsupported filesystems
hdfsFile localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
localFile = hdfsOpenFile(lfs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
if(!localFile) {
fprintf(stderr, "Failed to open %s for writing!\n", writePath);
exit(-1);
}
num_written_bytes = hdfsWrite(lfs, localFile, (void*)fileContents,
strlen(fileContents) + 1);
(tSize)(strlen(fileContents) + 1));
hdfsCloseFile(lfs, localFile);
localFile = hdfsOpenFile(lfs, writePath, O_RDONLY, 0, 0, 0);
@ -229,50 +251,43 @@ int main(int argc, char **argv) {
hdfsCloseFile(lfs, localFile);
}
int totalResult = 0;
int result = 0;
totalResult = 0;
result = 0;
{
//Generic file-system operations
const char* srcPath = "/tmp/testfile.txt";
const char* dstPath = "/tmp/testfile2.txt";
fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = hdfsCopy(fs, srcPath, lfs, srcPath)) ? "Failed!" : "Success!"));
fprintf(stderr, "hdfsCopy(remote-local): %s\n", ((result = hdfsCopy(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) ? "Failed!" : "Success!"));
fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
fprintf(stderr, "hdfsMove(local-local): %s\n", ((result = hdfsMove(lfs, srcPath, lfs, dstPath)) ? "Failed!" : "Success!"));
fprintf(stderr, "hdfsMove(local-local): %s\n", ((result = hdfsMove(lfs, srcPath, lfs, dstPath)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
fprintf(stderr, "hdfsMove(remote-local): %s\n", ((result = hdfsMove(fs, srcPath, lfs, srcPath)) ? "Failed!" : "Success!"));
fprintf(stderr, "hdfsMove(remote-local): %s\n", ((result = hdfsMove(fs, srcPath, lfs, srcPath)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) ? "Failed!" : "Success!"));
fprintf(stderr, "hdfsRename: %s\n", ((result = hdfsRename(fs, dstPath, srcPath)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) ? "Failed!" : "Success!"));
fprintf(stderr, "hdfsCopy(remote-remote): %s\n", ((result = hdfsCopy(fs, srcPath, fs, dstPath)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
const char* slashTmp = "/tmp";
const char* newDirectory = "/tmp/newdir";
fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) ? "Failed!" : "Success!"));
fprintf(stderr, "hdfsCreateDirectory: %s\n", ((result = hdfsCreateDirectory(fs, newDirectory)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 2)) ? "Failed!" : "Success!"));
fprintf(stderr, "hdfsSetReplication: %s\n", ((result = hdfsSetReplication(fs, srcPath, 2)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
char buffer[256];
const char *resp;
fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
totalResult += (resp ? 0 : 1);
fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) ? "Failed!" : "Success!"));
fprintf(stderr, "hdfsSetWorkingDirectory: %s\n", ((result = hdfsSetWorkingDirectory(fs, slashTmp)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer, sizeof(buffer))) ? buffer : "Failed!"));
fprintf(stderr, "hdfsGetWorkingDirectory: %s\n", ((resp = hdfsGetWorkingDirectory(fs, buffer2, sizeof(buffer2))) != 0 ? buffer2 : "Failed!"));
totalResult += (resp ? 0 : 1);
fprintf(stderr, "hdfsGetDefaultBlockSize: %ld\n", hdfsGetDefaultBlockSize(fs));
fprintf(stderr, "hdfsGetCapacity: %ld\n", hdfsGetCapacity(fs));
fprintf(stderr, "hdfsGetUsed: %ld\n", hdfsGetUsed(fs));
hdfsFileInfo *fileInfo = NULL;
fileInfo = NULL;
if((fileInfo = hdfsGetPathInfo(fs, slashTmp)) != NULL) {
fprintf(stderr, "hdfsGetPathInfo - SUCCESS!\n");
fprintf(stderr, "Name: %s, ", fileInfo->mName);
@ -283,7 +298,6 @@ int main(int argc, char **argv) {
fprintf(stderr, "LastMod: %s", ctime(&fileInfo->mLastMod));
fprintf(stderr, "Owner: %s, ", fileInfo->mOwner);
fprintf(stderr, "Group: %s, ", fileInfo->mGroup);
char permissions[10];
permission_disp(fileInfo->mPermissions, permissions);
fprintf(stderr, "Permissions: %d (%s)\n", fileInfo->mPermissions, permissions);
hdfsFreeFileInfo(fileInfo, 1);
@ -292,10 +306,8 @@ int main(int argc, char **argv) {
fprintf(stderr, "waah! hdfsGetPathInfo for %s - FAILED!\n", slashTmp);
}
hdfsFileInfo *fileList = 0;
int numEntries = 0;
fileList = 0;
if((fileList = hdfsListDirectory(fs, slashTmp, &numEntries)) != NULL) {
int i = 0;
for(i=0; i < numEntries; ++i) {
fprintf(stderr, "Name: %s, ", fileList[i].mName);
fprintf(stderr, "Type: %c, ", (char)fileList[i].mKind);
@ -305,7 +317,6 @@ int main(int argc, char **argv) {
fprintf(stderr, "LastMod: %s", ctime(&fileList[i].mLastMod));
fprintf(stderr, "Owner: %s, ", fileList[i].mOwner);
fprintf(stderr, "Group: %s, ", fileList[i].mGroup);
char permissions[10];
permission_disp(fileList[i].mPermissions, permissions);
fprintf(stderr, "Permissions: %d (%s)\n", fileList[i].mPermissions, permissions);
}
@ -319,12 +330,12 @@ int main(int argc, char **argv) {
}
}
char*** hosts = hdfsGetHosts(fs, srcPath, 0, 1);
hosts = hdfsGetHosts(fs, srcPath, 0, 1);
if(hosts) {
fprintf(stderr, "hdfsGetHosts - SUCCESS! ... \n");
int i=0;
i=0;
while(hosts[i]) {
int j = 0;
j = 0;
while(hosts[i][j]) {
fprintf(stderr,
"\thosts[%d][%d] - %s\n", i, j, hosts[i][j]);
@ -337,131 +348,129 @@ int main(int argc, char **argv) {
fprintf(stderr, "waah! hdfsGetHosts - FAILED!\n");
}
char *newOwner = "root";
// setting tmp dir to 777 so later when connectAsUser nobody, we can write to it
short newPerm = 0666;
// chown write
fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) ? "Failed!" : "Success!"));
fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, NULL, "users")) != 0 ? "Failed!" : "Success!"));
totalResult += result;
fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) ? "Failed!" : "Success!"));
fprintf(stderr, "hdfsChown: %s\n", ((result = hdfsChown(fs, writePath, newOwner, NULL)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
// chmod write
fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) ? "Failed!" : "Success!"));
fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, writePath, newPerm)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
sleep(2);
tTime newMtime = time(NULL);
tTime newAtime = time(NULL);
newMtime = time(NULL);
newAtime = time(NULL);
// utime write
fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, newMtime, newAtime)) ? "Failed!" : "Success!"));
fprintf(stderr, "hdfsUtime: %s\n", ((result = hdfsUtime(fs, writePath, newMtime, newAtime)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
// chown/chmod/utime read
hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
finfo = hdfsGetPathInfo(fs, writePath);
fprintf(stderr, "hdfsChown read: %s\n", ((result = (strcmp(finfo->mOwner, newOwner) != 0)) ? "Failed!" : "Success!"));
fprintf(stderr, "hdfsChown read: %s\n", ((result = (strcmp(finfo->mOwner, newOwner))) != 0 ? "Failed!" : "Success!"));
totalResult += result;
fprintf(stderr, "hdfsChmod read: %s\n", ((result = (finfo->mPermissions != newPerm)) ? "Failed!" : "Success!"));
fprintf(stderr, "hdfsChmod read: %s\n", ((result = (finfo->mPermissions != newPerm)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
// will later use /tmp/ as a different user so enable it
fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 0777)) ? "Failed!" : "Success!"));
fprintf(stderr, "hdfsChmod: %s\n", ((result = hdfsChmod(fs, "/tmp/", 0777)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
fprintf(stderr,"newMTime=%ld\n",newMtime);
fprintf(stderr,"curMTime=%ld\n",finfo->mLastMod);
fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = (finfo->mLastMod != newMtime)) ? "Failed!" : "Success!"));
fprintf(stderr, "hdfsUtime read (mtime): %s\n", ((result = (finfo->mLastMod != newMtime)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
// No easy way to turn on access times from hdfs_test right now
// fprintf(stderr, "hdfsUtime read (atime): %s\n", ((result = (finfo->mLastAccess != newAtime)) ? "Failed!" : "Success!"));
// fprintf(stderr, "hdfsUtime read (atime): %s\n", ((result = (finfo->mLastAccess != newAtime)) != 0 ? "Failed!" : "Success!"));
// totalResult += result;
hdfsFreeFileInfo(finfo, 1);
// Clean up
fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory, 1)) ? "Failed!" : "Success!"));
fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, newDirectory, 1)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath, 1)) ? "Failed!" : "Success!"));
fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(fs, srcPath, 1)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, srcPath, 1)) ? "Failed!" : "Success!"));
fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, srcPath, 1)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, dstPath, 1)) ? "Failed!" : "Success!"));
fprintf(stderr, "hdfsDelete: %s\n", ((result = hdfsDelete(lfs, dstPath, 1)) != 0 ? "Failed!" : "Success!"));
totalResult += result;
fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) ? "Success!" : "Failed!"));
fprintf(stderr, "hdfsExists: %s\n", ((result = hdfsExists(fs, newDirectory)) != 0 ? "Success!" : "Failed!"));
totalResult += (result ? 0 : 1);
}
{
// TEST APPENDS
const char *writePath = "/tmp/appends";
// CREATE
hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY, 0, 0, 0);
if(!writeFile) {
fprintf(stderr, "Failed to open %s for writing!\n", writePath);
appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY, 0, 0, 0);
if(!appendFile) {
fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
exit(-1);
}
fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
char* buffer = "Hello,";
tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer));
buffer3 = "Hello,";
num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
(tSize)strlen(buffer3));
fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
if (hdfsFlush(fs, writeFile)) {
fprintf(stderr, "Failed to 'flush' %s\n", writePath);
if (hdfsFlush(fs, appendFile)) {
fprintf(stderr, "Failed to 'flush' %s\n", appendPath);
exit(-1);
}
fprintf(stderr, "Flushed %s successfully!\n", writePath);
fprintf(stderr, "Flushed %s successfully!\n", appendPath);
hdfsCloseFile(fs, writeFile);
hdfsCloseFile(fs, appendFile);
// RE-OPEN
writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_APPEND, 0, 0, 0);
if(!writeFile) {
fprintf(stderr, "Failed to open %s for writing!\n", writePath);
appendFile = hdfsOpenFile(fs, appendPath, O_WRONLY|O_APPEND, 0, 0, 0);
if(!appendFile) {
fprintf(stderr, "Failed to open %s for writing!\n", appendPath);
exit(-1);
}
fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
fprintf(stderr, "Opened %s for writing successfully...\n", appendPath);
buffer = " World";
num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer) + 1);
buffer3 = " World";
num_written_bytes = hdfsWrite(fs, appendFile, (void*)buffer3,
(tSize)(strlen(buffer3) + 1));
fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
if (hdfsFlush(fs, writeFile)) {
fprintf(stderr, "Failed to 'flush' %s\n", writePath);
if (hdfsFlush(fs, appendFile)) {
fprintf(stderr, "Failed to 'flush' %s\n", appendPath);
exit(-1);
}
fprintf(stderr, "Flushed %s successfully!\n", writePath);
fprintf(stderr, "Flushed %s successfully!\n", appendPath);
hdfsCloseFile(fs, writeFile);
hdfsCloseFile(fs, appendFile);
// CHECK size
hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = (finfo->mSize == strlen("Hello, World") + 1)) ? "Success!" : "Failed!"));
finfo = hdfsGetPathInfo(fs, appendPath);
fprintf(stderr, "fileinfo->mSize: == total %s\n", ((result = (finfo->mSize == (tOffset)(strlen("Hello, World") + 1))) == 1 ? "Success!" : "Failed!"));
totalResult += (result ? 0 : 1);
// READ and check data
hdfsFile readFile = hdfsOpenFile(fs, writePath, O_RDONLY, 0, 0, 0);
readFile = hdfsOpenFile(fs, appendPath, O_RDONLY, 0, 0, 0);
if (!readFile) {
fprintf(stderr, "Failed to open %s for reading!\n", writePath);
fprintf(stderr, "Failed to open %s for reading!\n", appendPath);
exit(-1);
}
char rdbuffer[32];
tSize num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
num_read_bytes = hdfsRead(fs, readFile, (void*)rdbuffer, sizeof(rdbuffer));
fprintf(stderr, "Read following %d bytes:\n%s\n",
num_read_bytes, rdbuffer);
fprintf(stderr, "read == Hello, World %s\n", (result = (strcmp(rdbuffer, "Hello, World") == 0)) ? "Success!" : "Failed!");
fprintf(stderr, "read == Hello, World %s\n", ((result = (strcmp(rdbuffer, "Hello, World"))) == 0 ? "Success!" : "Failed!"));
hdfsCloseFile(fs, readFile);
@ -478,36 +487,33 @@ int main(int argc, char **argv) {
    // the actual fs user capabilities. Thus just create a file and check
    // that the owner is correct.
const char *tuser = "nobody";
const char* writePath = "/tmp/usertestfile.txt";
fs = hdfsConnectAsUserNewInstance("default", 0, tuser);
if(!fs) {
fprintf(stderr, "Oops! Failed to connect to hdfs as user %s!\n",tuser);
exit(-1);
}
hdfsFile writeFile = hdfsOpenFile(fs, writePath, O_WRONLY|O_CREAT, 0, 0, 0);
if(!writeFile) {
fprintf(stderr, "Failed to open %s for writing!\n", writePath);
userFile = hdfsOpenFile(fs, userPath, O_WRONLY|O_CREAT, 0, 0, 0);
if(!userFile) {
fprintf(stderr, "Failed to open %s for writing!\n", userPath);
exit(-1);
}
fprintf(stderr, "Opened %s for writing successfully...\n", writePath);
fprintf(stderr, "Opened %s for writing successfully...\n", userPath);
char* buffer = "Hello, World!";
tSize num_written_bytes = hdfsWrite(fs, writeFile, (void*)buffer, strlen(buffer)+1);
num_written_bytes = hdfsWrite(fs, userFile, (void*)fileContents,
(tSize)(strlen(fileContents)+1));
fprintf(stderr, "Wrote %d bytes\n", num_written_bytes);
if (hdfsFlush(fs, writeFile)) {
fprintf(stderr, "Failed to 'flush' %s\n", writePath);
if (hdfsFlush(fs, userFile)) {
fprintf(stderr, "Failed to 'flush' %s\n", userPath);
exit(-1);
}
fprintf(stderr, "Flushed %s successfully!\n", writePath);
fprintf(stderr, "Flushed %s successfully!\n", userPath);
hdfsCloseFile(fs, writeFile);
hdfsCloseFile(fs, userFile);
hdfsFileInfo *finfo = hdfsGetPathInfo(fs, writePath);
fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = (strcmp(finfo->mOwner, tuser) != 0)) ? "Failed!" : "Success!"));
finfo = hdfsGetPathInfo(fs, userPath);
fprintf(stderr, "hdfs new file user is correct: %s\n", ((result = (strcmp(finfo->mOwner, tuser))) != 0 ? "Failed!" : "Success!"));
totalResult += result;
}

View File

@ -22,35 +22,38 @@
#include <stdlib.h>
int main(int argc, char **argv) {
hdfsFS fs;
const char *rfile = argv[1];
tSize bufferSize = strtoul(argv[3], NULL, 10);
hdfsFile readFile;
char* buffer;
tSize curSize;
if (argc != 4) {
fprintf(stderr, "Usage: hdfs_read <filename> <filesize> <buffersize>\n");
exit(-1);
}
hdfsFS fs = hdfsConnect("default", 0);
fs = hdfsConnect("default", 0);
if (!fs) {
fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
exit(-1);
}
const char* rfile = argv[1];
tSize bufferSize = strtoul(argv[3], NULL, 10);
hdfsFile readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
readFile = hdfsOpenFile(fs, rfile, O_RDONLY, bufferSize, 0, 0);
if (!readFile) {
fprintf(stderr, "Failed to open %s for writing!\n", rfile);
exit(-2);
}
// data to be written to the file
char* buffer = malloc(sizeof(char) * bufferSize);
buffer = malloc(sizeof(char) * bufferSize);
if(buffer == NULL) {
return -2;
}
// read from the file
tSize curSize = bufferSize;
curSize = bufferSize;
for (; curSize == bufferSize;) {
curSize = hdfsRead(fs, readFile, (void*)buffer, curSize);
}

View File

@ -21,23 +21,31 @@
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
int main(int argc, char **argv) {
hdfsFS fs;
const char *writeFileName = argv[1];
off_t fileTotalSize = strtoul(argv[2], NULL, 10);
long long tmpBufferSize = strtoul(argv[3], NULL, 10);
tSize bufferSize;
hdfsFile writeFile;
char* buffer;
int i;
off_t nrRemaining;
tSize curSize;
tSize written;
if (argc != 4) {
fprintf(stderr, "Usage: hdfs_write <filename> <filesize> <buffersize>\n");
exit(-1);
}
hdfsFS fs = hdfsConnect("default", 0);
fs = hdfsConnect("default", 0);
if (!fs) {
fprintf(stderr, "Oops! Failed to connect to hdfs!\n");
exit(-1);
}
const char* writeFileName = argv[1];
off_t fileTotalSize = strtoul(argv[2], NULL, 10);
long long tmpBufferSize = strtoul(argv[3], NULL, 10);
// sanity check
if(fileTotalSize == ULONG_MAX && errno == ERANGE) {
@ -51,30 +59,27 @@ int main(int argc, char **argv) {
exit(-3);
}
tSize bufferSize = tmpBufferSize;
bufferSize = (tSize)tmpBufferSize;
hdfsFile writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 0, 0);
writeFile = hdfsOpenFile(fs, writeFileName, O_WRONLY, bufferSize, 0, 0);
if (!writeFile) {
fprintf(stderr, "Failed to open %s for writing!\n", writeFileName);
exit(-2);
}
// data to be written to the file
char* buffer = malloc(sizeof(char) * bufferSize);
buffer = malloc(sizeof(char) * bufferSize);
if(buffer == NULL) {
fprintf(stderr, "Could not allocate buffer of size %d\n", bufferSize);
return -2;
}
int i = 0;
for (i=0; i < bufferSize; ++i) {
buffer[i] = 'a' + (i%26);
}
// write to the file
off_t nrRemaining;
for (nrRemaining = fileTotalSize; nrRemaining > 0; nrRemaining -= bufferSize ) {
tSize curSize = ( bufferSize < nrRemaining ) ? bufferSize : (tSize)nrRemaining;
tSize written;
curSize = ( bufferSize < nrRemaining ) ? bufferSize : (tSize)nrRemaining;
if ((written = hdfsWrite(fs, writeFile, (void*)buffer, curSize)) != curSize) {
fprintf(stderr, "ERROR: hdfsWrite returned an error on write: %d\n", written);
exit(-3);

View File

@ -19,12 +19,12 @@
#include "expect.h"
#include "hdfs.h"
#include "native_mini_dfs.h"
#include "platform.h"
#include <errno.h>
#include <inttypes.h>
#include <semaphore.h>
#include <pthread.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@ -53,7 +53,7 @@ static uint8_t *getZeroCopyBlockData(int blockIdx)
exit(1);
}
for (i = 0; i < TEST_ZEROCOPY_FULL_BLOCK_SIZE; i++) {
buf[i] = blockIdx + (i % 17);
buf[i] = (uint8_t)(blockIdx + (i % 17));
}
return buf;
}
@ -69,18 +69,6 @@ static int getZeroCopyBlockLen(int blockIdx)
}
}
static void printBuf(const uint8_t *buf, size_t len) __attribute__((unused));
static void printBuf(const uint8_t *buf, size_t len)
{
size_t i;
for (i = 0; i < len; i++) {
fprintf(stderr, "%02x", buf[i]);
}
fprintf(stderr, "\n");
}
static int doTestZeroCopyReads(hdfsFS fs, const char *fileName)
{
hdfsFile file = NULL;
@ -127,8 +115,9 @@ static int doTestZeroCopyReads(hdfsFS fs, const char *fileName)
EXPECT_NONNULL(block);
EXPECT_ZERO(memcmp(block, hadoopRzBufferGet(buffer), SMALL_READ_LEN));
hadoopRzBufferFree(file, buffer);
EXPECT_INT_EQ(TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
hdfsTell(fs, file));
EXPECT_INT64_EQ(
(int64_t)TEST_ZEROCOPY_FULL_BLOCK_SIZE + (int64_t)SMALL_READ_LEN,
hdfsTell(fs, file));
EXPECT_ZERO(expectFileStats(file,
TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
TEST_ZEROCOPY_FULL_BLOCK_SIZE + SMALL_READ_LEN,
@ -165,7 +154,7 @@ static int doTestZeroCopyReads(hdfsFS fs, const char *fileName)
free(block);
block = getZeroCopyBlockData(2);
EXPECT_NONNULL(block);
EXPECT_ZERO(memcmp(block, hadoopRzBufferGet(buffer) +
EXPECT_ZERO(memcmp(block, (uint8_t*)hadoopRzBufferGet(buffer) +
(TEST_ZEROCOPY_FULL_BLOCK_SIZE - SMALL_READ_LEN), SMALL_READ_LEN));
hadoopRzBufferFree(file, buffer);
@ -219,8 +208,10 @@ int main(void)
{
int port;
struct NativeMiniDfsConf conf = {
.doFormat = 1,
.configureShortCircuit = 1,
1, /* doFormat */
0, /* webhdfsEnabled */
0, /* namenodeHttpPort */
1, /* configureShortCircuit */
};
char testFileName[TEST_FILE_NAME_LENGTH];
hdfsFS fs;

View File

@ -19,11 +19,11 @@
#include "expect.h"
#include "hdfs.h"
#include "native_mini_dfs.h"
#include "os/thread.h"
#include <errno.h>
#include <inttypes.h>
#include <semaphore.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@ -35,8 +35,6 @@
#define TLH_DEFAULT_BLOCK_SIZE 134217728
static sem_t tlhSem;
static struct NativeMiniDfsCluster* tlhCluster;
struct tlhThreadInfo {
@ -44,18 +42,19 @@ struct tlhThreadInfo {
int threadIdx;
/** 0 = thread was successful; error code otherwise */
int success;
/** pthread identifier */
pthread_t thread;
/** thread identifier */
thread theThread;
};
static int hdfsSingleNameNodeConnect(struct NativeMiniDfsCluster *cl, hdfsFS *fs,
const char *username)
{
int ret, port;
int ret;
tPort port;
hdfsFS hdfs;
struct hdfsBuilder *bld;
port = nmdGetNameNodePort(cl);
port = (tPort)nmdGetNameNodePort(cl);
if (port < 0) {
fprintf(stderr, "hdfsSingleNameNodeConnect: nmdGetNameNodePort "
"returned error %d\n", port);
@ -164,7 +163,7 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs,
EXPECT_NONNULL(file);
/* TODO: implement writeFully and use it here */
expected = strlen(paths->prefix);
expected = (int)strlen(paths->prefix);
ret = hdfsWrite(fs, file, paths->prefix, expected);
if (ret < 0) {
ret = errno;
@ -186,9 +185,9 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs,
EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
errno = 0;
EXPECT_ZERO(readStats->totalBytesRead);
EXPECT_ZERO(readStats->totalLocalBytesRead);
EXPECT_ZERO(readStats->totalShortCircuitBytesRead);
EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalBytesRead);
EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalLocalBytesRead);
EXPECT_UINT64_EQ(UINT64_C(0), readStats->totalShortCircuitBytesRead);
hdfsFileFreeReadStatistics(readStats);
/* TODO: implement readFully and use it here */
ret = hdfsRead(fs, file, tmp, sizeof(tmp));
@ -204,7 +203,7 @@ static int doTestHdfsOperations(struct tlhThreadInfo *ti, hdfsFS fs,
}
EXPECT_ZERO(hdfsFileGetReadStatistics(file, &readStats));
errno = 0;
EXPECT_INT_EQ(expected, readStats->totalBytesRead);
EXPECT_UINT64_EQ((uint64_t)expected, readStats->totalBytesRead);
hdfsFileFreeReadStatistics(readStats);
EXPECT_ZERO(memcmp(paths->prefix, tmp, expected));
EXPECT_ZERO(hdfsCloseFile(fs, file));
@ -262,12 +261,11 @@ static int testHdfsOperationsImpl(struct tlhThreadInfo *ti)
return 0;
}
static void *testHdfsOperations(void *v)
static void testHdfsOperations(void *v)
{
struct tlhThreadInfo *ti = (struct tlhThreadInfo*)v;
int ret = testHdfsOperationsImpl(ti);
ti->success = ret;
return NULL;
}
static int checkFailures(struct tlhThreadInfo *ti, int tlhNumThreads)
@ -304,7 +302,7 @@ int main(void)
const char *tlhNumThreadsStr;
struct tlhThreadInfo ti[TLH_MAX_THREADS];
struct NativeMiniDfsConf conf = {
.doFormat = 1,
1, /* doFormat */
};
tlhNumThreadsStr = getenv("TLH_NUM_THREADS");
@ -323,21 +321,20 @@ int main(void)
ti[i].threadIdx = i;
}
EXPECT_ZERO(sem_init(&tlhSem, 0, tlhNumThreads));
tlhCluster = nmdCreate(&conf);
EXPECT_NONNULL(tlhCluster);
EXPECT_ZERO(nmdWaitClusterUp(tlhCluster));
for (i = 0; i < tlhNumThreads; i++) {
EXPECT_ZERO(pthread_create(&ti[i].thread, NULL,
testHdfsOperations, &ti[i]));
ti[i].theThread.start = testHdfsOperations;
ti[i].theThread.arg = &ti[i];
EXPECT_ZERO(threadCreate(&ti[i].theThread));
}
for (i = 0; i < tlhNumThreads; i++) {
EXPECT_ZERO(pthread_join(ti[i].thread, NULL));
EXPECT_ZERO(threadJoin(&ti[i].theThread));
}
EXPECT_ZERO(nmdShutdown(tlhCluster));
nmdFree(tlhCluster);
EXPECT_ZERO(sem_destroy(&tlhSem));
return checkFailures(ti, tlhNumThreads);
}

View File

@ -22,7 +22,7 @@
#include <errno.h>
static struct NativeMiniDfsConf conf = {
.doFormat = 1,
1, /* doFormat */
};
/**

View File

@ -281,7 +281,7 @@
{#DeadNodes}
<tr class="danger">
<td>{name} ({xferaddr})</td>
<td>{lastContact}</td>
<td>{#helper_lastcontact_tostring value="{lastContact}"/}</td>
<td>Dead{?decommissioned}, Decommissioned{/decommissioned}</td>
<td>-</td>
<td>-</td>

View File

@ -139,6 +139,14 @@
}
function load_datanode_info() {
var HELPERS = {
'helper_lastcontact_tostring' : function (chunk, ctx, bodies, params) {
var value = dust.helpers.tap(params.value, chunk, ctx);
return chunk.write('' + new Date(Date.now()-1000*Number(value)));
}
};
function workaround(r) {
function node_map_to_array(nodes) {
var res = [];
@ -160,7 +168,8 @@
'/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo',
guard_with_startup_progress(function (resp) {
var data = workaround(resp.beans[0]);
dust.render('datanode-info', data, function(err, out) {
var base = dust.makeBase(HELPERS);
dust.render('datanode-info', base.push(data), function(err, out) {
$('#tab-datanode').html(out);
$('#ui-tabs a[href="#tab-datanode"]').tab('show');
});

View File

@ -0,0 +1,408 @@
~~ Licensed under the Apache License, Version 2.0 (the "License");
~~ you may not use this file except in compliance with the License.
~~ You may obtain a copy of the License at
~~
~~ http://www.apache.org/licenses/LICENSE-2.0
~~
~~ Unless required by applicable law or agreed to in writing, software
~~ distributed under the License is distributed on an "AS IS" BASIS,
~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
~~ See the License for the specific language governing permissions and
~~ limitations under the License. See accompanying LICENSE file.
---
HDFS Commands Guide
---
---
${maven.build.timestamp}
HDFS Commands Guide
%{toc|section=1|fromDepth=2|toDepth=4}
* Overview
All HDFS commands are invoked by the <<<bin/hdfs>>> script. Running the
hdfs script without any arguments prints the description for all
commands.
Usage: <<<hdfs [--config confdir] [COMMAND] [GENERIC_OPTIONS]
[COMMAND_OPTIONS]>>>
Hadoop has an option parsing framework that parses generic options and runs the
requested command class.
*-----------------------+---------------+
|| COMMAND_OPTION || Description
*-----------------------+---------------+
| <<<--config confdir>>>| Overrides the default configuration directory.
| | Default is <<<${HADOOP_HOME}/conf>>>.
*-----------------------+---------------+
| GENERIC_OPTIONS | The common set of options supported by multiple
| | commands. Full list is
| | {{{../hadoop-common/CommandsManual.html#Generic_Options}here}}.
*-----------------------+---------------+
| COMMAND_OPTIONS | Various commands with their options are described in
| | the following sections. The commands have been
| | grouped into {{{User Commands}}} and
| | {{{Administration Commands}}}.
*-----------------------+---------------+
* User Commands
Commands useful for users of a Hadoop cluster.
** <<<dfs>>>
Usage: <<<hdfs dfs [GENERIC_OPTIONS] [COMMAND_OPTIONS]>>>
Runs a filesystem command on the file systems supported in Hadoop.
The various COMMAND_OPTIONS can be found at
{{{../hadoop-common/FileSystemShell.html}File System Shell Guide}}.
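For example, one might list a home directory and copy a local file into it.
The paths below are illustrative only and not taken from this guide:

+---+
# /user/alice and localfile.txt are hypothetical examples
$ hdfs dfs -ls /user/alice
$ hdfs dfs -put localfile.txt /user/alice/
+---+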
** <<<fetchdt>>>
Gets Delegation Token from a NameNode.
See {{{./HdfsUserGuide.html#fetchdt}fetchdt}} for more info.
Usage: <<<hdfs fetchdt [GENERIC_OPTIONS]
[--webservice <namenode_http_addr>] <path> >>>
*------------------------------+---------------------------------------------+
|| COMMAND_OPTION || Description
*------------------------------+---------------------------------------------+
| <path> | File name to store the token into.
*------------------------------+---------------------------------------------+
| --webservice <namenode_http_addr> | Use HTTP protocol instead of RPC.
*------------------------------+---------------------------------------------+
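For example, one might fetch a delegation token over the NameNode's HTTP
interface and store it in a local file. The hostname, port and path below are
illustrative only:

+---+
# nn.example.com:50070 and /tmp/dt.token are hypothetical values
$ hdfs fetchdt --webservice http://nn.example.com:50070 /tmp/dt.token
+---+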
** <<<fsck>>>
Runs an HDFS filesystem checking utility.
See {{{./HdfsUserGuide.html#fsck}fsck}} for more info.
Usage: <<<hdfs fsck [GENERIC_OPTIONS] <path>
[-move | -delete | -openforwrite]
[-files [-blocks [-locations | -racks]]]
[-showprogress]>>>
*------------------+---------------------------------------------+
|| COMMAND_OPTION || Description
*------------------+---------------------------------------------+
| <path> | Start checking from this path.
*------------------+---------------------------------------------+
| -move | Move corrupted files to /lost+found
*------------------+---------------------------------------------+
| -delete | Delete corrupted files.
*------------------+---------------------------------------------+
| -openforwrite | Print out files opened for write.
*------------------+---------------------------------------------+
| -files | Print out files being checked.
*------------------+---------------------------------------------+
| -blocks | Print out block report.
*------------------+---------------------------------------------+
| -locations | Print out locations for every block.
*------------------+---------------------------------------------+
| -racks | Print out network topology for data-node locations.
*------------------+---------------------------------------------+
| -showprogress | Print out dots for progress in output. Default is OFF
| | (no progress).
*------------------+---------------------------------------------+
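For example, one might check a subtree and print its files, blocks and block
locations. The path below is illustrative only:

+---+
# /user/alice is a hypothetical path
$ hdfs fsck /user/alice -files -blocks -locations
+---+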
* Administration Commands
Commands useful for administrators of a Hadoop cluster.
** <<<balancer>>>
Runs a cluster balancing utility. An administrator can simply press Ctrl-C
to stop the rebalancing process. See
{{{./HdfsUserGuide.html#Balancer}Balancer}} for more details.
Usage: <<<hdfs balancer [-threshold <threshold>] [-policy <policy>]>>>
*------------------------+----------------------------------------------------+
|| COMMAND_OPTION | Description
*------------------------+----------------------------------------------------+
| -threshold <threshold> | Percentage of disk capacity. This overrides the
| | default threshold.
*------------------------+----------------------------------------------------+
| -policy <policy> | <<<datanode>>> (default): Cluster is balanced if
| | each datanode is balanced. \
| | <<<blockpool>>>: Cluster is balanced if each block
| | pool in each datanode is balanced.
*------------------------+----------------------------------------------------+
Note that the <<<blockpool>>> policy is more strict than the <<<datanode>>>
policy.
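For example, an administrator might run the balancer with a tighter threshold
and the blockpool policy. The threshold value below is illustrative only:

+---+
# 5 (percent) is an example value; the default threshold is used if omitted
$ hdfs balancer -threshold 5 -policy blockpool
+---+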
** <<<datanode>>>
Runs an HDFS datanode.
Usage: <<<hdfs datanode [-regular | -rollback | -rollingupgrade rollback]>>>
*-----------------+-----------------------------------------------------------+
|| COMMAND_OPTION || Description
*-----------------+-----------------------------------------------------------+
| -regular | Normal datanode startup (default).
*-----------------+-----------------------------------------------------------+
| -rollback | Rolls back the datanode to the previous version. This should
| | be used after stopping the datanode and distributing the
| | old Hadoop version.
*-----------------+-----------------------------------------------------------+
| -rollingupgrade rollback | Rolls back a rolling upgrade operation.
*-----------------+-----------------------------------------------------------+
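For example, rolling a datanode back after redeploying the previous Hadoop
version might look like this:

+---+
# run on the datanode host after stopping the datanode
$ hdfs datanode -rollback
+---+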
** <<<dfsadmin>>>
Runs an HDFS dfsadmin client.
Usage: <<<hdfs dfsadmin [GENERIC_OPTIONS]
[-report [-live] [-dead] [-decommissioning]]
[-safemode enter | leave | get | wait]
[-saveNamespace]
[-rollEdits]
[-restoreFailedStorage true|false|check]
[-refreshNodes]
[-setQuota <quota> <dirname>...<dirname>]
[-clrQuota <dirname>...<dirname>]
[-setSpaceQuota <quota> <dirname>...<dirname>]
[-clrSpaceQuota <dirname>...<dirname>]
[-finalizeUpgrade]
[-rollingUpgrade [<query>|<prepare>|<finalize>]]
[-metasave filename]
[-refreshServiceAcl]
[-refreshUserToGroupsMappings]
[-refreshSuperUserGroupsConfiguration]
[-refreshCallQueue]
[-refresh <host:ipc_port> <key> [arg1..argn]]
[-printTopology]
[-refreshNamenodes datanodehost:port]
[-deleteBlockPool datanode-host:port blockpoolId [force]]
[-setBalancerBandwidth <bandwidth in bytes per second>]
[-allowSnapshot <snapshotDir>]
[-disallowSnapshot <snapshotDir>]
[-fetchImage <local directory>]
[-shutdownDatanode <datanode_host:ipc_port> [upgrade]]
[-getDatanodeInfo <datanode_host:ipc_port>]
[-help [cmd]]>>>
*-----------------+-----------------------------------------------------------+
|| COMMAND_OPTION || Description
*-----------------+-----------------------------------------------------------+
| -report [-live] [-dead] [-decommissioning] | Reports basic filesystem
| information and statistics. Optional flags may be used to
| filter the list of displayed DataNodes.
*-----------------+-----------------------------------------------------------+
| -safemode enter\|leave\|get\|wait | Safe mode maintenance command. Safe
| mode is a Namenode state in which it \
| 1. does not accept changes to the name space (read-only) \
| 2. does not replicate or delete blocks. \
| Safe mode is entered automatically at Namenode startup, and
| the Namenode leaves safe mode automatically when the configured
| minimum percentage of blocks satisfies the minimum replication
| condition. Safe mode can also be entered manually, but then
| it can only be turned off manually as well.
*-----------------+-----------------------------------------------------------+
| -saveNamespace | Save current namespace into storage directories and reset
| edits log. Requires safe mode.
*-----------------+-----------------------------------------------------------+
| -rollEdits | Rolls the edit log on the active NameNode.
*-----------------+-----------------------------------------------------------+
| -restoreFailedStorage true\|false\|check | Turns automatic attempts to restore
| failed storage replicas on or off.
| If a failed storage becomes available again, the system
| attempts to restore edits and/or the fsimage during a
| checkpoint. The 'check' option returns the current setting.
*-----------------+-----------------------------------------------------------+
| -refreshNodes | Re-read the hosts and exclude files to update the set of
| Datanodes that are allowed to connect to the Namenode and
| those that should be decommissioned or recommissioned.
*-----------------+-----------------------------------------------------------+
| -setQuota \<quota\> \<dirname\>...\<dirname\> | See
| {{{../hadoop-hdfs/HdfsQuotaAdminGuide.html#Administrative_Commands}HDFS Quotas Guide}}
| for the detail.
*-----------------+-----------------------------------------------------------+
| -clrQuota \<dirname\>...\<dirname\> | See
| {{{../hadoop-hdfs/HdfsQuotaAdminGuide.html#Administrative_Commands}HDFS Quotas Guide}}
| for the detail.
*-----------------+-----------------------------------------------------------+
| -setSpaceQuota \<quota\> \<dirname\>...\<dirname\> | See
| {{{../hadoop-hdfs/HdfsQuotaAdminGuide.html#Administrative_Commands}HDFS Quotas Guide}}
| for the detail.
*-----------------+-----------------------------------------------------------+
| -clrSpaceQuota \<dirname\>...\<dirname\> | See
| {{{../hadoop-hdfs/HdfsQuotaAdminGuide.html#Administrative_Commands}HDFS Quotas Guide}}
| for the detail.
*-----------------+-----------------------------------------------------------+
| -finalizeUpgrade| Finalize upgrade of HDFS. Datanodes delete their previous
| version working directories, followed by Namenode doing the
| same. This completes the upgrade process.
*-----------------+-----------------------------------------------------------+
| -rollingUpgrade [\<query\>\|\<prepare\>\|\<finalize\>] | See
| {{{../hadoop-hdfs/HdfsRollingUpgrade.html#dfsadmin_-rollingUpgrade}Rolling Upgrade document}}
| for the detail.
*-----------------+-----------------------------------------------------------+
| -metasave filename | Save Namenode's primary data structures to <filename> in
| the directory specified by hadoop.log.dir property.
| <filename> is overwritten if it exists.
| <filename> will contain one line for each of the following\
| 1. Datanodes heartbeating with the Namenode\
| 2. Blocks waiting to be replicated\
| 3. Blocks currently being replicated\
| 4. Blocks waiting to be deleted
*-----------------+-----------------------------------------------------------+
| -refreshServiceAcl | Reload the service-level authorization policy file.
*-----------------+-----------------------------------------------------------+
| -refreshUserToGroupsMappings | Refresh user-to-groups mappings.
*-----------------+-----------------------------------------------------------+
| -refreshSuperUserGroupsConfiguration | Refresh superuser proxy groups mappings.
*-----------------+-----------------------------------------------------------+
| -refreshCallQueue | Reload the call queue from config.
*-----------------+-----------------------------------------------------------+
| -refresh \<host:ipc_port\> \<key\> [arg1..argn] | Triggers a runtime-refresh
| of the resource specified by \<key\> on \<host:ipc_port\>.
| All remaining args are sent to the host.
*-----------------+-----------------------------------------------------------+
| -printTopology | Print a tree of the racks and their nodes as reported by
| the Namenode.
*-----------------+-----------------------------------------------------------+
| -refreshNamenodes datanodehost:port | For the given datanode, reloads the
| configuration files, stops serving the removed block-pools
| and starts serving new block-pools.
*-----------------+-----------------------------------------------------------+
| -deleteBlockPool datanode-host:port blockpoolId [force] | If force is passed,
| block pool directory for the given blockpool id on the
| given datanode is deleted along with its contents,
| otherwise the directory is deleted only if it is empty.
| The command will fail if datanode is still serving the
| block pool. Refer to refreshNamenodes to shut down a block
| pool service on a datanode.
*-----------------+-----------------------------------------------------------+
| -setBalancerBandwidth \<bandwidth in bytes per second\> | Changes the network
| bandwidth used by each datanode during HDFS block
| balancing. \<bandwidth\> is the maximum number of bytes per
| second that will be used by each datanode. This value
| overrides the dfs.balance.bandwidthPerSec parameter.\
| NOTE: The new value is not persistent on the DataNode.
*-----------------+-----------------------------------------------------------+
| -allowSnapshot \<snapshotDir\> | Allows snapshots of a directory to be
| created. If the operation completes successfully, the
| directory becomes snapshottable.
*-----------------+-----------------------------------------------------------+
| -disallowSnapshot \<snapshotDir\> | Disallows snapshots of a directory from
| being created. All snapshots of the directory must be deleted
| before disallowing snapshots.
*-----------------+-----------------------------------------------------------+
| -fetchImage \<local directory\> | Downloads the most recent fsimage from the
| NameNode and saves it in the specified local directory.
*-----------------+-----------------------------------------------------------+
| -shutdownDatanode \<datanode_host:ipc_port\> [upgrade] | Submit a shutdown
| request for the given datanode. See
| {{{./HdfsRollingUpgrade.html#dfsadmin_-shutdownDatanode}Rolling Upgrade document}}
| for the detail.
*-----------------+-----------------------------------------------------------+
| -getDatanodeInfo \<datanode_host:ipc_port\> | Get the information about the
| given datanode. See
| {{{./HdfsRollingUpgrade.html#dfsadmin_-getDatanodeInfo}Rolling Upgrade document}}
| for the detail.
*-----------------+-----------------------------------------------------------+
| -help [cmd] | Displays help for the given command or all commands if none
| is specified.
*-----------------+-----------------------------------------------------------+
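For example, an administrator might query the safe mode state and report only
the live datanodes:

+---+
$ hdfs dfsadmin -safemode get
$ hdfs dfsadmin -report -live
+---+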
** <<<namenode>>>
Runs the namenode. More information about upgrade, rollback and finalize is at
{{{./HdfsUserGuide.html#Upgrade_and_Rollback}Upgrade and Rollback}}.
Usage: <<<hdfs namenode [-backup] |
[-checkpoint] |
[-format [-clusterid cid ] [-force] [-nonInteractive] ] |
[-upgrade [-clusterid cid] [-renameReserved<k-v pairs>] ] |
[-upgradeOnly [-clusterid cid] [-renameReserved<k-v pairs>] ] |
[-rollback] |
[-rollingUpgrade <downgrade|rollback|started> ] |
[-finalize] |
[-importCheckpoint] |
[-initializeSharedEdits] |
[-bootstrapStandby] |
[-recover [-force] ] |
[-metadataVersion ]>>>
*--------------------+--------------------------------------------------------+
|| COMMAND_OPTION || Description
*--------------------+--------------------------------------------------------+
| -backup | Start backup node.
*--------------------+--------------------------------------------------------+
| -checkpoint | Start checkpoint node.
*--------------------+--------------------------------------------------------+
| -format [-clusterid cid] [-force] [-nonInteractive] | Formats the specified
| NameNode. It starts the NameNode, formats it and then
| shuts it down. The -force option formats even if the name
| directory already exists. The -nonInteractive option aborts
| if the name directory exists, unless -force is also specified.
*--------------------+--------------------------------------------------------+
| -upgrade [-clusterid cid] [-renameReserved\<k-v pairs\>] | The Namenode should
| be started with the upgrade option after distributing
| a new Hadoop version.
*--------------------+--------------------------------------------------------+
| -upgradeOnly [-clusterid cid] [-renameReserved\<k-v pairs\>] | Upgrade the
| specified NameNode and then shut it down.
*--------------------+--------------------------------------------------------+
| -rollback | Rolls back the NameNode to the previous version. This
| should be used after stopping the cluster and
| distributing the old Hadoop version.
*--------------------+--------------------------------------------------------+
| -rollingUpgrade \<downgrade\|rollback\|started\> | See
| {{{./HdfsRollingUpgrade.html#NameNode_Startup_Options}Rolling Upgrade document}}
| for the detail.
*--------------------+--------------------------------------------------------+
| -finalize | Finalize removes the previous state of the file
| system, making the most recent upgrade permanent. The
| rollback option will no longer be available. After
| finalization it shuts the NameNode down.
*--------------------+--------------------------------------------------------+
| -importCheckpoint | Loads an image from a checkpoint directory and saves
| it into the current one. The checkpoint directory is
| read from the property fs.checkpoint.dir.
*--------------------+--------------------------------------------------------+
| -initializeSharedEdits | Format a new shared edits dir and copy in enough
| edit log segments so that the standby NameNode can start
| up.
*--------------------+--------------------------------------------------------+
| -bootstrapStandby | Allows the standby NameNode's storage directories to be
| bootstrapped by copying the latest namespace snapshot
| from the active NameNode. This is used when first
| configuring an HA cluster.
*--------------------+--------------------------------------------------------+
| -recover [-force] | Recover lost metadata on a corrupt filesystem. See
| {{{./HdfsUserGuide.html#Recovery_Mode}HDFS User Guide}}
| for the detail.
*--------------------+--------------------------------------------------------+
| -metadataVersion | Verify that configured directories exist, then print the
| metadata versions of the software and the image.
*--------------------+--------------------------------------------------------+
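For example, formatting a new NameNode non-interactively or bootstrapping the
standby NameNode of an HA pair might look like this. Which host each command is
run on depends on the cluster layout:

+---+
# run on the respective NameNode hosts; the options shown are examples only
$ hdfs namenode -format -nonInteractive
$ hdfs namenode -bootstrapStandby
+---+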
** <<<secondarynamenode>>>
Runs the HDFS secondary namenode.
See {{{./HdfsUserGuide.html#Secondary_NameNode}Secondary Namenode}}
for more info.
Usage: <<<hdfs secondarynamenode [-checkpoint [force]] | [-format] |
[-geteditsize]>>>
*----------------------+------------------------------------------------------+
|| COMMAND_OPTION || Description
*----------------------+------------------------------------------------------+
| -checkpoint [force] | Checkpoints the SecondaryNameNode if EditLog size
| >= fs.checkpoint.size. If <<<force>>> is used,
| the checkpoint is performed irrespective of EditLog size.
*----------------------+------------------------------------------------------+
| -format | Format the local storage during startup.
*----------------------+------------------------------------------------------+
| -geteditsize | Prints the number of uncheckpointed transactions on
| the NameNode.
*----------------------+------------------------------------------------------+
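For example, one might force an immediate checkpoint regardless of the EditLog
size, or query the number of uncheckpointed transactions:

+---+
$ hdfs secondarynamenode -checkpoint force
$ hdfs secondarynamenode -geteditsize
+---+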

View File

@ -143,8 +143,8 @@ HDFS Users Guide
** DFSAdmin Command
The <<<bin/hadoop dfsadmin>>> command supports a few HDFS administration
related operations. The <<<bin/hadoop dfsadmin -help>>> command lists all the
The <<<bin/hdfs dfsadmin>>> command supports a few HDFS administration
related operations. The <<<bin/hdfs dfsadmin -help>>> command lists all the
commands currently supported. For example:
* <<<-report>>>: reports basic statistics of HDFS. Some of this
@ -172,7 +172,7 @@ HDFS Users Guide
of racks and datanodes attached to the racks as viewed by the
NameNode.
For command usage, see {{{../hadoop-common/CommandsManual.html#dfsadmin}dfsadmin}}.
For command usage, see {{{./HDFSCommands.html#dfsadmin}dfsadmin}}.
* Secondary NameNode
@ -207,7 +207,7 @@ HDFS Users Guide
primary NameNode if necessary.
For command usage,
see {{{../hadoop-common/CommandsManual.html#secondarynamenode}secondarynamenode}}.
see {{{./HDFSCommands.html#secondarynamenode}secondarynamenode}}.
* Checkpoint Node
@ -249,7 +249,7 @@ HDFS Users Guide
Multiple checkpoint nodes may be specified in the cluster configuration
file.
For command usage, see {{{../hadoop-common/CommandsManual.html#namenode}namenode}}.
For command usage, see {{{./HDFSCommands.html#namenode}namenode}}.
* Backup Node
@ -291,7 +291,7 @@ HDFS Users Guide
For a complete discussion of the motivation behind the creation of the
Backup node and Checkpoint node, see {{{https://issues.apache.org/jira/browse/HADOOP-4539}HADOOP-4539}}.
For command usage, see {{{../hadoop-common/CommandsManual.html#namenode}namenode}}.
For command usage, see {{{./HDFSCommands.html#namenode}namenode}}.
* Import Checkpoint
@ -314,7 +314,7 @@ HDFS Users Guide
verifies that the image in <<<dfs.namenode.checkpoint.dir>>> is consistent,
but does not modify it in any way.
For command usage, see {{{../hadoop-common/CommandsManual.html#namenode}namenode}}.
For command usage, see {{{./HDFSCommands.html#namenode}namenode}}.
* Balancer
@ -341,7 +341,7 @@ HDFS Users Guide
A brief administrator's guide for balancer is available at
{{{https://issues.apache.org/jira/browse/HADOOP-1652}HADOOP-1652}}.
For command usage, see {{{../hadoop-common/CommandsManual.html#balancer}balancer}}.
For command usage, see {{{./HDFSCommands.html#balancer}balancer}}.
* Rack Awareness
@ -368,7 +368,7 @@ HDFS Users Guide
allow any modifications to file system or blocks. Normally the NameNode
leaves Safemode automatically after the DataNodes have reported that
most file system blocks are available. If required, HDFS could be
placed in Safemode explicitly using <<<bin/hadoop dfsadmin -safemode>>>
placed in Safemode explicitly using <<<bin/hdfs dfsadmin -safemode>>>
command. NameNode front page shows whether Safemode is on or off. A
more detailed description and configuration is maintained as JavaDoc
for <<<setSafeMode()>>>.
@ -383,8 +383,8 @@ HDFS Users Guide
most of the recoverable failures. By default fsck ignores open files
but provides an option to select all files during reporting. The HDFS
fsck command is not a Hadoop shell command. It can be run as
<<<bin/hadoop fsck>>>. For command usage, see
{{{../hadoop-common/CommandsManual.html#fsck}fsck}}. fsck can be run on
<<<bin/hdfs fsck>>>. For command usage, see
{{{./HDFSCommands.html#fsck}fsck}}. fsck can be run on
the whole file system or on a subset of files.
* fetchdt
@ -395,11 +395,11 @@ HDFS Users Guide
Utility uses either RPC or HTTPS (over Kerberos) to get the token, and
thus requires Kerberos tickets to be present before the run (run kinit
to get the tickets). The HDFS fetchdt command is not a Hadoop shell
command. It can be run as <<<bin/hadoop fetchdt DTfile>>>. After you got
command. It can be run as <<<bin/hdfs fetchdt DTfile>>>. After you got
the token you can run an HDFS command without having Kerberos tickets,
by pointing <<<HADOOP_TOKEN_FILE_LOCATION>>> environmental variable to the
delegation token file. For command usage, see
{{{../hadoop-common/CommandsManual.html#fetchdt}fetchdt}} command.
{{{./HDFSCommands.html#fetchdt}fetchdt}} command.
* Recovery Mode
@ -533,5 +533,4 @@ HDFS Users Guide
* Explore {{{./hdfs-default.xml}hdfs-default.xml}}. It includes
brief description of most of the configuration variables available.
* {{{../hadoop-common/CommandsManual.html}Hadoop Commands Guide}}:
Hadoop commands usage.
* {{{./HDFSCommands.html}HDFS Commands Guide}}: HDFS commands usage.

View File

@ -89,7 +89,7 @@ public class TestBalancer {
private static final Random r = new Random();
static {
Balancer.setBlockMoveWaitTime(1000L) ;
Dispatcher.setBlockMoveWaitTime(1000L) ;
}
static void initConf(Configuration conf) {
@ -305,12 +305,12 @@ static void waitForBalancer(long totalUsedSpace, long totalCapacity,
for (DatanodeInfo datanode : datanodeReport) {
double nodeUtilization = ((double)datanode.getDfsUsed())
/ datanode.getCapacity();
if (Balancer.Util.shouldBeExcluded(p.nodesToBeExcluded, datanode)) {
if (Dispatcher.Util.isExcluded(p.nodesToBeExcluded, datanode)) {
assertTrue(nodeUtilization == 0);
actualExcludedNodeCount++;
continue;
}
if (!Balancer.Util.shouldBeIncluded(p.nodesToBeIncluded, datanode)) {
if (!Dispatcher.Util.isIncluded(p.nodesToBeIncluded, datanode)) {
assertTrue(nodeUtilization == 0);
actualExcludedNodeCount++;
continue;

View File

@ -44,7 +44,7 @@ public class TestBalancerWithHANameNodes {
ClientProtocol client;
static {
Balancer.setBlockMoveWaitTime(1000L);
Dispatcher.setBlockMoveWaitTime(1000L);
}
/**

View File

@ -73,7 +73,7 @@ public class TestBalancerWithMultipleNameNodes {
private static final Random RANDOM = new Random();
static {
Balancer.setBlockMoveWaitTime(1000L) ;
Dispatcher.setBlockMoveWaitTime(1000L) ;
}
/** Common objects used in various methods. */

Some files were not shown because too many files have changed in this diff.