Merge from trunk to branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1612403 13f79535-47bb-0310-9956-ffa450edef68
Andrew Wang 2014-07-21 21:44:50 +00:00
commit cb20bb6117
178 changed files with 7894 additions and 1246 deletions


@ -139,6 +139,17 @@
<attach>true</attach>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-jar-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>test-jar</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>


@ -36,10 +36,6 @@ Trunk (Unreleased)
HADOOP-7595. Upgrade dependency to Avro 1.5.3. (Alejandro Abdelnur via atm)
HADOOP-7664. Remove warnings when overriding final parameter configuration
if the override value is the same as the final parameter value.
(Ravi Prakash via suresh)
HADOOP-8078. Add capability to turn on security in unit tests. (Jaimin
Jetly via jitendra)
@ -162,9 +158,6 @@ Trunk (Unreleased)
HADOOP-10485. Remove dead classes in hadoop-streaming. (wheat9)
HADOOP-10607. Create API to separate credential/password storage from
applications. (Larry McCay via omalley)
HADOOP-10696. Add optional attributes to KeyProvider Options and Metadata.
(tucu)
@ -182,6 +175,20 @@ Trunk (Unreleased)
HADOOP-10736. Add key attributes to the key shell. (Mike Yoder via wang)
HADOOP-10824. Refactor KMSACLs to avoid locking. (Benoy Antony via umamahesh)
HADOOP-10841. EncryptedKeyVersion should have a key name property.
(asuresh via tucu)
HADOOP-10842. CryptoExtension generateEncryptedKey method should
receive the key name. (asuresh via tucu)
HADOOP-10750. KMSKeyProviderCache should be in hadoop-common.
(asuresh via tucu)
HADOOP-10720. KMS: Implement generateEncryptedKey and decryptEncryptedKey
in the REST API. (asuresh via tucu)
BUG FIXES
HADOOP-9451. Fault single-layer config if node group topology is enabled.
@ -383,6 +390,12 @@ Trunk (Unreleased)
HADOOP-10834. Typo in CredentialShell usage. (Benoy Antony via umamahesh)
HADOOP-10816. KeyShell returns -1 on error to the shell, should be 1.
(Mike Yoder via wang)
HADOOP-10840. Fix OutOfMemoryError caused by metrics system in Azure File
System. (Shanyu Zhao via cnauroth)
OPTIMIZATIONS
HADOOP-7761. Improve the performance of raw comparisons. (todd)
@ -401,6 +414,33 @@ Release 2.6.0 - UNRELEASED
HADOOP-10815. Implement Windows equivalent of mlock. (cnauroth)
HADOOP-7664. Remove warnings when overriding final parameter configuration
if the override value is the same as the final parameter value.
(Ravi Prakash via suresh)
HADOOP-10673. Update rpc metrics when the call throws an exception. (Ming Ma
via jing9)
HADOOP-10845. Add common tests for ACLs in combination with viewfs.
(Stephen Chu via cnauroth)
HADOOP-10839. Add unregisterSource() to MetricsSystem API.
(Shanyu Zhao via cnauroth)
HADOOP-10607. Create an API to separate credentials/password storage
from applications (Larry McCay via omalley)
HADOOP-10732. Fix locking in credential update. (Ted Yu via omalley)
HADOOP-10733. Fix potential null dereference in CredShell. (Ted Yu via
omalley)
HADOOP-10610. Upgrade S3n s3.fs.buffer.dir to support multi directories.
(Ted Malaska via atm)
HADOOP-10817. ProxyUsers configuration should support configurable
prefixes. (tucu)
OPTIMIZATIONS
BUG FIXES
@ -416,6 +456,15 @@ Release 2.6.0 - UNRELEASED
HADOOP-10810. Clean up native code compilation warnings. (cnauroth)
HADOOP-9921. daemon scripts should remove pid file on stop call after stop
or process is found not running ( vinayakumarb )
HADOOP-10591. Compression codecs must used pooled direct buffers or
deallocate direct buffers when stream is closed (cmccabe)
HADOOP-10857. Native Libraries Guide doesn't mention a dependency on
the openssl-development package. (ozawa via cmccabe)
Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES


@ -198,6 +198,7 @@ case $startStop in
else
echo no $command to stop
fi
rm -f $pid
else
echo no $command to stop
fi


@ -0,0 +1,174 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key;
import java.io.IOException;
import java.security.NoSuchAlgorithmException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
/**
* A <code>KeyProviderExtension</code> implementation providing a short-lived
* cache for <code>KeyVersions</code> and <code>Metadata</code> to avoid bursts
* of requests hitting the underlying <code>KeyProvider</code>.
*/
public class CachingKeyProvider extends
KeyProviderExtension<CachingKeyProvider.CacheExtension> {
static class CacheExtension implements KeyProviderExtension.Extension {
private final KeyProvider provider;
private LoadingCache<String, KeyVersion> keyVersionCache;
private LoadingCache<String, KeyVersion> currentKeyCache;
private LoadingCache<String, Metadata> keyMetadataCache;
CacheExtension(KeyProvider prov, long keyTimeoutMillis,
long currKeyTimeoutMillis) {
this.provider = prov;
keyVersionCache =
CacheBuilder.newBuilder().expireAfterAccess(keyTimeoutMillis,
TimeUnit.MILLISECONDS)
.build(new CacheLoader<String, KeyVersion>() {
@Override
public KeyVersion load(String key) throws Exception {
KeyVersion kv = provider.getKeyVersion(key);
if (kv == null) {
throw new KeyNotFoundException();
}
return kv;
}
});
keyMetadataCache =
CacheBuilder.newBuilder().expireAfterAccess(keyTimeoutMillis,
TimeUnit.MILLISECONDS)
.build(new CacheLoader<String, Metadata>() {
@Override
public Metadata load(String key) throws Exception {
Metadata meta = provider.getMetadata(key);
if (meta == null) {
throw new KeyNotFoundException();
}
return meta;
}
});
currentKeyCache =
CacheBuilder.newBuilder().expireAfterWrite(currKeyTimeoutMillis,
TimeUnit.MILLISECONDS)
.build(new CacheLoader<String, KeyVersion>() {
@Override
public KeyVersion load(String key) throws Exception {
KeyVersion kv = provider.getCurrentKey(key);
if (kv == null) {
throw new KeyNotFoundException();
}
return kv;
}
});
}
}
@SuppressWarnings("serial")
private static class KeyNotFoundException extends Exception { }
public CachingKeyProvider(KeyProvider keyProvider, long keyTimeoutMillis,
long currKeyTimeoutMillis) {
super(keyProvider, new CacheExtension(keyProvider, keyTimeoutMillis,
currKeyTimeoutMillis));
}
@Override
public KeyVersion getCurrentKey(String name) throws IOException {
try {
return getExtension().currentKeyCache.get(name);
} catch (ExecutionException ex) {
Throwable cause = ex.getCause();
if (cause instanceof KeyNotFoundException) {
return null;
} else if (cause instanceof IOException) {
throw (IOException) cause;
} else {
throw new IOException(cause);
}
}
}
@Override
public KeyVersion getKeyVersion(String versionName)
throws IOException {
try {
return getExtension().keyVersionCache.get(versionName);
} catch (ExecutionException ex) {
Throwable cause = ex.getCause();
if (cause instanceof KeyNotFoundException) {
return null;
} else if (cause instanceof IOException) {
throw (IOException) cause;
} else {
throw new IOException(cause);
}
}
}
@Override
public void deleteKey(String name) throws IOException {
getKeyProvider().deleteKey(name);
getExtension().currentKeyCache.invalidate(name);
getExtension().keyMetadataCache.invalidate(name);
// invalidating all key versions as we don't know
// which ones belonged to the deleted key
getExtension().keyVersionCache.invalidateAll();
}
@Override
public KeyVersion rollNewVersion(String name, byte[] material)
throws IOException {
KeyVersion key = getKeyProvider().rollNewVersion(name, material);
getExtension().currentKeyCache.invalidate(name);
getExtension().keyMetadataCache.invalidate(name);
return key;
}
@Override
public KeyVersion rollNewVersion(String name)
throws NoSuchAlgorithmException, IOException {
KeyVersion key = getKeyProvider().rollNewVersion(name);
getExtension().currentKeyCache.invalidate(name);
getExtension().keyMetadataCache.invalidate(name);
return key;
}
@Override
public Metadata getMetadata(String name) throws IOException {
try {
return getExtension().keyMetadataCache.get(name);
} catch (ExecutionException ex) {
Throwable cause = ex.getCause();
if (cause instanceof KeyNotFoundException) {
return null;
} else if (cause instanceof IOException) {
throw (IOException) cause;
} else {
throw new IOException(cause);
}
}
}
}
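
A minimal usage sketch for the class above; the base-provider lookup and the
timeout values are illustrative assumptions, not part of this change (imports
as in the file):

    // Wrap an existing KeyProvider with the short-lived cache.
    Configuration conf = new Configuration();
    KeyProvider base = KeyProviderFactory.getProviders(conf).get(0);
    KeyProvider cached = new CachingKeyProvider(base,
        10 * 60 * 1000, // keyTimeoutMillis: KeyVersion/Metadata expiry
        30 * 1000);     // currKeyTimeoutMillis: current-key expiry
    // Repeated lookups within the timeout are served from the cache.
    KeyProvider.KeyVersion kv = cached.getCurrentKey("myKey");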


@ -27,34 +27,42 @@ import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* A KeyProvider with Cryptographic Extensions specifically for generating
* Encrypted Keys as well as decrypting them
*
*/
@InterfaceAudience.Private
public class KeyProviderCryptoExtension extends
KeyProviderExtension<KeyProviderCryptoExtension.CryptoExtension> {
protected static final String EEK = "EEK";
protected static final String EK = "EK";
public static final String EEK = "EEK";
public static final String EK = "EK";
/**
* A holder class whose instances contain the key name, the keyVersionName
* and IV used to generate the encrypted key, and the encrypted KeyVersion
*/
public static class EncryptedKeyVersion {
private String keyName;
private String keyVersionName;
private byte[] iv;
private KeyVersion encryptedKey;
protected EncryptedKeyVersion(String keyVersionName, byte[] iv,
KeyVersion encryptedKey) {
protected EncryptedKeyVersion(String keyName, String keyVersionName,
byte[] iv, KeyVersion encryptedKey) {
this.keyName = keyName;
this.keyVersionName = keyVersionName;
this.iv = iv;
this.encryptedKey = encryptedKey;
}
public String getKeyName() {
return keyName;
}
public String getKeyVersionName() {
return keyVersionName;
}
@ -75,17 +83,24 @@ public class KeyProviderCryptoExtension extends
*/
public interface CryptoExtension extends KeyProviderExtension.Extension {
/**
* Calls to this method allow the underlying KeyProvider to warm up any
* implementation-specific caches used to store the Encrypted Keys.
* @param keyNames Array of Key Names
*/
public void warmUpEncryptedKeys(String... keyNames)
throws IOException;
/**
* Generates new key material and encrypts it using the given key version name
* and initialization vector. The generated key material is of the same
* length as the <code>KeyVersion</code> material and is encrypted using the
* same cipher.
* length as the <code>KeyVersion</code> material of the latest key version
* of the key and is encrypted using the same cipher.
* <p/>
* NOTE: The generated key is not stored by the <code>KeyProvider</code>
*
* @param encryptionKeyVersion
* a KeyVersion object containing the keyVersion name and material
* to encrypt.
* @param encryptionKeyName
* The latest KeyVersion of this key's material will be encrypted.
* @return EncryptedKeyVersion with the generated key material, the version
* name is 'EEK' (for Encrypted Encryption Key)
* @throws IOException
@ -95,7 +110,7 @@ public class KeyProviderCryptoExtension extends
* cryptographic issue.
*/
public EncryptedKeyVersion generateEncryptedKey(
KeyVersion encryptionKeyVersion) throws IOException,
String encryptionKeyName) throws IOException,
GeneralSecurityException;
/**
@ -140,12 +155,11 @@ public class KeyProviderCryptoExtension extends
}
@Override
public EncryptedKeyVersion generateEncryptedKey(KeyVersion keyVersion)
public EncryptedKeyVersion generateEncryptedKey(String encryptionKeyName)
throws IOException, GeneralSecurityException {
KeyVersion keyVer =
keyProvider.getKeyVersion(keyVersion.getVersionName());
Preconditions.checkNotNull(keyVer, "KeyVersion name '%s' does not exist",
keyVersion.getVersionName());
KeyVersion keyVer = keyProvider.getCurrentKey(encryptionKeyName);
Preconditions.checkNotNull(keyVer, "No KeyVersion exists for key '%s' ",
encryptionKeyName);
byte[] newKey = new byte[keyVer.getMaterial().length];
SecureRandom.getInstance("SHA1PRNG").nextBytes(newKey);
Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
@ -153,7 +167,8 @@ public class KeyProviderCryptoExtension extends
cipher.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(keyVer.getMaterial(),
"AES"), new IvParameterSpec(flipIV(iv)));
byte[] ek = cipher.doFinal(newKey);
return new EncryptedKeyVersion(keyVersion.getVersionName(), iv,
return new EncryptedKeyVersion(encryptionKeyName,
keyVer.getVersionName(), iv,
new KeyVersion(keyVer.getName(), EEK, ek));
}
@ -175,13 +190,35 @@ public class KeyProviderCryptoExtension extends
return new KeyVersion(keyVer.getName(), EK, ek);
}
@Override
public void warmUpEncryptedKeys(String... keyNames)
throws IOException {
// NO-OP since the default version does not cache any keys
}
}
private KeyProviderCryptoExtension(KeyProvider keyProvider,
/**
* This constructor is to be used by subclasses that provide
* delegating/proxying functionality to the {@link KeyProviderCryptoExtension}.
* @param keyProvider
* @param extension
*/
protected KeyProviderCryptoExtension(KeyProvider keyProvider,
CryptoExtension extension) {
super(keyProvider, extension);
}
/**
* Notifies the underlying CryptoExtension implementation to warm up any
* implementation-specific caches for the specified keys.
* @param keyNames Array of key names
*/
public void warmUpEncryptedKeys(String... keyNames)
throws IOException {
getExtension().warmUpEncryptedKeys(keyNames);
}
/**
* Generates new key material and encrypts it using the given key version name
* and initialization vector. The generated key material is of the same
@ -190,18 +227,18 @@ public class KeyProviderCryptoExtension extends
* <p/>
* NOTE: The generated key is not stored by the <code>KeyProvider</code>
*
* @param encryptionKey a KeyVersion object containing the keyVersion name and
* material to encrypt.
* @param encryptionKeyName The latest KeyVersion of this key's material will
* be encrypted.
* @return EncryptedKeyVersion with the generated key material, the version
* name is 'EEK' (for Encrypted Encryption Key)
* @throws IOException thrown if the key material could not be generated
* @throws GeneralSecurityException thrown if the key material could not be
* encrypted because of a cryptographic issue.
*/
public EncryptedKeyVersion generateEncryptedKey(KeyVersion encryptionKey)
public EncryptedKeyVersion generateEncryptedKey(String encryptionKeyName)
throws IOException,
GeneralSecurityException {
return getExtension().generateEncryptedKey(encryptionKey);
return getExtension().generateEncryptedKey(encryptionKeyName);
}
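
For orientation, a hedged sketch of the new call pattern: the extension now
resolves the latest key version itself from a plain key name. The factory
method and the key name below are assumptions based on the surrounding class,
not part of this hunk:

    KeyProviderCryptoExtension kpce =
        KeyProviderCryptoExtension.createKeyProviderCryptoExtension(provider);
    // Generate an encrypted encryption key (EEK) against the latest
    // version of "myKey"; the plaintext key is never stored.
    EncryptedKeyVersion eek = kpce.generateEncryptedKey("myKey");
    // Decrypt the EEK back to usable key material (version name "EK").
    KeyVersion ek = kpce.decryptEncryptedKey(eek);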
/**


@ -57,6 +57,16 @@ public class KeyShell extends Configured implements Tool {
private boolean userSuppliedProvider = false;
/**
* Primary entry point for the KeyShell; called via main().
*
* @param args Command line arguments.
* @return 0 on success and 1 on failure. This value is passed back to
* the unix shell, so we must follow shell return code conventions:
* the return code is an unsigned character, where 0 means success and
* small positive integers mean failure.
* @throws Exception
*/
@Override
public int run(String[] args) throws Exception {
int exitCode = 0;
@ -68,11 +78,11 @@ public class KeyShell extends Configured implements Tool {
if (command.validate()) {
command.execute();
} else {
exitCode = -1;
exitCode = 1;
}
} catch (Exception e) {
e.printStackTrace(err);
return -1;
return 1;
}
return exitCode;
}
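
As context for the convention above, a minimal sketch of driving KeyShell
through ToolRunner (the provider path is illustrative):

    // rc is the value run() returns: 0 on success, 1 on failure.
    int rc = ToolRunner.run(new Configuration(), new KeyShell(),
        new String[] {"list", "--provider", "jceks://file/tmp/keys.jceks"});
    System.exit(rc);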
@ -86,8 +96,8 @@ public class KeyShell extends Configured implements Tool {
* % hadoop key list [-provider providerPath]
* % hadoop key delete keyName [--provider providerPath] [-i]
* </pre>
* @param args
* @return
* @param args Command line arguments.
* @return 0 on success, 1 on failure.
* @throws IOException
*/
private int init(String[] args) throws IOException {
@ -105,7 +115,7 @@ public class KeyShell extends Configured implements Tool {
command = new CreateCommand(keyName, options);
if ("--help".equals(keyName)) {
printKeyShellUsage();
return -1;
return 1;
}
} else if (args[i].equals("delete")) {
String keyName = "--help";
@ -116,7 +126,7 @@ public class KeyShell extends Configured implements Tool {
command = new DeleteCommand(keyName);
if ("--help".equals(keyName)) {
printKeyShellUsage();
return -1;
return 1;
}
} else if (args[i].equals("roll")) {
String keyName = "--help";
@ -127,7 +137,7 @@ public class KeyShell extends Configured implements Tool {
command = new RollCommand(keyName);
if ("--help".equals(keyName)) {
printKeyShellUsage();
return -1;
return 1;
}
} else if ("list".equals(args[i])) {
command = new ListCommand();
@ -145,13 +155,13 @@ public class KeyShell extends Configured implements Tool {
out.println("\nAttributes must be in attribute=value form, " +
"or quoted\nlike \"attribute = value\"\n");
printKeyShellUsage();
return -1;
return 1;
}
if (attributes.containsKey(attr)) {
out.println("\nEach attribute must correspond to only one value:\n" +
"attribute \"" + attr + "\" was repeated\n");
printKeyShellUsage();
return -1;
return 1;
}
attributes.put(attr, val);
} else if ("--provider".equals(args[i]) && moreTokens) {
@ -163,17 +173,17 @@ public class KeyShell extends Configured implements Tool {
interactive = true;
} else if ("--help".equals(args[i])) {
printKeyShellUsage();
return -1;
return 1;
} else {
printKeyShellUsage();
ToolRunner.printGenericCommandUsage(System.err);
return -1;
return 1;
}
}
if (command == null) {
printKeyShellUsage();
return -1;
return 1;
}
if (!attributes.isEmpty()) {
@ -491,10 +501,11 @@ public class KeyShell extends Configured implements Tool {
}
/**
* Main program.
* main() entry point for the KeyShell. While strictly speaking the
* return is void, it will System.exit() with a return code: 0 is for
* success and 1 for failure.
*
* @param args
* Command line arguments
* @param args Command line arguments.
* @throws Exception
*/
public static void main(String[] args) throws Exception {


@ -21,7 +21,9 @@ import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.ProviderUtils;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
@ -33,6 +35,7 @@ import org.apache.http.client.utils.URIBuilder;
import org.codehaus.jackson.map.ObjectMapper;
import javax.net.ssl.HttpsURLConnection;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
@ -40,6 +43,7 @@ import java.io.OutputStreamWriter;
import java.io.Writer;
import java.lang.reflect.Constructor;
import java.net.HttpURLConnection;
import java.net.SocketTimeoutException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
@ -50,14 +54,22 @@ import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ExecutionException;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
import com.google.common.base.Preconditions;
/**
* KMS client <code>KeyProvider</code> implementation.
*/
@InterfaceAudience.Private
public class KMSClientProvider extends KeyProvider {
public class KMSClientProvider extends KeyProvider implements CryptoExtension {
public static final String SCHEME_NAME = "kms";
@ -78,6 +90,73 @@ public class KMSClientProvider extends KeyProvider {
public static final String TIMEOUT_ATTR = CONFIG_PREFIX + "timeout";
public static final int DEFAULT_TIMEOUT = 60;
private final ValueQueue<EncryptedKeyVersion> encKeyVersionQueue;
private class EncryptedQueueRefiller implements
ValueQueue.QueueRefiller<EncryptedKeyVersion> {
@Override
public void fillQueueForKey(String keyName,
Queue<EncryptedKeyVersion> keyQueue, int numEKVs) throws IOException {
checkNotNull(keyName, "keyName");
Map<String, String> params = new HashMap<String, String>();
params.put(KMSRESTConstants.EEK_OP, KMSRESTConstants.EEK_GENERATE);
params.put(KMSRESTConstants.EEK_NUM_KEYS, "" + numEKVs);
URL url = createURL(KMSRESTConstants.KEY_RESOURCE, keyName,
KMSRESTConstants.EEK_SUB_RESOURCE, params);
HttpURLConnection conn = createConnection(url, HTTP_GET);
conn.setRequestProperty(CONTENT_TYPE, APPLICATION_JSON_MIME);
List response = call(conn, null,
HttpURLConnection.HTTP_OK, List.class);
List<EncryptedKeyVersion> ekvs =
parseJSONEncKeyVersion(keyName, response);
keyQueue.addAll(ekvs);
}
}
public static class KMSEncryptedKeyVersion extends EncryptedKeyVersion {
public KMSEncryptedKeyVersion(String keyName, String keyVersionName,
byte[] iv, String encryptedVersionName, byte[] keyMaterial) {
super(keyName, keyVersionName, iv, new KMSKeyVersion(null,
encryptedVersionName, keyMaterial));
}
}
@SuppressWarnings("rawtypes")
private static List<EncryptedKeyVersion>
parseJSONEncKeyVersion(String keyName, List valueList) {
List<EncryptedKeyVersion> ekvs = new LinkedList<EncryptedKeyVersion>();
if (!valueList.isEmpty()) {
for (Object values : valueList) {
Map valueMap = (Map) values;
String versionName = checkNotNull(
(String) valueMap.get(KMSRESTConstants.VERSION_NAME_FIELD),
KMSRESTConstants.VERSION_NAME_FIELD);
byte[] iv = Base64.decodeBase64(checkNotNull(
(String) valueMap.get(KMSRESTConstants.IV_FIELD),
KMSRESTConstants.IV_FIELD));
Map encValueMap = checkNotNull((Map)
valueMap.get(KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD),
KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD);
String encVersionName = checkNotNull((String)
encValueMap.get(KMSRESTConstants.VERSION_NAME_FIELD),
KMSRESTConstants.VERSION_NAME_FIELD);
byte[] encKeyMaterial = Base64.decodeBase64(checkNotNull((String)
encValueMap.get(KMSRESTConstants.MATERIAL_FIELD),
KMSRESTConstants.MATERIAL_FIELD));
ekvs.add(new KMSEncryptedKeyVersion(keyName, versionName, iv,
encVersionName, encKeyMaterial));
}
}
return ekvs;
}
private static KeyVersion parseJSONKeyVersion(Map valueMap) {
KeyVersion keyVersion = null;
if (!valueMap.isEmpty()) {
@ -208,6 +287,28 @@ public class KMSClientProvider extends KeyProvider {
}
int timeout = conf.getInt(TIMEOUT_ATTR, DEFAULT_TIMEOUT);
configurator = new TimeoutConnConfigurator(timeout, sslFactory);
encKeyVersionQueue =
new ValueQueue<KeyProviderCryptoExtension.EncryptedKeyVersion>(
conf.getInt(
CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_SIZE,
CommonConfigurationKeysPublic.
KMS_CLIENT_ENC_KEY_CACHE_SIZE_DEFAULT),
conf.getFloat(
CommonConfigurationKeysPublic.
KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK,
CommonConfigurationKeysPublic.
KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK_DEFAULT),
conf.getInt(
CommonConfigurationKeysPublic.
KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS,
CommonConfigurationKeysPublic.
KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT),
conf.getInt(
CommonConfigurationKeysPublic.
KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS,
CommonConfigurationKeysPublic.
KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
new EncryptedQueueRefiller());
}
private String createServiceURL(URL url) throws IOException {
@ -527,6 +628,51 @@ public class KMSClientProvider extends KeyProvider {
}
}
@Override
public EncryptedKeyVersion generateEncryptedKey(
String encryptionKeyName) throws IOException, GeneralSecurityException {
try {
return encKeyVersionQueue.getNext(encryptionKeyName);
} catch (ExecutionException e) {
if (e.getCause() instanceof SocketTimeoutException) {
throw (SocketTimeoutException)e.getCause();
}
throw new IOException(e);
}
}
@SuppressWarnings("rawtypes")
@Override
public KeyVersion decryptEncryptedKey(
EncryptedKeyVersion encryptedKeyVersion) throws IOException,
GeneralSecurityException {
checkNotNull(encryptedKeyVersion.getKeyVersionName(), "versionName");
checkNotNull(encryptedKeyVersion.getIv(), "iv");
checkNotNull(encryptedKeyVersion.getEncryptedKey(), "encryptedKey");
Preconditions.checkArgument(encryptedKeyVersion.getEncryptedKey()
.getVersionName().equals(KeyProviderCryptoExtension.EEK),
"encryptedKey version name must be '%s', is '%s'",
KeyProviderCryptoExtension.EEK, encryptedKeyVersion.getEncryptedKey()
.getVersionName());
Map<String, String> params = new HashMap<String, String>();
params.put(KMSRESTConstants.EEK_OP, KMSRESTConstants.EEK_DECRYPT);
Map<String, Object> jsonPayload = new HashMap<String, Object>();
jsonPayload.put(KMSRESTConstants.NAME_FIELD,
encryptedKeyVersion.getKeyName());
jsonPayload.put(KMSRESTConstants.IV_FIELD, Base64.encodeBase64String(
encryptedKeyVersion.getIv()));
jsonPayload.put(KMSRESTConstants.MATERIAL_FIELD, Base64.encodeBase64String(
encryptedKeyVersion.getEncryptedKey().getMaterial()));
URL url = createURL(KMSRESTConstants.KEY_VERSION_RESOURCE,
encryptedKeyVersion.getKeyVersionName(),
KMSRESTConstants.EEK_SUB_RESOURCE, params);
HttpURLConnection conn = createConnection(url, HTTP_POST);
conn.setRequestProperty(CONTENT_TYPE, APPLICATION_JSON_MIME);
Map response =
call(conn, jsonPayload, HttpURLConnection.HTTP_OK, Map.class);
return parseJSONKeyVersion(response);
}
@Override
public List<KeyVersion> getKeyVersions(String name) throws IOException {
checkNotEmpty(name, "name");
@ -570,4 +716,14 @@ public class KMSClientProvider extends KeyProvider {
// the server should not keep in memory state on behalf of clients either.
}
@Override
public void warmUpEncryptedKeys(String... keyNames)
throws IOException {
try {
encKeyVersionQueue.initializeQueuesForKeys(keyNames);
} catch (ExecutionException e) {
throw new IOException(e);
}
}
}
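
A hedged sketch of the client-side flow this queue enables; the KMS URI and
key names are illustrative:

    // Pre-fill the per-key EEK queues so the first generateEncryptedKey()
    // calls need no synchronous KMS round trip.
    KeyProvider kp = KeyProviderFactory.get(
        new URI("kms://http@localhost:16000/kms"), conf);
    if (kp instanceof CryptoExtension) {
      CryptoExtension ext = (CryptoExtension) kp;
      ext.warmUpEncryptedKeys("key1", "key2");
      EncryptedKeyVersion eek = ext.generateEncryptedKey("key1");
    }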


@ -34,10 +34,16 @@ public class KMSRESTConstants {
public static final String KEY_VERSION_RESOURCE = "keyversion";
public static final String METADATA_SUB_RESOURCE = "_metadata";
public static final String VERSIONS_SUB_RESOURCE = "_versions";
public static final String EEK_SUB_RESOURCE = "_eek";
public static final String CURRENT_VERSION_SUB_RESOURCE = "_currentversion";
public static final String KEY_OP = "key";
public static final String EEK_OP = "eek_op";
public static final String EEK_GENERATE = "generate";
public static final String EEK_DECRYPT = "decrypt";
public static final String EEK_NUM_KEYS = "num_keys";
public static final String IV_FIELD = "iv";
public static final String NAME_FIELD = "name";
public static final String CIPHER_FIELD = "cipher";
public static final String LENGTH_FIELD = "length";
@ -47,6 +53,8 @@ public class KMSRESTConstants {
public static final String VERSIONS_FIELD = "versions";
public static final String MATERIAL_FIELD = "material";
public static final String VERSION_NAME_FIELD = "versionName";
public static final String ENCRYPTED_KEY_VERSION_FIELD =
"encryptedKeyVersion";
public static final String ERROR_EXCEPTION_JSON = "exception";
public static final String ERROR_MESSAGE_JSON = "message";


@ -0,0 +1,317 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms;
import java.io.IOException;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Preconditions;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
/**
* A utility class that maintains a Queue of entries for a given key. It tries
* to ensure that there are always at least <code>numValues</code> entries
* available for the client to consume for a particular key.
* It also uses an underlying Cache to evict queues for keys that have not been
* accessed for a configurable period of time.
* Implementing classes are required to implement the
* <code>QueueRefiller</code> interface that exposes a method to refill the
* queue when it is empty.
*/
@InterfaceAudience.Private
public class ValueQueue <E> {
/**
* QueueRefiller interface a client must implement to use this class
*/
public interface QueueRefiller <E> {
/**
* Method that has to be implemented by implementing classes to fill the
* Queue.
* @param keyName Key name
* @param keyQueue Queue that needs to be filled
* @param numValues number of Values to be added to the queue.
* @throws IOException
*/
public void fillQueueForKey(String keyName,
Queue<E> keyQueue, int numValues) throws IOException;
}
private static final String REFILL_THREAD =
ValueQueue.class.getName() + "_thread";
private final LoadingCache<String, LinkedBlockingQueue<E>> keyQueues;
private final ThreadPoolExecutor executor;
private final UniqueKeyBlockingQueue queue = new UniqueKeyBlockingQueue();
private final QueueRefiller<E> refiller;
private final SyncGenerationPolicy policy;
private final int numValues;
private final float lowWatermark;
/**
* A <code>Runnable</code> which takes a string name.
*/
private abstract static class NamedRunnable implements Runnable {
final String name;
private NamedRunnable(String keyName) {
this.name = keyName;
}
}
/**
* This backing blocking queue is used in conjunction with the
* <code>ThreadPoolExecutor</code> used by the <code>ValueQueue</code>. This
* Queue accepts a task only if the task is not currently in the process
* of being run by a thread, which is implied by the presence of the key
* in the <code>keysInProgress</code> set.
*
* NOTE: Only methods that are explicitly called by the
* <code>ThreadPoolExecutor</code> need to be overridden.
*/
private static class UniqueKeyBlockingQueue extends
LinkedBlockingQueue<Runnable> {
private static final long serialVersionUID = -2152747693695890371L;
private HashSet<String> keysInProgress = new HashSet<String>();
@Override
public synchronized void put(Runnable e) throws InterruptedException {
if (keysInProgress.add(((NamedRunnable)e).name)) {
super.put(e);
}
}
@Override
public Runnable take() throws InterruptedException {
Runnable k = super.take();
if (k != null) {
keysInProgress.remove(((NamedRunnable)k).name);
}
return k;
}
@Override
public Runnable poll(long timeout, TimeUnit unit)
throws InterruptedException {
Runnable k = super.poll(timeout, unit);
if (k != null) {
keysInProgress.remove(((NamedRunnable)k).name);
}
return k;
}
}
/**
* Policy deciding how many values to return to the client when the client
* asks for "n" values via "getAtMost" and the Queue is empty.
*/
public static enum SyncGenerationPolicy {
ATLEAST_ONE, // Return at least 1 value
LOW_WATERMARK, // Return min(n, lowWatermark * numValues) values
ALL // Return n values
}
/**
* Constructor takes the following tunable configuration parameters
* @param numValues The number of values cached in the Queue for a
* particular key.
* @param lowWatermark The ratio of (number of current entries/numValues)
* below which the <code>fillQueueForKey()</code> function will be
* invoked to fill the Queue.
* @param expiry Expiry time after which the Key and associated Queue are
* evicted from the cache.
* @param numFillerThreads Number of threads to use for the filler thread
* @param policy The SyncGenerationPolicy to use when client
* calls "getAtMost"
* @param refiller implementation of the QueueRefiller
*/
public ValueQueue(final int numValues, final float lowWatermark,
long expiry, int numFillerThreads, SyncGenerationPolicy policy,
final QueueRefiller<E> refiller) {
Preconditions.checkArgument(numValues > 0, "\"numValues\" must be > 0");
Preconditions.checkArgument(((lowWatermark > 0)&&(lowWatermark <= 1)),
"\"lowWatermark\" must be > 0 and <= 1");
Preconditions.checkArgument(expiry > 0, "\"expiry\" must be > 0");
Preconditions.checkArgument(numFillerThreads > 0,
"\"numFillerThreads\" must be > 0");
Preconditions.checkNotNull(policy, "\"policy\" must not be null");
this.refiller = refiller;
this.policy = policy;
this.numValues = numValues;
this.lowWatermark = lowWatermark;
keyQueues = CacheBuilder.newBuilder()
.expireAfterAccess(expiry, TimeUnit.MILLISECONDS)
.build(new CacheLoader<String, LinkedBlockingQueue<E>>() {
@Override
public LinkedBlockingQueue<E> load(String keyName)
throws Exception {
LinkedBlockingQueue<E> keyQueue =
new LinkedBlockingQueue<E>();
refiller.fillQueueForKey(keyName, keyQueue,
(int)(lowWatermark * numValues));
return keyQueue;
}
});
executor =
new ThreadPoolExecutor(numFillerThreads, numFillerThreads, 0L,
TimeUnit.MILLISECONDS, queue, new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat(REFILL_THREAD).build());
// To ensure all requests are first queued, make coreThreads = maxThreads
// and pre-start all the Core Threads.
executor.prestartAllCoreThreads();
}
public ValueQueue(final int numValues, final float lowWaterMark, long expiry,
int numFillerThreads, QueueRefiller<E> fetcher) {
this(numValues, lowWaterMark, expiry, numFillerThreads,
SyncGenerationPolicy.ALL, fetcher);
}
/**
* Initializes the Value Queues for the provided keys by calling the
* fill method with the initial number of values (lowWatermark * numValues).
* @param keyNames Array of key Names
* @throws ExecutionException
*/
public void initializeQueuesForKeys(String... keyNames)
throws ExecutionException {
for (String keyName : keyNames) {
keyQueues.get(keyName);
}
}
/**
* This removes the value currently at the head of the Queue for the
* provided key. Will immediately fire the Queue filler function if the key
* does not exist.
* If the Queue exists but all values are drained, it will ask the generator
* function to add 1 value to the Queue and then drain it.
* @param keyName String key name
* @return E the next value in the Queue
* @throws IOException
* @throws ExecutionException
*/
public E getNext(String keyName)
throws IOException, ExecutionException {
return getAtMost(keyName, 1).get(0);
}
/**
* This removes the "num" values currently at the head of the Queue for the
* provided key. Will immediately fire the Queue filler function if the key
* does not exist.
* How many values are actually returned is governed by the
* <code>SyncGenerationPolicy</code> specified by the user.
* @param keyName String key name
* @param num Maximum number of values to return.
* @return List<E> values returned
* @throws IOException
* @throws ExecutionException
*/
public List<E> getAtMost(String keyName, int num) throws IOException,
ExecutionException {
LinkedBlockingQueue<E> keyQueue = keyQueues.get(keyName);
// Using poll to avoid a race condition.
LinkedList<E> ekvs = new LinkedList<E>();
try {
for (int i = 0; i < num; i++) {
E val = keyQueue.poll();
// If the queue is empty now, based on the provided SyncGenerationPolicy,
// figure out how many new values need to be generated synchronously
if (val == null) {
// Synchronous call to get remaining values
int numToFill = 0;
switch (policy) {
case ATLEAST_ONE:
numToFill = (ekvs.size() < 1) ? 1 : 0;
break;
case LOW_WATERMARK:
numToFill =
Math.min(num, (int) (lowWatermark * numValues)) - ekvs.size();
break;
case ALL:
numToFill = num - ekvs.size();
break;
}
// Synchronous fill if not enough values found
if (numToFill > 0) {
refiller.fillQueueForKey(keyName, ekvs, numToFill);
}
// Async task to refill the queue above the low watermark
if (i <= (int) (lowWatermark * numValues)) {
submitRefillTask(keyName, keyQueue);
}
return ekvs;
}
ekvs.add(val);
}
} catch (Exception e) {
throw new IOException("Exception while contacting value generator", e);
}
return ekvs;
}
private void submitRefillTask(final String keyName,
final Queue<E> keyQueue) throws InterruptedException {
// The submit/execute method of the ThreadPoolExecutor is bypassed and
// the Runnable is directly put in the backing BlockingQueue so that we
// can control exactly how the runnable is inserted into the queue.
queue.put(
new NamedRunnable(keyName) {
@Override
public void run() {
int cacheSize = numValues;
int threshold = (int) (lowWatermark * (float) cacheSize);
// Need to ensure that only one refill task per key is executed
try {
if (keyQueue.size() < threshold) {
refiller.fillQueueForKey(name, keyQueue,
cacheSize - keyQueue.size());
}
} catch (final Exception e) {
throw new RuntimeException(e);
}
}
}
);
}
/**
* Cleanly shut down the refill executor.
*/
public void shutdown() {
executor.shutdownNow();
}
}
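
A self-contained usage sketch for the class above; the refiller body is a
stand-in for an expensive fetch such as the KMS call:

    ValueQueue<String> vq = new ValueQueue<String>(
        10,     // numValues cached per key
        0.3f,   // lowWatermark
        60000,  // expiry of idle key queues, in milliseconds
        2,      // numFillerThreads
        ValueQueue.SyncGenerationPolicy.ALL,
        new ValueQueue.QueueRefiller<String>() {
          @Override
          public void fillQueueForKey(String keyName, Queue<String> keyQueue,
              int numValues) throws IOException {
            for (int i = 0; i < numValues; i++) {
              keyQueue.add(keyName + "#" + i); // stand-in for a real value
            }
          }
        });
    String one = vq.getNext("k1");             // drains one entry
    List<String> five = vq.getAtMost("k1", 5); // bounded by the policy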

View File

@ -304,6 +304,34 @@ public class CommonConfigurationKeysPublic {
/** Class to override Impersonation provider */
public static final String HADOOP_SECURITY_IMPERSONATION_PROVIDER_CLASS =
"hadoop.security.impersonation.provider.class";
// KMSClientProvider configurations
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String KMS_CLIENT_ENC_KEY_CACHE_SIZE =
"hadoop.security.kms.client.encrypted.key.cache.size";
/** Default value for KMS_CLIENT_ENC_KEY_CACHE_SIZE */
public static final int KMS_CLIENT_ENC_KEY_CACHE_SIZE_DEFAULT = 500;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK =
"hadoop.security.kms.client.encrypted.key.cache.low-watermark";
/** Default value for KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK */
public static final float KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK_DEFAULT =
0.3f;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS =
"hadoop.security.kms.client.encrypted.key.cache.num.refill.threads";
/** Default value for KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS */
public static final int KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT =
2;
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS =
"hadoop.security.kms.client.encrypted.key.cache.expiry";
/** Default value for KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS (12 hrs) */
public static final int KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT = 43200000;
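
These constants are read in the KMSClientProvider constructor shown earlier;
a sketch of tuning them programmatically (the values are examples only):

    Configuration conf = new Configuration();
    conf.setInt(CommonConfigurationKeysPublic
        .KMS_CLIENT_ENC_KEY_CACHE_SIZE, 1000);
    conf.setFloat(CommonConfigurationKeysPublic
        .KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK, 0.5f);
    conf.setInt(CommonConfigurationKeysPublic
        .KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS, 4);
    conf.setInt(CommonConfigurationKeysPublic
        .KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS, 60 * 60 * 1000); // 1 hour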
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_SECURITY_JAVA_SECURE_RANDOM_ALGORITHM_KEY =
"hadoop.security.java.secure.random.algorithm";


@ -50,6 +50,7 @@ import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.s3.S3Exception;
@ -225,6 +226,7 @@ public class NativeS3FileSystem extends FileSystem {
private OutputStream backupStream;
private MessageDigest digest;
private boolean closed;
private LocalDirAllocator lDirAlloc;
public NativeS3FsOutputStream(Configuration conf,
NativeFileSystemStore store, String key, Progressable progress,
@ -246,11 +248,10 @@ public class NativeS3FileSystem extends FileSystem {
}
private File newBackupFile() throws IOException {
File dir = new File(conf.get("fs.s3.buffer.dir"));
if (!dir.mkdirs() && !dir.exists()) {
throw new IOException("Cannot create S3 buffer directory: " + dir);
if (lDirAlloc == null) {
lDirAlloc = new LocalDirAllocator("fs.s3.buffer.dir");
}
File result = File.createTempFile("output-", ".tmp", dir);
File result = lDirAlloc.createTmpFileForWrite("output-", LocalDirAllocator.SIZE_UNKNOWN, conf);
result.deleteOnExit();
return result;
}
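
Because newBackupFile() now goes through LocalDirAllocator, fs.s3.buffer.dir
accepts a comma-separated list of directories (HADOOP-10610); a sketch with
illustrative paths:

    // LocalDirAllocator spreads temp files across the listed directories,
    // skipping any that are missing or out of space.
    Configuration conf = new Configuration();
    conf.set("fs.s3.buffer.dir", "/disk1/s3buf,/disk2/s3buf,/disk3/s3buf");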


@ -37,6 +37,8 @@ import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
@ -279,6 +281,38 @@ class ChRootedFs extends AbstractFileSystem {
myFs.setTimes(fullPath(f), mtime, atime);
}
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
myFs.modifyAclEntries(fullPath(path), aclSpec);
}
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
myFs.removeAclEntries(fullPath(path), aclSpec);
}
@Override
public void removeDefaultAcl(Path path) throws IOException {
myFs.removeDefaultAcl(fullPath(path));
}
@Override
public void removeAcl(Path path) throws IOException {
myFs.removeAcl(fullPath(path));
}
@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
myFs.setAcl(fullPath(path), aclSpec);
}
@Override
public AclStatus getAclStatus(Path path) throws IOException {
return myFs.getAclStatus(fullPath(path));
}
@Override
public void setVerifyChecksum(final boolean verifyChecksum)
throws IOException, UnresolvedLinkException {


@ -50,6 +50,7 @@ import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.InodeTree.INode;
import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@ -871,5 +872,46 @@ public class ViewFileSystem extends FileSystem {
public short getDefaultReplication(Path f) {
throw new NotInMountpointException(f, "getDefaultReplication");
}
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("modifyAclEntries", path);
}
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("removeAclEntries", path);
}
@Override
public void removeDefaultAcl(Path path) throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("removeDefaultAcl", path);
}
@Override
public void removeAcl(Path path) throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("removeAcl", path);
}
@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("setAcl", path);
}
@Override
public AclStatus getAclStatus(Path path) throws IOException {
checkPathIsSlash(path);
return new AclStatus.Builder().owner(ugi.getUserName())
.group(ugi.getGroupNames()[0])
.addEntries(AclUtil.getMinimalAcl(PERMISSION_555))
.stickyBit(false).build();
}
}
}


@ -49,6 +49,9 @@ import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.local.LocalConfigKeys;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.InodeTree.INode;
import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@ -603,6 +606,51 @@ public class ViewFs extends AbstractFileSystem {
return true;
}
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
InodeTree.ResolveResult<AbstractFileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.modifyAclEntries(res.remainingPath, aclSpec);
}
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
InodeTree.ResolveResult<AbstractFileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.removeAclEntries(res.remainingPath, aclSpec);
}
@Override
public void removeDefaultAcl(Path path)
throws IOException {
InodeTree.ResolveResult<AbstractFileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.removeDefaultAcl(res.remainingPath);
}
@Override
public void removeAcl(Path path)
throws IOException {
InodeTree.ResolveResult<AbstractFileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.removeAcl(res.remainingPath);
}
@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
InodeTree.ResolveResult<AbstractFileSystem> res =
fsState.resolve(getUriPath(path), true);
res.targetFileSystem.setAcl(res.remainingPath, aclSpec);
}
@Override
public AclStatus getAclStatus(Path path) throws IOException {
InodeTree.ResolveResult<AbstractFileSystem> res =
fsState.resolve(getUriPath(path), true);
return res.targetFileSystem.getAclStatus(res.remainingPath);
}
/*
@ -832,5 +880,46 @@ public class ViewFs extends AbstractFileSystem {
throws AccessControlException {
throw readOnlyMountTable("setVerifyChecksum", "");
}
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("modifyAclEntries", path);
}
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("removeAclEntries", path);
}
@Override
public void removeDefaultAcl(Path path) throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("removeDefaultAcl", path);
}
@Override
public void removeAcl(Path path) throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("removeAcl", path);
}
@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
checkPathIsSlash(path);
throw readOnlyMountTable("setAcl", path);
}
@Override
public AclStatus getAclStatus(Path path) throws IOException {
checkPathIsSlash(path);
return new AclStatus.Builder().owner(ugi.getUserName())
.group(ugi.getGroupNames()[0])
.addEntries(AclUtil.getMinimalAcl(PERMISSION_555))
.stickyBit(false).build();
}
}
}


@ -100,7 +100,8 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return createOutputStream(out, createCompressor());
return CompressionCodec.Util.
createOutputStreamWithCodecPool(this, conf, out);
}
/**
@ -153,7 +154,8 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return createInputStream(in, createDecompressor());
return CompressionCodec.Util.
createInputStreamWithCodecPool(this, conf, in);
}
/**


@ -24,6 +24,7 @@ import java.io.OutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
/**
* This class encapsulates a streaming compression/decompression pair.
@ -113,4 +114,58 @@ public interface CompressionCodec {
* @return the extension including the '.'
*/
String getDefaultExtension();
static class Util {
/**
* Create an output stream with a codec taken from the global CodecPool.
*
* @param codec The codec to use to create the output stream.
* @param conf The configuration to use if we need to create a new codec.
* @param out The output stream to wrap.
* @return The new output stream
* @throws IOException
*/
static CompressionOutputStream createOutputStreamWithCodecPool(
CompressionCodec codec, Configuration conf, OutputStream out)
throws IOException {
Compressor compressor = CodecPool.getCompressor(codec, conf);
CompressionOutputStream stream = null;
try {
stream = codec.createOutputStream(out, compressor);
} finally {
if (stream == null) {
CodecPool.returnCompressor(compressor);
} else {
stream.setTrackedCompressor(compressor);
}
}
return stream;
}
/**
* Create an input stream with a codec taken from the global CodecPool.
*
* @param codec The codec to use to create the input stream.
* @param conf The configuration to use if we need to create a new codec.
* @param in The input stream to wrap.
* @return The new input stream
* @throws IOException
*/
static CompressionInputStream createInputStreamWithCodecPool(
CompressionCodec codec, Configuration conf, InputStream in)
throws IOException {
Decompressor decompressor = CodecPool.getDecompressor(codec);
CompressionInputStream stream = null;
try {
stream = codec.createInputStream(in, decompressor);
} finally {
if (stream == null) {
CodecPool.returnDecompressor(decompressor);
} else {
stream.setTrackedDecompressor(decompressor);
}
}
return stream;
}
}
}
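
Seen from a caller, the helpers above mean a pooled (de)compressor is
returned automatically on close(); a hedged sketch (codec and path are
illustrative):

    DefaultCodec codec = new DefaultCodec();
    codec.setConf(new Configuration());
    OutputStream out = codec.createOutputStream(
        new FileOutputStream("/tmp/data.deflate"));
    try {
      out.write("hello".getBytes());
    } finally {
      out.close(); // returns the tracked Compressor to CodecPool
    }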


@ -41,6 +41,8 @@ public abstract class CompressionInputStream extends InputStream implements Seek
protected final InputStream in;
protected long maxAvailableData = 0L;
private Decompressor trackedDecompressor;
/**
* Create a compression input stream that reads
* the decompressed bytes from the given stream.
@ -58,6 +60,10 @@ public abstract class CompressionInputStream extends InputStream implements Seek
@Override
public void close() throws IOException {
in.close();
if (trackedDecompressor != null) {
CodecPool.returnDecompressor(trackedDecompressor);
trackedDecompressor = null;
}
}
/**
@ -112,4 +118,8 @@ public abstract class CompressionInputStream extends InputStream implements Seek
public boolean seekToNewSource(long targetPos) throws UnsupportedOperationException {
throw new UnsupportedOperationException();
}
void setTrackedDecompressor(Decompressor decompressor) {
trackedDecompressor = decompressor;
}
}


@ -34,7 +34,13 @@ public abstract class CompressionOutputStream extends OutputStream {
* The output stream to be compressed.
*/
protected final OutputStream out;
/**
* If non-null, this is the Compressor object that we should call
* CodecPool#returnCompressor on when this stream is closed.
*/
private Compressor trackedCompressor;
/**
* Create a compression output stream that writes
* the compressed bytes to the given stream.
@ -43,11 +49,19 @@ public abstract class CompressionOutputStream extends OutputStream {
protected CompressionOutputStream(OutputStream out) {
this.out = out;
}
void setTrackedCompressor(Compressor compressor) {
trackedCompressor = compressor;
}
@Override
public void close() throws IOException {
finish();
out.close();
if (trackedCompressor != null) {
CodecPool.returnCompressor(trackedCompressor);
trackedCompressor = null;
}
}
@Override


@ -51,14 +51,8 @@ public class DefaultCodec implements Configurable, CompressionCodec, DirectDecom
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
// This may leak memory if called in a loop. The createCompressor() call
// may cause allocation of an untracked direct-backed buffer if native
// libs are being used (even if you close the stream). A Compressor
// object should be reused between successive calls.
LOG.warn("DefaultCodec.createOutputStream() may leak memory. "
+ "Create a compressor first.");
return new CompressorStream(out, createCompressor(),
conf.getInt("io.file.buffer.size", 4*1024));
return CompressionCodec.Util.
createOutputStreamWithCodecPool(this, conf, out);
}
@Override
@ -82,8 +76,8 @@ public class DefaultCodec implements Configurable, CompressionCodec, DirectDecom
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return new DecompressorStream(in, createDecompressor(),
conf.getInt("io.file.buffer.size", 4*1024));
return CompressionCodec.Util.
createInputStreamWithCodecPool(this, conf, in);
}
@Override


@ -159,10 +159,11 @@ public class GzipCodec extends DefaultCodec {
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return (ZlibFactory.isNativeZlibLoaded(conf)) ?
new CompressorStream(out, createCompressor(),
conf.getInt("io.file.buffer.size", 4*1024)) :
new GzipOutputStream(out);
if (!ZlibFactory.isNativeZlibLoaded(conf)) {
return new GzipOutputStream(out);
}
return CompressionCodec.Util.
createOutputStreamWithCodecPool(this, conf, out);
}
@Override
@ -192,8 +193,9 @@ public class GzipCodec extends DefaultCodec {
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return createInputStream(in, null);
throws IOException {
return CompressionCodec.Util.
createInputStreamWithCodecPool(this, conf, in);
}
@Override


@ -84,7 +84,8 @@ public class Lz4Codec implements Configurable, CompressionCodec {
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return createOutputStream(out, createCompressor());
return CompressionCodec.Util.
createOutputStreamWithCodecPool(this, conf, out);
}
/**
@ -157,7 +158,8 @@ public class Lz4Codec implements Configurable, CompressionCodec {
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return createInputStream(in, createDecompressor());
return CompressionCodec.Util.
createInputStreamWithCodecPool(this, conf, in);
}
/**


@ -95,7 +95,8 @@ public class SnappyCodec implements Configurable, CompressionCodec, DirectDecomp
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return createOutputStream(out, createCompressor());
return CompressionCodec.Util.
createOutputStreamWithCodecPool(this, conf, out);
}
/**
@ -158,7 +159,8 @@ public class SnappyCodec implements Configurable, CompressionCodec, DirectDecomp
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return createInputStream(in, createDecompressor());
return CompressionCodec.Util.
createInputStreamWithCodecPool(this, conf, in);
}
/**


@ -599,24 +599,35 @@ public class ProtobufRpcEngine implements RpcEngine {
.mergeFrom(request.theRequestRead).build();
Message result;
long startTime = Time.now();
int qTime = (int) (startTime - receiveTime);
Exception exception = null;
try {
long startTime = Time.now();
server.rpcDetailedMetrics.init(protocolImpl.protocolClass);
result = service.callBlockingMethod(methodDescriptor, null, param);
int processingTime = (int) (Time.now() - startTime);
int qTime = (int) (startTime - receiveTime);
if (LOG.isDebugEnabled()) {
LOG.info("Served: " + methodName + " queueTime= " + qTime +
" processingTime= " + processingTime);
}
server.rpcMetrics.addRpcQueueTime(qTime);
server.rpcMetrics.addRpcProcessingTime(processingTime);
server.rpcDetailedMetrics.addProcessingTime(methodName,
processingTime);
} catch (ServiceException e) {
exception = (Exception) e.getCause();
throw (Exception) e.getCause();
} catch (Exception e) {
exception = e;
throw e;
} finally {
int processingTime = (int) (Time.now() - startTime);
if (LOG.isDebugEnabled()) {
String msg = "Served: " + methodName + " queueTime= " + qTime +
" processingTime= " + processingTime;
if (exception != null) {
msg += " exception= " + exception.getClass().getSimpleName();
}
LOG.debug(msg);
}
String detailedMetricsName = (exception == null) ?
methodName :
exception.getClass().getSimpleName();
server.rpcMetrics.addRpcQueueTime(qTime);
server.rpcMetrics.addRpcProcessingTime(processingTime);
server.rpcDetailedMetrics.addProcessingTime(detailedMetricsName,
processingTime);
}
return new RpcResponseWrapper(result);
}


@ -355,8 +355,8 @@ public abstract class Server {
private int readThreads; // number of read threads
private int readerPendingConnectionQueue; // number of connections to queue per read thread
private Class<? extends Writable> rpcRequestClass; // class used for deserializing the rpc request
protected RpcMetrics rpcMetrics;
protected RpcDetailedMetrics rpcDetailedMetrics;
final protected RpcMetrics rpcMetrics;
final protected RpcDetailedMetrics rpcDetailedMetrics;
private Configuration conf;
private String portRangeConfig = null;
@ -2494,12 +2494,8 @@ public abstract class Server {
listener.doStop();
responder.interrupt();
notifyAll();
if (this.rpcMetrics != null) {
this.rpcMetrics.shutdown();
}
if (this.rpcDetailedMetrics != null) {
this.rpcDetailedMetrics.shutdown();
}
this.rpcMetrics.shutdown();
this.rpcDetailedMetrics.shutdown();
}
/** Wait for the server to be stopped.
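The Server hunk above can drop the null guards because the metrics fields are now final: every constructor must assign them before the instance is visible, so stop() may call shutdown() unconditionally. A tiny sketch of the invariant (types are illustrative):

public class FinalFieldExample {
  interface Metrics { void shutdown(); }

  private final Metrics rpcMetrics; // must be assigned in every constructor

  FinalFieldExample(Metrics metrics) {
    this.rpcMetrics = metrics;
  }

  void stop() {
    rpcMetrics.shutdown(); // no null check needed: the field is always set
  }
}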


@ -471,37 +471,29 @@ public class WritableRpcEngine implements RpcEngine {
// Invoke the protocol method
long startTime = Time.now();
int qTime = (int) (startTime-receivedTime);
Exception exception = null;
try {
long startTime = Time.now();
Method method =
Method method =
protocolImpl.protocolClass.getMethod(call.getMethodName(),
call.getParameterClasses());
method.setAccessible(true);
server.rpcDetailedMetrics.init(protocolImpl.protocolClass);
Object value =
method.invoke(protocolImpl.protocolImpl, call.getParameters());
int processingTime = (int) (Time.now() - startTime);
int qTime = (int) (startTime-receivedTime);
if (LOG.isDebugEnabled()) {
LOG.debug("Served: " + call.getMethodName() +
" queueTime= " + qTime +
" procesingTime= " + processingTime);
}
server.rpcMetrics.addRpcQueueTime(qTime);
server.rpcMetrics.addRpcProcessingTime(processingTime);
server.rpcDetailedMetrics.addProcessingTime(call.getMethodName(),
processingTime);
if (server.verbose) log("Return: "+value);
return new ObjectWritable(method.getReturnType(), value);
} catch (InvocationTargetException e) {
Throwable target = e.getTargetException();
if (target instanceof IOException) {
exception = (IOException)target;
throw (IOException)target;
} else {
IOException ioe = new IOException(target.toString());
ioe.setStackTrace(target.getStackTrace());
exception = ioe;
throw ioe;
}
} catch (Throwable e) {
@ -510,8 +502,27 @@ public class WritableRpcEngine implements RpcEngine {
}
IOException ioe = new IOException(e.toString());
ioe.setStackTrace(e.getStackTrace());
exception = ioe;
throw ioe;
}
} finally {
int processingTime = (int) (Time.now() - startTime);
if (LOG.isDebugEnabled()) {
String msg = "Served: " + call.getMethodName() +
" queueTime= " + qTime +
" procesingTime= " + processingTime;
if (exception != null) {
msg += " exception= " + exception.getClass().getSimpleName();
}
LOG.debug(msg);
}
String detailedMetricsName = (exception == null) ?
call.getMethodName() :
exception.getClass().getSimpleName();
server.rpcMetrics.addRpcQueueTime(qTime);
server.rpcMetrics.addRpcProcessingTime(processingTime);
server.rpcDetailedMetrics.addProcessingTime(detailedMetricsName,
processingTime);
}
}
}
}


@ -54,6 +54,12 @@ public abstract class MetricsSystem implements MetricsSystemMXBean {
*/
public abstract <T> T register(String name, String desc, T source);
/**
* Unregister a metrics source
* @param name name of the source. This is the name used when calling register()
*/
public abstract void unregisterSource(String name);
/**
* Register a metrics source (deriving name and description from the object)
* @param <T> the actual type of the source object


@ -232,6 +232,17 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
return source;
}
@Override public synchronized
void unregisterSource(String name) {
if (sources.containsKey(name)) {
sources.get(name).stop();
sources.remove(name);
}
if (allSources.containsKey(name)) {
allSources.remove(name);
}
}
synchronized
void registerSource(String name, String desc, MetricsSource source) {
checkNotNull(config, "config");


@ -373,12 +373,12 @@ public class CredentialShell extends Configured implements Tool {
char[] newPassword2 = c.readPassword("Enter password again: ");
noMatch = !Arrays.equals(newPassword1, newPassword2);
if (noMatch) {
Arrays.fill(newPassword1, ' ');
if (newPassword1 != null) Arrays.fill(newPassword1, ' ');
c.format("Passwords don't match. Try again.%n");
} else {
cred = newPassword1;
}
Arrays.fill(newPassword2, ' ');
if (newPassword2 != null) Arrays.fill(newPassword2, ' ');
} while (noMatch);
return cred;
}
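The CredentialShell fix above guards against Console.readPassword() returning null (for example on EOF) before zeroing the arrays. A standalone sketch of the corrected loop (class and method names are illustrative):

import java.io.Console;
import java.util.Arrays;

public class PasswordPrompt {
  public static char[] promptTwice() {
    Console c = System.console(); // may be null when no console is attached
    char[] cred = null;
    boolean noMatch;
    do {
      char[] p1 = c.readPassword("Enter password: ");
      char[] p2 = c.readPassword("Enter password again: ");
      noMatch = !Arrays.equals(p1, p2);
      if (noMatch) {
        if (p1 != null) Arrays.fill(p1, ' '); // scrub the rejected copy
        c.format("Passwords don't match. Try again.%n");
      } else {
        cred = p1;
      }
      if (p2 != null) Arrays.fill(p2, ' '); // always scrub the second copy
    } while (noMatch);
    return cred;
  }
}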


@ -230,6 +230,7 @@ public class JavaKeyStoreProvider extends CredentialProvider {
CredentialEntry innerSetCredential(String alias, char[] material)
throws IOException {
writeLock.lock();
try {
keyStore.setKeyEntry(alias, new SecretKeySpec(
new String(material).getBytes("UTF-8"), "AES"),
@ -237,6 +238,8 @@ public class JavaKeyStoreProvider extends CredentialProvider {
} catch (KeyStoreException e) {
throw new IOException("Can't store credential " + alias + " in " + this,
e);
} finally {
writeLock.unlock();
}
changed = true;
return new CredentialEntry(alias, material);
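The JavaKeyStoreProvider fix restores the canonical lock-then-try idiom, so the write lock is released even when the keystore update throws. A minimal illustrative sketch:

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class LockedStore {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final ReentrantReadWriteLock.WriteLock writeLock = lock.writeLock();
  private String value;

  public void set(String v) {
    writeLock.lock(); // acquire outside try: only unlock if the lock succeeded
    try {
      value = v; // any exception here still reaches the finally block
    } finally {
      writeLock.unlock();
    }
  }
}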


@ -24,37 +24,64 @@ import java.util.Map;
import java.util.Map.Entry;
import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.MachineList;
import com.google.common.annotations.VisibleForTesting;
@InterfaceStability.Unstable
@InterfaceAudience.Public
public class DefaultImpersonationProvider implements ImpersonationProvider {
private static final String CONF_HOSTS = ".hosts";
private static final String CONF_USERS = ".users";
private static final String CONF_GROUPS = ".groups";
private static final String CONF_HADOOP_PROXYUSER = "hadoop.proxyuser.";
private static final String CONF_HADOOP_PROXYUSER_RE = "hadoop\\.proxyuser\\.";
private static final String CONF_HADOOP_PROXYUSER_RE_USERS_GROUPS =
CONF_HADOOP_PROXYUSER_RE+"[^.]*(" + Pattern.quote(CONF_USERS) +
"|" + Pattern.quote(CONF_GROUPS) + ")";
private static final String CONF_HADOOP_PROXYUSER_RE_HOSTS =
CONF_HADOOP_PROXYUSER_RE+"[^.]*"+ Pattern.quote(CONF_HOSTS);
// acl and list of hosts per proxyuser
private Map<String, AccessControlList> proxyUserAcl =
new HashMap<String, AccessControlList>();
private static Map<String, MachineList> proxyHosts =
private Map<String, MachineList> proxyHosts =
new HashMap<String, MachineList>();
private Configuration conf;
private static DefaultImpersonationProvider testProvider;
public static synchronized DefaultImpersonationProvider getTestProvider() {
if (testProvider == null) {
testProvider = new DefaultImpersonationProvider();
testProvider.setConf(new Configuration());
testProvider.init(ProxyUsers.CONF_HADOOP_PROXYUSER);
}
return testProvider;
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
// get list of users and groups per proxyuser
private String configPrefix;
@Override
public void init(String configurationPrefix) {
configPrefix = configurationPrefix +
(configurationPrefix.endsWith(".") ? "" : ".");
// constructing regex to match the following patterns:
// $configPrefix.[ANY].users
// $configPrefix.[ANY].groups
// $configPrefix.[ANY].hosts
//
String prefixRegEx = configPrefix.replace(".", "\\.");
String usersGroupsRegEx = prefixRegEx + "[^.]*(" +
Pattern.quote(CONF_USERS) + "|" + Pattern.quote(CONF_GROUPS) + ")";
String hostsRegEx = prefixRegEx + "[^.]*" + Pattern.quote(CONF_HOSTS);
// get list of users and groups per proxyuser
Map<String,String> allMatchKeys =
conf.getValByRegex(CONF_HADOOP_PROXYUSER_RE_USERS_GROUPS);
conf.getValByRegex(usersGroupsRegEx);
for(Entry<String, String> entry : allMatchKeys.entrySet()) {
String aclKey = getAclKey(entry.getKey());
if (!proxyUserAcl.containsKey(aclKey)) {
@ -65,7 +92,7 @@ public class DefaultImpersonationProvider implements ImpersonationProvider {
}
// get hosts per proxyuser
allMatchKeys = conf.getValByRegex(CONF_HADOOP_PROXYUSER_RE_HOSTS);
allMatchKeys = conf.getValByRegex(hostsRegEx);
for(Entry<String, String> entry : allMatchKeys.entrySet()) {
proxyHosts.put(entry.getKey(),
new MachineList(entry.getValue()));
@ -86,8 +113,8 @@ public class DefaultImpersonationProvider implements ImpersonationProvider {
return;
}
AccessControlList acl = proxyUserAcl.get(
CONF_HADOOP_PROXYUSER+realUser.getShortUserName());
AccessControlList acl = proxyUserAcl.get(configPrefix +
realUser.getShortUserName());
if (acl == null || !acl.isUserAllowed(user)) {
throw new AuthorizationException("User: " + realUser.getUserName()
+ " is not allowed to impersonate " + user.getUserName());
@ -116,8 +143,8 @@ public class DefaultImpersonationProvider implements ImpersonationProvider {
* @param userName name of the superuser
* @return configuration key for superuser usergroups
*/
public static String getProxySuperuserUserConfKey(String userName) {
return CONF_HADOOP_PROXYUSER+userName+CONF_USERS;
public String getProxySuperuserUserConfKey(String userName) {
return configPrefix + userName + CONF_USERS;
}
/**
@ -126,8 +153,8 @@ public class DefaultImpersonationProvider implements ImpersonationProvider {
* @param userName name of the superuser
* @return configuration key for superuser groups
*/
public static String getProxySuperuserGroupConfKey(String userName) {
return CONF_HADOOP_PROXYUSER+userName+CONF_GROUPS;
public String getProxySuperuserGroupConfKey(String userName) {
return configPrefix + userName + CONF_GROUPS;
}
/**
@ -136,8 +163,8 @@ public class DefaultImpersonationProvider implements ImpersonationProvider {
* @param userName name of the superuser
* @return configuration key for superuser ip-addresses
*/
public static String getProxySuperuserIpConfKey(String userName) {
return CONF_HADOOP_PROXYUSER+userName+CONF_HOSTS;
public String getProxySuperuserIpConfKey(String userName) {
return configPrefix + userName + CONF_HOSTS;
}
@VisibleForTesting


@ -18,10 +18,25 @@
package org.apache.hadoop.security.authorize;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.security.UserGroupInformation;
@InterfaceStability.Unstable
@InterfaceAudience.Public
public interface ImpersonationProvider extends Configurable {
/**
* Specifies the configuration prefix for the proxy user properties and
* initializes the provider.
*
* @param configurationPrefix the configuration prefix for the proxy user
* properties
*/
public void init(String configurationPrefix);
/**
* Authorize the superuser which is doing doAs
*


@ -18,7 +18,9 @@
package org.apache.hadoop.security.authorize;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.security.UserGroupInformation;
@ -26,9 +28,12 @@ import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.annotations.VisibleForTesting;
@InterfaceStability.Unstable
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "HBase", "Hive"})
public class ProxyUsers {
public static final String CONF_HADOOP_PROXYUSER = "hadoop.proxyuser";
private static volatile ImpersonationProvider sip ;
/**
@ -54,15 +59,31 @@ public class ProxyUsers {
}
/**
* refresh configuration
* @param conf
* Refreshes configuration using the specified Proxy user prefix for
* properties.
*
* @param conf configuration
* @param proxyUserPrefix proxy user configuration prefix
*/
public static void refreshSuperUserGroupsConfiguration(Configuration conf) {
public static void refreshSuperUserGroupsConfiguration(Configuration conf,
String proxyUserPrefix) {
Preconditions.checkArgument(proxyUserPrefix != null &&
!proxyUserPrefix.isEmpty(), "prefix cannot be NULL or empty");
// sip is volatile. Any assignment to it as well as the object's state
// will be visible to all the other threads.
sip = getInstance(conf);
ImpersonationProvider ip = getInstance(conf);
ip.init(proxyUserPrefix);
sip = ip;
ProxyServers.refresh(conf);
}
/**
* Refreshes configuration using the default Proxy user prefix for properties.
* @param conf configuration
*/
public static void refreshSuperUserGroupsConfiguration(Configuration conf) {
refreshSuperUserGroupsConfiguration(conf, CONF_HADOOP_PROXYUSER);
}
/**
* Authorize the superuser which is doing doAs
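A hedged caller-side sketch of the new prefixed refresh; the prefix "myapp.proxyuser" and the user/host values are arbitrary examples, not values from this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authorize.ProxyUsers;

public class CustomPrefixExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("myapp.proxyuser.alice.users", "bob");      // alice may impersonate bob
    conf.set("myapp.proxyuser.alice.hosts", "10.0.0.1"); // only from this host
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf, "myapp.proxyuser");
  }
}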


@ -1516,4 +1516,37 @@ for ldap providers in the same way as above does.
<description>Don't cache 'har' filesystem instances.</description>
</property>
<!--- KMSClientProvider configurations -->
<property>
<name>hadoop.security.kms.client.encrypted.key.cache.size</name>
<value>500</value>
<description>
Size of the EncryptedKeyVersion cache Queue for each key
</description>
</property>
<property>
<name>hadoop.security.kms.client.encrypted.key.cache.low-watermark</name>
<value>0.3f</value>
<description>
If the size of the EncryptedKeyVersion cache queue falls below the
low watermark, the queue will be scheduled for a refill
</description>
</property>
<property>
<name>hadoop.security.kms.client.encrypted.key.cache.num.refill.threads</name>
<value>2</value>
<description>
Number of threads to use for refilling depleted EncryptedKeyVersion
cache Queues
</description>
</property>
<property>
<name>"hadoop.security.kms.client.encrypted.key.cache.expiry</name>
<value>43200000</value>
<description>
Cache expiry time for a Key, after which the cache Queue for this
key will be dropped. Default = 12hrs
</description>
</property>
</configuration>


@ -127,7 +127,7 @@ User Commands
Runs an HDFS filesystem checking utility.
See {{{../hadoop-hdfs/HdfsUserGuide.html#fsck}fsck}} for more info.
Usage: <<<hadoop fsck [GENERIC_OPTIONS] <path> [-move | -delete | -openforwrite] [-files [-blocks [-locations | -racks]]]>>>
Usage: <<<hadoop fsck [GENERIC_OPTIONS] <path> [-move | -delete | -openforwrite] [-files [-blocks [-locations | -racks]]] [-showprogress]>>>
*------------------+---------------------------------------------+
|| COMMAND_OPTION || Description
@ -148,6 +148,8 @@ User Commands
*------------------+---------------------------------------------+
| -racks | Print out network topology for data-node locations.
*------------------+---------------------------------------------+
| -showprogress | Print progress in the output. Default is OFF (no progress).
*------------------+---------------------------------------------+
* <<<fetchdt>>>


@ -116,6 +116,8 @@ Native Libraries Guide
* zlib-development package (stable version >= 1.2.0)
* openssl-development package (e.g. libssl-dev)
Once you have installed the prerequisite packages, use the standard hadoop
pom.xml file and pass along the native flag to build the native hadoop
library:


@ -15,17 +15,16 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
package org.apache.hadoop.crypto.key;
import java.util.Date;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;
import java.util.Date;
public class TestKMSCacheKeyProvider {
public class TestCachingKeyProvider {
@Test
public void testCurrentKey() throws Exception {
@ -33,7 +32,7 @@ public class TestKMSCacheKeyProvider {
KeyProvider mockProv = Mockito.mock(KeyProvider.class);
Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey);
Mockito.when(mockProv.getCurrentKey(Mockito.eq("k2"))).thenReturn(null);
KeyProvider cache = new KMSCacheKeyProvider(mockProv, 100);
KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
// asserting caching
Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
@ -45,7 +44,7 @@ public class TestKMSCacheKeyProvider {
Mockito.verify(mockProv, Mockito.times(2)).getCurrentKey(Mockito.eq("k1"));
// asserting no caching when key is not known
cache = new KMSCacheKeyProvider(mockProv, 100);
cache = new CachingKeyProvider(mockProv, 100, 100);
Assert.assertEquals(null, cache.getCurrentKey("k2"));
Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k2"));
Assert.assertEquals(null, cache.getCurrentKey("k2"));
@ -56,25 +55,56 @@ public class TestKMSCacheKeyProvider {
public void testKeyVersion() throws Exception {
KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
KeyProvider mockProv = Mockito.mock(KeyProvider.class);
Mockito.when(mockProv.getKeyVersion(Mockito.eq("k1@0"))).thenReturn(mockKey);
Mockito.when(mockProv.getKeyVersion(Mockito.eq("k1@0")))
.thenReturn(mockKey);
Mockito.when(mockProv.getKeyVersion(Mockito.eq("k2@0"))).thenReturn(null);
KeyProvider cache = new KMSCacheKeyProvider(mockProv, 100);
KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
// asserting caching
Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
Mockito.verify(mockProv, Mockito.times(1)).getKeyVersion(Mockito.eq("k1@0"));
Mockito.verify(mockProv, Mockito.times(1))
.getKeyVersion(Mockito.eq("k1@0"));
Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
Mockito.verify(mockProv, Mockito.times(1)).getKeyVersion(Mockito.eq("k1@0"));
Mockito.verify(mockProv, Mockito.times(1))
.getKeyVersion(Mockito.eq("k1@0"));
Thread.sleep(200);
Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
Mockito.verify(mockProv, Mockito.times(2)).getKeyVersion(Mockito.eq("k1@0"));
Mockito.verify(mockProv, Mockito.times(2))
.getKeyVersion(Mockito.eq("k1@0"));
// asserting no caching when key is not known
cache = new KMSCacheKeyProvider(mockProv, 100);
cache = new CachingKeyProvider(mockProv, 100, 100);
Assert.assertEquals(null, cache.getKeyVersion("k2@0"));
Mockito.verify(mockProv, Mockito.times(1)).getKeyVersion(Mockito.eq("k2@0"));
Mockito.verify(mockProv, Mockito.times(1))
.getKeyVersion(Mockito.eq("k2@0"));
Assert.assertEquals(null, cache.getKeyVersion("k2@0"));
Mockito.verify(mockProv, Mockito.times(2)).getKeyVersion(Mockito.eq("k2@0"));
Mockito.verify(mockProv, Mockito.times(2))
.getKeyVersion(Mockito.eq("k2@0"));
}
@Test
public void testMetadata() throws Exception {
KeyProvider.Metadata mockMeta = Mockito.mock(KeyProvider.Metadata.class);
KeyProvider mockProv = Mockito.mock(KeyProvider.class);
Mockito.when(mockProv.getMetadata(Mockito.eq("k1"))).thenReturn(mockMeta);
Mockito.when(mockProv.getMetadata(Mockito.eq("k2"))).thenReturn(null);
KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
// asserting caching
Assert.assertEquals(mockMeta, cache.getMetadata("k1"));
Mockito.verify(mockProv, Mockito.times(1)).getMetadata(Mockito.eq("k1"));
Assert.assertEquals(mockMeta, cache.getMetadata("k1"));
Mockito.verify(mockProv, Mockito.times(1)).getMetadata(Mockito.eq("k1"));
Thread.sleep(200);
Assert.assertEquals(mockMeta, cache.getMetadata("k1"));
Mockito.verify(mockProv, Mockito.times(2)).getMetadata(Mockito.eq("k1"));
// asserting no caching when key is not known
cache = new CachingKeyProvider(mockProv, 100, 100);
Assert.assertEquals(null, cache.getMetadata("k2"));
Mockito.verify(mockProv, Mockito.times(1)).getMetadata(Mockito.eq("k2"));
Assert.assertEquals(null, cache.getMetadata("k2"));
Mockito.verify(mockProv, Mockito.times(2)).getMetadata(Mockito.eq("k2"));
}
@Test
@ -82,7 +112,7 @@ public class TestKMSCacheKeyProvider {
KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
KeyProvider mockProv = Mockito.mock(KeyProvider.class);
Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey);
KeyProvider cache = new KMSCacheKeyProvider(mockProv, 100);
KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));
cache.rollNewVersion("k1");
@ -100,21 +130,23 @@ public class TestKMSCacheKeyProvider {
KeyProvider.KeyVersion mockKey = Mockito.mock(KeyProvider.KeyVersion.class);
KeyProvider mockProv = Mockito.mock(KeyProvider.class);
Mockito.when(mockProv.getCurrentKey(Mockito.eq("k1"))).thenReturn(mockKey);
Mockito.when(mockProv.getKeyVersion(Mockito.eq("k1@0"))).thenReturn(mockKey);
Mockito.when(mockProv.getKeyVersion(Mockito.eq("k1@0")))
.thenReturn(mockKey);
Mockito.when(mockProv.getMetadata(Mockito.eq("k1"))).thenReturn(
new KMSClientProvider.KMSMetadata("c", 0, "l", null, new Date(), 1));
KeyProvider cache = new KMSCacheKeyProvider(mockProv, 100);
KeyProvider cache = new CachingKeyProvider(mockProv, 100, 100);
Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
Mockito.verify(mockProv, Mockito.times(1)).getCurrentKey(Mockito.eq("k1"));
Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
Mockito.verify(mockProv, Mockito.times(1)).getKeyVersion(Mockito.eq("k1@0"));
Mockito.verify(mockProv, Mockito.times(1))
.getKeyVersion(Mockito.eq("k1@0"));
cache.deleteKey("k1");
// asserting the cache is purged
Assert.assertEquals(mockKey, cache.getCurrentKey("k1"));
Mockito.verify(mockProv, Mockito.times(2)).getCurrentKey(Mockito.eq("k1"));
Assert.assertEquals(mockKey, cache.getKeyVersion("k1@0"));
Mockito.verify(mockProv, Mockito.times(2)).getKeyVersion(Mockito.eq("k1@0"));
Mockito.verify(mockProv, Mockito.times(2))
.getKeyVersion(Mockito.eq("k1@0"));
}
}


@ -42,9 +42,10 @@ public class TestKeyProviderCryptoExtension {
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
KeyProviderCryptoExtension.EncryptedKeyVersion ek1 =
kpExt.generateEncryptedKey(kv);
kpExt.generateEncryptedKey(kv.getName());
Assert.assertEquals(KeyProviderCryptoExtension.EEK,
ek1.getEncryptedKey().getVersionName());
Assert.assertEquals("foo", ek1.getKeyName());
Assert.assertNotNull(ek1.getEncryptedKey().getMaterial());
Assert.assertEquals(kv.getMaterial().length,
ek1.getEncryptedKey().getMaterial().length);
@ -55,7 +56,7 @@ public class TestKeyProviderCryptoExtension {
Assert.assertEquals(kv.getMaterial().length, k1.getMaterial().length);
KeyProviderCryptoExtension.EncryptedKeyVersion ek2 =
kpExt.generateEncryptedKey(kv);
kpExt.generateEncryptedKey(kv.getName());
KeyProvider.KeyVersion k2 = kpExt.decryptEncryptedKey(ek2);
boolean eq = true;
for (int i = 0; eq && i < ek2.getEncryptedKey().getMaterial().length; i++) {


@ -161,7 +161,7 @@ public class TestKeyShell {
KeyShell ks = new KeyShell();
ks.setConf(new Configuration());
rc = ks.run(args1);
assertEquals(-1, rc);
assertEquals(1, rc);
assertTrue(outContent.toString().contains("key1 has not been created."));
}
@ -174,7 +174,7 @@ public class TestKeyShell {
KeyShell ks = new KeyShell();
ks.setConf(new Configuration());
rc = ks.run(args1);
assertEquals(-1, rc);
assertEquals(1, rc);
assertTrue(outContent.toString().contains("key1 has not been created."));
}
@ -187,7 +187,7 @@ public class TestKeyShell {
KeyShell ks = new KeyShell();
ks.setConf(new Configuration());
rc = ks.run(args1);
assertEquals(-1, rc);
assertEquals(1, rc);
assertTrue(outContent.toString().contains("There are no valid " +
"KeyProviders configured."));
}
@ -216,7 +216,7 @@ public class TestKeyShell {
config.set(KeyProviderFactory.KEY_PROVIDER_PATH, "user:///");
ks.setConf(config);
rc = ks.run(args1);
assertEquals(-1, rc);
assertEquals(1, rc);
assertTrue(outContent.toString().contains("There are no valid " +
"KeyProviders configured."));
}
@ -262,19 +262,19 @@ public class TestKeyShell {
final String[] args2 = {"create", "keyattr2", "--provider", jceksProvider,
"--attr", "=bar"};
rc = ks.run(args2);
assertEquals(-1, rc);
assertEquals(1, rc);
/* Not in attribute = value form */
outContent.reset();
args2[5] = "foo";
rc = ks.run(args2);
assertEquals(-1, rc);
assertEquals(1, rc);
/* No attribute or value */
outContent.reset();
args2[5] = "=";
rc = ks.run(args2);
assertEquals(-1, rc);
assertEquals(1, rc);
/* Legal: attribute is a, value is b=c */
outContent.reset();
@ -308,7 +308,7 @@ public class TestKeyShell {
"--attr", "foo=bar",
"--attr", "foo=glarch"};
rc = ks.run(args4);
assertEquals(-1, rc);
assertEquals(1, rc);
/* Clean up to be a good citizen */
deleteKey(ks, "keyattr1");


@ -0,0 +1,190 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key;
import java.io.IOException;
import java.util.Queue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.crypto.key.kms.ValueQueue;
import org.apache.hadoop.crypto.key.kms.ValueQueue.QueueRefiller;
import org.apache.hadoop.crypto.key.kms.ValueQueue.SyncGenerationPolicy;
import org.junit.Assert;
import org.junit.Test;
import com.google.common.collect.Sets;
public class TestValueQueue {
private static class FillInfo {
final int num;
final String key;
FillInfo(int num, String key) {
this.num = num;
this.key = key;
}
}
private static class MockFiller implements QueueRefiller<String> {
final LinkedBlockingQueue<FillInfo> fillCalls =
new LinkedBlockingQueue<FillInfo>();
@Override
public void fillQueueForKey(String keyName, Queue<String> keyQueue,
int numValues) throws IOException {
fillCalls.add(new FillInfo(numValues, keyName));
for(int i = 0; i < numValues; i++) {
keyQueue.add("test");
}
}
public FillInfo getTop() throws InterruptedException {
return fillCalls.poll(500, TimeUnit.MILLISECONDS);
}
}
/**
* Verifies that Queue is initially filled to "numInitValues"
*/
@Test
public void testInitFill() throws Exception {
MockFiller filler = new MockFiller();
ValueQueue<String> vq =
new ValueQueue<String>(10, 0.1f, 300, 1,
SyncGenerationPolicy.ALL, filler);
Assert.assertEquals("test", vq.getNext("k1"));
Assert.assertEquals(1, filler.getTop().num);
vq.shutdown();
}
/**
* Verifies that Queue is initialized (Warmed-up) for provided keys
*/
@Test
public void testWarmUp() throws Exception {
MockFiller filler = new MockFiller();
ValueQueue<String> vq =
new ValueQueue<String>(10, 0.5f, 300, 1,
SyncGenerationPolicy.ALL, filler);
vq.initializeQueuesForKeys("k1", "k2", "k3");
FillInfo[] fillInfos =
{filler.getTop(), filler.getTop(), filler.getTop()};
Assert.assertEquals(5, fillInfos[0].num);
Assert.assertEquals(5, fillInfos[1].num);
Assert.assertEquals(5, fillInfos[2].num);
Assert.assertEquals(Sets.newHashSet("k1", "k2", "k3"),
Sets.newHashSet(fillInfos[0].key,
fillInfos[1].key,
fillInfos[2].key));
vq.shutdown();
}
/**
* Verifies that the refill task is executed after "checkInterval" if the
* number of values falls below "lowWatermark"
*/
@Test
public void testRefill() throws Exception {
MockFiller filler = new MockFiller();
ValueQueue<String> vq =
new ValueQueue<String>(10, 0.1f, 300, 1,
SyncGenerationPolicy.ALL, filler);
Assert.assertEquals("test", vq.getNext("k1"));
Assert.assertEquals(1, filler.getTop().num);
// Trigger refill
vq.getNext("k1");
Assert.assertEquals(1, filler.getTop().num);
Assert.assertEquals(10, filler.getTop().num);
vq.shutdown();
}
/**
* Verifies that no refill happens after "checkInterval" if the
* number of values stays above "lowWatermark"
*/
@Test
public void testNoRefill() throws Exception {
MockFiller filler = new MockFiller();
ValueQueue<String> vq =
new ValueQueue<String>(10, 0.5f, 300, 1,
SyncGenerationPolicy.ALL, filler);
Assert.assertEquals("test", vq.getNext("k1"));
Assert.assertEquals(5, filler.getTop().num);
Assert.assertEquals(null, filler.getTop());
vq.shutdown();
}
/**
* Verify getAtMost when SyncGeneration Policy = ALL
*/
@Test
public void testgetAtMostPolicyALL() throws Exception {
MockFiller filler = new MockFiller();
ValueQueue<String> vq =
new ValueQueue<String>(10, 0.1f, 300, 1,
SyncGenerationPolicy.ALL, filler);
Assert.assertEquals("test", vq.getNext("k1"));
Assert.assertEquals(1, filler.getTop().num);
// Drain completely
Assert.assertEquals(10, vq.getAtMost("k1", 10).size());
// Synchronous call
Assert.assertEquals(10, filler.getTop().num);
// Ask for more... return all
Assert.assertEquals(19, vq.getAtMost("k1", 19).size());
// Synchronous call (No Async call since num > lowWatermark)
Assert.assertEquals(19, filler.getTop().num);
vq.shutdown();
}
/**
* Verify getAtMost when SyncGeneration Policy = ATLEAST_ONE
*/
@Test
public void testgetAtMostPolicyATLEAST_ONE() throws Exception {
MockFiller filler = new MockFiller();
ValueQueue<String> vq =
new ValueQueue<String>(10, 0.3f, 300, 1,
SyncGenerationPolicy.ATLEAST_ONE, filler);
Assert.assertEquals("test", vq.getNext("k1"));
Assert.assertEquals(3, filler.getTop().num);
// Drain completely
Assert.assertEquals(2, vq.getAtMost("k1", 10).size());
// Asynch Refill call
Assert.assertEquals(10, filler.getTop().num);
vq.shutdown();
}
/**
* Verify getAtMost when SyncGeneration Policy = LOW_WATERMARK
*/
@Test
public void testgetAtMostPolicyLOW_WATERMARK() throws Exception {
MockFiller filler = new MockFiller();
ValueQueue<String> vq =
new ValueQueue<String>(10, 0.3f, 300, 1,
SyncGenerationPolicy.LOW_WATERMARK, filler);
Assert.assertEquals("test", vq.getNext("k1"));
Assert.assertEquals(3, filler.getTop().num);
// Drain completely
Assert.assertEquals(3, vq.getAtMost("k1", 10).size());
// Synchronous call
Assert.assertEquals(1, filler.getTop().num);
// Asynch Refill call
Assert.assertEquals(10, filler.getTop().num);
vq.shutdown();
}
}


@ -20,6 +20,7 @@ package org.apache.hadoop.fs.viewfs;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.List;
@ -28,9 +29,16 @@ import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import static org.apache.hadoop.fs.FileSystemTestHelper.*;
import org.apache.hadoop.fs.permission.AclEntry;
import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.viewfs.ConfigUtil;
@ -38,6 +46,7 @@ import org.apache.hadoop.fs.viewfs.ViewFileSystem;
import org.apache.hadoop.fs.viewfs.ViewFileSystem.MountPoint;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.junit.After;
import org.junit.Assert;
@ -96,7 +105,6 @@ public class ViewFileSystemBaseTest {
// in the test root
// Set up the defaultMT in the config with our mount point links
//Configuration conf = new Configuration();
conf = ViewFileSystemTestSetup.createConfig();
setupMountPoints();
fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
@ -720,4 +728,49 @@ public class ViewFileSystemBaseTest {
Assert.assertTrue("Other-readable permission not set!",
perms.getOtherAction().implies(FsAction.READ));
}
/**
* Verify the behavior of ACL operations on paths above the root of
* any mount table entry.
*/
@Test(expected=AccessControlException.class)
public void testInternalModifyAclEntries() throws IOException {
fsView.modifyAclEntries(new Path("/internalDir"),
new ArrayList<AclEntry>());
}
@Test(expected=AccessControlException.class)
public void testInternalRemoveAclEntries() throws IOException {
fsView.removeAclEntries(new Path("/internalDir"),
new ArrayList<AclEntry>());
}
@Test(expected=AccessControlException.class)
public void testInternalRemoveDefaultAcl() throws IOException {
fsView.removeDefaultAcl(new Path("/internalDir"));
}
@Test(expected=AccessControlException.class)
public void testInternalRemoveAcl() throws IOException {
fsView.removeAcl(new Path("/internalDir"));
}
@Test(expected=AccessControlException.class)
public void testInternalSetAcl() throws IOException {
fsView.setAcl(new Path("/internalDir"), new ArrayList<AclEntry>());
}
@Test
public void testInternalGetAclStatus() throws IOException {
final UserGroupInformation currentUser =
UserGroupInformation.getCurrentUser();
AclStatus aclStatus = fsView.getAclStatus(new Path("/internalDir"));
assertEquals(aclStatus.getOwner(), currentUser.getUserName());
assertEquals(aclStatus.getGroup(), currentUser.getGroupNames()[0]);
assertEquals(aclStatus.getEntries(),
AclUtil.getMinimalAcl(PERMISSION_555));
assertFalse(aclStatus.isStickyBit());
}
}


@ -22,10 +22,14 @@ import static org.apache.hadoop.fs.FileContextTestHelper.checkFileStatus;
import static org.apache.hadoop.fs.FileContextTestHelper.exists;
import static org.apache.hadoop.fs.FileContextTestHelper.isDir;
import static org.apache.hadoop.fs.FileContextTestHelper.isFile;
import static org.apache.hadoop.fs.viewfs.Constants.PERMISSION_555;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
@ -39,8 +43,12 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.viewfs.ViewFs.MountPoint;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.junit.After;
import org.junit.Assert;
@ -695,4 +703,48 @@ public class ViewFsBaseTest {
public void testInternalSetOwner() throws IOException {
fcView.setOwner(new Path("/internalDir"), "foo", "bar");
}
/**
* Verify the behavior of ACL operations on paths above the root of
* any mount table entry.
*/
@Test(expected=AccessControlException.class)
public void testInternalModifyAclEntries() throws IOException {
fcView.modifyAclEntries(new Path("/internalDir"),
new ArrayList<AclEntry>());
}
@Test(expected=AccessControlException.class)
public void testInternalRemoveAclEntries() throws IOException {
fcView.removeAclEntries(new Path("/internalDir"),
new ArrayList<AclEntry>());
}
@Test(expected=AccessControlException.class)
public void testInternalRemoveDefaultAcl() throws IOException {
fcView.removeDefaultAcl(new Path("/internalDir"));
}
@Test(expected=AccessControlException.class)
public void testInternalRemoveAcl() throws IOException {
fcView.removeAcl(new Path("/internalDir"));
}
@Test(expected=AccessControlException.class)
public void testInternalSetAcl() throws IOException {
fcView.setAcl(new Path("/internalDir"), new ArrayList<AclEntry>());
}
@Test
public void testInternalGetAclStatus() throws IOException {
final UserGroupInformation currentUser =
UserGroupInformation.getCurrentUser();
AclStatus aclStatus = fcView.getAclStatus(new Path("/internalDir"));
assertEquals(aclStatus.getOwner(), currentUser.getUserName());
assertEquals(aclStatus.getGroup(), currentUser.getGroupNames()[0]);
assertEquals(aclStatus.getEntries(),
AclUtil.getMinimalAcl(PERMISSION_555));
assertFalse(aclStatus.isStickyBit());
}
}


@ -327,8 +327,8 @@ public class MiniRPCBenchmark {
String shortUserName =
UserGroupInformation.createRemoteUser(user).getShortUserName();
try {
conf.setStrings(DefaultImpersonationProvider.getProxySuperuserGroupConfKey(shortUserName),
GROUP_NAME_1);
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(shortUserName), GROUP_NAME_1);
configureSuperUserIPAddresses(conf, shortUserName);
// start the server
miniServer = new MiniServer(conf, user, keytabFile);
@ -411,7 +411,7 @@ public class MiniRPCBenchmark {
}
builder.append("127.0.1.1,");
builder.append(InetAddress.getLocalHost().getCanonicalHostName());
conf.setStrings(DefaultImpersonationProvider.getProxySuperuserIpConfKey(superUserShortName),
builder.toString());
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(superUserShortName), builder.toString());
}
}


@ -496,6 +496,8 @@ public class TestRPC {
caught = true;
}
assertTrue(caught);
rb = getMetrics(server.rpcDetailedMetrics.name());
assertCounter("IOExceptionNumOps", 1L, rb);
proxy.testServerGet();


@ -380,6 +380,23 @@ public class TestMetricsSystemImpl {
ms.shutdown();
}
@Test public void testUnregisterSource() {
MetricsSystem ms = new MetricsSystemImpl();
TestSource ts1 = new TestSource("ts1");
TestSource ts2 = new TestSource("ts2");
ms.register("ts1", "", ts1);
ms.register("ts2", "", ts2);
MetricsSource s1 = ms.getSource("ts1");
assertNotNull(s1);
// should work when metrics system is not started
ms.unregisterSource("ts1");
s1 = ms.getSource("ts1");
assertNull(s1);
MetricsSource s2 = ms.getSource("ts2");
assertNotNull(s2);
ms.shutdown();
}
private void checkMetricsRecords(List<MetricsRecord> recs) {
LOG.debug(recs);
MetricsRecord r = recs.get(0);


@ -101,7 +101,8 @@ public class TestDoAsEffectiveUser {
builder.append("127.0.1.1,");
builder.append(InetAddress.getLocalHost().getCanonicalHostName());
LOG.info("Local Ip addresses: "+builder.toString());
conf.setStrings(DefaultImpersonationProvider.getProxySuperuserIpConfKey(superUserShortName),
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(superUserShortName),
builder.toString());
}
@ -181,8 +182,8 @@ public class TestDoAsEffectiveUser {
@Test(timeout=4000)
public void testRealUserSetup() throws IOException {
final Configuration conf = new Configuration();
conf.setStrings(DefaultImpersonationProvider
.getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
@ -214,7 +215,8 @@ public class TestDoAsEffectiveUser {
public void testRealUserAuthorizationSuccess() throws IOException {
final Configuration conf = new Configuration();
configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
conf.setStrings(DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
"group1");
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
@ -248,9 +250,11 @@ public class TestDoAsEffectiveUser {
@Test
public void testRealUserIPAuthorizationFailure() throws IOException {
final Configuration conf = new Configuration();
conf.setStrings(DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_SHORT_NAME),
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_SHORT_NAME),
"20.20.20.20"); //Authorized IP address
conf.setStrings(DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
"group1");
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
@ -293,8 +297,8 @@ public class TestDoAsEffectiveUser {
@Test
public void testRealUserIPNotSpecified() throws IOException {
final Configuration conf = new Configuration();
conf.setStrings(DefaultImpersonationProvider
.getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();
@ -377,7 +381,8 @@ public class TestDoAsEffectiveUser {
public void testRealUserGroupAuthorizationFailure() throws IOException {
final Configuration conf = new Configuration();
configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
conf.setStrings(DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
"group3");
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)


@ -127,6 +127,22 @@ public class TestCredShell {
"CredentialProviders configured."));
}
@Test
public void testPromptForCredentialWithEmptyPasswd() throws Exception {
String[] args1 = {"create", "credential1", "--provider",
"jceks://file" + tmpDir + "/credstore.jceks"};
ArrayList<String> passwords = new ArrayList<String>();
passwords.add(null);
passwords.add("p@ssw0rd");
int rc = 0;
CredentialShell shell = new CredentialShell();
shell.setConf(new Configuration());
shell.setPasswordReader(new MockPasswordReader(passwords));
rc = shell.run(args1);
assertEquals(outContent.toString(), -1, rc);
assertTrue(outContent.toString().contains("Passwords don't match"));
}
@Test
public void testPromptForCredential() throws Exception {
String[] args1 = {"create", "credential1", "--provider",
@ -142,7 +158,7 @@ public class TestCredShell {
assertEquals(0, rc);
assertTrue(outContent.toString().contains("credential1 has been successfully " +
"created."));
String[] args2 = {"delete", "credential1", "--provider",
"jceks://file" + tmpDir + "/credstore.jceks"};
rc = shell.run(args2);
@ -162,7 +178,7 @@ public class TestCredShell {
public char[] readPassword(String prompt) {
if (passwords.size() == 0) return null;
String pass = passwords.remove(0);
return pass.toCharArray();
return pass == null ? null : pass.toCharArray();
}
@Override


@ -111,10 +111,12 @@ public class TestProxyUsers {
groupMappingClassName);
conf.set(
DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_NAME),
StringUtils.join(",", Arrays.asList(NETGROUP_NAMES)));
conf.set(
DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
PROXY_IP);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
@ -135,10 +137,12 @@ public class TestProxyUsers {
public void testProxyUsers() throws Exception {
Configuration conf = new Configuration();
conf.set(
DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_NAME),
StringUtils.join(",", Arrays.asList(GROUP_NAMES)));
conf.set(
DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
PROXY_IP);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
@ -168,10 +172,12 @@ public class TestProxyUsers {
public void testProxyUsersWithUserConf() throws Exception {
Configuration conf = new Configuration();
conf.set(
DefaultImpersonationProvider.getProxySuperuserUserConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserUserConfKey(REAL_USER_NAME),
StringUtils.join(",", Arrays.asList(AUTHORIZED_PROXY_USER_NAME)));
conf.set(
DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
PROXY_IP);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
@ -202,10 +208,12 @@ public class TestProxyUsers {
public void testWildcardGroup() {
Configuration conf = new Configuration();
conf.set(
DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_NAME),
"*");
conf.set(
DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
PROXY_IP);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
@ -236,10 +244,12 @@ public class TestProxyUsers {
public void testWildcardUser() {
Configuration conf = new Configuration();
conf.set(
DefaultImpersonationProvider.getProxySuperuserUserConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserUserConfKey(REAL_USER_NAME),
"*");
conf.set(
DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
PROXY_IP);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
@ -270,10 +280,12 @@ public class TestProxyUsers {
public void testWildcardIP() {
Configuration conf = new Configuration();
conf.set(
DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_NAME),
StringUtils.join(",", Arrays.asList(GROUP_NAMES)));
conf.set(
DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
"*");
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
@ -301,10 +313,12 @@ public class TestProxyUsers {
public void testIPRange() {
Configuration conf = new Configuration();
conf.set(
DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_NAME),
"*");
conf.set(
DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
PROXY_IP_RANGE);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
@ -324,16 +338,19 @@ public class TestProxyUsers {
public void testWithDuplicateProxyGroups() throws Exception {
Configuration conf = new Configuration();
conf.set(
DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_NAME),
StringUtils.join(",", Arrays.asList(GROUP_NAMES,GROUP_NAMES)));
conf.set(
DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
PROXY_IP);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
Collection<String> groupsToBeProxied =
ProxyUsers.getDefaultImpersonationProvider().getProxyGroups().get(
DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME));
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_NAME));
assertEquals (1,groupsToBeProxied.size());
}
@ -342,16 +359,19 @@ public class TestProxyUsers {
public void testWithDuplicateProxyHosts() throws Exception {
Configuration conf = new Configuration();
conf.set(
DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider()
.getProxySuperuserGroupConfKey(REAL_USER_NAME),
StringUtils.join(",", Arrays.asList(GROUP_NAMES)));
conf.set(
DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
StringUtils.join(",", Arrays.asList(PROXY_IP,PROXY_IP)));
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
Collection<String> hosts =
ProxyUsers.getDefaultImpersonationProvider().getProxyHosts().get(
DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME));
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME));
assertEquals (1,hosts.size());
}
@ -391,26 +411,73 @@ public class TestProxyUsers {
public void testWithProxyGroupsAndUsersWithSpaces() throws Exception {
Configuration conf = new Configuration();
conf.set(
DefaultImpersonationProvider.getProxySuperuserUserConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserUserConfKey(REAL_USER_NAME),
StringUtils.join(",", Arrays.asList(PROXY_USER_NAME + " ",AUTHORIZED_PROXY_USER_NAME, "ONEMORE")));
conf.set(
DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_NAME),
StringUtils.join(",", Arrays.asList(GROUP_NAMES)));
conf.set(
DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
PROXY_IP);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
Collection<String> groupsToBeProxied =
ProxyUsers.getDefaultImpersonationProvider().getProxyGroups().get(
DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME));
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_NAME));
assertEquals (GROUP_NAMES.length, groupsToBeProxied.size());
}
@Test(expected = IllegalArgumentException.class)
public void testProxyUsersWithNullPrefix() throws Exception {
ProxyUsers.refreshSuperUserGroupsConfiguration(new Configuration(false),
null);
}
@Test(expected = IllegalArgumentException.class)
public void testProxyUsersWithEmptyPrefix() throws Exception {
ProxyUsers.refreshSuperUserGroupsConfiguration(new Configuration(false),
"");
}
@Test
public void testProxyUsersWithCustomPrefix() throws Exception {
Configuration conf = new Configuration(false);
conf.set("x." + REAL_USER_NAME + ".users",
StringUtils.join(",", Arrays.asList(AUTHORIZED_PROXY_USER_NAME)));
conf.set("x." + REAL_USER_NAME+ ".hosts", PROXY_IP);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf, "x");
// First try proxying a user that's allowed
UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
AUTHORIZED_PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
// From good IP
assertAuthorized(proxyUserUgi, "1.2.3.4");
// From bad IP
assertNotAuthorized(proxyUserUgi, "1.2.3.5");
// Now try proxying a user that's not allowed
realUserUgi = UserGroupInformation.createRemoteUser(REAL_USER_NAME);
proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
// From good IP
assertNotAuthorized(proxyUserUgi, "1.2.3.4");
// From bad IP
assertNotAuthorized(proxyUserUgi, "1.2.3.5");
}
private void assertNotAuthorized(UserGroupInformation proxyUgi, String host) {
try {
@ -430,6 +497,11 @@ public class TestProxyUsers {
}
static class TestDummyImpersonationProvider implements ImpersonationProvider {
@Override
public void init(String configurationPrefix) {
}
/**
* Authorize a user (superuser) to impersonate another user (user1) if the
* superuser belongs to the group "sudo_user1" .
@ -460,11 +532,13 @@ public class TestProxyUsers {
public static void loadTest(String ipString, int testRange) {
Configuration conf = new Configuration();
conf.set(
DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_NAME),
StringUtils.join(",", Arrays.asList(GROUP_NAMES)));
conf.set(
DefaultImpersonationProvider.getProxySuperuserIpConfKey(REAL_USER_NAME),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(REAL_USER_NAME),
ipString
);
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);


@ -79,4 +79,19 @@
</description>
</property>
<property>
<name>hadoop.kms.acl.GENERATE_EEK</name>
<value>*</value>
<description>
ACL for generateEncryptedKey CryptoExtension operations
</description>
</property>
<property>
<name>hadoop.kms.acl.DECRYPT_EEK</name>
<value>*</value>
<description>
ACL for decryptEncryptedKey CryptoExtension operations
</description>
</property>
</configuration>


@ -0,0 +1,149 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import java.io.IOException;
import java.security.GeneralSecurityException;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ExecutionException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.kms.ValueQueue;
import org.apache.hadoop.crypto.key.kms.ValueQueue.SyncGenerationPolicy;
/**
* A {@link KeyProviderCryptoExtension} that pre-generates and caches encrypted
* keys.
*/
@InterfaceAudience.Private
public class EagerKeyGeneratorKeyProviderCryptoExtension
extends KeyProviderCryptoExtension {
private static final String KEY_CACHE_PREFIX =
"hadoop.security.kms.encrypted.key.cache.";
public static final String KMS_KEY_CACHE_SIZE =
KEY_CACHE_PREFIX + "size";
public static final int KMS_KEY_CACHE_SIZE_DEFAULT = 100;
public static final String KMS_KEY_CACHE_LOW_WATERMARK =
KEY_CACHE_PREFIX + "low.watermark";
public static final float KMS_KEY_CACHE_LOW_WATERMARK_DEFAULT = 0.30f;
public static final String KMS_KEY_CACHE_EXPIRY_MS =
KEY_CACHE_PREFIX + "expiry";
public static final int KMS_KEY_CACHE_EXPIRY_DEFAULT = 43200000;
public static final String KMS_KEY_CACHE_NUM_REFILL_THREADS =
KEY_CACHE_PREFIX + "num.fill.threads";
public static final int KMS_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT = 2;
private static class CryptoExtension
implements KeyProviderCryptoExtension.CryptoExtension {
private class EncryptedQueueRefiller implements
ValueQueue.QueueRefiller<EncryptedKeyVersion> {
@Override
public void fillQueueForKey(String keyName,
Queue<EncryptedKeyVersion> keyQueue, int numKeys) throws IOException {
List<EncryptedKeyVersion> retEdeks =
new LinkedList<EncryptedKeyVersion>();
for (int i = 0; i < numKeys; i++) {
try {
retEdeks.add(keyProviderCryptoExtension.generateEncryptedKey(
keyName));
} catch (GeneralSecurityException e) {
throw new IOException(e);
}
}
keyQueue.addAll(retEdeks);
}
}
private KeyProviderCryptoExtension keyProviderCryptoExtension;
private final ValueQueue<EncryptedKeyVersion> encKeyVersionQueue;
public CryptoExtension(Configuration conf,
KeyProviderCryptoExtension keyProviderCryptoExtension) {
this.keyProviderCryptoExtension = keyProviderCryptoExtension;
encKeyVersionQueue =
new ValueQueue<KeyProviderCryptoExtension.EncryptedKeyVersion>(
conf.getInt(KMS_KEY_CACHE_SIZE,
KMS_KEY_CACHE_SIZE_DEFAULT),
conf.getFloat(KMS_KEY_CACHE_LOW_WATERMARK,
KMS_KEY_CACHE_LOW_WATERMARK_DEFAULT),
conf.getInt(KMS_KEY_CACHE_EXPIRY_MS,
KMS_KEY_CACHE_EXPIRY_DEFAULT),
conf.getInt(KMS_KEY_CACHE_NUM_REFILL_THREADS,
KMS_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
SyncGenerationPolicy.LOW_WATERMARK, new EncryptedQueueRefiller()
);
}
@Override
public void warmUpEncryptedKeys(String... keyNames) throws
IOException {
try {
encKeyVersionQueue.initializeQueuesForKeys(keyNames);
} catch (ExecutionException e) {
throw new IOException(e);
}
}
@Override
public EncryptedKeyVersion generateEncryptedKey(String encryptionKeyName)
throws IOException, GeneralSecurityException {
try {
return encKeyVersionQueue.getNext(encryptionKeyName);
} catch (ExecutionException e) {
throw new IOException(e);
}
}
@Override
public KeyVersion
decryptEncryptedKey(EncryptedKeyVersion encryptedKeyVersion)
throws IOException, GeneralSecurityException {
return keyProviderCryptoExtension.decryptEncryptedKey(
encryptedKeyVersion);
}
}
/**
* This class is a proxy for a <code>KeyProviderCryptoExtension</code> that
* decorates the underlying <code>CryptoExtension</code> with one that eagerly
* caches pre-generated Encrypted Keys using a <code>ValueQueue</code>.
*
* @param conf Configuration object to load parameters from
* @param keyProviderCryptoExtension <code>KeyProviderCryptoExtension</code>
* to delegate calls to.
*/
public EagerKeyGeneratorKeyProviderCryptoExtension(Configuration conf,
KeyProviderCryptoExtension keyProviderCryptoExtension) {
super(keyProviderCryptoExtension,
new CryptoExtension(conf, keyProviderCryptoExtension));
}
}
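A minimal usage sketch of the class above, for orientation; it is not part of the patch. The key name, cache size, and standalone class are assumptions for illustration, and the backing provider is whatever KeyProviderFactory resolves from the configuration.
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.crypto.key.kms.server.EagerKeyGeneratorKeyProviderCryptoExtension;

public class EagerEekSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Tunes the ValueQueue constructed above; 500 is an arbitrary example.
    conf.setInt(
        EagerKeyGeneratorKeyProviderCryptoExtension.KMS_KEY_CACHE_SIZE, 500);
    // Assumes a key provider is configured for KeyProviderFactory to find.
    List<KeyProvider> providers = KeyProviderFactory.getProviders(conf);
    KeyProviderCryptoExtension base = KeyProviderCryptoExtension
        .createKeyProviderCryptoExtension(providers.get(0));
    KeyProviderCryptoExtension eager =
        new EagerKeyGeneratorKeyProviderCryptoExtension(conf, base);
    eager.warmUpEncryptedKeys("k1");             // pre-fill the queue for "k1"
    EncryptedKeyVersion eek = eager.generateEncryptedKey("k1"); // cache hit
    System.out.println(eek.getEncryptedKey().getVersionName()); // "EEK"
  }
}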
View File
@ -20,6 +20,8 @@ package org.apache.hadoop.crypto.key.kms.server;
import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
@ -29,6 +31,7 @@ import org.apache.hadoop.util.StringUtils;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
@ -39,10 +42,14 @@ import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.SecurityContext;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.Principal;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
@ -61,8 +68,10 @@ public class KMS {
private static final String GET_CURRENT_KEY = "GET_CURRENT_KEY";
private static final String GET_KEY_VERSIONS = "GET_KEY_VERSIONS";
private static final String GET_METADATA = "GET_METADATA";
private static final String GENERATE_EEK = "GENERATE_EEK";
private static final String DECRYPT_EEK = "DECRYPT_EEK";
private KeyProvider provider;
private KeyProviderCryptoExtension provider;
public KMS() throws Exception {
provider = KMSWebApp.getKeyProvider();
@ -289,6 +298,92 @@ public class KMS {
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
}
@SuppressWarnings({ "rawtypes", "unchecked" })
@GET
@Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" +
KMSRESTConstants.EEK_SUB_RESOURCE)
@Produces(MediaType.APPLICATION_JSON)
public Response generateEncryptedKeys(
@Context SecurityContext securityContext,
@PathParam("name") String name,
@QueryParam(KMSRESTConstants.EEK_OP) String edekOp,
@DefaultValue("1")
@QueryParam(KMSRESTConstants.EEK_NUM_KEYS) int numKeys)
throws Exception {
Principal user = getPrincipal(securityContext);
KMSClientProvider.checkNotEmpty(name, "name");
KMSClientProvider.checkNotNull(edekOp, "eekOp");
Object retJSON;
if (edekOp.equals(KMSRESTConstants.EEK_GENERATE)) {
assertAccess(KMSACLs.Type.GENERATE_EEK, user, GENERATE_EEK, name);
List<EncryptedKeyVersion> retEdeks =
new LinkedList<EncryptedKeyVersion>();
try {
for (int i = 0; i < numKeys; i ++) {
retEdeks.add(provider.generateEncryptedKey(name));
}
} catch (Exception e) {
throw new IOException(e);
}
KMSAudit.ok(user, GENERATE_EEK, name, "");
retJSON = new ArrayList();
for (EncryptedKeyVersion edek : retEdeks) {
((ArrayList)retJSON).add(KMSServerJSONUtils.toJSON(edek));
}
} else {
throw new IllegalArgumentException("Wrong " + KMSRESTConstants.EEK_OP +
" value, it must be " + KMSRESTConstants.EEK_GENERATE + " or " +
KMSRESTConstants.EEK_DECRYPT);
}
KMSWebApp.getGenerateEEKCallsMeter().mark();
return Response.ok().type(MediaType.APPLICATION_JSON).entity(retJSON)
.build();
}
@SuppressWarnings("rawtypes")
@POST
@Path(KMSRESTConstants.KEY_VERSION_RESOURCE + "/{versionName:.*}/" +
KMSRESTConstants.EEK_SUB_RESOURCE)
@Produces(MediaType.APPLICATION_JSON)
public Response decryptEncryptedKey(@Context SecurityContext securityContext,
@PathParam("versionName") String versionName,
@QueryParam(KMSRESTConstants.EEK_OP) String eekOp,
Map jsonPayload)
throws Exception {
Principal user = getPrincipal(securityContext);
KMSClientProvider.checkNotEmpty(versionName, "versionName");
KMSClientProvider.checkNotNull(eekOp, "eekOp");
String keyName = (String) jsonPayload.get(KMSRESTConstants.NAME_FIELD);
String ivStr = (String) jsonPayload.get(KMSRESTConstants.IV_FIELD);
String encMaterialStr =
(String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD);
Object retJSON;
if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) {
assertAccess(KMSACLs.Type.DECRYPT_EEK, user, DECRYPT_EEK, versionName);
KMSClientProvider.checkNotNull(ivStr, KMSRESTConstants.IV_FIELD);
byte[] iv = Base64.decodeBase64(ivStr);
KMSClientProvider.checkNotNull(encMaterialStr,
KMSRESTConstants.MATERIAL_FIELD);
byte[] encMaterial = Base64.decodeBase64(encMaterialStr);
KeyProvider.KeyVersion retKeyVersion =
provider.decryptEncryptedKey(
new KMSClientProvider.KMSEncryptedKeyVersion(keyName, versionName,
iv, KeyProviderCryptoExtension.EEK, encMaterial));
retJSON = KMSServerJSONUtils.toJSON(retKeyVersion);
KMSAudit.ok(user, DECRYPT_EEK, versionName, "");
} else {
throw new IllegalArgumentException("Wrong " + KMSRESTConstants.EEK_OP +
" value, it must be " + KMSRESTConstants.EEK_GENERATE + " or " +
KMSRESTConstants.EEK_DECRYPT);
}
KMSWebApp.getDecryptEEKCallsMeter().mark();
return Response.ok().type(MediaType.APPLICATION_JSON).entity(retJSON)
.build();
}
@GET
@Path(KMSRESTConstants.KEY_RESOURCE + "/{name:.*}/" +
KMSRESTConstants.VERSIONS_SUB_RESOURCE)
View File
@ -17,6 +17,7 @@
*/
package org.apache.hadoop.crypto.key.kms.server;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
@ -28,20 +29,20 @@ import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
/**
* Provides access to the <code>AccessControlList</code>s used by KMS,
* hot-reloading them if the <code>kms-acls.xml</code> file where the ACLs
* are defined has been updated.
*/
@InterfaceAudience.Private
public class KMSACLs implements Runnable {
private static final Logger LOG = LoggerFactory.getLogger(KMSACLs.class);
public enum Type {
CREATE, DELETE, ROLLOVER, GET, GET_KEYS, GET_METADATA, SET_KEY_MATERIAL;
CREATE, DELETE, ROLLOVER, GET, GET_KEYS, GET_METADATA,
SET_KEY_MATERIAL, GENERATE_EEK, DECRYPT_EEK;
public String getConfigKey() {
return KMSConfiguration.CONFIG_PREFIX + "acl." + this.toString();
@ -52,13 +53,11 @@ public class KMSACLs implements Runnable {
public static final int RELOADER_SLEEP_MILLIS = 1000;
Map<Type, AccessControlList> acls;
private ReadWriteLock lock;
private volatile Map<Type, AccessControlList> acls;
private ScheduledExecutorService executorService;
private long lastReload;
KMSACLs(Configuration conf) {
lock = new ReentrantReadWriteLock();
if (conf == null) {
conf = loadACLs();
}
@ -70,17 +69,13 @@ public class KMSACLs implements Runnable {
}
private void setACLs(Configuration conf) {
lock.writeLock().lock();
try {
acls = new HashMap<Type, AccessControlList>();
for (Type aclType : Type.values()) {
String aclStr = conf.get(aclType.getConfigKey(), ACL_DEFAULT);
acls.put(aclType, new AccessControlList(aclStr));
LOG.info("'{}' ACL '{}'", aclType, aclStr);
}
} finally {
lock.writeLock().unlock();
Map<Type, AccessControlList> tempAcls = new HashMap<Type, AccessControlList>();
for (Type aclType : Type.values()) {
String aclStr = conf.get(aclType.getConfigKey(), ACL_DEFAULT);
tempAcls.put(aclType, new AccessControlList(aclStr));
LOG.info("'{}' ACL '{}'", aclType, aclStr);
}
acls = tempAcls;
}
@Override
@ -120,14 +115,7 @@ public class KMSACLs implements Runnable {
public boolean hasAccess(Type type, String user) {
UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
AccessControlList acl = null;
lock.readLock().lock();
try {
acl = acls.get(type);
} finally {
lock.readLock().unlock();
}
return acl.isUserAllowed(ugi);
return acls.get(type).isUserAllowed(ugi);
}
}
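The refactor above trades the ReadWriteLock for an immutable-snapshot idiom: hasAccess() performs a single volatile read, and setACLs() builds a complete replacement map before publishing it with one volatile store, so readers never block. Below is a standalone sketch of the idiom; the class and method names are illustrative, not from the patch.
import java.util.HashMap;
import java.util.Map;

// Illustrative reduction of the KMSACLs change: the published map is never
// mutated after assignment, only replaced wholesale, so no locking is needed.
class VolatileSnapshot<K, V> {
  private volatile Map<K, V> snapshot = new HashMap<K, V>();

  V get(K key) {
    return snapshot.get(key);            // one volatile read, no lock
  }

  void reload(Map<K, V> fresh) {
    snapshot = new HashMap<K, V>(fresh); // build fully, then publish atomically
  }
}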
View File
@ -1,177 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import org.apache.hadoop.crypto.key.KeyProvider;
import java.io.IOException;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
/**
* A <code>KeyProvider</code> proxy implementation providing a short lived
* cache for <code>KeyVersions</code> to avoid burst of requests to hit the
* underlying <code>KeyProvider</code>.
*/
public class KMSCacheKeyProvider extends KeyProvider {
private final KeyProvider provider;
private LoadingCache<String, KeyVersion> keyVersionCache;
private LoadingCache<String, KeyVersion> currentKeyCache;
private static class KeyNotFoundException extends Exception {
private static final long serialVersionUID = 1L;
}
public KMSCacheKeyProvider(KeyProvider prov, long timeoutMillis) {
this.provider = prov;
keyVersionCache = CacheBuilder.newBuilder().expireAfterAccess(timeoutMillis,
TimeUnit.MILLISECONDS).build(new CacheLoader<String, KeyVersion>() {
@Override
public KeyVersion load(String key) throws Exception {
KeyVersion kv = provider.getKeyVersion(key);
if (kv == null) {
throw new KeyNotFoundException();
}
return kv;
}
});
// for current key we don't want to go stale for more than 1 sec
currentKeyCache = CacheBuilder.newBuilder().expireAfterWrite(1000,
TimeUnit.MILLISECONDS).build(new CacheLoader<String, KeyVersion>() {
@Override
public KeyVersion load(String key) throws Exception {
KeyVersion kv = provider.getCurrentKey(key);
if (kv == null) {
throw new KeyNotFoundException();
}
return kv;
}
});
}
@Override
public KeyVersion getCurrentKey(String name) throws IOException {
try {
return currentKeyCache.get(name);
} catch (ExecutionException ex) {
Throwable cause = ex.getCause();
if (cause instanceof KeyNotFoundException) {
return null;
} else if (cause instanceof IOException) {
throw (IOException) cause;
} else {
throw new IOException(cause);
}
}
}
@Override
public KeyVersion getKeyVersion(String versionName)
throws IOException {
try {
return keyVersionCache.get(versionName);
} catch (ExecutionException ex) {
Throwable cause = ex.getCause();
if (cause instanceof KeyNotFoundException) {
return null;
} else if (cause instanceof IOException) {
throw (IOException) cause;
} else {
throw new IOException(cause);
}
}
}
@Override
public List<String> getKeys() throws IOException {
return provider.getKeys();
}
@Override
public List<KeyVersion> getKeyVersions(String name)
throws IOException {
return provider.getKeyVersions(name);
}
@Override
public Metadata getMetadata(String name) throws IOException {
return provider.getMetadata(name);
}
@Override
public KeyVersion createKey(String name, byte[] material,
Options options) throws IOException {
return provider.createKey(name, material, options);
}
@Override
public KeyVersion createKey(String name,
Options options)
throws NoSuchAlgorithmException, IOException {
return provider.createKey(name, options);
}
@Override
public void deleteKey(String name) throws IOException {
provider.deleteKey(name);
currentKeyCache.invalidate(name);
// invalidating all key versions as we don't know which ones belonged to the
// deleted key
keyVersionCache.invalidateAll();
}
@Override
public KeyVersion rollNewVersion(String name, byte[] material)
throws IOException {
KeyVersion key = provider.rollNewVersion(name, material);
currentKeyCache.invalidate(name);
return key;
}
@Override
public KeyVersion rollNewVersion(String name)
throws NoSuchAlgorithmException, IOException {
KeyVersion key = provider.rollNewVersion(name);
currentKeyCache.invalidate(name);
return key;
}
@Override
public void flush() throws IOException {
provider.flush();
}
@Override
public Metadata[] getKeysMetadata(String ... keyNames)
throws IOException {
return provider.getKeysMetadata(keyNames);
}
@Override
public boolean isTransient() {
return provider.isTransient();
}
}
View File
@ -34,9 +34,21 @@ public class KMSConfiguration {
public static final String CONFIG_PREFIX = "hadoop.kms.";
// Property to Enable/Disable Caching
public static final String KEY_CACHE_ENABLE = CONFIG_PREFIX +
"cache.enable";
// Timeout for the Key and Metadata Cache
public static final String KEY_CACHE_TIMEOUT_KEY = CONFIG_PREFIX +
"cache.timeout.ms";
public static final long KEY_CACHE_TIMEOUT_DEFAULT = 10 * 1000; // 10 secs
// Timeout for the Current Key cache
public static final String CURR_KEY_CACHE_TIMEOUT_KEY = CONFIG_PREFIX +
"current.key.cache.timeout.ms";
public static final boolean KEY_CACHE_ENABLE_DEFAULT = true;
// 10 mins
public static final long KEY_CACHE_TIMEOUT_DEFAULT = 10 * 60 * 1000;
// 30 secs
public static final long CURR_KEY_CACHE_TIMEOUT_DEFAULT = 30 * 1000;
static Configuration getConfiguration(boolean loadHadoopDefaults,
String ... resources) {
View File
@ -17,8 +17,10 @@
*/
package org.apache.hadoop.crypto.key.kms.server;
import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
import java.util.ArrayList;
@ -39,7 +41,9 @@ public class KMSServerJSONUtils {
keyVersion.getName());
json.put(KMSRESTConstants.VERSION_NAME_FIELD,
keyVersion.getVersionName());
json.put(KMSRESTConstants.MATERIAL_FIELD, keyVersion.getMaterial());
json.put(KMSRESTConstants.MATERIAL_FIELD,
Base64.encodeBase64URLSafeString(
keyVersion.getMaterial()));
}
return json;
}
@ -55,6 +59,21 @@ public class KMSServerJSONUtils {
return json;
}
@SuppressWarnings("unchecked")
public static Map toJSON(EncryptedKeyVersion encryptedKeyVersion) {
Map json = new LinkedHashMap();
if (encryptedKeyVersion != null) {
json.put(KMSRESTConstants.VERSION_NAME_FIELD,
encryptedKeyVersion.getKeyVersionName());
json.put(KMSRESTConstants.IV_FIELD,
Base64.encodeBase64URLSafeString(
encryptedKeyVersion.getIv()));
json.put(KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD,
toJSON(encryptedKeyVersion.getEncryptedKey()));
}
return json;
}
@SuppressWarnings("unchecked")
public static Map toJSON(String keyName, KeyProvider.Metadata meta) {
Map json = new LinkedHashMap();
View File
@ -20,9 +20,12 @@ package org.apache.hadoop.crypto.key.kms.server;
import com.codahale.metrics.JmxReporter;
import com.codahale.metrics.Meter;
import com.codahale.metrics.MetricRegistry;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.CachingKeyProvider;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.security.authorize.AccessControlList;
@ -34,6 +37,7 @@ import org.slf4j.bridge.SLF4JBridgeHandler;
import javax.servlet.ServletContextEvent;
import javax.servlet.ServletContextListener;
import java.io.File;
import java.net.URL;
import java.util.List;
@ -54,6 +58,10 @@ public class KMSWebApp implements ServletContextListener {
"unauthorized.calls.meter";
private static final String UNAUTHENTICATED_CALLS_METER = METRICS_PREFIX +
"unauthenticated.calls.meter";
private static final String GENERATE_EEK_METER = METRICS_PREFIX +
"generate_eek.calls.meter";
private static final String DECRYPT_EEK_METER = METRICS_PREFIX +
"decrypt_eek.calls.meter";
private static Logger LOG;
private static MetricRegistry metricRegistry;
@ -65,8 +73,10 @@ public class KMSWebApp implements ServletContextListener {
private static Meter keyCallsMeter;
private static Meter unauthorizedCallsMeter;
private static Meter unauthenticatedCallsMeter;
private static Meter decryptEEKCallsMeter;
private static Meter generateEEKCallsMeter;
private static Meter invalidCallsMeter;
private static KeyProvider keyProvider;
private static KeyProviderCryptoExtension keyProviderCryptoExtension;
static {
SLF4JBridgeHandler.removeHandlersForRootLogger();
@ -121,6 +131,10 @@ public class KMSWebApp implements ServletContextListener {
metricRegistry = new MetricRegistry();
jmxReporter = JmxReporter.forRegistry(metricRegistry).build();
jmxReporter.start();
generateEEKCallsMeter = metricRegistry.register(GENERATE_EEK_METER,
new Meter());
decryptEEKCallsMeter = metricRegistry.register(DECRYPT_EEK_METER,
new Meter());
adminCallsMeter = metricRegistry.register(ADMIN_CALLS_METER, new Meter());
keyCallsMeter = metricRegistry.register(KEY_CALLS_METER, new Meter());
invalidCallsMeter = metricRegistry.register(INVALID_CALLS_METER,
@ -149,11 +163,23 @@ public class KMSWebApp implements ServletContextListener {
"the first provider",
kmsConf.get(KeyProviderFactory.KEY_PROVIDER_PATH));
}
keyProvider = providers.get(0);
long timeOutMillis =
kmsConf.getLong(KMSConfiguration.KEY_CACHE_TIMEOUT_KEY,
KMSConfiguration.KEY_CACHE_TIMEOUT_DEFAULT);
keyProvider = new KMSCacheKeyProvider(keyProvider, timeOutMillis);
KeyProvider keyProvider = providers.get(0);
if (kmsConf.getBoolean(KMSConfiguration.KEY_CACHE_ENABLE,
KMSConfiguration.KEY_CACHE_ENABLE_DEFAULT)) {
long keyTimeOutMillis =
kmsConf.getLong(KMSConfiguration.KEY_CACHE_TIMEOUT_KEY,
KMSConfiguration.KEY_CACHE_TIMEOUT_DEFAULT);
long currKeyTimeOutMillis =
kmsConf.getLong(KMSConfiguration.CURR_KEY_CACHE_TIMEOUT_KEY,
KMSConfiguration.CURR_KEY_CACHE_TIMEOUT_DEFAULT);
keyProvider = new CachingKeyProvider(keyProvider, keyTimeOutMillis,
currKeyTimeOutMillis);
}
keyProviderCryptoExtension = KeyProviderCryptoExtension.
createKeyProviderCryptoExtension(keyProvider);
keyProviderCryptoExtension =
new EagerKeyGeneratorKeyProviderCryptoExtension(kmsConf,
keyProviderCryptoExtension);
LOG.info("KMS Started");
} catch (Throwable ex) {
@ -200,6 +226,14 @@ public class KMSWebApp implements ServletContextListener {
return invalidCallsMeter;
}
public static Meter getGenerateEEKCallsMeter() {
return generateEEKCallsMeter;
}
public static Meter getDecryptEEKCallsMeter() {
return decryptEEKCallsMeter;
}
public static Meter getUnauthorizedCallsMeter() {
return unauthorizedCallsMeter;
}
@ -208,7 +242,7 @@ public class KMSWebApp implements ServletContextListener {
return unauthenticatedCallsMeter;
}
public static KeyProvider getKeyProvider() {
return keyProvider;
public static KeyProviderCryptoExtension getKeyProvider() {
return keyProviderCryptoExtension;
}
}
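Flattened for readability, the provider stack assembled above is equivalent to the sketch below; the timeout literals are the new defaults, and this is a paraphrase, not patch code.
static KeyProviderCryptoExtension buildProviderStack(Configuration kmsConf)
    throws Exception {
  KeyProvider backing = KeyProviderFactory.getProviders(kmsConf).get(0);
  // Key/metadata caching (10 min) and current-key caching (30 s), ...
  KeyProvider cached = new CachingKeyProvider(backing, 600000L, 30000L);
  // ... wrapped by the crypto extension, wrapped by the eager EEK cache.
  return new EagerKeyGeneratorKeyProviderCryptoExtension(kmsConf,
      KeyProviderCryptoExtension.createKeyProviderCryptoExtension(cached));
}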
View File
@ -72,22 +72,35 @@ Hadoop Key Management Server (KMS) - Documentation Sets ${project.version}
KMS caches keys for a short period of time to avoid excessive hits to the
underlying key provider.
The cache is used with the following 2 methods only, <<<getCurrentKey()>>>
and <<<getKeyVersion()>>>.
The cache is enabled by default (it can be disabled by setting the
<<<hadoop.kms.cache.enable>>> boolean property to false).
The cache is used with the following 3 methods only: <<<getCurrentKey()>>>,
<<<getKeyVersion()>>> and <<<getMetadata()>>>.
For the <<<getCurrentKey()>>> method, cached entries are kept for a maximum
of 1000 milliseconds regardless of the number of times the key is accessed
of 30000 milliseconds regardless of the number of times the key is accessed
(to avoid stale keys being considered current).
For the <<<getKeyVersion()>>> method, cached entries are kept with a default
inactivity timeout of 10000 milliseconds. This timeout is configurable via
the following property in the <<<etc/hadoop/kms-site.xml>>> configuration
file:
inactivity timeout of 600000 milliseconds (10 mins). This timeout is
configurable via the following property in the <<<etc/hadoop/kms-site.xml>>>
configuration file:
+---+
<property>
<name>hadoop.kms.cache.enable</name>
<value>true</value>
</property>
<property>
<name>hadoop.kms.cache.timeout.ms</name>
<value>10000</value>
<value>600000</value>
</property>
<property>
<name>hadoop.kms.current.key.cache.timeout.ms</name>
<value>30000</value>
</property>
+---+
@ -266,6 +279,25 @@ $ keytool -genkey -alias tomcat -keyalg RSA
to provide the key material when creating or rolling a key.
</description>
</property>
<property>
<name>hadoop.kms.acl.GENERATE_EEK</name>
<value>*</value>
<description>
ACL for generateEncryptedKey
CryptoExtension operations
</description>
</property>
<property>
<name>hadoop.kms.acl.DECRYPT_EEK</name>
<value>*</value>
<description>
ACL for decryptEncryptedKey
CryptoExtension operations
</description>
</property>
</configuration>
+---+
** KMS HTTP REST API
@ -383,6 +415,70 @@ Content-Type: application/json
}
+---+
*** Generate Encrypted Key for Current KeyVersion
<REQUEST:>
+---+
GET http://HOST:PORT/kms/v1/key/<key-name>/_eek?eek_op=generate&num_keys=<number-of-keys-to-generate>
+---+
<RESPONSE:>
+---+
200 OK
Content-Type: application/json
[
{
"versionName" : "encryptionVersionName",
"iv" : "<iv>", //base64
"encryptedKeyVersion" : {
"versionName" : "EEK",
"material" : "<material>", //base64
}
},
{
"versionName" : "encryptionVersionName",
"iv" : "<iv>", //base64
"encryptedKeyVersion" : {
"versionName" : "EEK",
"material" : "<material>", //base64
}
},
...
]
+---+
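As an illustration only (the KMS URI format and key name here are assumptions, not part of this patch), the same call can be driven from Java through <<<KMSClientProvider>>>, mirroring the tests later in this change:
+---+
Configuration conf = new Configuration();
KeyProvider kp = new KMSClientProvider(
    URI.create("kms://http@HOST:PORT/kms"), conf);
KeyProviderCryptoExtension kpExt =
    KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
EncryptedKeyVersion eek = kpExt.generateEncryptedKey("<key-name>");
+---+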
*** Decrypt Encrypted Key
<REQUEST:>
+---+
POST http://HOST:PORT/kms/v1/keyversion/<version-name>/_eek?eek_op=decrypt
Content-Type: application/json
{
"name" : "<key-name>",
"iv" : "<iv>", //base64
"material" : "<material>", //base64
}
+---+
<RESPONSE:>
+---+
200 OK
Content-Type: application/json
{
"name" : "EK",
"material" : "<material>", //base64
}
+---+
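Continuing the sketch above (still illustrative, with <<<eek>>> being the value returned by <<<generateEncryptedKey()>>>), the client-side counterpart of this POST is:
+---+
KeyProvider.KeyVersion ek = kpExt.decryptEncryptedKey(eek);
// ek.getVersionName() is "EK"; ek.getMaterial() holds the decrypted key
+---+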
*** Get Key Version
<REQUEST:>
View File
@ -19,6 +19,9 @@ package org.apache.hadoop.crypto.key.kms.server;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.security.authorize.AuthorizationException;
@ -36,6 +39,7 @@ import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.LoginContext;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
@ -267,7 +271,7 @@ public class TestKMS {
}
}
private void doAs(String user, final PrivilegedExceptionAction<Void> action)
private <T> T doAs(String user, final PrivilegedExceptionAction<T> action)
throws Exception {
Set<Principal> principals = new HashSet<Principal>();
principals.add(new KerberosPrincipal(user));
@ -280,7 +284,7 @@ public class TestKMS {
try {
loginContext.login();
subject = loginContext.getSubject();
Subject.doAs(subject, action);
return Subject.doAs(subject, action);
} finally {
loginContext.logout();
}
@ -474,6 +478,32 @@ public class TestKMS {
Assert.assertNotNull(kms1[0].getCreated());
Assert.assertTrue(started.before(kms1[0].getCreated()));
// test generate and decryption of EEK
KeyProvider.KeyVersion kv = kp.getCurrentKey("k1");
KeyProviderCryptoExtension kpExt =
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
EncryptedKeyVersion ek1 = kpExt.generateEncryptedKey(kv.getName());
Assert.assertEquals(KeyProviderCryptoExtension.EEK,
ek1.getEncryptedKey().getVersionName());
Assert.assertNotNull(ek1.getEncryptedKey().getMaterial());
Assert.assertEquals(kv.getMaterial().length,
ek1.getEncryptedKey().getMaterial().length);
KeyProvider.KeyVersion k1 = kpExt.decryptEncryptedKey(ek1);
Assert.assertEquals(KeyProviderCryptoExtension.EK, k1.getVersionName());
KeyProvider.KeyVersion k1a = kpExt.decryptEncryptedKey(ek1);
Assert.assertArrayEquals(k1.getMaterial(), k1a.getMaterial());
Assert.assertEquals(kv.getMaterial().length, k1.getMaterial().length);
EncryptedKeyVersion ek2 = kpExt.generateEncryptedKey(kv.getName());
KeyProvider.KeyVersion k2 = kpExt.decryptEncryptedKey(ek2);
boolean isEq = true;
for (int i = 0; isEq && i < ek2.getEncryptedKey().getMaterial().length;
i++) {
isEq = k2.getMaterial()[i] == k1.getMaterial()[i];
}
Assert.assertFalse(isEq);
// deleteKey()
kp.deleteKey("k1");
@ -565,7 +595,7 @@ public class TestKMS {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 64);
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
URI uri = createKMSUri(getKMSUrl());
final KeyProvider kp = new KMSClientProvider(uri, conf);
@ -582,7 +612,7 @@ public class TestKMS {
Assert.fail(ex.toString());
}
try {
kp.createKey("k", new byte[8], new KeyProvider.Options(conf));
kp.createKey("k", new byte[16], new KeyProvider.Options(conf));
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
@ -598,7 +628,7 @@ public class TestKMS {
Assert.fail(ex.toString());
}
try {
kp.rollNewVersion("k", new byte[8]);
kp.rollNewVersion("k", new byte[16]);
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
@ -690,7 +720,7 @@ public class TestKMS {
@Override
public Void run() throws Exception {
try {
KeyProvider.KeyVersion kv = kp.createKey("k1", new byte[8],
KeyProvider.KeyVersion kv = kp.createKey("k1", new byte[16],
new KeyProvider.Options(conf));
Assert.assertNull(kv.getMaterial());
} catch (Exception ex) {
@ -717,7 +747,8 @@ public class TestKMS {
@Override
public Void run() throws Exception {
try {
KeyProvider.KeyVersion kv = kp.rollNewVersion("k1", new byte[8]);
KeyProvider.KeyVersion kv =
kp.rollNewVersion("k1", new byte[16]);
Assert.assertNull(kv.getMaterial());
} catch (Exception ex) {
Assert.fail(ex.toString());
@ -726,12 +757,46 @@ public class TestKMS {
}
});
doAs("GET", new PrivilegedExceptionAction<Void>() {
final KeyVersion currKv =
doAs("GET", new PrivilegedExceptionAction<KeyVersion>() {
@Override
public KeyVersion run() throws Exception {
try {
kp.getKeyVersion("k1@0");
KeyVersion kv = kp.getCurrentKey("k1");
return kv;
} catch (Exception ex) {
Assert.fail(ex.toString());
}
return null;
}
});
final EncryptedKeyVersion encKv =
doAs("GENERATE_EEK",
new PrivilegedExceptionAction<EncryptedKeyVersion>() {
@Override
public EncryptedKeyVersion run() throws Exception {
try {
KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.
createKeyProviderCryptoExtension(kp);
EncryptedKeyVersion ek1 =
kpCE.generateEncryptedKey(currKv.getName());
return ek1;
} catch (Exception ex) {
Assert.fail(ex.toString());
}
return null;
}
});
doAs("DECRYPT_EEK", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
kp.getKeyVersion("k1@0");
kp.getCurrentKey("k1");
KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.
createKeyProviderCryptoExtension(kp);
kpCE.decryptEncryptedKey(encKv);
} catch (Exception ex) {
Assert.fail(ex.toString());
}
@ -817,7 +882,7 @@ public class TestKMS {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 64);
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
URI uri = createKMSUri(getKMSUrl());
final KeyProvider kp = new KMSClientProvider(uri, conf);
@ -889,6 +954,30 @@ public class TestKMS {
Assert.assertTrue("Caught unexpected exception" + e.toString(), false);
}
caughtTimeout = false;
try {
KeyProvider kp = new KMSClientProvider(uri, conf);
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp)
.generateEncryptedKey("a");
} catch (SocketTimeoutException e) {
caughtTimeout = true;
} catch (IOException e) {
Assert.assertTrue("Caught unexpected exception" + e.toString(), false);
}
caughtTimeout = false;
try {
KeyProvider kp = new KMSClientProvider(uri, conf);
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp)
.decryptEncryptedKey(
new KMSClientProvider.KMSEncryptedKeyVersion("a",
"a", new byte[] {1, 2}, "EEK", new byte[] {1, 2}));
} catch (SocketTimeoutException e) {
caughtTimeout = true;
} catch (IOException e) {
Assert.assertTrue("Caught unexpected exception" + e.toString(), false);
}
Assert.assertTrue(caughtTimeout);
sock.close();
View File
@ -154,6 +154,8 @@ public class Nfs3Utils {
if (isSet(mode, Nfs3Constant.ACCESS_MODE_EXECUTE)) {
if (type == NfsFileType.NFSREG.toValue()) {
rtn |= Nfs3Constant.ACCESS3_EXECUTE;
} else {
rtn |= Nfs3Constant.ACCESS3_LOOKUP;
}
}
return rtn;
View File
@ -68,5 +68,12 @@ public class TestNfs3Utils {
0, Nfs3Utils.getAccessRightsForUserGroup(3, 10, new int[] {5, 16, 4}, attr));
assertEquals("No access should be allowed for dir as mode is 700 even though AuxGID does match",
0, Nfs3Utils.getAccessRightsForUserGroup(3, 20, new int[] {5, 10}, attr));
Mockito.when(attr.getUid()).thenReturn(2);
Mockito.when(attr.getGid()).thenReturn(10);
Mockito.when(attr.getMode()).thenReturn(457); // 711
Mockito.when(attr.getType()).thenReturn(NfsFileType.NFSDIR.toValue());
assertEquals("Access should be allowed for dir as mode is 711 and GID matches",
2 /* Lookup */, Nfs3Utils.getAccessRightsForUserGroup(3, 10, new int[] {5, 16, 11}, attr));
}
}
View File
@ -72,11 +72,11 @@ public class TestReaddir {
public static void setup() throws Exception {
String currentUser = System.getProperty("user.name");
config.set(
DefaultImpersonationProvider.getProxySuperuserGroupConfKey(currentUser),
"*");
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(currentUser), "*");
config.set(
DefaultImpersonationProvider.getProxySuperuserIpConfKey(currentUser),
"*");
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(currentUser), "*");
ProxyUsers.refreshSuperUserGroupsConfiguration(config);
cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
cluster.waitActive();
View File
@ -312,10 +312,12 @@ public class TestWrites {
System.getProperty("user.name"));
String currentUser = System.getProperty("user.name");
config.set(
DefaultImpersonationProvider.getProxySuperuserGroupConfKey(currentUser),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(currentUser),
"*");
config.set(
DefaultImpersonationProvider.getProxySuperuserIpConfKey(currentUser),
DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(currentUser),
"*");
ProxyUsers.refreshSuperUserGroupsConfiguration(config);
View File
@ -12,6 +12,8 @@ Trunk (Unreleased)
HDFS-5570. Deprecate hftp / hsftp and replace them with webhdfs / swebhdfs.
(wheat9)
HDFS-2538. option to disable fsck dots (Mohammad Kamrul Islam via aw)
NEW FEATURES
HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
@ -287,8 +289,28 @@ Release 2.6.0 - UNRELEASED
HDFS-2856. Fix block protocol so that Datanodes don't require root or jsvc.
(cnauroth)
HDFS-5624. Add HDFS tests for ACLs in combination with viewfs.
(Stephen Chu via cnauroth)
HDFS-6655. Add 'header banner' to 'explorer.html' also in Namenode UI
(vinayakumarb)
HDFS-4120. Add a new "-skipSharedEditsCheck" option for BootstrapStandby
(Liang Xie and Rakesh R via vinayakumarb)
HDFS-6597. Add a new option to NN upgrade to terminate the process after
upgrade on NN is completed. (Danilo Vunjak via cnauroth)
HDFS-6700. BlockPlacementPolicy should choose storage but not datanode for
deletion. (szetszwo)
HDFS-6616. Add exclude-datanodes feature to WebHDFS redirection so that it
will not redirect retries to the same datanode. (zhaoyunjiong via szetszwo)
OPTIMIZATIONS
HDFS-6690. Deduplicate xattr names in memory. (wang)
BUG FIXES
HDFS-6617. Flake TestDFSZKFailoverController.testManualFailoverWithDFSHAAdmin
@ -312,6 +334,16 @@ Release 2.6.0 - UNRELEASED
HDFS-6456. NFS should throw error for invalid entry in
dfs.nfs.exports.allowed.hosts (Abhiraj Butala via brandonli)
HDFS-6689. NFS doesn't return correct lookup access for directories (brandonli)
HDFS-6478. RemoteException can't be retried properly for non-HA scenario.
(Ming Ma via jing9)
HDFS-6693. TestDFSAdminWithHA fails on windows ( vinayakumarb )
HDFS-6667. In HDFS HA mode, Distcp/SLive with webhdfs on secure cluster fails
with Client cannot authenticate via:[TOKEN, KERBEROS] error. (jing9)
Release 2.5.0 - UNRELEASED
INCOMPATIBLE CHANGES
@ -566,6 +598,8 @@ Release 2.5.0 - UNRELEASED
HDFS-6583. Remove clientNode in FileUnderConstructionFeature. (wheat9)
HDFS-6599. 2.4 addBlock is 10 to 20 times slower compared to 0.23 (daryn)
BUG FIXES
HDFS-6112. NFS Gateway docs are incorrect for allowed hosts configuration.
View File
@ -0,0 +1,169 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.contrib.bkjournal;
import java.io.File;
import java.io.FileFilter;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.server.namenode.ha.TestStandbyCheckpoints.SlowCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import com.google.common.collect.ImmutableList;
public class TestBootstrapStandbyWithBKJM {
private static BKJMUtil bkutil;
protected MiniDFSCluster cluster;
@BeforeClass
public static void setupBookkeeper() throws Exception {
bkutil = new BKJMUtil(3);
bkutil.start();
}
@AfterClass
public static void teardownBookkeeper() throws Exception {
bkutil.teardown();
}
@After
public void teardown() {
if (cluster != null) {
cluster.shutdown();
}
}
@Before
public void setUp() throws Exception {
Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
.createJournalURI("/bootstrapStandby").toString());
BKJMUtil.addJournalManagerDefinition(conf);
conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
SlowCodec.class.getCanonicalName());
CompressionCodecFactory.setCodecClasses(conf,
ImmutableList.<Class> of(SlowCodec.class));
MiniDFSNNTopology topology = new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(
new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001)).addNN(
new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology)
.numDataNodes(1).manageNameDfsSharedDirs(false).build();
cluster.waitActive();
}
/**
* While bootstrapping, in_progress transaction entries should be skipped.
* Bootstrap usage for BKJM: "-force", "-nonInteractive", "-skipSharedEditsCheck"
*/
@Test
public void testBootstrapStandbyWithActiveNN() throws Exception {
// make nn0 active
cluster.transitionToActive(0);
// do ops and generate in-progress edit log data
Configuration confNN1 = cluster.getConfiguration(1);
DistributedFileSystem dfs = (DistributedFileSystem) HATestUtil
.configureFailoverFs(cluster, confNN1);
for (int i = 1; i <= 10; i++) {
dfs.mkdirs(new Path("/test" + i));
}
dfs.close();
// shutdown nn1 and delete its edit log files
cluster.shutdownNameNode(1);
deleteEditLogIfExists(confNN1);
cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_ENTER, true);
cluster.getNameNodeRpc(0).saveNamespace();
cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_LEAVE, true);
// check without -skipSharedEditsCheck, Bootstrap should fail for BKJM
// immediately after saveNamespace
int rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive" },
confNN1);
Assert.assertEquals("Mismatches return code", 6, rc);
// check with -skipSharedEditsCheck
rc = BootstrapStandby.run(new String[] { "-force", "-nonInteractive",
"-skipSharedEditsCheck" }, confNN1);
Assert.assertEquals("Mismatches return code", 0, rc);
// Checkpoint as fast as we can, in a tight loop.
confNN1.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
cluster.restartNameNode(1);
cluster.transitionToStandby(1);
NameNode nn0 = cluster.getNameNode(0);
HATestUtil.waitForStandbyToCatchUp(nn0, cluster.getNameNode(1));
long expectedCheckpointTxId = NameNodeAdapter.getNamesystem(nn0)
.getFSImage().getMostRecentCheckpointTxId();
HATestUtil.waitForCheckpoint(cluster, 1,
ImmutableList.of((int) expectedCheckpointTxId));
// Should have copied over the namespace
FSImageTestUtil.assertNNHasCheckpoints(cluster, 1,
ImmutableList.of((int) expectedCheckpointTxId));
FSImageTestUtil.assertNNFilesMatch(cluster);
}
private void deleteEditLogIfExists(Configuration confNN1) {
String editDirs = confNN1.get(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY);
String[] listEditDirs = StringUtils.split(editDirs, ',');
Assert.assertTrue("Wrong edit directory path!", listEditDirs.length > 0);
for (String dir : listEditDirs) {
File curDir = new File(dir, "current");
File[] listFiles = curDir.listFiles(new FileFilter() {
@Override
public boolean accept(File f) {
// accept everything except files whose names start with "edits"
return !f.getName().startsWith("edits");
}
});
if (listFiles != null && listFiles.length > 0) {
for (File file : listFiles) {
Assert.assertTrue("Failed to delete edit files!", file.delete());
}
}
}
}
}
View File
@ -26,7 +26,6 @@ import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HA_DT_SERVICE_PREFIX
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
@ -38,14 +37,13 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
@ -259,12 +257,11 @@ public class HAUtil {
/**
* Parse the file system URI out of the provided token.
*/
public static URI getServiceUriFromToken(final String scheme,
Token<?> token) {
public static URI getServiceUriFromToken(final String scheme, Token<?> token) {
String tokStr = token.getService().toString();
if (tokStr.startsWith(HA_DT_SERVICE_PREFIX)) {
tokStr = tokStr.replaceFirst(HA_DT_SERVICE_PREFIX, "");
final String prefix = buildTokenServicePrefixForLogicalUri(scheme);
if (tokStr.startsWith(prefix)) {
tokStr = tokStr.replaceFirst(prefix, "");
}
return URI.create(scheme + "://" + tokStr);
}
@ -273,10 +270,13 @@ public class HAUtil {
* Get the service name used in the delegation token for the given logical
* HA service.
* @param uri the logical URI of the cluster
* @param scheme the scheme of the corresponding FileSystem
* @return the service name
*/
public static Text buildTokenServiceForLogicalUri(URI uri) {
return new Text(HA_DT_SERVICE_PREFIX + uri.getHost());
public static Text buildTokenServiceForLogicalUri(final URI uri,
final String scheme) {
return new Text(buildTokenServicePrefixForLogicalUri(scheme)
+ uri.getHost());
}
/**
@ -286,7 +286,11 @@ public class HAUtil {
public static boolean isTokenForLogicalUri(Token<?> token) {
return token.getService().toString().startsWith(HA_DT_SERVICE_PREFIX);
}
public static String buildTokenServicePrefixForLogicalUri(String scheme) {
return HA_DT_SERVICE_PREFIX + scheme + ":";
}
/**
* Locate a delegation token associated with the given HA cluster URI, and if
* one is found, clone it to also represent the underlying namenode address.
@ -298,7 +302,9 @@ public class HAUtil {
public static void cloneDelegationTokenForLogicalUri(
UserGroupInformation ugi, URI haUri,
Collection<InetSocketAddress> nnAddrs) {
Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri);
// this cloning logic is only used by hdfs
Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri,
HdfsConstants.HDFS_URI_SCHEME);
Token<DelegationTokenIdentifier> haToken =
tokenSelector.selectToken(haService, ugi.getTokens());
if (haToken != null) {
@ -309,8 +315,9 @@ public class HAUtil {
Token<DelegationTokenIdentifier> specificToken =
new Token.PrivateToken<DelegationTokenIdentifier>(haToken);
SecurityUtil.setTokenService(specificToken, singleNNAddr);
Text alias =
new Text(HA_DT_SERVICE_PREFIX + "//" + specificToken.getService());
Text alias = new Text(
buildTokenServicePrefixForLogicalUri(HdfsConstants.HDFS_URI_SCHEME)
+ "//" + specificToken.getService());
ugi.addToken(alias, specificToken);
LOG.debug("Mapped HA service delegation token for logical URI " +
haUri + " to namenode " + singleNNAddr);
View File
@ -163,7 +163,8 @@ public class NameNodeProxies {
Text dtService;
if (failoverProxyProvider.useLogicalURI()) {
dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
HdfsConstants.HDFS_URI_SCHEME);
} else {
dtService = SecurityUtil.buildTokenService(
NameNode.getAddress(nameNodeUri));
@ -224,7 +225,8 @@ public class NameNodeProxies {
new Class[] { xface }, dummyHandler);
Text dtService;
if (failoverProxyProvider.useLogicalURI()) {
dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri,
HdfsConstants.HDFS_URI_SCHEME);
} else {
dtService = SecurityUtil.buildTokenService(
NameNode.getAddress(nameNodeUri));
@ -333,19 +335,18 @@ public class NameNodeProxies {
address, conf, ugi, NamenodeProtocolPB.class, 0);
if (withRetries) { // create the proxy with retries
RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(5, 200,
TimeUnit.MILLISECONDS);
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap
= new HashMap<Class<? extends Exception>, RetryPolicy>();
RetryPolicy methodPolicy = RetryPolicies.retryByException(timeoutPolicy,
exceptionToPolicyMap);
Map<String, RetryPolicy> methodNameToPolicyMap
= new HashMap<String, RetryPolicy>();
methodNameToPolicyMap.put("getBlocks", methodPolicy);
methodNameToPolicyMap.put("getAccessKeys", methodPolicy);
proxy = (NamenodeProtocolPB) RetryProxy.create(NamenodeProtocolPB.class,
proxy, methodNameToPolicyMap);
TimeUnit.MILLISECONDS);
Map<String, RetryPolicy> methodNameToPolicyMap
= new HashMap<String, RetryPolicy>();
methodNameToPolicyMap.put("getBlocks", timeoutPolicy);
methodNameToPolicyMap.put("getAccessKeys", timeoutPolicy);
NamenodeProtocol translatorProxy =
new NamenodeProtocolTranslatorPB(proxy);
return (NamenodeProtocol) RetryProxy.create(
NamenodeProtocol.class, translatorProxy, methodNameToPolicyMap);
} else {
return new NamenodeProtocolTranslatorPB(proxy);
}
return new NamenodeProtocolTranslatorPB(proxy);
}
private static ClientProtocol createNNProxyWithClientProtocol(
@ -379,29 +380,27 @@ public class NameNodeProxies {
= new HashMap<Class<? extends Exception>, RetryPolicy>();
remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
createPolicy);
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap
= new HashMap<Class<? extends Exception>, RetryPolicy>();
exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
.retryByRemoteException(defaultPolicy,
remoteExceptionToPolicyMap));
RetryPolicy methodPolicy = RetryPolicies.retryByException(
defaultPolicy, exceptionToPolicyMap);
RetryPolicy methodPolicy = RetryPolicies.retryByRemoteException(
defaultPolicy, remoteExceptionToPolicyMap);
Map<String, RetryPolicy> methodNameToPolicyMap
= new HashMap<String, RetryPolicy>();
methodNameToPolicyMap.put("create", methodPolicy);
proxy = (ClientNamenodeProtocolPB) RetryProxy.create(
ClientNamenodeProtocolPB.class,
new DefaultFailoverProxyProvider<ClientNamenodeProtocolPB>(
ClientNamenodeProtocolPB.class, proxy),
ClientProtocol translatorProxy =
new ClientNamenodeProtocolTranslatorPB(proxy);
return (ClientProtocol) RetryProxy.create(
ClientProtocol.class,
new DefaultFailoverProxyProvider<ClientProtocol>(
ClientProtocol.class, translatorProxy),
methodNameToPolicyMap,
defaultPolicy);
} else {
return new ClientNamenodeProtocolTranslatorPB(proxy);
}
return new ClientNamenodeProtocolTranslatorPB(proxy);
}
private static Object createNameNodeProxy(InetSocketAddress address,
Configuration conf, UserGroupInformation ugi, Class<?> xface,
int rpcTimeout) throws IOException {
View File
@ -339,7 +339,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
buffer.append("Cache Remaining: " +cr+ " ("+StringUtils.byteDesc(cr)+")"+"\n");
buffer.append("Cache Used%: "+percent2String(cacheUsedPercent) + "\n");
buffer.append("Cache Remaining%: "+percent2String(cacheRemainingPercent) + "\n");
buffer.append("Xceivers: "+getXceiverCount()+"\n");
buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
return buffer.toString();
}
View File
@ -124,7 +124,7 @@ public class HdfsConstants {
* of a delegation token, indicating that the URI is a logical (HA)
* URI.
*/
public static final String HA_DT_SERVICE_PREFIX = "ha-hdfs:";
public static final String HA_DT_SERVICE_PREFIX = "ha-";
/**
View File
@ -97,7 +97,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements
RPC.setProtocolEngine(conf, DatanodeProtocolPB.class,
ProtobufRpcEngine.class);
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
rpcProxy = createNamenodeWithRetry(createNamenode(nameNodeAddr, conf, ugi));
rpcProxy = createNamenode(nameNodeAddr, conf, ugi);
}
private static DatanodeProtocolPB createNamenode(
@ -109,33 +109,6 @@ public class DatanodeProtocolClientSideTranslatorPB implements
org.apache.hadoop.ipc.Client.getPingInterval(conf), null).getProxy();
}
/** Create a {@link NameNode} proxy */
static DatanodeProtocolPB createNamenodeWithRetry(
DatanodeProtocolPB rpcNamenode) {
RetryPolicy createPolicy = RetryPolicies
.retryUpToMaximumCountWithFixedSleep(5,
HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap =
new HashMap<Class<? extends Exception>, RetryPolicy>();
remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
createPolicy);
Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
new HashMap<Class<? extends Exception>, RetryPolicy>();
exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
.retryByRemoteException(RetryPolicies.TRY_ONCE_THEN_FAIL,
remoteExceptionToPolicyMap));
RetryPolicy methodPolicy = RetryPolicies.retryByException(
RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
methodNameToPolicyMap.put("create", methodPolicy);
return (DatanodeProtocolPB) RetryProxy.create(DatanodeProtocolPB.class,
rpcNamenode, methodNameToPolicyMap);
}
@Override
public void close() throws IOException {
RPC.stopProxy(rpcProxy);
View File
@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
@ -61,7 +62,7 @@ import com.google.protobuf.ServiceException;
@InterfaceAudience.Private
@InterfaceStability.Stable
public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
ProtocolMetaInterface, Closeable {
ProtocolMetaInterface, Closeable, ProtocolTranslator {
/** RpcController is not used and hence is set to null */
private final static RpcController NULL_CONTROLLER = null;
@ -88,6 +89,11 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
RPC.stopProxy(rpcProxy);
}
@Override
public Object getUnderlyingProxyObject() {
return rpcProxy;
}
@Override
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
throws IOException {
View File
@ -727,7 +727,6 @@ public class BlockManager {
final List<DatanodeStorageInfo> locations
= new ArrayList<DatanodeStorageInfo>(blocksMap.numNodes(block));
for(DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
final String storageID = storage.getStorageID();
// filter invalidate replicas
if(!invalidateBlocks.contains(storage.getDatanodeDescriptor(), block)) {
locations.add(storage);
@ -2640,7 +2639,7 @@ public class BlockManager {
if (addedNode == delNodeHint) {
delNodeHint = null;
}
Collection<DatanodeDescriptor> nonExcess = new ArrayList<DatanodeDescriptor>();
Collection<DatanodeStorageInfo> nonExcess = new ArrayList<DatanodeStorageInfo>();
Collection<DatanodeDescriptor> corruptNodes = corruptReplicas
.getNodes(block);
for(DatanodeStorageInfo storage : blocksMap.getStorages(block, State.NORMAL)) {
@ -2660,7 +2659,7 @@ public class BlockManager {
if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
// exclude corrupt replicas
if (corruptNodes == null || !corruptNodes.contains(cur)) {
nonExcess.add(cur);
nonExcess.add(storage);
}
}
}
@ -2684,7 +2683,7 @@ public class BlockManager {
* If no such node is available,
* then pick a node with least free space
*/
private void chooseExcessReplicates(Collection<DatanodeDescriptor> nonExcess,
private void chooseExcessReplicates(final Collection<DatanodeStorageInfo> nonExcess,
Block b, short replication,
DatanodeDescriptor addedNode,
DatanodeDescriptor delNodeHint,
@ -2692,28 +2691,33 @@ public class BlockManager {
assert namesystem.hasWriteLock();
// first form a rack to datanodes map
BlockCollection bc = getBlockCollection(b);
final Map<String, List<DatanodeDescriptor>> rackMap
= new HashMap<String, List<DatanodeDescriptor>>();
final List<DatanodeDescriptor> moreThanOne = new ArrayList<DatanodeDescriptor>();
final List<DatanodeDescriptor> exactlyOne = new ArrayList<DatanodeDescriptor>();
final Map<String, List<DatanodeStorageInfo>> rackMap
= new HashMap<String, List<DatanodeStorageInfo>>();
final List<DatanodeStorageInfo> moreThanOne = new ArrayList<DatanodeStorageInfo>();
final List<DatanodeStorageInfo> exactlyOne = new ArrayList<DatanodeStorageInfo>();
// split nodes into two sets
// moreThanOne contains nodes on rack with more than one replica
// exactlyOne contains the remaining nodes
replicator.splitNodesWithRack(nonExcess, rackMap, moreThanOne,
exactlyOne);
replicator.splitNodesWithRack(nonExcess, rackMap, moreThanOne, exactlyOne);
// pick one node to delete that favors the delete hint
// otherwise pick one with least space from priSet if it is not empty
// otherwise one node with least space from remains
boolean firstOne = true;
final DatanodeStorageInfo delNodeHintStorage
= DatanodeStorageInfo.getDatanodeStorageInfo(nonExcess, delNodeHint);
final DatanodeStorageInfo addedNodeStorage
= DatanodeStorageInfo.getDatanodeStorageInfo(nonExcess, addedNode);
while (nonExcess.size() - replication > 0) {
// check if we can delete delNodeHint
final DatanodeInfo cur;
if (firstOne && delNodeHint !=null && nonExcess.contains(delNodeHint)
&& (moreThanOne.contains(delNodeHint)
|| (addedNode != null && !moreThanOne.contains(addedNode))) ) {
cur = delNodeHint;
final DatanodeStorageInfo cur;
if (firstOne && delNodeHintStorage != null
&& (moreThanOne.contains(delNodeHintStorage)
|| (addedNodeStorage != null
&& !moreThanOne.contains(addedNodeStorage)))) {
cur = delNodeHintStorage;
} else { // regular excessive replica removal
cur = replicator.chooseReplicaToDelete(bc, b, replication,
moreThanOne, exactlyOne);
@ -2725,7 +2729,7 @@ public class BlockManager {
exactlyOne, cur);
nonExcess.remove(cur);
addToExcessReplicate(cur, b);
addToExcessReplicate(cur.getDatanodeDescriptor(), b);
//
// The 'excessblocks' tracks blocks until we get confirmation
@ -2736,7 +2740,7 @@ public class BlockManager {
// should be deleted. Items are removed from the invalidate list
// upon giving instructions to the namenode.
//
addToInvalidates(b, cur);
addToInvalidates(b, cur.getDatanodeDescriptor());
blockLog.info("BLOCK* chooseExcessReplicates: "
+"("+cur+", "+b+") is added to invalidated blocks set");
}
View File
@ -124,11 +124,12 @@ public abstract class BlockPlacementPolicy {
listed in the previous parameter.
* @return the replica that is the best candidate for deletion
*/
abstract public DatanodeDescriptor chooseReplicaToDelete(BlockCollection srcBC,
Block block,
short replicationFactor,
Collection<DatanodeDescriptor> existingReplicas,
Collection<DatanodeDescriptor> moreExistingReplicas);
abstract public DatanodeStorageInfo chooseReplicaToDelete(
BlockCollection srcBC,
Block block,
short replicationFactor,
Collection<DatanodeStorageInfo> existingReplicas,
Collection<DatanodeStorageInfo> moreExistingReplicas);
/**
* Used to setup a BlockPlacementPolicy object. This should be defined by
@ -175,21 +176,23 @@ public abstract class BlockPlacementPolicy {
* @param exactlyOne The List of replica nodes on rack with only one replica
* @param cur current replica to remove
*/
public void adjustSetsWithChosenReplica(final Map<String,
List<DatanodeDescriptor>> rackMap,
final List<DatanodeDescriptor> moreThanOne,
final List<DatanodeDescriptor> exactlyOne, final DatanodeInfo cur) {
public void adjustSetsWithChosenReplica(
final Map<String, List<DatanodeStorageInfo>> rackMap,
final List<DatanodeStorageInfo> moreThanOne,
final List<DatanodeStorageInfo> exactlyOne,
final DatanodeStorageInfo cur) {
String rack = getRack(cur);
final List<DatanodeDescriptor> datanodes = rackMap.get(rack);
datanodes.remove(cur);
if (datanodes.isEmpty()) {
final String rack = getRack(cur.getDatanodeDescriptor());
final List<DatanodeStorageInfo> storages = rackMap.get(rack);
storages.remove(cur);
if (storages.isEmpty()) {
rackMap.remove(rack);
}
if (moreThanOne.remove(cur)) {
if (datanodes.size() == 1) {
moreThanOne.remove(datanodes.get(0));
exactlyOne.add(datanodes.get(0));
if (storages.size() == 1) {
final DatanodeStorageInfo remaining = storages.get(0);
moreThanOne.remove(remaining);
exactlyOne.add(remaining);
}
} else {
exactlyOne.remove(cur);
@ -214,28 +217,28 @@ public abstract class BlockPlacementPolicy {
* @param exactlyOne contains the remaining nodes
*/
public void splitNodesWithRack(
Collection<DatanodeDescriptor> dataNodes,
final Map<String, List<DatanodeDescriptor>> rackMap,
final List<DatanodeDescriptor> moreThanOne,
final List<DatanodeDescriptor> exactlyOne) {
for(DatanodeDescriptor node : dataNodes) {
final String rackName = getRack(node);
List<DatanodeDescriptor> datanodeList = rackMap.get(rackName);
if (datanodeList == null) {
datanodeList = new ArrayList<DatanodeDescriptor>();
rackMap.put(rackName, datanodeList);
final Iterable<DatanodeStorageInfo> storages,
final Map<String, List<DatanodeStorageInfo>> rackMap,
final List<DatanodeStorageInfo> moreThanOne,
final List<DatanodeStorageInfo> exactlyOne) {
for(DatanodeStorageInfo s: storages) {
final String rackName = getRack(s.getDatanodeDescriptor());
List<DatanodeStorageInfo> storageList = rackMap.get(rackName);
if (storageList == null) {
storageList = new ArrayList<DatanodeStorageInfo>();
rackMap.put(rackName, storageList);
}
datanodeList.add(node);
storageList.add(s);
}
// split nodes into two sets
for(List<DatanodeDescriptor> datanodeList : rackMap.values()) {
if (datanodeList.size() == 1) {
for(List<DatanodeStorageInfo> storageList : rackMap.values()) {
if (storageList.size() == 1) {
// exactlyOne contains nodes on rack with only one replica
exactlyOne.add(datanodeList.get(0));
exactlyOne.add(storageList.get(0));
} else {
// moreThanOne contains nodes on rack with more than one replica
moreThanOne.addAll(datanodeList);
moreThanOne.addAll(storageList);
}
}
}
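splitNodesWithRack is the partitioning step the deletion logic relies on. A self-contained sketch of the same grouping, assuming plain strings in place of DatanodeStorageInfo and of getRack(s.getDatanodeDescriptor()):

import java.util.*;

// Group storages by rack, then split them into "rack holds more than one
// replica" vs "rack holds exactly one".
class RackSplitSketch {
  static void split(Map<String, String> storageToRack,
      List<String> moreThanOne, List<String> exactlyOne) {
    Map<String, List<String>> rackMap = new HashMap<>();
    for (Map.Entry<String, String> e : storageToRack.entrySet()) {
      rackMap.computeIfAbsent(e.getValue(), r -> new ArrayList<>())
          .add(e.getKey());
    }
    for (List<String> onRack : rackMap.values()) {
      if (onRack.size() == 1) {
        exactlyOne.add(onRack.get(0));  // only replica on its rack
      } else {
        moreThanOne.addAll(onRack);     // rack can spare a replica
      }
    }
  }
}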


@ -636,15 +636,11 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
// check the communication traffic of the target machine
if (considerLoad) {
double avgLoad = 0;
if (stats != null) {
int size = stats.getNumDatanodesInService();
if (size != 0) {
avgLoad = (double)stats.getTotalLoad()/size;
}
}
if (node.getXceiverCount() > (2.0 * avgLoad)) {
logNodeIsNotChosen(storage, "the node is too busy ");
final double maxLoad = 2.0 * stats.getInServiceXceiverAverage();
final int nodeLoad = node.getXceiverCount();
if (nodeLoad > maxLoad) {
logNodeIsNotChosen(storage,
"the node is too busy (load:"+nodeLoad+" > "+maxLoad+") ");
return false;
}
}
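The rewritten check compares a node's xceiver count against twice the average taken over in-service datanodes only, so decommissioned nodes no longer skew the baseline. A sketch of that gate with the inputs made explicit (names are illustrative, not the real API):

class LoadGateSketch {
  // Reject a node whose active transfer (xceiver) count exceeds twice the
  // in-service average.
  static boolean tooBusy(int nodeXceivers, int inServiceXceivers,
      int nodesInService) {
    if (nodesInService == 0) {
      return false;                     // no baseline to compare against
    }
    double maxLoad = 2.0 * inServiceXceivers / nodesInService;
    return nodeXceivers > maxLoad;
  }
}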
@ -727,31 +723,34 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
}
@Override
public DatanodeDescriptor chooseReplicaToDelete(BlockCollection bc,
public DatanodeStorageInfo chooseReplicaToDelete(BlockCollection bc,
Block block, short replicationFactor,
Collection<DatanodeDescriptor> first,
Collection<DatanodeDescriptor> second) {
Collection<DatanodeStorageInfo> first,
Collection<DatanodeStorageInfo> second) {
long oldestHeartbeat =
now() - heartbeatInterval * tolerateHeartbeatMultiplier;
DatanodeDescriptor oldestHeartbeatNode = null;
DatanodeStorageInfo oldestHeartbeatStorage = null;
long minSpace = Long.MAX_VALUE;
DatanodeDescriptor minSpaceNode = null;
DatanodeStorageInfo minSpaceStorage = null;
// Pick the node with the oldest heartbeat or with the least free space,
// if all heartbeats are within the tolerable heartbeat interval
for(DatanodeDescriptor node : pickupReplicaSet(first, second)) {
for(DatanodeStorageInfo storage : pickupReplicaSet(first, second)) {
final DatanodeDescriptor node = storage.getDatanodeDescriptor();
long free = node.getRemaining();
long lastHeartbeat = node.getLastUpdate();
if(lastHeartbeat < oldestHeartbeat) {
oldestHeartbeat = lastHeartbeat;
oldestHeartbeatNode = node;
oldestHeartbeatStorage = storage;
}
if (minSpace > free) {
minSpace = free;
minSpaceNode = node;
minSpaceStorage = storage;
}
}
return oldestHeartbeatNode != null ? oldestHeartbeatNode : minSpaceNode;
return oldestHeartbeatStorage != null? oldestHeartbeatStorage
: minSpaceStorage;
}
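The heuristic above is unchanged in spirit by the storage-type refactoring: prefer the storage whose node has the stalest heartbeat, and fall back to the one with the least free space. A standalone sketch under those assumptions, with simplified stand-in fields:

import java.util.*;

// Deletion heuristic: oldest heartbeat wins; if every heartbeat is within
// the tolerable interval, pick the replica with the least free space.
class DeleteChoiceSketch {
  static class Replica {
    long lastHeartbeatMs;               // node's last heartbeat time
    long freeBytes;                     // node's remaining capacity
  }

  static Replica choose(Collection<Replica> candidates, long staleBeforeMs) {
    Replica oldest = null, smallest = null;
    long oldestHb = staleBeforeMs, minFree = Long.MAX_VALUE;
    for (Replica r : candidates) {
      if (r.lastHeartbeatMs < oldestHb) {
        oldestHb = r.lastHeartbeatMs;
        oldest = r;
      }
      if (r.freeBytes < minFree) {
        minFree = r.freeBytes;
        smallest = r;
      }
    }
    return oldest != null ? oldest : smallest;
  }
}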
/**
@ -760,9 +759,9 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
* replica while second set contains remaining replica nodes.
* So pick up first set if not empty. If first is empty, then pick second.
*/
protected Collection<DatanodeDescriptor> pickupReplicaSet(
Collection<DatanodeDescriptor> first,
Collection<DatanodeDescriptor> second) {
protected Collection<DatanodeStorageInfo> pickupReplicaSet(
Collection<DatanodeStorageInfo> first,
Collection<DatanodeStorageInfo> second) {
return first.isEmpty() ? second : first;
}


@ -286,9 +286,9 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau
* If first is empty, then pick second.
*/
@Override
public Collection<DatanodeDescriptor> pickupReplicaSet(
Collection<DatanodeDescriptor> first,
Collection<DatanodeDescriptor> second) {
public Collection<DatanodeStorageInfo> pickupReplicaSet(
Collection<DatanodeStorageInfo> first,
Collection<DatanodeStorageInfo> second) {
// If no replica within same rack, return directly.
if (first.isEmpty()) {
return second;
@ -296,25 +296,24 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau
// Split data nodes in the first set into two sets,
// moreThanOne contains nodes on nodegroup with more than one replica
// exactlyOne contains the remaining nodes
Map<String, List<DatanodeDescriptor>> nodeGroupMap =
new HashMap<String, List<DatanodeDescriptor>>();
Map<String, List<DatanodeStorageInfo>> nodeGroupMap =
new HashMap<String, List<DatanodeStorageInfo>>();
for(DatanodeDescriptor node : first) {
final String nodeGroupName =
NetworkTopology.getLastHalf(node.getNetworkLocation());
List<DatanodeDescriptor> datanodeList =
nodeGroupMap.get(nodeGroupName);
if (datanodeList == null) {
datanodeList = new ArrayList<DatanodeDescriptor>();
nodeGroupMap.put(nodeGroupName, datanodeList);
for(DatanodeStorageInfo storage : first) {
final String nodeGroupName = NetworkTopology.getLastHalf(
storage.getDatanodeDescriptor().getNetworkLocation());
List<DatanodeStorageInfo> storageList = nodeGroupMap.get(nodeGroupName);
if (storageList == null) {
storageList = new ArrayList<DatanodeStorageInfo>();
nodeGroupMap.put(nodeGroupName, storageList);
}
datanodeList.add(node);
storageList.add(storage);
}
final List<DatanodeDescriptor> moreThanOne = new ArrayList<DatanodeDescriptor>();
final List<DatanodeDescriptor> exactlyOne = new ArrayList<DatanodeDescriptor>();
final List<DatanodeStorageInfo> moreThanOne = new ArrayList<DatanodeStorageInfo>();
final List<DatanodeStorageInfo> exactlyOne = new ArrayList<DatanodeStorageInfo>();
// split nodes into two sets
for(List<DatanodeDescriptor> datanodeList : nodeGroupMap.values()) {
for(List<DatanodeStorageInfo> datanodeList : nodeGroupMap.values()) {
if (datanodeList.size() == 1 ) {
// exactlyOne contains nodes on nodegroup with exactly one replica
exactlyOne.add(datanodeList.get(0));


@ -820,7 +820,9 @@ public class DatanodeManager {
}
/** Start decommissioning the specified datanode. */
private void startDecommission(DatanodeDescriptor node) {
@InterfaceAudience.Private
@VisibleForTesting
public void startDecommission(DatanodeDescriptor node) {
if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
for (DatanodeStorageInfo storage : node.getStorageInfos()) {
LOG.info("Start Decommissioning " + node + " " + storage


@ -52,6 +52,12 @@ public interface DatanodeStatistics {
/** @return the xceiver count */
public int getXceiverCount();
/** @return average xceiver count for non-decommission(ing|ed) nodes */
public int getInServiceXceiverCount();
/** @return number of non-decommission(ing|ed) nodes */
public int getNumDatanodesInService();
/**
* @return the total used space by data nodes for non-DFS purposes
* such as storing temporary files on the local file system


@ -22,6 +22,7 @@ import java.util.Iterator;
import java.util.List;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
@ -290,4 +291,21 @@ public class DatanodeStorageInfo {
public String toString() {
return "[" + storageType + "]" + storageID + ":" + state;
}
/** @return the first {@link DatanodeStorageInfo} corresponding to
* the given datanode
*/
static DatanodeStorageInfo getDatanodeStorageInfo(
final Iterable<DatanodeStorageInfo> infos,
final DatanodeDescriptor datanode) {
if (datanode == null) {
return null;
}
for(DatanodeStorageInfo storage : infos) {
if (storage.getDatanodeDescriptor() == datanode) {
return storage;
}
}
return null;
}
}


@ -150,6 +150,16 @@ class HeartbeatManager implements DatanodeStatistics {
return stats.xceiverCount;
}
@Override
public synchronized int getInServiceXceiverCount() {
return stats.nodesInServiceXceiverCount;
}
@Override
public synchronized int getNumDatanodesInService() {
return stats.nodesInService;
}
@Override
public synchronized long getCacheCapacity() {
return stats.cacheCapacity;
@ -178,7 +188,7 @@ class HeartbeatManager implements DatanodeStatistics {
}
synchronized void register(final DatanodeDescriptor d) {
if (!datanodes.contains(d)) {
if (!d.isAlive) {
addDatanode(d);
//update its timestamp
@ -191,6 +201,8 @@ class HeartbeatManager implements DatanodeStatistics {
}
synchronized void addDatanode(final DatanodeDescriptor d) {
// update in-service node count
stats.add(d);
datanodes.add(d);
d.isAlive = true;
}
@ -323,6 +335,9 @@ class HeartbeatManager implements DatanodeStatistics {
private long cacheCapacity = 0L;
private long cacheUsed = 0L;
private int nodesInService = 0;
private int nodesInServiceXceiverCount = 0;
private int expiredHeartbeats = 0;
private void add(final DatanodeDescriptor node) {
@ -330,6 +345,8 @@ class HeartbeatManager implements DatanodeStatistics {
blockPoolUsed += node.getBlockPoolUsed();
xceiverCount += node.getXceiverCount();
if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
nodesInService++;
nodesInServiceXceiverCount += node.getXceiverCount();
capacityTotal += node.getCapacity();
capacityRemaining += node.getRemaining();
} else {
@ -344,6 +361,8 @@ class HeartbeatManager implements DatanodeStatistics {
blockPoolUsed -= node.getBlockPoolUsed();
xceiverCount -= node.getXceiverCount();
if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
nodesInService--;
nodesInServiceXceiverCount -= node.getXceiverCount();
capacityTotal -= node.getCapacity();
capacityRemaining -= node.getRemaining();
} else {
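Both add() and subtract() adjust the same pair of counters, so the in-service xceiver average is available in constant time. A compact sketch of that bookkeeping, with a boolean standing in for the decommission-state checks:

class InServiceStatsSketch {
  int nodesInService;
  int nodesInServiceXceiverCount;

  void add(boolean decommissionedOrInProgress, int xceivers) {
    if (!decommissionedOrInProgress) {
      nodesInService++;
      nodesInServiceXceiverCount += xceivers;
    }
  }

  void subtract(boolean decommissionedOrInProgress, int xceivers) {
    if (!decommissionedOrInProgress) {
      nodesInService--;
      nodesInServiceXceiverCount -= xceivers;
    }
  }

  double inServiceXceiverAverage() {
    return nodesInService == 0
        ? 0 : (double) nodesInServiceXceiverCount / nodesInService;
  }
}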


@ -93,7 +93,8 @@ public final class HdfsServerConstants {
FORCE("-force"),
NONINTERACTIVE("-nonInteractive"),
RENAMERESERVED("-renameReserved"),
METADATAVERSION("-metadataVersion");
METADATAVERSION("-metadataVersion"),
UPGRADEONLY("-upgradeOnly");
private static final Pattern ENUM_WITH_ROLLING_UPGRADE_OPTION = Pattern.compile(
"(\\w+)\\((\\w+)\\)");


@ -128,7 +128,8 @@ public class DatanodeWebHdfsMethods {
"://" + nnId);
boolean isLogical = HAUtil.isLogicalUri(conf, nnUri);
if (isLogical) {
token.setService(HAUtil.buildTokenServiceForLogicalUri(nnUri));
token.setService(HAUtil.buildTokenServiceForLogicalUri(nnUri,
HdfsConstants.HDFS_URI_SCHEME));
} else {
token.setService(SecurityUtil.buildTokenService(nnUri));
}


@ -48,6 +48,15 @@ public interface FSClusterStats {
* @return Number of datanodes that are both alive and not decommissioned.
*/
public int getNumDatanodesInService();
/**
* An indication of the average load of non-decommission(ing|ed) nodes
* eligible for block placement.
*
* @return the average number of block transfers and block writes that are
* currently in progress across in-service datanodes.
*/
public double getInServiceXceiverAverage();
}


@ -225,6 +225,7 @@ public class FSImage implements Closeable {
NNStorage.checkVersionUpgradable(storage.getLayoutVersion());
}
if (startOpt != StartupOption.UPGRADE
&& startOpt != StartupOption.UPGRADEONLY
&& !RollingUpgradeStartupOption.STARTED.matches(startOpt)
&& layoutVersion < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION
&& layoutVersion != HdfsConstants.NAMENODE_LAYOUT_VERSION) {
@ -263,6 +264,7 @@ public class FSImage implements Closeable {
// 3. Do transitions
switch(startOpt) {
case UPGRADE:
case UPGRADEONLY:
doUpgrade(target);
return false; // upgrade saved image already
case IMPORT:
@ -748,11 +750,13 @@ public class FSImage implements Closeable {
editLog.recoverUnclosedStreams();
} else if (HAUtil.isHAEnabled(conf, nameserviceId)
&& (startOpt == StartupOption.UPGRADE
|| startOpt == StartupOption.UPGRADEONLY
|| RollingUpgradeStartupOption.ROLLBACK.matches(startOpt))) {
// This NN is HA, but we're doing an upgrade or a rollback of rolling
// upgrade so init the edit log for write.
editLog.initJournalsForWrite();
if (startOpt == StartupOption.UPGRADE) {
if (startOpt == StartupOption.UPGRADE
|| startOpt == StartupOption.UPGRADEONLY) {
long sharedLogCTime = editLog.getSharedLogCTime();
if (this.storage.getCTime() < sharedLogCTime) {
throw new IOException("It looks like the shared log is already " +


@ -1038,7 +1038,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
}
// This will start a new log segment and write to the seen_txid file, so
// we shouldn't do it when coming up in standby state
if (!haEnabled || (haEnabled && startOpt == StartupOption.UPGRADE)) {
if (!haEnabled || (haEnabled && startOpt == StartupOption.UPGRADE)
|| (haEnabled && startOpt == StartupOption.UPGRADEONLY)) {
fsImage.openEditLogForWrite();
}
success = true;
@ -2400,7 +2401,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
// Generate the EDEK while not holding the lock
KeyProviderCryptoExtension.EncryptedKeyVersion edek = null;
try {
edek = provider.generateEncryptedKey(latestEZKeyVersion);
edek = provider.generateEncryptedKey("");
} catch (GeneralSecurityException e) {
throw new IOException(e);
}
@ -7557,7 +7558,18 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
@Override // FSClusterStats
public int getNumDatanodesInService() {
return getNumLiveDataNodes() - getNumDecomLiveDataNodes();
return datanodeStatistics.getNumDatanodesInService();
}
@Override // for block placement strategy
public double getInServiceXceiverAverage() {
double avgLoad = 0;
final int nodes = getNumDatanodesInService();
if (nodes != 0) {
final int xceivers = datanodeStatistics.getInServiceXceiverCount();
avgLoad = (double)xceivers/nodes;
}
return avgLoad;
}
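As a worked example of the new gate: 480 in-service xceivers across 32 in-service datanodes give an average load of 15, so the default policy (2.0 x average) skips any node carrying more than 30 active transfers.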
public SnapshotManager getSnapshotManager() {


@ -836,7 +836,7 @@ public class NNStorage extends Storage implements Closeable,
*/
void processStartupOptionsForUpgrade(StartupOption startOpt, int layoutVersion)
throws IOException {
if (startOpt == StartupOption.UPGRADE) {
if (startOpt == StartupOption.UPGRADE || startOpt == StartupOption.UPGRADEONLY) {
// If upgrade from a release that does not support federation,
// if clusterId is provided in the startupOptions use it.
// Else generate a new cluster ID


@ -210,6 +210,9 @@ public class NameNode implements NameNodeStatusMXBean {
+ StartupOption.UPGRADE.getName() +
" [" + StartupOption.CLUSTERID.getName() + " cid]" +
" [" + StartupOption.RENAMERESERVED.getName() + "<k-v pairs>] ] | \n\t["
+ StartupOption.UPGRADEONLY.getName() +
" [" + StartupOption.CLUSTERID.getName() + " cid]" +
" [" + StartupOption.RENAMERESERVED.getName() + "<k-v pairs>] ] | \n\t["
+ StartupOption.ROLLBACK.getName() + "] | \n\t["
+ StartupOption.ROLLINGUPGRADE.getName() + " <"
+ RollingUpgradeStartupOption.DOWNGRADE.name().toLowerCase() + "|"
@ -713,6 +716,7 @@ public class NameNode implements NameNodeStatusMXBean {
* <li>{@link StartupOption#BACKUP BACKUP} - start backup node</li>
* <li>{@link StartupOption#CHECKPOINT CHECKPOINT} - start checkpoint node</li>
* <li>{@link StartupOption#UPGRADE UPGRADE} - start the cluster
* upgrade and create a snapshot of the current file system state</li>
* <li>{@link StartupOption#UPGRADEONLY UPGRADEONLY} - upgrade the cluster
* and then shut down</li>
* <li>{@link StartupOption#RECOVER RECOVERY} - recover name node
* metadata</li>
@ -767,7 +771,8 @@ public class NameNode implements NameNodeStatusMXBean {
}
protected HAState createHAState(StartupOption startOpt) {
if (!haEnabled || startOpt == StartupOption.UPGRADE) {
if (!haEnabled || startOpt == StartupOption.UPGRADE
|| startOpt == StartupOption.UPGRADEONLY) {
return ACTIVE_STATE;
} else {
return STANDBY_STATE;
@ -1198,8 +1203,10 @@ public class NameNode implements NameNodeStatusMXBean {
startOpt = StartupOption.BACKUP;
} else if (StartupOption.CHECKPOINT.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.CHECKPOINT;
} else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.UPGRADE;
} else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)
|| StartupOption.UPGRADEONLY.getName().equalsIgnoreCase(cmd)) {
startOpt = StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd) ?
StartupOption.UPGRADE : StartupOption.UPGRADEONLY;
/* Can be followed by CLUSTERID with a required parameter or
* RENAMERESERVED with an optional parameter
*/
@ -1407,6 +1414,12 @@ public class NameNode implements NameNodeStatusMXBean {
terminate(0);
return null; // avoid javac warning
}
case UPGRADEONLY: {
DefaultMetricsSystem.initialize("NameNode");
new NameNode(conf);
terminate(0);
return null;
}
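Judging from the usage string above, this case lets an operator run hdfs namenode -upgradeOnly [-clusterid cid] [-renameReserved <k-v pairs>]: the NameNode performs the metadata upgrade and then terminates instead of staying up to serve requests.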
default: {
DefaultMetricsSystem.initialize("NameNode");
return new NameNode(conf);


@ -126,6 +126,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
private boolean showBlocks = false;
private boolean showLocations = false;
private boolean showRacks = false;
private boolean showprogress = false;
private boolean showCorruptFileBlocks = false;
/**
@ -203,6 +204,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
else if (key.equals("blocks")) { this.showBlocks = true; }
else if (key.equals("locations")) { this.showLocations = true; }
else if (key.equals("racks")) { this.showRacks = true; }
else if (key.equals("showprogress")) { this.showprogress = true; }
else if (key.equals("openforwrite")) {this.showOpenFiles = true; }
else if (key.equals("listcorruptfileblocks")) {
this.showCorruptFileBlocks = true;
@ -381,10 +383,13 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
} else if (showFiles) {
out.print(path + " " + fileLen + " bytes, " +
blocks.locatedBlockCount() + " block(s): ");
} else {
} else if (showprogress) {
out.print('.');
}
if (res.totalFiles % 100 == 0) { out.println(); out.flush(); }
if ((showprogress) && res.totalFiles % 100 == 0) {
out.println();
out.flush();
}
int missing = 0;
int corrupt = 0;
long missize = 0;


@ -19,24 +19,30 @@
package org.apache.hadoop.hdfs.server.namenode;
import java.util.List;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.INode;
import com.google.common.collect.ImmutableList;
/**
* XAttrStorage is used to read and set xattrs for an inode.
*/
@InterfaceAudience.Private
public class XAttrStorage {
private static final Map<String, String> internedNames = Maps.newHashMap();
/**
* Reads the existing extended attributes of an inode. If the
* inode does not have an <code>XAttr</code>, then this method
* returns an empty list.
* <p/>
* Must be called while holding the FSDirectory read lock.
*
* @param inode INode to read
* @param snapshotId
* @return List<XAttr> <code>XAttr</code> list.
@ -48,6 +54,9 @@ public class XAttrStorage {
/**
* Reads the existing extended attributes of an inode.
* <p/>
* Must be called while holding the FSDirectory read lock.
*
* @param inode INode to read.
* @return List<XAttr> <code>XAttr</code> list.
*/
@ -58,6 +67,9 @@ public class XAttrStorage {
/**
* Update xattrs of inode.
* <p/>
* Must be called while holding the FSDirectory write lock.
*
* @param inode INode to update
* @param xAttrs to update xAttrs.
* @param snapshotId id of the latest snapshot of the inode
@ -70,8 +82,24 @@ public class XAttrStorage {
}
return;
}
ImmutableList<XAttr> newXAttrs = ImmutableList.copyOf(xAttrs);
// Dedupe the xAttr names and save them into a new interned list
List<XAttr> internedXAttrs = Lists.newArrayListWithCapacity(xAttrs.size());
for (XAttr xAttr : xAttrs) {
final String name = xAttr.getName();
String internedName = internedNames.get(name);
if (internedName == null) {
internedName = name;
internedNames.put(internedName, internedName);
}
XAttr internedXAttr = new XAttr.Builder()
.setName(internedName)
.setNameSpace(xAttr.getNameSpace())
.setValue(xAttr.getValue())
.build();
internedXAttrs.add(internedXAttr);
}
// Save the list of interned xattrs
ImmutableList<XAttr> newXAttrs = ImmutableList.copyOf(internedXAttrs);
if (inode.getXAttrFeature() != null) {
inode.removeXAttrFeature(snapshotId);
}
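The interning map trades a lookup per xattr for a single shared String per distinct name across all inodes. A minimal sketch of the pattern; like the real code it is unsynchronized and assumes the caller holds an external write lock (the FSDirectory lock here):

import java.util.*;

// Canonicalize repeated names so N inodes with the same xattr name share
// one String instance instead of N copies.
class NameInternSketch {
  private final Map<String, String> interned = new HashMap<>();

  String intern(String name) {
    String canonical = interned.get(name);
    if (canonical == null) {
      interned.put(name, name);         // first occurrence becomes canonical
      canonical = name;
    }
    return canonical;
  }
}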


@ -81,6 +81,7 @@ public class BootstrapStandby implements Tool, Configurable {
private boolean force = false;
private boolean interactive = true;
private boolean skipSharedEditsCheck = false;
// Exit/return codes.
static final int ERR_CODE_FAILED_CONNECT = 2;
@ -117,6 +118,8 @@ public class BootstrapStandby implements Tool, Configurable {
force = true;
} else if ("-nonInteractive".equals(arg)) {
interactive = false;
} else if ("-skipSharedEditsCheck".equals(arg)) {
skipSharedEditsCheck = true;
} else {
printUsage();
throw new HadoopIllegalArgumentException(
@ -127,7 +130,7 @@ public class BootstrapStandby implements Tool, Configurable {
private void printUsage() {
System.err.println("Usage: " + this.getClass().getSimpleName() +
"[-force] [-nonInteractive]");
" [-force] [-nonInteractive] [-skipSharedEditsCheck]");
}
private NamenodeProtocol createNNProtocolProxy()
@ -200,7 +203,7 @@ public class BootstrapStandby implements Tool, Configurable {
// Ensure that we have enough edits already in the shared directory to
// start up from the last checkpoint on the active.
if (!checkLogsAvailableForRead(image, imageTxId, curTxId)) {
if (!skipSharedEditsCheck && !checkLogsAvailableForRead(image, imageTxId, curTxId)) {
return ERR_CODE_LOGS_UNAVAILABLE;
}


@ -28,6 +28,7 @@ import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.List;
import javax.servlet.ServletContext;
@ -84,6 +85,7 @@ import org.apache.hadoop.hdfs.web.resources.DelegationParam;
import org.apache.hadoop.hdfs.web.resources.DeleteOpParam;
import org.apache.hadoop.hdfs.web.resources.DestinationParam;
import org.apache.hadoop.hdfs.web.resources.DoAsParam;
import org.apache.hadoop.hdfs.web.resources.ExcludeDatanodesParam;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.GroupParam;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
@ -113,11 +115,13 @@ import org.apache.hadoop.hdfs.web.resources.XAttrValueParam;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.StringUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
@ -190,12 +194,26 @@ public class NamenodeWebHdfsMethods {
}
return np;
}
@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode,
final String path, final HttpOpParam.Op op, final long openOffset,
final long blocksize) throws IOException {
final long blocksize, final String excludeDatanodes) throws IOException {
final BlockManager bm = namenode.getNamesystem().getBlockManager();
HashSet<Node> excludes = new HashSet<Node>();
if (excludeDatanodes != null) {
for (String host : StringUtils
.getTrimmedStringCollection(excludeDatanodes)) {
int idx = host.indexOf(":");
if (idx != -1) {
excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(
host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
} else {
excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
}
}
}
if (op == PutOpParam.Op.CREATE) {
//choose a datanode near to the client
@ -204,7 +222,7 @@ public class NamenodeWebHdfsMethods {
if (clientNode != null) {
final DatanodeStorageInfo[] storages = bm.getBlockPlacementPolicy()
.chooseTarget(path, 1, clientNode,
new ArrayList<DatanodeStorageInfo>(), false, null, blocksize,
new ArrayList<DatanodeStorageInfo>(), false, excludes, blocksize,
// TODO: get storage type from the file
StorageType.DEFAULT);
if (storages.length > 0) {
@ -233,7 +251,7 @@ public class NamenodeWebHdfsMethods {
final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
final int count = locations.locatedBlockCount();
if (count > 0) {
return bestNode(locations.get(0).getLocations());
return bestNode(locations.get(0).getLocations(), excludes);
}
}
}
@ -247,11 +265,14 @@ public class NamenodeWebHdfsMethods {
* sorted based on availability and network distances, thus it is sufficient
* to return the first eligible (active, not excluded) element here.
*/
private static DatanodeInfo bestNode(DatanodeInfo[] nodes) throws IOException {
if (nodes.length == 0 || nodes[0].isDecommissioned()) {
throw new IOException("No active nodes contain this block");
private static DatanodeInfo bestNode(DatanodeInfo[] nodes,
HashSet<Node> excludes) throws IOException {
for (DatanodeInfo dn: nodes) {
if (!dn.isDecommissioned() && !excludes.contains(dn)) {
return dn;
}
}
return nodes[0];
throw new IOException("No active nodes contain this block");
}
private Token<? extends TokenIdentifier> generateDelegationToken(
@ -270,11 +291,12 @@ public class NamenodeWebHdfsMethods {
final UserGroupInformation ugi, final DelegationParam delegation,
final UserParam username, final DoAsParam doAsUser,
final String path, final HttpOpParam.Op op, final long openOffset,
final long blocksize,
final long blocksize, final String excludeDatanodes,
final Param<?, ?>... parameters) throws URISyntaxException, IOException {
final DatanodeInfo dn;
try {
dn = chooseDatanode(namenode, path, op, openOffset, blocksize);
dn = chooseDatanode(namenode, path, op, openOffset, blocksize,
excludeDatanodes);
} catch (InvalidTopologyException ite) {
throw new IOException("Failed to find datanode, suggest to check cluster health.", ite);
}
@ -361,13 +383,15 @@ public class NamenodeWebHdfsMethods {
@QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
final SnapshotNameParam snapshotName,
@QueryParam(OldSnapshotNameParam.NAME) @DefaultValue(OldSnapshotNameParam.DEFAULT)
final OldSnapshotNameParam oldSnapshotName
)throws IOException, InterruptedException {
final OldSnapshotNameParam oldSnapshotName,
@QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
final ExcludeDatanodesParam excludeDatanodes
) throws IOException, InterruptedException {
return put(ugi, delegation, username, doAsUser, ROOT, op, destination,
owner, group, permission, overwrite, bufferSize, replication,
blockSize, modificationTime, accessTime, renameOptions, createParent,
delegationTokenArgument, aclPermission, xattrName, xattrValue,
xattrSetFlag, snapshotName, oldSnapshotName);
xattrSetFlag, snapshotName, oldSnapshotName, excludeDatanodes);
}
/** Handle HTTP PUT request. */
@ -423,14 +447,16 @@ public class NamenodeWebHdfsMethods {
@QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
final SnapshotNameParam snapshotName,
@QueryParam(OldSnapshotNameParam.NAME) @DefaultValue(OldSnapshotNameParam.DEFAULT)
final OldSnapshotNameParam oldSnapshotName
final OldSnapshotNameParam oldSnapshotName,
@QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
final ExcludeDatanodesParam excludeDatanodes
) throws IOException, InterruptedException {
init(ugi, delegation, username, doAsUser, path, op, destination, owner,
group, permission, overwrite, bufferSize, replication, blockSize,
modificationTime, accessTime, renameOptions, delegationTokenArgument,
aclPermission, xattrName, xattrValue, xattrSetFlag, snapshotName,
oldSnapshotName);
oldSnapshotName, excludeDatanodes);
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
@ -441,7 +467,7 @@ public class NamenodeWebHdfsMethods {
permission, overwrite, bufferSize, replication, blockSize,
modificationTime, accessTime, renameOptions, createParent,
delegationTokenArgument, aclPermission, xattrName, xattrValue,
xattrSetFlag, snapshotName, oldSnapshotName);
xattrSetFlag, snapshotName, oldSnapshotName, excludeDatanodes);
} finally {
reset();
}
@ -474,7 +500,8 @@ public class NamenodeWebHdfsMethods {
final XAttrValueParam xattrValue,
final XAttrSetFlagParam xattrSetFlag,
final SnapshotNameParam snapshotName,
final OldSnapshotNameParam oldSnapshotName
final OldSnapshotNameParam oldSnapshotName,
final ExcludeDatanodesParam exclDatanodes
) throws IOException, URISyntaxException {
final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
@ -484,9 +511,10 @@ public class NamenodeWebHdfsMethods {
switch(op.getValue()) {
case CREATE:
{
final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
fullpath, op.getValue(), -1L, blockSize.getValue(conf),
permission, overwrite, bufferSize, replication, blockSize);
final URI uri = redirectURI(namenode, ugi, delegation, username,
doAsUser, fullpath, op.getValue(), -1L, blockSize.getValue(conf),
exclDatanodes.getValue(), permission, overwrite, bufferSize,
replication, blockSize);
return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case MKDIRS:
@ -619,9 +647,12 @@ public class NamenodeWebHdfsMethods {
@QueryParam(ConcatSourcesParam.NAME) @DefaultValue(ConcatSourcesParam.DEFAULT)
final ConcatSourcesParam concatSrcs,
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
final BufferSizeParam bufferSize
final BufferSizeParam bufferSize,
@QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
final ExcludeDatanodesParam excludeDatanodes
) throws IOException, InterruptedException {
return post(ugi, delegation, username, doAsUser, ROOT, op, concatSrcs, bufferSize);
return post(ugi, delegation, username, doAsUser, ROOT, op, concatSrcs,
bufferSize, excludeDatanodes);
}
/** Handle HTTP POST request. */
@ -643,17 +674,21 @@ public class NamenodeWebHdfsMethods {
@QueryParam(ConcatSourcesParam.NAME) @DefaultValue(ConcatSourcesParam.DEFAULT)
final ConcatSourcesParam concatSrcs,
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
final BufferSizeParam bufferSize
final BufferSizeParam bufferSize,
@QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
final ExcludeDatanodesParam excludeDatanodes
) throws IOException, InterruptedException {
init(ugi, delegation, username, doAsUser, path, op, concatSrcs, bufferSize);
init(ugi, delegation, username, doAsUser, path, op, concatSrcs, bufferSize,
excludeDatanodes);
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException, URISyntaxException {
try {
return post(ugi, delegation, username, doAsUser,
path.getAbsolutePath(), op, concatSrcs, bufferSize);
path.getAbsolutePath(), op, concatSrcs, bufferSize,
excludeDatanodes);
} finally {
reset();
}
@ -669,15 +704,17 @@ public class NamenodeWebHdfsMethods {
final String fullpath,
final PostOpParam op,
final ConcatSourcesParam concatSrcs,
final BufferSizeParam bufferSize
final BufferSizeParam bufferSize,
final ExcludeDatanodesParam excludeDatanodes
) throws IOException, URISyntaxException {
final NameNode namenode = (NameNode)context.getAttribute("name.node");
switch(op.getValue()) {
case APPEND:
{
final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
fullpath, op.getValue(), -1L, -1L, bufferSize);
final URI uri = redirectURI(namenode, ugi, delegation, username,
doAsUser, fullpath, op.getValue(), -1L, -1L,
excludeDatanodes.getValue(), bufferSize);
return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case CONCAT:
@ -715,10 +752,12 @@ public class NamenodeWebHdfsMethods {
@QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT)
final List<XAttrNameParam> xattrNames,
@QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT)
final XAttrEncodingParam xattrEncoding
final XAttrEncodingParam xattrEncoding,
@QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
final ExcludeDatanodesParam excludeDatanodes
) throws IOException, InterruptedException {
return get(ugi, delegation, username, doAsUser, ROOT, op, offset, length,
renewer, bufferSize, xattrNames, xattrEncoding);
renewer, bufferSize, xattrNames, xattrEncoding, excludeDatanodes);
}
/** Handle HTTP GET request. */
@ -747,11 +786,13 @@ public class NamenodeWebHdfsMethods {
@QueryParam(XAttrNameParam.NAME) @DefaultValue(XAttrNameParam.DEFAULT)
final List<XAttrNameParam> xattrNames,
@QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT)
final XAttrEncodingParam xattrEncoding
final XAttrEncodingParam xattrEncoding,
@QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
final ExcludeDatanodesParam excludeDatanodes
) throws IOException, InterruptedException {
init(ugi, delegation, username, doAsUser, path, op, offset, length,
renewer, bufferSize, xattrEncoding);
renewer, bufferSize, xattrEncoding, excludeDatanodes);
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
@ -759,7 +800,7 @@ public class NamenodeWebHdfsMethods {
try {
return get(ugi, delegation, username, doAsUser,
path.getAbsolutePath(), op, offset, length, renewer, bufferSize,
xattrNames, xattrEncoding);
xattrNames, xattrEncoding, excludeDatanodes);
} finally {
reset();
}
@ -779,7 +820,8 @@ public class NamenodeWebHdfsMethods {
final RenewerParam renewer,
final BufferSizeParam bufferSize,
final List<XAttrNameParam> xattrNames,
final XAttrEncodingParam xattrEncoding
final XAttrEncodingParam xattrEncoding,
final ExcludeDatanodesParam excludeDatanodes
) throws IOException, URISyntaxException {
final NameNode namenode = (NameNode)context.getAttribute("name.node");
final NamenodeProtocols np = getRPCServer(namenode);
@ -787,8 +829,9 @@ public class NamenodeWebHdfsMethods {
switch(op.getValue()) {
case OPEN:
{
final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
fullpath, op.getValue(), offset.getValue(), -1L, offset, length, bufferSize);
final URI uri = redirectURI(namenode, ugi, delegation, username,
doAsUser, fullpath, op.getValue(), offset.getValue(), -1L,
excludeDatanodes.getValue(), offset, length, bufferSize);
return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case GET_BLOCK_LOCATIONS:
@ -824,7 +867,7 @@ public class NamenodeWebHdfsMethods {
case GETFILECHECKSUM:
{
final URI uri = redirectURI(namenode, ugi, delegation, username, doAsUser,
fullpath, op.getValue(), -1L, -1L);
fullpath, op.getValue(), -1L, -1L, null);
return Response.temporaryRedirect(uri).type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case GETDELEGATIONTOKEN:


@ -77,7 +77,7 @@ public class DFSck extends Configured implements Tool {
private static final String USAGE = "Usage: DFSck <path> "
+ "[-list-corruptfileblocks | "
+ "[-move | -delete | -openforwrite] "
+ "[-files [-blocks [-locations | -racks]]]]\n"
+ "[-files [-blocks [-locations | -racks]]]] [-showprogress]\n"
+ "\t<path>\tstart checking from this path\n"
+ "\t-move\tmove corrupted files to /lost+found\n"
+ "\t-delete\tdelete corrupted files\n"
@ -90,7 +90,8 @@ public class DFSck extends Configured implements Tool {
+ "blocks and files they belong to\n"
+ "\t-blocks\tprint out block report\n"
+ "\t-locations\tprint out locations for every block\n"
+ "\t-racks\tprint out network topology for data-node locations\n\n"
+ "\t-racks\tprint out network topology for data-node locations\n"
+ "\t-showprogress\tshow progress in output. Default is OFF (no progress)\n\n"
+ "Please Note:\n"
+ "\t1. By default fsck ignores files opened for write, "
+ "use -openforwrite to report such files. They are usually "
@ -270,6 +271,7 @@ public class DFSck extends Configured implements Tool {
else if (args[idx].equals("-blocks")) { url.append("&blocks=1"); }
else if (args[idx].equals("-locations")) { url.append("&locations=1"); }
else if (args[idx].equals("-racks")) { url.append("&racks=1"); }
else if (args[idx].equals("-showprogress")) { url.append("&showprogress=1"); }
else if (args[idx].equals("-list-corruptfileblocks")) {
url.append("&listcorruptfileblocks=1");
doListCorruptFileBlocks = true;


@ -158,7 +158,7 @@ public class WebHdfsFileSystem extends FileSystem
// getCanonicalUri() in order to handle the case where no port is
// specified in the URI
this.tokenServiceName = isLogicalUri ?
HAUtil.buildTokenServiceForLogicalUri(uri)
HAUtil.buildTokenServiceForLogicalUri(uri, getScheme())
: SecurityUtil.buildTokenService(getCanonicalUri());
if (!isHA) {
@ -448,6 +448,7 @@ public class WebHdfsFileSystem extends FileSystem
protected final HttpOpParam.Op op;
private final boolean redirected;
protected ExcludeDatanodesParam excludeDatanodes = new ExcludeDatanodesParam("");
private boolean checkRetry;
@ -499,6 +500,10 @@ public class WebHdfsFileSystem extends FileSystem
* a DN such as open and checksum
*/
private HttpURLConnection connect(URL url) throws IOException {
//redirect hostname and port
String redirectHost = null;
// resolve redirects for a DN operation unless already resolved
if (op.getRedirect() && !redirected) {
final HttpOpParam.Op redirectOp =
@ -511,11 +516,24 @@ public class WebHdfsFileSystem extends FileSystem
try {
validateResponse(redirectOp, conn, false);
url = new URL(conn.getHeaderField("Location"));
redirectHost = url.getHost() + ":" + url.getPort();
} finally {
conn.disconnect();
}
}
return connect(op, url);
try {
return connect(op, url);
} catch (IOException ioe) {
if (redirectHost != null) {
if (excludeDatanodes.getValue() != null) {
excludeDatanodes = new ExcludeDatanodesParam(redirectHost + ","
+ excludeDatanodes.getValue());
} else {
excludeDatanodes = new ExcludeDatanodesParam(redirectHost);
}
}
throw ioe;
}
}
private HttpURLConnection connect(final HttpOpParam.Op op, final URL url)
@ -652,7 +670,14 @@ public class WebHdfsFileSystem extends FileSystem
@Override
protected URL getUrl() throws IOException {
return toUrl(op, fspath, parameters);
if (excludeDatanodes.getValue() != null) {
Param<?, ?>[] tmpParam = new Param<?, ?>[parameters.length + 1];
System.arraycopy(parameters, 0, tmpParam, 0, parameters.length);
tmpParam[parameters.length] = excludeDatanodes;
return toUrl(op, fspath, tmpParam);
} else {
return toUrl(op, fspath, parameters);
}
}
}
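The effect of the two changes above is a simple retry protocol: remember the datanode a failed redirect pointed at, and send it back to the namenode as an exclusion on the next attempt. A stripped-down sketch of that state (stand-in type, not the real WebHDFS runner):

class RedirectRetrySketch {
  private String excludeDatanodes = "";   // comma-separated host:port list

  // Record a failed redirect target so the next attempt avoids it.
  void recordFailure(String redirectHost) {
    if (redirectHost == null) {
      return;
    }
    excludeDatanodes = excludeDatanodes.isEmpty()
        ? redirectHost
        : redirectHost + "," + excludeDatanodes;
  }

  String excludeParam() {
    return excludeDatanodes;              // sent as excludedatanodes=...
  }
}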


@ -0,0 +1,42 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;
/** Exclude datanodes param */
public class ExcludeDatanodesParam extends StringParam {
/** Parameter name. */
public static final String NAME = "excludedatanodes";
/** Default parameter value. */
public static final String DEFAULT = "";
private static final Domain DOMAIN = new Domain(NAME, null);
/**
* Constructor.
* @param str a string representation of the parameter value.
*/
public ExcludeDatanodesParam(final String str) {
super(DOMAIN, str == null || str.equals(DEFAULT)? null: DOMAIN.parse(str));
}
@Override
public String getName() {
return NAME;
}
}
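For illustration, the parameter rides on the query string of any redirecting WebHDFS operation; the hostnames and ports below are hypothetical:

http://namenode.example.com:50070/webhdfs/v1/user/alice/data.txt?op=OPEN&excludedatanodes=dn1.example.com:50010,dn2.example.com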


@ -24,6 +24,29 @@
<title>Browsing HDFS</title>
</head>
<body>
<header class="navbar navbar-inverse bs-docs-nav" role="banner">
<div class="container">
<div class="navbar-header">
<div class="navbar-brand">Hadoop</div>
</div>
<ul class="nav navbar-nav" id="ui-tabs">
<li><a href="dfshealth.html#tab-overview">Overview</a></li>
<li><a href="dfshealth.html#tab-datanode">Datanodes</a></li>
<li><a href="dfshealth.html#tab-snapshot">Snapshot</a></li>
<li><a href="dfshealth.html#tab-startup-progress">Startup Progress</a></li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown">Utilities <b class="caret"></b></a>
<ul class="dropdown-menu">
<li><a href="#">Browse the file system</a></li>
<li><a href="logs">Logs</a></li>
</ul>
</li>
</ul>
</div>
</header>
<div class="modal" id="file-info" tabindex="-1" role="dialog" aria-hidden="true">
<div class="modal-dialog">
<div class="modal-content">
@ -73,6 +96,12 @@
</div>
<br />
<div id="panel"></div>
<div class="row">
<hr />
<div class="col-xs-2"><p>Hadoop, 2014.</p></div>
</div>
</div>
<script type="text/x-dust-template" id="tmpl-explorer">
@ -126,7 +155,5 @@
</script><script type="text/javascript" src="/static/dfs-dust.js">
</script><script type="text/javascript" src="explorer.js">
</script>
<hr />
<p>Hadoop, 2014.</p>
</body>
</html>


@ -0,0 +1,190 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.IOException;
import java.util.List;
import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
/**
* Verify ACL through ViewFileSystem functionality.
*/
public class TestViewFileSystemWithAcls {
private static MiniDFSCluster cluster;
private static Configuration clusterConf = new Configuration();
private static FileSystem fHdfs;
private static FileSystem fHdfs2;
private FileSystem fsView;
private Configuration fsViewConf;
private FileSystem fsTarget, fsTarget2;
private Path targetTestRoot, targetTestRoot2, mountOnNn1, mountOnNn2;
private FileSystemTestHelper fileSystemTestHelper =
new FileSystemTestHelper("/tmp/TestViewFileSystemWithAcls");
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(clusterConf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
.numDataNodes(2)
.build();
cluster.waitClusterUp();
fHdfs = cluster.getFileSystem(0);
fHdfs2 = cluster.getFileSystem(1);
}
@AfterClass
public static void ClusterShutdownAtEnd() throws Exception {
cluster.shutdown();
}
@Before
public void setUp() throws Exception {
fsTarget = fHdfs;
fsTarget2 = fHdfs2;
targetTestRoot = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
targetTestRoot2 = fileSystemTestHelper.getAbsoluteTestRootPath(fsTarget2);
fsTarget.delete(targetTestRoot, true);
fsTarget2.delete(targetTestRoot2, true);
fsTarget.mkdirs(targetTestRoot);
fsTarget2.mkdirs(targetTestRoot2);
fsViewConf = ViewFileSystemTestSetup.createConfig();
setupMountPoints();
fsView = FileSystem.get(FsConstants.VIEWFS_URI, fsViewConf);
}
private void setupMountPoints() {
mountOnNn1 = new Path("/mountOnNn1");
mountOnNn2 = new Path("/mountOnNn2");
ConfigUtil.addLink(fsViewConf, mountOnNn1.toString(), targetTestRoot.toUri());
ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
}
@After
public void tearDown() throws Exception {
fsTarget.delete(fileSystemTestHelper.getTestRootPath(fsTarget), true);
fsTarget2.delete(fileSystemTestHelper.getTestRootPath(fsTarget2), true);
}
/**
* Verify that a ViewFileSystem wrapped over multiple federated NameNodes
* dispatches the ACL operations to the correct NameNode.
*/
@Test
public void testAclOnMountEntry() throws Exception {
// Set ACLs on the first namespace and verify they are correct
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, NONE));
fsView.setAcl(mountOnNn1, aclSpec);
AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ) };
assertArrayEquals(expected, aclEntryArray(fsView.getAclStatus(mountOnNn1)));
// Double-check by getting ACL status using FileSystem
// instead of ViewFs
assertArrayEquals(expected, aclEntryArray(fHdfs.getAclStatus(targetTestRoot)));
// Modify the ACL entries on the first namespace
aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ));
fsView.modifyAclEntries(mountOnNn1, aclSpec);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(DEFAULT, USER, READ_WRITE),
aclEntry(DEFAULT, USER, "foo", READ),
aclEntry(DEFAULT, GROUP, READ),
aclEntry(DEFAULT, MASK, READ),
aclEntry(DEFAULT, OTHER, NONE) };
assertArrayEquals(expected, aclEntryArray(fsView.getAclStatus(mountOnNn1)));
fsView.removeDefaultAcl(mountOnNn1);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ) };
assertArrayEquals(expected, aclEntryArray(fsView.getAclStatus(mountOnNn1)));
assertArrayEquals(expected, aclEntryArray(fHdfs.getAclStatus(targetTestRoot)));
// Paranoid check: verify the other namespace does not
// have ACLs set on the same path.
assertEquals(0, fsView.getAclStatus(mountOnNn2).getEntries().size());
assertEquals(0, fHdfs2.getAclStatus(targetTestRoot2).getEntries().size());
// Remove the ACL entries on the first namespace
fsView.removeAcl(mountOnNn1);
assertEquals(0, fsView.getAclStatus(mountOnNn1).getEntries().size());
assertEquals(0, fHdfs.getAclStatus(targetTestRoot).getEntries().size());
// Now set ACLs on the second namespace
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bar", READ));
fsView.modifyAclEntries(mountOnNn2, aclSpec);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bar", READ),
aclEntry(ACCESS, GROUP, READ_EXECUTE) };
assertArrayEquals(expected, aclEntryArray(fsView.getAclStatus(mountOnNn2)));
assertArrayEquals(expected, aclEntryArray(fHdfs2.getAclStatus(targetTestRoot2)));
// Remove the ACL entries on the second namespace
fsView.removeAclEntries(mountOnNn2, Lists.newArrayList(
aclEntry(ACCESS, USER, "bar", READ)
));
expected = new AclEntry[] { aclEntry(ACCESS, GROUP, READ_EXECUTE) };
assertArrayEquals(expected, aclEntryArray(fHdfs2.getAclStatus(targetTestRoot2)));
fsView.removeAcl(mountOnNn2);
assertEquals(0, fsView.getAclStatus(mountOnNn2).getEntries().size());
assertEquals(0, fHdfs2.getAclStatus(targetTestRoot2).getEntries().size());
}
private AclEntry[] aclEntryArray(AclStatus aclStatus) {
return aclStatus.getEntries().toArray(new AclEntry[0]);
}
}


@ -0,0 +1,190 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.util.List;
import java.io.IOException;
import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.fs.permission.FsAction.NONE;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
/**
* Verify ACL through ViewFs functionality.
*/
public class TestViewFsWithAcls {
private static MiniDFSCluster cluster;
private static Configuration clusterConf = new Configuration();
private static FileContext fc, fc2;
private FileContext fcView, fcTarget, fcTarget2;
private Configuration fsViewConf;
private Path targetTestRoot, targetTestRoot2, mountOnNn1, mountOnNn2;
private FileContextTestHelper fileContextTestHelper =
new FileContextTestHelper("/tmp/TestViewFsWithAcls");
@BeforeClass
public static void clusterSetupAtBeginning() throws IOException {
clusterConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(clusterConf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
.numDataNodes(2)
.build();
cluster.waitClusterUp();
fc = FileContext.getFileContext(cluster.getURI(0), clusterConf);
fc2 = FileContext.getFileContext(cluster.getURI(1), clusterConf);
}
@AfterClass
public static void ClusterShutdownAtEnd() throws Exception {
cluster.shutdown();
}
@Before
public void setUp() throws Exception {
fcTarget = fc;
fcTarget2 = fc2;
targetTestRoot = fileContextTestHelper.getAbsoluteTestRootPath(fc);
targetTestRoot2 = fileContextTestHelper.getAbsoluteTestRootPath(fc2);
fcTarget.delete(targetTestRoot, true);
fcTarget2.delete(targetTestRoot2, true);
fcTarget.mkdir(targetTestRoot, new FsPermission((short)0750), true);
fcTarget2.mkdir(targetTestRoot2, new FsPermission((short)0750), true);
fsViewConf = ViewFileSystemTestSetup.createConfig();
setupMountPoints();
fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, fsViewConf);
}
private void setupMountPoints() {
mountOnNn1 = new Path("/mountOnNn1");
mountOnNn2 = new Path("/mountOnNn2");
ConfigUtil.addLink(fsViewConf, mountOnNn1.toString(), targetTestRoot.toUri());
ConfigUtil.addLink(fsViewConf, mountOnNn2.toString(), targetTestRoot2.toUri());
}
@After
public void tearDown() throws Exception {
fcTarget.delete(fileContextTestHelper.getTestRootPath(fcTarget), true);
fcTarget2.delete(fileContextTestHelper.getTestRootPath(fcTarget2), true);
}
/**
* Verify that a ViewFs wrapped over multiple federated NameNodes
* dispatches the ACL operations to the correct NameNode.
*/
@Test
public void testAclOnMountEntry() throws Exception {
// Set ACLs on the first namespace and verify they are correct
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, NONE));
fcView.setAcl(mountOnNn1, aclSpec);
AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ) };
assertArrayEquals(expected, aclEntryArray(fcView.getAclStatus(mountOnNn1)));
// Double-check by getting ACL status using FileContext
// instead of ViewFs
assertArrayEquals(expected, aclEntryArray(fc.getAclStatus(targetTestRoot)));
// Modify the ACL entries on the first namespace
aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ));
fcView.modifyAclEntries(mountOnNn1, aclSpec);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(DEFAULT, USER, READ_WRITE),
aclEntry(DEFAULT, USER, "foo", READ),
aclEntry(DEFAULT, GROUP, READ),
aclEntry(DEFAULT, MASK, READ),
aclEntry(DEFAULT, OTHER, NONE) };
assertArrayEquals(expected, aclEntryArray(fcView.getAclStatus(mountOnNn1)));
fcView.removeDefaultAcl(mountOnNn1);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ) };
assertArrayEquals(expected, aclEntryArray(fcView.getAclStatus(mountOnNn1)));
assertArrayEquals(expected, aclEntryArray(fc.getAclStatus(targetTestRoot)));
// Paranoid check: verify the other namespace does not
// have ACLs set on the same path.
assertEquals(0, fcView.getAclStatus(mountOnNn2).getEntries().size());
assertEquals(0, fc2.getAclStatus(targetTestRoot2).getEntries().size());
// Remove the ACL entries on the first namespace
fcView.removeAcl(mountOnNn1);
assertEquals(0, fcView.getAclStatus(mountOnNn1).getEntries().size());
assertEquals(0, fc.getAclStatus(targetTestRoot).getEntries().size());
// Now set ACLs on the second namespace
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bar", READ));
fcView.modifyAclEntries(mountOnNn2, aclSpec);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bar", READ),
aclEntry(ACCESS, GROUP, READ_EXECUTE) };
assertArrayEquals(expected, aclEntryArray(fcView.getAclStatus(mountOnNn2)));
assertArrayEquals(expected, aclEntryArray(fc2.getAclStatus(targetTestRoot2)));
// Remove the ACL entries on the second namespace
fcView.removeAclEntries(mountOnNn2, Lists.newArrayList(
aclEntry(ACCESS, USER, "bar", READ)
));
expected = new AclEntry[] { aclEntry(ACCESS, GROUP, READ_EXECUTE) };
assertArrayEquals(expected, aclEntryArray(fc2.getAclStatus(targetTestRoot2)));
fcView.removeAcl(mountOnNn2);
assertEquals(0, fcView.getAclStatus(mountOnNn2).getEntries().size());
assertEquals(0, fc2.getAclStatus(targetTestRoot2).getEntries().size());
}
private AclEntry[] aclEntryArray(AclStatus aclStatus) {
return aclStatus.getEntries().toArray(new AclEntry[0]);
}
}


@ -30,6 +30,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHEC
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@ -79,6 +81,7 @@ import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
@ -97,6 +100,8 @@ public class TestFileCreation {
((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
}
private static final String RPC_DETAILED_METRICS =
"RpcDetailedActivityForPort";
static final long seed = 0xDEADBEEFL;
static final int blockSize = 8192;
@ -371,7 +376,7 @@ public class TestFileCreation {
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
UserGroupInformation otherUgi = UserGroupInformation.createUserForTesting(
"testuser", new String[]{"testgroup"});
FileSystem fs2 = otherUgi.doAs(new PrivilegedExceptionAction<FileSystem>() {
@ -380,12 +385,16 @@ public class TestFileCreation {
return FileSystem.get(cluster.getConfiguration(0));
}
});
String metricsName = RPC_DETAILED_METRICS + cluster.getNameNodePort();
try {
Path p = new Path("/testfile");
FSDataOutputStream stm1 = fs.create(p);
stm1.write(1);
assertCounter("CreateNumOps", 1L, getMetrics(metricsName));
// Create file again without overwrite
try {
fs2.create(p, false);
@ -394,7 +403,9 @@ public class TestFileCreation {
GenericTestUtils.assertExceptionContains("already being created by",
abce);
}
// NameNodeProxies' createNNProxyWithClientProtocol has 5 retries.
assertCounter("AlreadyBeingCreatedExceptionNumOps",
6L, getMetrics(metricsName));
FSDataOutputStream stm2 = fs2.create(p, true);
stm2.write(2);
stm2.close();
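
A compact hedged sketch of the metric-lookup pattern these assertions rely on ("cluster" is assumed to be a started MiniDFSCluster): the detailed RPC metrics source is registered per listener port, and after HADOOP-10673 each "<Method>NumOps" counter is also bumped when the call throws, which is what makes the exception path countable:

import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

void verifyCreateMetrics(org.apache.hadoop.hdfs.MiniDFSCluster cluster) {
  String metricsName = "RpcDetailedActivityForPort" + cluster.getNameNodePort();
  // One successful create() is counted once.
  assertCounter("CreateNumOps", 1L, getMetrics(metricsName));
  // A failed create(p, false) is retried 5 times by the client-side proxy,
  // so the server counts 1 + 5 = 6 invocations in total.
  assertCounter("AlreadyBeingCreatedExceptionNumOps", 6L,
      getMetrics(metricsName));
}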

View File

@ -25,14 +25,16 @@ import java.net.InetSocketAddress;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.UserGroupInformation;
@ -76,16 +78,22 @@ public class TestIsMethodSupported {
@Test
public void testNamenodeProtocol() throws IOException {
NamenodeProtocolTranslatorPB translator =
(NamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(conf,
NamenodeProtocol np =
NameNodeProxies.createNonHAProxy(conf,
nnAddress, NamenodeProtocol.class, UserGroupInformation.getCurrentUser(),
true).getProxy();
boolean exists = translator.isMethodSupported("rollEditLog");
boolean exists = RpcClientUtil.isMethodSupported(np,
NamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(NamenodeProtocolPB.class), "rollEditLog");
assertTrue(exists);
exists = translator.isMethodSupported("bogusMethod");
exists = RpcClientUtil.isMethodSupported(np,
NamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(NamenodeProtocolPB.class), "bogusMethod");
assertFalse(exists);
}
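
The rewritten probe, restated as a hedged sketch (np is any NamenodeProtocol proxy; swap the *PB class and method name for other protocols): rather than down-casting the proxy to its protobuf translator, the caller asks RpcClientUtil directly:

import java.io.IOException;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;

static boolean isSupported(NamenodeProtocol np, String method)
    throws IOException {
  return RpcClientUtil.isMethodSupported(
      np,                               // client-side proxy object
      NamenodeProtocolPB.class,         // protobuf protocol interface
      RPC.RpcKind.RPC_PROTOCOL_BUFFER,  // wire engine
      RPC.getProtocolVersion(NamenodeProtocolPB.class),
      method);                          // e.g. "rollEditLog"
}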
@Test
public void testDatanodeProtocol() throws IOException {
DatanodeProtocolClientSideTranslatorPB translator =
@ -107,16 +115,18 @@ public class TestIsMethodSupported {
NetUtils.getDefaultSocketFactory(conf));
assertTrue(translator.isMethodSupported("refreshNamenodes"));
}
@Test
public void testClientNamenodeProtocol() throws IOException {
ClientNamenodeProtocolTranslatorPB translator =
(ClientNamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(
ClientProtocol cp =
NameNodeProxies.createNonHAProxy(
conf, nnAddress, ClientProtocol.class,
UserGroupInformation.getCurrentUser(), true).getProxy();
assertTrue(translator.isMethodSupported("mkdirs"));
assertTrue(RpcClientUtil.isMethodSupported(cp,
ClientNamenodeProtocolPB.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
RPC.getProtocolVersion(ClientNamenodeProtocolPB.class), "mkdirs"));
}
@Test
public void tesJournalProtocol() throws IOException {
JournalProtocolTranslatorPB translator = (JournalProtocolTranslatorPB)

View File

@ -89,7 +89,8 @@ public class TestDelegationTokenForProxyUser {
builder.append("127.0.1.1,");
builder.append(InetAddress.getLocalHost().getCanonicalHostName());
LOG.info("Local Ip addresses: " + builder.toString());
conf.setStrings(DefaultImpersonationProvider.getProxySuperuserIpConfKey(superUserShortName),
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserIpConfKey(superUserShortName),
builder.toString());
}
@ -101,7 +102,8 @@ public class TestDelegationTokenForProxyUser {
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
config.setLong(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
config.setStrings(DefaultImpersonationProvider.getProxySuperuserGroupConfKey(REAL_USER),
config.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER),
"group1");
config.setBoolean(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
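
For reference, a minimal hedged sketch of the new call shape (the superuser name and values are hypothetical): the static key builders became instance methods on DefaultImpersonationProvider so the "hadoop.proxyuser" prefix can be made configurable (HADOOP-10817):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;

static void configureProxyUser(Configuration conf) {
  DefaultImpersonationProvider p =
      DefaultImpersonationProvider.getTestProvider();
  // With the default prefix these keys expand to
  // hadoop.proxyuser.oozie.groups and hadoop.proxyuser.oozie.hosts.
  conf.setStrings(p.getProxySuperuserGroupConfKey("oozie"), "group1");
  conf.setStrings(p.getProxySuperuserIpConfKey("oozie"), "127.0.0.1");
}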

View File

@ -905,49 +905,46 @@ public class TestReplicationPolicy {
*/
@Test
public void testChooseReplicaToDelete() throws Exception {
List<DatanodeDescriptor> replicaNodeList = new
ArrayList<DatanodeDescriptor>();
final Map<String, List<DatanodeDescriptor>> rackMap
= new HashMap<String, List<DatanodeDescriptor>>();
List<DatanodeStorageInfo> replicaList = new ArrayList<DatanodeStorageInfo>();
final Map<String, List<DatanodeStorageInfo>> rackMap
= new HashMap<String, List<DatanodeStorageInfo>>();
dataNodes[0].setRemaining(4*1024*1024);
replicaNodeList.add(dataNodes[0]);
replicaList.add(storages[0]);
dataNodes[1].setRemaining(3*1024*1024);
replicaNodeList.add(dataNodes[1]);
replicaList.add(storages[1]);
dataNodes[2].setRemaining(2*1024*1024);
replicaNodeList.add(dataNodes[2]);
replicaList.add(storages[2]);
dataNodes[5].setRemaining(1*1024*1024);
replicaNodeList.add(dataNodes[5]);
replicaList.add(storages[5]);
// Refresh the last update time for all the datanodes
for (int i = 0; i < dataNodes.length; i++) {
dataNodes[i].setLastUpdate(Time.now());
}
List<DatanodeDescriptor> first = new ArrayList<DatanodeDescriptor>();
List<DatanodeDescriptor> second = new ArrayList<DatanodeDescriptor>();
replicator.splitNodesWithRack(
replicaNodeList, rackMap, first, second);
// dataNodes[0] and dataNodes[1] are in first set as their rack has two
// replica nodes, while datanodes[2] and dataNodes[5] are in second set.
List<DatanodeStorageInfo> first = new ArrayList<DatanodeStorageInfo>();
List<DatanodeStorageInfo> second = new ArrayList<DatanodeStorageInfo>();
replicator.splitNodesWithRack(replicaList, rackMap, first, second);
// storages[0] and storages[1] are in the first set as their rack has two
// replica nodes, while storages[2] and storages[5] are in the second set.
assertEquals(2, first.size());
assertEquals(2, second.size());
DatanodeDescriptor chosenNode = replicator.chooseReplicaToDelete(
DatanodeStorageInfo chosen = replicator.chooseReplicaToDelete(
null, null, (short)3, first, second);
// Within first set, dataNodes[1] with less free space
assertEquals(chosenNode, dataNodes[1]);
// Within the first set, storages[1] is chosen as it has the least free space
assertEquals(chosen, storages[1]);
replicator.adjustSetsWithChosenReplica(
rackMap, first, second, chosenNode);
replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
assertEquals(0, first.size());
assertEquals(3, second.size());
// Within second set, dataNodes[5] with less free space
chosenNode = replicator.chooseReplicaToDelete(
// Within the second set, storages[5] is chosen as it has the least free space
chosen = replicator.chooseReplicaToDelete(
null, null, (short)2, first, second);
assertEquals(chosenNode, dataNodes[5]);
assertEquals(chosen, storages[5]);
}
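
Abstracted from the test, a hedged sketch of the three-step flow under the new DatanodeStorageInfo-based signatures (replicator and replicaList come from the test fixture):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;

// 1. Partition replicas by rack: storages whose rack holds more than one
//    replica go to the first list, lone-rack storages to the second.
final Map<String, List<DatanodeStorageInfo>> rackMap =
    new HashMap<String, List<DatanodeStorageInfo>>();
List<DatanodeStorageInfo> moreThanOne = new ArrayList<DatanodeStorageInfo>();
List<DatanodeStorageInfo> exactlyOne = new ArrayList<DatanodeStorageInfo>();
replicator.splitNodesWithRack(replicaList, rackMap, moreThanOne, exactlyOne);

// 2. Pick a victim: the crowded-rack set is preferred, and within it the
//    storage whose datanode has the least space remaining.
DatanodeStorageInfo chosen = replicator.chooseReplicaToDelete(
    null, null, (short) 3, moreThanOne, exactlyOne);

// 3. Rebalance the sets so the next pick sees an up-to-date view.
replicator.adjustSetsWithChosenReplica(rackMap, moreThanOne, exactlyOne, chosen);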
/**

View File

@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.test.PathUtils;
@ -101,6 +102,7 @@ public class TestReplicationPolicyConsiderLoad {
}
}
private final double EPSILON = 0.0001;
/**
* Tests that chooseTarget with considerLoad set to true correctly calculates
* load with decommissioned nodes.
@ -109,14 +111,6 @@ public class TestReplicationPolicyConsiderLoad {
public void testChooseTargetWithDecomNodes() throws IOException {
namenode.getNamesystem().writeLock();
try {
// Decommission DNs so BlockPlacementPolicyDefault.isGoodTarget()
// returns false
for (int i = 0; i < 3; i++) {
DatanodeInfo d = dnManager.getDatanodeByXferAddr(
dnrList.get(i).getIpAddr(),
dnrList.get(i).getXferPort());
d.setDecommissioned();
}
String blockPoolId = namenode.getNamesystem().getBlockPoolId();
dnManager.handleHeartbeat(dnrList.get(3),
BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[3]),
@ -133,6 +127,20 @@ public class TestReplicationPolicyConsiderLoad {
blockPoolId, dataNodes[5].getCacheCapacity(),
dataNodes[5].getCacheRemaining(),
4, 0, 0);
// Total xceiver load reported by the above heartbeats
final int load = 2 + 4 + 4;
FSNamesystem fsn = namenode.getNamesystem();
assertEquals((double)load/6, fsn.getInServiceXceiverAverage(), EPSILON);
// Decommission DNs so BlockPlacementPolicyDefault.isGoodTarget()
// returns false
for (int i = 0; i < 3; i++) {
DatanodeDescriptor d = dnManager.getDatanode(dnrList.get(i));
dnManager.startDecommission(d);
d.setDecommissioned();
}
assertEquals((double)load/3, fsn.getInServiceXceiverAverage(), EPSILON);
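
The two asserted averages are plain arithmetic over the heartbeat data, restated as a worked sketch:

// The heartbeats above report 2, 4, and 4 active xceivers.
int load = 2 + 4 + 4;                          // total cluster load = 10
double allInService = (double) load / 6;       // 6 DNs in service -> ~1.67
double afterDecommission = (double) load / 3;  // 3 remain         -> ~3.33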
// Call chooseTarget()
DatanodeStorageInfo[] targets = namenode.getNamesystem().getBlockManager()

View File

@ -591,51 +591,50 @@ public class TestReplicationPolicyWithNodeGroup {
*/
@Test
public void testChooseReplicaToDelete() throws Exception {
List<DatanodeDescriptor> replicaNodeList =
new ArrayList<DatanodeDescriptor>();
final Map<String, List<DatanodeDescriptor>> rackMap =
new HashMap<String, List<DatanodeDescriptor>>();
List<DatanodeStorageInfo> replicaList = new ArrayList<DatanodeStorageInfo>();
final Map<String, List<DatanodeStorageInfo>> rackMap
= new HashMap<String, List<DatanodeStorageInfo>>();
dataNodes[0].setRemaining(4*1024*1024);
replicaNodeList.add(dataNodes[0]);
replicaList.add(storages[0]);
dataNodes[1].setRemaining(3*1024*1024);
replicaNodeList.add(dataNodes[1]);
replicaList.add(storages[1]);
dataNodes[2].setRemaining(2*1024*1024);
replicaNodeList.add(dataNodes[2]);
replicaList.add(storages[2]);
dataNodes[5].setRemaining(1*1024*1024);
replicaNodeList.add(dataNodes[5]);
replicaList.add(storages[5]);
List<DatanodeDescriptor> first = new ArrayList<DatanodeDescriptor>();
List<DatanodeDescriptor> second = new ArrayList<DatanodeDescriptor>();
List<DatanodeStorageInfo> first = new ArrayList<DatanodeStorageInfo>();
List<DatanodeStorageInfo> second = new ArrayList<DatanodeStorageInfo>();
replicator.splitNodesWithRack(
replicaNodeList, rackMap, first, second);
replicaList, rackMap, first, second);
assertEquals(3, first.size());
assertEquals(1, second.size());
DatanodeDescriptor chosenNode = replicator.chooseReplicaToDelete(
DatanodeStorageInfo chosen = replicator.chooseReplicaToDelete(
null, null, (short)3, first, second);
// Within the first set {dataNodes[0], dataNodes[1], dataNodes[2]},
// dataNodes[0] and dataNodes[1] are in the same nodegroup,
// but dataNodes[1] is chosen because it has less free space
assertEquals(chosenNode, dataNodes[1]);
assertEquals(chosen, storages[1]);
replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosenNode);
replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
assertEquals(2, first.size());
assertEquals(1, second.size());
// Within the first set {dataNodes[0], dataNodes[2]}, dataNodes[2] is chosen
// because it has less free space
chosenNode = replicator.chooseReplicaToDelete(
chosen = replicator.chooseReplicaToDelete(
null, null, (short)2, first, second);
assertEquals(chosenNode, dataNodes[2]);
assertEquals(chosen, storages[2]);
replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosenNode);
replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
assertEquals(0, first.size());
assertEquals(2, second.size());
// Within the second set, dataNodes[5] has the least free space
chosenNode = replicator.chooseReplicaToDelete(
chosen = replicator.chooseReplicaToDelete(
null, null, (short)1, first, second);
assertEquals(chosenNode, dataNodes[5]);
assertEquals(chosen, storages[5]);
}
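
For contrast with the rack-only variant above, a hedged sketch of why the nodegroup-aware policy splits 3/1 instead of 2/2 and picks storages[1] first (replicator here is the nodegroup-aware placement policy from the test fixture):

replicator.splitNodesWithRack(replicaList, rackMap, first, second);
assertEquals(3, first.size());   // dataNodes[0..2] share one rack
assertEquals(1, second.size());  // dataNodes[5] is alone on its rack
// dataNodes[0] and dataNodes[1] share a nodegroup, so that group is drained
// first; storages[1] loses the tie-break on remaining space (3MB vs. 4MB),
// even though storages[2] has less space (2MB) overall.
DatanodeStorageInfo chosen = replicator.chooseReplicaToDelete(
    null, null, (short) 3, first, second);
assertEquals(storages[1], chosen);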
/**

Some files were not shown because too many files have changed in this diff.