Merge from trunk to branch
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1615844 13f79535-47bb-0310-9956-ffa450edef68
commit ac73d416f3
@@ -192,6 +192,11 @@ Trunk (Unreleased)
 HADOOP-10891. Add EncryptedKeyVersion factory method to
 KeyProviderCryptoExtension. (wang)

+HADOOP-10756. KMS audit log should consolidate successful similar requests.
+(asuresh via tucu)
+
+HADOOP-10793. KeyShell args should use single-dash style. (wang)
+
 BUG FIXES

 HADOOP-9451. Fault single-layer config if node group topology is enabled.
@@ -405,6 +410,12 @@ Trunk (Unreleased)
 HADOOP-10881. Clarify usage of encryption and encrypted encryption
 key in KeyProviderCryptoExtension. (wang)

+HADOOP-10920. site plugin couldn't parse hadoop-kms index.apt.vm.
+(Akira Ajisaka via wang)
+
+HADOOP-10925. Compilation fails in native link0 function on Windows.
+(cnauroth)
+
 OPTIMIZATIONS

 HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -463,6 +474,14 @@ Release 2.6.0 - UNRELEASED
 HADOOP-8069. Enable TCP_NODELAY by default for IPC. (Todd Lipcon via
 Arpit Agarwal)

+HADOOP-10902. Deletion of directories with snapshots will not output
+reason for trash move failure. (Stephen Chu via wang)
+
+HADOOP-10900. CredentialShell args should use single-dash style. (wang)
+
+HADOOP-10903. Enhance hadoop classpath command to expand wildcards or write
+classpath into jar manifest. (cnauroth)
+
 OPTIMIZATIONS

 BUG FIXES
@@ -497,6 +516,15 @@ Release 2.6.0 - UNRELEASED
 HADOOP-10876. The constructor of Path should not take an empty URL as a
 parameter. (Zhihai Xu via wang)

+HADOOP-10928. Incorrect usage on `hadoop credential list`.
+(Josh Elser via wang)
+
+HADOOP-10927. Fix CredentialShell help behavior and error codes.
+(Josh Elser via wang)
+
+HADOOP-10937. Need to set version name correctly before decrypting EEK.
+(Arun Suresh via wang)
+
 Release 2.5.0 - UNRELEASED

 INCOMPATIBLE CHANGES
@@ -637,6 +665,8 @@ Release 2.5.0 - UNRELEASED

 BUG FIXES

+HADOOP-10759. Remove hardcoded JAVA_HEAP_MAX. (Sam Liu via Eric Yang)
+
 HADOOP-10378. Typo in help printed by hdfs dfs -help.
 (Mit Desai via suresh)

@@ -813,6 +843,8 @@ Release 2.5.0 - UNRELEASED
 HADOOP-10894. Fix dead link in ToolRunner documentation. (Akira Ajisaka
 via Arpit Agarwal)

+HADOOP-10910. Increase findbugs maxHeap size. (wang)
+
 BREAKDOWN OF HADOOP-10514 SUBTASKS AND RELATED JIRAS

 HADOOP-10520. Extended attributes definition and FileSystem APIs for
@@ -35,6 +35,7 @@ function print_usage(){
 echo " distcp <srcurl> <desturl> copy file or directories recursively"
 echo " archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive"
 echo " classpath prints the class path needed to get the"
+echo " credential interact with credential providers"
 echo " Hadoop jar and the required libraries"
 echo " daemonlog get/set the log level for each daemon"
 echo " or"
@@ -90,11 +91,6 @@ case $COMMAND in
 fi
 ;;

-classpath)
-echo $CLASSPATH
-exit
-;;
-
 #core commands
 *)
 # the core commands
@@ -118,6 +114,14 @@ case $COMMAND in
 CLASSPATH=${CLASSPATH}:${TOOL_PATH}
 elif [ "$COMMAND" = "credential" ] ; then
 CLASS=org.apache.hadoop.security.alias.CredentialShell
+elif [ "$COMMAND" = "classpath" ] ; then
+if [ "$#" -eq 1 ]; then
+# No need to bother starting up a JVM for this simple case.
+echo $CLASSPATH
+exit
+else
+CLASS=org.apache.hadoop.util.Classpath
+fi
 elif [[ "$COMMAND" = -* ]] ; then
 # class and package names cannot begin with a -
 echo "Error: No command named \`$COMMAND' was found. Perhaps you meant \`hadoop ${COMMAND#-}'"
@@ -149,8 +149,6 @@ if [[ -z $JAVA_HOME ]]; then
 fi

 JAVA=$JAVA_HOME/bin/java
-# some Java parameters
-JAVA_HEAP_MAX=-Xmx1000m

 # check envvars which might override default args
 if [ "$HADOOP_HEAPSIZE" != "" ]; then
@@ -115,11 +115,14 @@ call :updatepath %HADOOP_BIN_PATH%
 )

 if %hadoop-command% == classpath (
-@echo %CLASSPATH%
-goto :eof
+if not defined hadoop-command-arguments (
+@rem No need to bother starting up a JVM for this simple case.
+@echo %CLASSPATH%
+exit /b
+)
 )

-set corecommands=fs version jar checknative distcp daemonlog archive
+set corecommands=fs version jar checknative distcp daemonlog archive classpath
 for %%i in ( %corecommands% ) do (
 if %hadoop-command% == %%i set corecommand=true
 )
@@ -175,6 +178,10 @@ call :updatepath %HADOOP_BIN_PATH%
 set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
 goto :eof

+:classpath
+set CLASS=org.apache.hadoop.util.Classpath
+goto :eof
+
 :updatepath
 set path_to_add=%*
 set current_path_comparable=%path%
@@ -1843,6 +1843,38 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
 return pass;
 }

+/**
+* Get the socket address for <code>hostProperty</code> as a
+* <code>InetSocketAddress</code>. If <code>hostProperty</code> is
+* <code>null</code>, <code>addressProperty</code> will be used. This
+* is useful for cases where we want to differentiate between host
+* bind address and address clients should use to establish connection.
+*
+* @param hostProperty bind host property name.
+* @param addressProperty address property name.
+* @param defaultAddressValue the default value
+* @param defaultPort the default port
+* @return InetSocketAddress
+*/
+public InetSocketAddress getSocketAddr(
+String hostProperty,
+String addressProperty,
+String defaultAddressValue,
+int defaultPort) {
+
+InetSocketAddress bindAddr = getSocketAddr(
+addressProperty, defaultAddressValue, defaultPort);
+
+final String host = get(hostProperty);
+
+if (host == null || host.isEmpty()) {
+return bindAddr;
+}
+
+return NetUtils.createSocketAddr(
+host, bindAddr.getPort(), hostProperty);
+}
+
 /**
 * Get the socket address for <code>name</code> property as a
 * <code>InetSocketAddress</code>.
@@ -1865,6 +1897,40 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
 set(name, NetUtils.getHostPortString(addr));
 }

+/**
+* Set the socket address a client can use to connect for the
+* <code>name</code> property as a <code>host:port</code>. The wildcard
+* address is replaced with the local host's address. If the host and address
+* properties are configured the host component of the address will be combined
+* with the port component of the addr to generate the address. This is to allow
+* optional control over which host name is used in multi-home bind-host
+* cases where a host can have multiple names
+* @param hostProperty the bind-host configuration name
+* @param addressProperty the service address configuration name
+* @param defaultAddressValue the service default address configuration value
+* @param addr InetSocketAddress of the service listener
+* @return InetSocketAddress for clients to connect
+*/
+public InetSocketAddress updateConnectAddr(
+String hostProperty,
+String addressProperty,
+String defaultAddressValue,
+InetSocketAddress addr) {
+
+final String host = get(hostProperty);
+final String connectHostPort = getTrimmed(addressProperty, defaultAddressValue);
+
+if (host == null || host.isEmpty() || connectHostPort == null || connectHostPort.isEmpty()) {
+//not our case, fall back to original logic
+return updateConnectAddr(addressProperty, addr);
+}
+
+final String connectHost = connectHostPort.split(":")[0];
+// Create connect address using client address hostname and server port.
+return updateConnectAddr(addressProperty, NetUtils.createSocketAddrForHost(
+connectHost, addr.getPort()));
+}
+
 /**
 * Set the socket address a client can use to connect for the
 * <code>name</code> property as a <code>host:port</code>. The wildcard
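
For reference, a minimal sketch of how the getSocketAddr and updateConnectAddr overloads added above can be combined for a multi-homed service; the property names (demo.rpc.bind-host, demo.rpc.address) and host names are hypothetical and used only for illustration.

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;

    public class BindHostDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Hypothetical properties: bind to the wildcard address but
        // advertise a specific hostname to clients.
        conf.set("demo.rpc.bind-host", "0.0.0.0");
        conf.set("demo.rpc.address", "node1.example.com:8020");

        // Server side: host comes from the bind-host property, port from the address.
        InetSocketAddress bindAddr = conf.getSocketAddr(
            "demo.rpc.bind-host", "demo.rpc.address", "0.0.0.0:8020", 8020);

        // Client side: connect host comes from the address property, port from
        // the address the server actually bound.
        InetSocketAddress connectAddr = conf.updateConnectAddr(
            "demo.rpc.bind-host", "demo.rpc.address", "0.0.0.0:8020", bindAddr);

        System.out.println(bindAddr + " -> " + connectAddr);
      }
    }
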
@@ -21,11 +21,13 @@ package org.apache.hadoop.crypto.key;
 import java.io.IOException;
 import java.security.GeneralSecurityException;
 import java.security.SecureRandom;

 import javax.crypto.Cipher;
 import javax.crypto.spec.IvParameterSpec;
 import javax.crypto.spec.SecretKeySpec;

 import com.google.common.base.Preconditions;
+
 import org.apache.hadoop.classification.InterfaceAudience;
+
 /**
@@ -97,7 +99,7 @@ public class KeyProviderCryptoExtension extends
 public static EncryptedKeyVersion createForDecryption(String
 encryptionKeyVersionName, byte[] encryptedKeyIv,
 byte[] encryptedKeyMaterial) {
-KeyVersion encryptedKeyVersion = new KeyVersion(null, null,
+KeyVersion encryptedKeyVersion = new KeyVersion(null, EEK,
 encryptedKeyMaterial);
 return new EncryptedKeyVersion(null, encryptionKeyVersionName,
 encryptedKeyIv, encryptedKeyVersion);
@@ -258,6 +260,13 @@ public class KeyProviderCryptoExtension extends
 keyProvider.getKeyVersion(encryptionKeyVersionName);
 Preconditions.checkNotNull(encryptionKey,
 "KeyVersion name '%s' does not exist", encryptionKeyVersionName);
+Preconditions.checkArgument(
+encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
+.equals(KeyProviderCryptoExtension.EEK),
+"encryptedKey version name must be '%s', is '%s'",
+KeyProviderCryptoExtension.EEK,
+encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
+);
 final byte[] encryptionKeyMaterial = encryptionKey.getMaterial();
 // Encryption key IV is determined from encrypted key's IV
 final byte[] encryptionIV =
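
A short sketch of how a caller would build an EncryptedKeyVersion for decryption after this change; the key name and byte arrays below are placeholders. The point is only that the factory now tags the wrapped KeyVersion with the EEK version name, which decryptEncryptedKey() verifies before unwrapping.

    import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
    import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;

    public class EekDemo {
      public static void main(String[] args) {
        // Placeholder IV and encrypted material; real values come from wherever
        // the encrypted key was stored (for example a file's crypto metadata).
        byte[] iv = new byte[16];
        byte[] encryptedMaterial = new byte[32];

        EncryptedKeyVersion ekv = EncryptedKeyVersion.createForDecryption(
            "mykey@0", iv, encryptedMaterial);

        // The embedded KeyVersion now carries the EEK version name.
        System.out.println(ekv.getEncryptedKeyVersion().getVersionName()
            .equals(KeyProviderCryptoExtension.EEK));
      }
    }
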
@@ -38,9 +38,9 @@ import org.apache.hadoop.util.ToolRunner;
 */
 public class KeyShell extends Configured implements Tool {
 final static private String USAGE_PREFIX = "Usage: hadoop key " +
 "[generic options]\n";
 final static private String COMMANDS =
-" [--help]\n" +
+" [-help]\n" +
 " [" + CreateCommand.USAGE + "]\n" +
 " [" + RollCommand.USAGE + "]\n" +
 " [" + DeleteCommand.USAGE + "]\n" +
@@ -90,11 +90,11 @@ public class KeyShell extends Configured implements Tool {
 /**
 * Parse the command line arguments and initialize the data
 * <pre>
-* % hadoop key create keyName [--size size] [--cipher algorithm]
-* [--provider providerPath]
-* % hadoop key roll keyName [--provider providerPath]
+* % hadoop key create keyName [-size size] [-cipher algorithm]
+* [-provider providerPath]
+* % hadoop key roll keyName [-provider providerPath]
 * % hadoop key list [-provider providerPath]
-* % hadoop key delete keyName [--provider providerPath] [-i]
+* % hadoop key delete keyName [-provider providerPath] [-i]
 * </pre>
 * @param args Command line arguments.
 * @return 0 on success, 1 on failure.
@@ -107,47 +107,47 @@ public class KeyShell extends Configured implements Tool {
 for (int i = 0; i < args.length; i++) { // parse command line
 boolean moreTokens = (i < args.length - 1);
 if (args[i].equals("create")) {
-String keyName = "--help";
+String keyName = "-help";
 if (moreTokens) {
 keyName = args[++i];
 }

 command = new CreateCommand(keyName, options);
-if ("--help".equals(keyName)) {
+if ("-help".equals(keyName)) {
 printKeyShellUsage();
 return 1;
 }
 } else if (args[i].equals("delete")) {
-String keyName = "--help";
+String keyName = "-help";
 if (moreTokens) {
 keyName = args[++i];
 }

 command = new DeleteCommand(keyName);
-if ("--help".equals(keyName)) {
+if ("-help".equals(keyName)) {
 printKeyShellUsage();
 return 1;
 }
 } else if (args[i].equals("roll")) {
-String keyName = "--help";
+String keyName = "-help";
 if (moreTokens) {
 keyName = args[++i];
 }

 command = new RollCommand(keyName);
-if ("--help".equals(keyName)) {
+if ("-help".equals(keyName)) {
 printKeyShellUsage();
 return 1;
 }
 } else if ("list".equals(args[i])) {
 command = new ListCommand();
-} else if ("--size".equals(args[i]) && moreTokens) {
+} else if ("-size".equals(args[i]) && moreTokens) {
 options.setBitLength(Integer.parseInt(args[++i]));
-} else if ("--cipher".equals(args[i]) && moreTokens) {
+} else if ("-cipher".equals(args[i]) && moreTokens) {
 options.setCipher(args[++i]);
-} else if ("--description".equals(args[i]) && moreTokens) {
+} else if ("-description".equals(args[i]) && moreTokens) {
 options.setDescription(args[++i]);
-} else if ("--attr".equals(args[i]) && moreTokens) {
+} else if ("-attr".equals(args[i]) && moreTokens) {
 final String attrval[] = args[++i].split("=", 2);
 final String attr = attrval[0].trim();
 final String val = attrval[1].trim();
@@ -164,14 +164,14 @@ public class KeyShell extends Configured implements Tool {
 return 1;
 }
 attributes.put(attr, val);
-} else if ("--provider".equals(args[i]) && moreTokens) {
+} else if ("-provider".equals(args[i]) && moreTokens) {
 userSuppliedProvider = true;
 getConf().set(KeyProviderFactory.KEY_PROVIDER_PATH, args[++i]);
-} else if ("--metadata".equals(args[i])) {
+} else if ("-metadata".equals(args[i])) {
 getConf().setBoolean(LIST_METADATA, true);
-} else if ("-i".equals(args[i]) || ("--interactive".equals(args[i]))) {
+} else if ("-i".equals(args[i]) || ("-interactive".equals(args[i]))) {
 interactive = true;
-} else if ("--help".equals(args[i])) {
+} else if ("-help".equals(args[i])) {
 printKeyShellUsage();
 return 1;
 } else {
@@ -258,11 +258,11 @@ public class KeyShell extends Configured implements Tool {

 private class ListCommand extends Command {
 public static final String USAGE =
-"list [--provider <provider>] [--metadata] [--help]";
+"list [-provider <provider>] [-metadata] [-help]";
 public static final String DESC =
 "The list subcommand displays the keynames contained within\n" +
 "a particular provider as configured in core-site.xml or\n" +
-"specified with the --provider argument. --metadata displays\n" +
+"specified with the -provider argument. -metadata displays\n" +
 "the metadata.";

 private boolean metadata = false;
@@ -272,9 +272,9 @@ public class KeyShell extends Configured implements Tool {
 provider = getKeyProvider();
 if (provider == null) {
 out.println("There are no non-transient KeyProviders configured.\n"
-+ "Use the --provider option to specify a provider. If you\n"
++ "Use the -provider option to specify a provider. If you\n"
 + "want to list a transient provider then you must use the\n"
-+ "--provider argument.");
++ "-provider argument.");
 rc = false;
 }
 metadata = getConf().getBoolean(LIST_METADATA, false);
@@ -310,10 +310,10 @@ public class KeyShell extends Configured implements Tool {
 }

 private class RollCommand extends Command {
-public static final String USAGE = "roll <keyname> [--provider <provider>] [--help]";
+public static final String USAGE = "roll <keyname> [-provider <provider>] [-help]";
 public static final String DESC =
 "The roll subcommand creates a new version for the specified key\n" +
-"within the provider indicated using the --provider argument\n";
+"within the provider indicated using the -provider argument\n";

 String keyName = null;

@@ -326,13 +326,13 @@ public class KeyShell extends Configured implements Tool {
 provider = getKeyProvider();
 if (provider == null) {
 out.println("There are no valid KeyProviders configured. The key\n" +
-"has not been rolled. Use the --provider option to specify\n" +
+"has not been rolled. Use the -provider option to specify\n" +
 "a provider.");
 rc = false;
 }
 if (keyName == null) {
 out.println("Please provide a <keyname>.\n" +
-"See the usage description by using --help.");
+"See the usage description by using -help.");
 rc = false;
 }
 return rc;
@@ -367,11 +367,11 @@ public class KeyShell extends Configured implements Tool {
 }

 private class DeleteCommand extends Command {
-public static final String USAGE = "delete <keyname> [--provider <provider>] [--help]";
+public static final String USAGE = "delete <keyname> [-provider <provider>] [-help]";
 public static final String DESC =
 "The delete subcommand deletes all versions of the key\n" +
 "specified by the <keyname> argument from within the\n" +
-"provider specified --provider.";
+"provider specified -provider.";

 String keyName = null;
 boolean cont = true;
@@ -385,12 +385,12 @@ public class KeyShell extends Configured implements Tool {
 provider = getKeyProvider();
 if (provider == null) {
 out.println("There are no valid KeyProviders configured. Nothing\n"
-+ "was deleted. Use the --provider option to specify a provider.");
++ "was deleted. Use the -provider option to specify a provider.");
 return false;
 }
 if (keyName == null) {
 out.println("There is no keyName specified. Please specify a " +
-"<keyname>. See the usage description with --help.");
+"<keyname>. See the usage description with -help.");
 return false;
 }
 if (interactive) {
@@ -436,19 +436,19 @@ public class KeyShell extends Configured implements Tool {

 private class CreateCommand extends Command {
 public static final String USAGE =
-"create <keyname> [--cipher <cipher>] [--size <size>]\n" +
-" [--description <description>]\n" +
-" [--attr <attribute=value>]\n" +
-" [--provider <provider>] [--help]";
+"create <keyname> [-cipher <cipher>] [-size <size>]\n" +
+" [-description <description>]\n" +
+" [-attr <attribute=value>]\n" +
+" [-provider <provider>] [-help]";
 public static final String DESC =
 "The create subcommand creates a new key for the name specified\n" +
 "by the <keyname> argument within the provider specified by the\n" +
-"--provider argument. You may specify a cipher with the --cipher\n" +
+"-provider argument. You may specify a cipher with the -cipher\n" +
 "argument. The default cipher is currently \"AES/CTR/NoPadding\".\n" +
 "The default keysize is 256. You may specify the requested key\n" +
-"length using the --size argument. Arbitrary attribute=value\n" +
-"style attributes may be specified using the --attr argument.\n" +
-"--attr may be specified multiple times, once per attribute.\n";
+"length using the -size argument. Arbitrary attribute=value\n" +
+"style attributes may be specified using the -attr argument.\n" +
+"-attr may be specified multiple times, once per attribute.\n";

 final String keyName;
 final Options options;
@@ -463,13 +463,13 @@ public class KeyShell extends Configured implements Tool {
 provider = getKeyProvider();
 if (provider == null) {
 out.println("There are no valid KeyProviders configured. No key\n" +
-" was created. You can use the --provider option to specify\n" +
+" was created. You can use the -provider option to specify\n" +
 " a provider to use.");
 rc = false;
 }
 if (keyName == null) {
 out.println("Please provide a <keyname>. See the usage description" +
-" with --help.");
+" with -help.");
 rc = false;
 }
 return rc;
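
Since KeyShell implements Tool, the single-dash options introduced by HADOOP-10793 can be exercised programmatically as well as from the command line. A small sketch, assuming a hypothetical JCEKS provider path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyShell;
    import org.apache.hadoop.util.ToolRunner;

    public class KeyShellDemo {
      public static void main(String[] args) throws Exception {
        // Single-dash style: -size, -provider, -help (formerly --size, --provider, --help).
        int rc = ToolRunner.run(new Configuration(), new KeyShell(), new String[] {
            "create", "demokey", "-size", "256",
            "-provider", "jceks://file/tmp/demo.jceks"  // hypothetical provider path
        });
        System.exit(rc);
      }
    }
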
@@ -653,7 +653,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension {
 encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
 .equals(KeyProviderCryptoExtension.EEK),
 "encryptedKey version name must be '%s', is '%s'",
-KeyProviderCryptoExtension.EK,
+KeyProviderCryptoExtension.EEK,
 encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
 );
 checkNotNull(encryptedKeyVersion.getEncryptedKeyVersion(), "encryptedKey");
@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.security.AccessControlException;
@@ -803,6 +804,18 @@ public abstract class AbstractFileSystem {
 throws AccessControlException, FileNotFoundException,
 UnresolvedLinkException, IOException;

+/**
+* The specification of this method matches that of
+* {@link FileContext#access(Path, FsAction)}
+* except that an UnresolvedLinkException may be thrown if a symlink is
+* encountered in the path.
+*/
+@InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
+public void access(Path path, FsAction mode) throws AccessControlException,
+FileNotFoundException, UnresolvedLinkException, IOException {
+FileSystem.checkAccessPermissions(this.getFileStatus(path), mode);
+}
+
 /**
 * The specification of this method matches that of
 * {@link FileContext#getFileLinkStatus(Path)}
@@ -44,6 +44,7 @@ import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT;
@@ -1108,6 +1109,55 @@ public final class FileContext {
 }.resolve(this, absF);
 }

+/**
+* Checks if the user can access a path. The mode specifies which access
+* checks to perform. If the requested permissions are granted, then the
+* method returns normally. If access is denied, then the method throws an
+* {@link AccessControlException}.
+* <p/>
+* The default implementation of this method calls {@link #getFileStatus(Path)}
+* and checks the returned permissions against the requested permissions.
+* Note that the getFileStatus call will be subject to authorization checks.
+* Typically, this requires search (execute) permissions on each directory in
+* the path's prefix, but this is implementation-defined. Any file system
+* that provides a richer authorization model (such as ACLs) may override the
+* default implementation so that it checks against that model instead.
+* <p>
+* In general, applications should avoid using this method, due to the risk of
+* time-of-check/time-of-use race conditions. The permissions on a file may
+* change immediately after the access call returns. Most applications should
+* prefer running specific file system actions as the desired user represented
+* by a {@link UserGroupInformation}.
+*
+* @param path Path to check
+* @param mode type of access to check
+* @throws AccessControlException if access is denied
+* @throws FileNotFoundException if the path does not exist
+* @throws UnsupportedFileSystemException if file system for <code>path</code>
+* is not supported
+* @throws IOException see specific implementation
+*
+* Exceptions applicable to file systems accessed over RPC:
+* @throws RpcClientException If an exception occurred in the RPC client
+* @throws RpcServerException If an exception occurred in the RPC server
+* @throws UnexpectedServerException If server implementation throws
+* undeclared exception to RPC server
+*/
+@InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
+public void access(final Path path, final FsAction mode)
+throws AccessControlException, FileNotFoundException,
+UnsupportedFileSystemException, IOException {
+final Path absPath = fixRelativePart(path);
+new FSLinkResolver<Void>() {
+@Override
+public Void next(AbstractFileSystem fs, Path p) throws IOException,
+UnresolvedLinkException {
+fs.access(p, mode);
+return null;
+}
+}.resolve(this, absPath);
+}
+
 /**
 * Return a file status object that represents the path. If the path
 * refers to a symlink then the FileStatus of the symlink is returned.
@@ -25,6 +25,7 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -50,6 +51,7 @@ import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.io.Text;
@@ -2072,6 +2074,71 @@ public abstract class FileSystem extends Configured implements Closeable {
 */
 public abstract FileStatus getFileStatus(Path f) throws IOException;

+/**
+* Checks if the user can access a path. The mode specifies which access
+* checks to perform. If the requested permissions are granted, then the
+* method returns normally. If access is denied, then the method throws an
+* {@link AccessControlException}.
+* <p/>
+* The default implementation of this method calls {@link #getFileStatus(Path)}
+* and checks the returned permissions against the requested permissions.
+* Note that the getFileStatus call will be subject to authorization checks.
+* Typically, this requires search (execute) permissions on each directory in
+* the path's prefix, but this is implementation-defined. Any file system
+* that provides a richer authorization model (such as ACLs) may override the
+* default implementation so that it checks against that model instead.
+* <p>
+* In general, applications should avoid using this method, due to the risk of
+* time-of-check/time-of-use race conditions. The permissions on a file may
+* change immediately after the access call returns. Most applications should
+* prefer running specific file system actions as the desired user represented
+* by a {@link UserGroupInformation}.
+*
+* @param path Path to check
+* @param mode type of access to check
+* @throws AccessControlException if access is denied
+* @throws FileNotFoundException if the path does not exist
+* @throws IOException see specific implementation
+*/
+@InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
+public void access(Path path, FsAction mode) throws AccessControlException,
+FileNotFoundException, IOException {
+checkAccessPermissions(this.getFileStatus(path), mode);
+}
+
+/**
+* This method provides the default implementation of
+* {@link #access(Path, FsAction)}.
+*
+* @param stat FileStatus to check
+* @param mode type of access to check
+* @throws IOException for any error
+*/
+@InterfaceAudience.Private
+static void checkAccessPermissions(FileStatus stat, FsAction mode)
+throws IOException {
+FsPermission perm = stat.getPermission();
+UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+String user = ugi.getShortUserName();
+List<String> groups = Arrays.asList(ugi.getGroupNames());
+if (user.equals(stat.getOwner())) {
+if (perm.getUserAction().implies(mode)) {
+return;
+}
+} else if (groups.contains(stat.getGroup())) {
+if (perm.getGroupAction().implies(mode)) {
+return;
+}
+} else {
+if (perm.getOtherAction().implies(mode)) {
+return;
+}
+}
+throw new AccessControlException(String.format(
+"Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat.getPath(),
+stat.getOwner(), stat.getGroup(), stat.isDirectory() ? "d" : "-", perm));
+}
+
 /**
 * See {@link FileContext#fixRelativePart}
 */
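
A hedged sketch of how an application might use the new FileSystem#access call; the path below is hypothetical, and per the javadoc above the check is advisory only, since permissions can change between the check and any subsequent read.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.security.AccessControlException;

    public class AccessDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/tmp/report.csv");  // hypothetical path
        try {
          // Throws AccessControlException if the current user cannot read p.
          fs.access(p, FsAction.READ);
          System.out.println("readable");
        } catch (AccessControlException ace) {
          System.out.println("not readable: " + ace.getMessage());
        }
      }
    }
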
@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceStability;
|
||||||
import org.apache.hadoop.conf.Configuration;
|
import org.apache.hadoop.conf.Configuration;
|
||||||
import org.apache.hadoop.fs.permission.AclEntry;
|
import org.apache.hadoop.fs.permission.AclEntry;
|
||||||
import org.apache.hadoop.fs.permission.AclStatus;
|
import org.apache.hadoop.fs.permission.AclStatus;
|
||||||
|
import org.apache.hadoop.fs.permission.FsAction;
|
||||||
import org.apache.hadoop.fs.permission.FsPermission;
|
import org.apache.hadoop.fs.permission.FsPermission;
|
||||||
import org.apache.hadoop.fs.Options.ChecksumOpt;
|
import org.apache.hadoop.fs.Options.ChecksumOpt;
|
||||||
import org.apache.hadoop.security.AccessControlException;
|
import org.apache.hadoop.security.AccessControlException;
|
||||||
|
@ -397,6 +398,12 @@ public class FilterFileSystem extends FileSystem {
|
||||||
return fs.getFileStatus(f);
|
return fs.getFileStatus(f);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void access(Path path, FsAction mode) throws AccessControlException,
|
||||||
|
FileNotFoundException, IOException {
|
||||||
|
fs.access(path, mode);
|
||||||
|
}
|
||||||
|
|
||||||
public void createSymlink(final Path target, final Path link,
|
public void createSymlink(final Path target, final Path link,
|
||||||
final boolean createParent) throws AccessControlException,
|
final boolean createParent) throws AccessControlException,
|
||||||
FileAlreadyExistsException, FileNotFoundException,
|
FileAlreadyExistsException, FileNotFoundException,
|
||||||
|
|
|
@@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.security.AccessControlException;
@@ -119,6 +120,13 @@ public abstract class FilterFs extends AbstractFileSystem {
 return myFs.getFileStatus(f);
 }

+@Override
+public void access(Path path, FsAction mode) throws AccessControlException,
+FileNotFoundException, UnresolvedLinkException, IOException {
+checkPath(path);
+myFs.access(path, mode);
+}
+
 @Override
 public FileStatus getFileLinkStatus(final Path f)
 throws IOException, UnresolvedLinkException {
@@ -118,7 +118,11 @@ class Delete {
 } catch(FileNotFoundException fnfe) {
 throw fnfe;
 } catch (IOException ioe) {
-throw new IOException(ioe.getMessage() + ". Consider using -skipTrash option", ioe);
+String msg = ioe.getMessage();
+if (ioe.getCause() != null) {
+msg += ": " + ioe.getCause().getMessage();
+}
+throw new IOException(msg + ". Consider using -skipTrash option", ioe);
 }
 }
 return success;
@@ -41,7 +41,9 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Progressable;

 /**
@@ -222,6 +224,12 @@ class ChRootedFileSystem extends FilterFileSystem {
 return super.getFileStatus(fullPath(f));
 }

+@Override
+public void access(Path path, FsAction mode) throws AccessControlException,
+FileNotFoundException, IOException {
+super.access(fullPath(path), mode);
+}
+
 @Override
 public FsStatus getStatus(Path p) throws IOException {
 return super.getStatus(fullPath(p));
@@ -41,7 +41,9 @@ import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;

@@ -200,6 +202,11 @@ class ChRootedFs extends AbstractFileSystem {
 return myFs.getFileStatus(fullPath(f));
 }

+public void access(Path path, FsAction mode) throws AccessControlException,
+FileNotFoundException, UnresolvedLinkException, IOException {
+myFs.access(fullPath(path), mode);
+}
+
 @Override
 public FileStatus getFileLinkStatus(final Path f)
 throws IOException, UnresolvedLinkException {
@@ -51,6 +51,7 @@ import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.AclUtil;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.viewfs.InodeTree.INode;
 import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@@ -359,6 +360,13 @@ public class ViewFileSystem extends FileSystem {
 return new ViewFsFileStatus(status, this.makeQualified(f));
 }

+@Override
+public void access(Path path, FsAction mode) throws AccessControlException,
+FileNotFoundException, IOException {
+InodeTree.ResolveResult<FileSystem> res =
+fsState.resolve(getUriPath(path), true);
+res.targetFileSystem.access(res.remainingPath, mode);
+}
+
 @Override
 public FileStatus[] listStatus(final Path f) throws AccessControlException,
@@ -54,6 +54,7 @@ import org.apache.hadoop.fs.local.LocalConfigKeys;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclUtil;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.viewfs.InodeTree.INode;
 import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@@ -352,6 +353,14 @@ public class ViewFs extends AbstractFileSystem {
 return new ViewFsFileStatus(status, this.makeQualified(f));
 }

+@Override
+public void access(Path path, FsAction mode) throws AccessControlException,
+FileNotFoundException, UnresolvedLinkException, IOException {
+InodeTree.ResolveResult<AbstractFileSystem> res =
+fsState.resolve(getUriPath(path), true);
+res.targetFileSystem.access(res.remainingPath, mode);
+}
+
 @Override
 public FileStatus getFileLinkStatus(final Path f)
 throws AccessControlException, FileNotFoundException,
@@ -33,6 +33,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.io.SecureIOUtils.AlreadyExistsException;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.util.Shell;
@@ -823,6 +824,14 @@ public class NativeIO {
 }
 }

+public static void link(File src, File dst) throws IOException {
+if (!nativeLoaded) {
+HardLink.createHardLink(src, dst);
+} else {
+link0(src.getAbsolutePath(), dst.getAbsolutePath());
+}
+}
+
 /**
 * A version of renameTo that throws a descriptive exception when it fails.
 *
@@ -833,4 +842,7 @@ public class NativeIO {
 */
 private static native void renameTo0(String src, String dst)
 throws NativeIOException;
+
+private static native void link0(String src, String dst)
+throws NativeIOException;
 }
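
A minimal sketch of the NativeIO.link helper added above, which falls back to HardLink.createHardLink when the native library is not loaded and otherwise calls the native link0; the file paths are hypothetical.

    import java.io.File;
    import org.apache.hadoop.io.nativeio.NativeIO;

    public class HardLinkDemo {
      public static void main(String[] args) throws Exception {
        // Hypothetical source and destination paths for the hard link.
        File src = new File("/tmp/source.dat");
        File dst = new File("/tmp/source.hardlink");
        NativeIO.link(src, dst);
      }
    }
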
@@ -77,7 +77,8 @@ public class SecurityUtil {
 * For use only by tests and initialization
 */
 @InterfaceAudience.Private
-static void setTokenServiceUseIp(boolean flag) {
+@VisibleForTesting
+public static void setTokenServiceUseIp(boolean flag) {
 useIpForTokenService = flag;
 hostResolver = !useIpForTokenService
 ? new QualifiedHostResolver()
@ -67,11 +67,11 @@ public class CredentialShell extends Configured implements Tool {
|
||||||
if (command.validate()) {
|
if (command.validate()) {
|
||||||
command.execute();
|
command.execute();
|
||||||
} else {
|
} else {
|
||||||
exitCode = -1;
|
exitCode = 1;
|
||||||
}
|
}
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
e.printStackTrace(err);
|
e.printStackTrace(err);
|
||||||
return -1;
|
return 1;
|
||||||
}
|
}
|
||||||
return exitCode;
|
return exitCode;
|
||||||
}
|
}
|
||||||
|
@@ -79,47 +79,54 @@ public class CredentialShell extends Configured implements Tool {
   /**
    * Parse the command line arguments and initialize the data
    * <pre>
-   * % hadoop alias create alias [--provider providerPath]
-   * % hadoop alias list [-provider providerPath]
-   * % hadoop alias delete alias [--provider providerPath] [-i]
+   * % hadoop credential create alias [-provider providerPath]
+   * % hadoop credential list [-provider providerPath]
+   * % hadoop credential delete alias [-provider providerPath] [-i]
    * </pre>
    * @param args
-   * @return
+   * @return 0 if the argument(s) were recognized, 1 otherwise
    * @throws IOException
    */
-  private int init(String[] args) throws IOException {
+  protected int init(String[] args) throws IOException {
+    // no args should print the help message
+    if (0 == args.length) {
+      printCredShellUsage();
+      ToolRunner.printGenericCommandUsage(System.err);
+      return 1;
+    }
+
     for (int i = 0; i < args.length; i++) { // parse command line
       if (args[i].equals("create")) {
         String alias = args[++i];
         command = new CreateCommand(alias);
-        if (alias.equals("--help")) {
+        if (alias.equals("-help")) {
           printCredShellUsage();
-          return -1;
+          return 0;
         }
       } else if (args[i].equals("delete")) {
         String alias = args[++i];
         command = new DeleteCommand(alias);
-        if (alias.equals("--help")) {
+        if (alias.equals("-help")) {
           printCredShellUsage();
-          return -1;
+          return 0;
         }
       } else if (args[i].equals("list")) {
         command = new ListCommand();
-      } else if (args[i].equals("--provider")) {
+      } else if (args[i].equals("-provider")) {
         userSuppliedProvider = true;
         getConf().set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
             args[++i]);
-      } else if (args[i].equals("-i") || (args[i].equals("--interactive"))) {
+      } else if (args[i].equals("-i") || (args[i].equals("-interactive"))) {
         interactive = true;
-      } else if (args[i].equals("-v") || (args[i].equals("--value"))) {
+      } else if (args[i].equals("-v") || (args[i].equals("-value"))) {
         value = args[++i];
-      } else if (args[i].equals("--help")) {
+      } else if (args[i].equals("-help")) {
         printCredShellUsage();
-        return -1;
+        return 0;
       } else {
         printCredShellUsage();
         ToolRunner.printGenericCommandUsage(System.err);
-        return -1;
+        return 1;
       }
     }
     return 0;
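Since CredentialShell implements the standard Tool interface, the single-dash style parsed above is what a caller passes when driving the shell through ToolRunner. A minimal, illustrative driver is sketched below; the alias name, value, and jceks path are placeholders rather than values taken from this change.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.alias.CredentialShell;
    import org.apache.hadoop.util.ToolRunner;

    public class CredentialShellDriver {
      public static void main(String[] args) throws Exception {
        // Creates a credential entry non-interactively using the single-dash options.
        int rc = ToolRunner.run(new Configuration(), new CredentialShell(),
            new String[] {"create", "myAlias", "-value", "p@ssw0rd",
                "-provider", "jceks://file/tmp/credstore.jceks"});
        System.exit(rc);  // 0 on success, 1 on usage or execution errors
      }
    }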
@@ -188,20 +195,20 @@ public class CredentialShell extends Configured implements Tool {
   }

   private class ListCommand extends Command {
-    public static final String USAGE = "list <alias> [--provider] [--help]";
+    public static final String USAGE = "list [-provider] [-help]";
     public static final String DESC =
         "The list subcommand displays the aliases contained within \n" +
         "a particular provider - as configured in core-site.xml or " +
-        "indicated\nthrough the --provider argument.";
+        "indicated\nthrough the -provider argument.";

     public boolean validate() {
       boolean rc = true;
       provider = getCredentialProvider();
       if (provider == null) {
         out.println("There are no non-transient CredentialProviders configured.\n"
-            + "Consider using the --provider option to indicate the provider\n"
+            + "Consider using the -provider option to indicate the provider\n"
             + "to use. If you want to list a transient provider then you\n"
-            + "you MUST use the --provider argument.");
+            + "you MUST use the -provider argument.");
         rc = false;
       }
       return rc;
@@ -229,11 +236,11 @@ public class CredentialShell extends Configured implements Tool {
   }

   private class DeleteCommand extends Command {
-    public static final String USAGE = "delete <alias> [--provider] [--help]";
+    public static final String USAGE = "delete <alias> [-provider] [-help]";
     public static final String DESC =
         "The delete subcommand deletes the credenital\n" +
         "specified as the <alias> argument from within the provider\n" +
-        "indicated through the --provider argument";
+        "indicated through the -provider argument";

     String alias = null;
     boolean cont = true;
@@ -248,13 +255,13 @@ public class CredentialShell extends Configured implements Tool {
       if (provider == null) {
         out.println("There are no valid CredentialProviders configured.\n"
             + "Nothing will be deleted.\n"
-            + "Consider using the --provider option to indicate the provider"
+            + "Consider using the -provider option to indicate the provider"
             + " to use.");
         return false;
       }
       if (alias == null) {
         out.println("There is no alias specified. Please provide the" +
-            "mandatory <alias>. See the usage description with --help.");
+            "mandatory <alias>. See the usage description with -help.");
         return false;
       }
       if (interactive) {
@@ -299,11 +306,11 @@ public class CredentialShell extends Configured implements Tool {
   }

   private class CreateCommand extends Command {
-    public static final String USAGE = "create <alias> [--provider] [--help]";
+    public static final String USAGE = "create <alias> [-provider] [-help]";
     public static final String DESC =
         "The create subcommand creates a new credential for the name specified\n" +
         "as the <alias> argument within the provider indicated through\n" +
-        "the --provider argument.";
+        "the -provider argument.";

     String alias = null;

@@ -317,13 +324,13 @@ public class CredentialShell extends Configured implements Tool {
       if (provider == null) {
         out.println("There are no valid CredentialProviders configured." +
             "\nCredential will not be created.\n"
-            + "Consider using the --provider option to indicate the provider" +
+            + "Consider using the -provider option to indicate the provider" +
             " to use.");
         rc = false;
       }
       if (alias == null) {
         out.println("There is no alias specified. Please provide the" +
-            "mandatory <alias>. See the usage description with --help.");
+            "mandatory <alias>. See the usage description with -help.");
         rc = false;
       }
       return rc;
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.shell.CommandFormat;
+import org.apache.hadoop.fs.shell.CommandFormat.UnknownOptionException;
+
+/**
+ * Command-line utility for getting the full classpath needed to launch a Hadoop
+ * client application.  If the hadoop script is called with "classpath" as the
+ * command, then it simply prints the classpath and exits immediately without
+ * launching a JVM.  The output likely will include wildcards in the classpath.
+ * If there are arguments passed to the classpath command, then this class gets
+ * called.  With the --glob argument, it prints the full classpath with wildcards
+ * expanded.  This is useful in situations where wildcard syntax isn't usable.
+ * With the --jar argument, it writes the classpath as a manifest in a jar file.
+ * This is useful in environments with short limitations on the maximum command
+ * line length, where it may not be possible to specify the full classpath in a
+ * command.  For example, the maximum command line length on Windows is 8191
+ * characters.
+ */
+@InterfaceAudience.Private
+public final class Classpath {
+  private static final String usage =
+      "classpath [--glob|--jar <path>|-h|--help] :\n"
+      + "  Prints the classpath needed to get the Hadoop jar and the required\n"
+      + "  libraries.\n"
+      + "  Options:\n"
+      + "\n"
+      + "  --glob       expand wildcards\n"
+      + "  --jar <path> write classpath as manifest in jar named <path>\n"
+      + "  -h, --help   print help\n";
+
+  /**
+   * Main entry point.
+   *
+   * @param args command-line arguments
+   */
+  public static void main(String[] args) {
+    if (args.length < 1 || args[0].equals("-h") || args[0].equals("--help")) {
+      System.out.println(usage);
+      return;
+    }
+
+    // Copy args, because CommandFormat mutates the list.
+    List<String> argsList = new ArrayList<String>(Arrays.asList(args));
+    CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "-glob", "-jar");
+    try {
+      cf.parse(argsList);
+    } catch (UnknownOptionException e) {
+      terminate(1, "unrecognized option");
+      return;
+    }
+
+    String classPath = System.getProperty("java.class.path");
+
+    if (cf.getOpt("-glob")) {
+      // The classpath returned from the property has been globbed already.
+      System.out.println(classPath);
+    } else if (cf.getOpt("-jar")) {
+      if (argsList.isEmpty() || argsList.get(0) == null ||
+          argsList.get(0).isEmpty()) {
+        terminate(1, "-jar option requires path of jar file to write");
+        return;
+      }
+
+      // Write the classpath into the manifest of a temporary jar file.
+      Path workingDir = new Path(System.getProperty("user.dir"));
+      final String tmpJarPath;
+      try {
+        tmpJarPath = FileUtil.createJarWithClassPath(classPath, workingDir,
+            System.getenv());
+      } catch (IOException e) {
+        terminate(1, "I/O error creating jar: " + e.getMessage());
+        return;
+      }
+
+      // Rename the temporary file to its final location.
+      String jarPath = argsList.get(0);
+      try {
+        FileUtil.replaceFile(new File(tmpJarPath), new File(jarPath));
+      } catch (IOException e) {
+        terminate(1, "I/O error renaming jar temporary file to path: " +
+            e.getMessage());
+        return;
+      }
+    }
+  }
+
+  /**
+   * Prints a message to stderr and exits with a status code.
+   *
+   * @param status exit code
+   * @param msg message
+   */
+  private static void terminate(int status, String msg) {
+    System.err.println(msg);
+    ExitUtil.terminate(status, msg);
+  }
+}
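The class comment above notes that --jar writes the expanded classpath into a jar manifest. A consumer can read that value back with the standard java.util.jar API, much as the accompanying TestClasspath later in this change does; a minimal sketch, with a placeholder jar path:

    import java.io.File;
    import java.io.IOException;
    import java.util.jar.Attributes;
    import java.util.jar.JarFile;
    import java.util.jar.Manifest;

    public class PrintManifestClasspath {
      public static void main(String[] args) throws IOException {
        // Jar previously written by "hadoop classpath --jar <path>"; path is illustrative.
        JarFile jar = new JarFile(new File("/tmp/hadoop-classpath.jar"));
        try {
          Manifest manifest = jar.getManifest();
          Attributes attrs = manifest.getMainAttributes();
          // The expanded classpath is carried in the standard Class-Path attribute.
          System.out.println(attrs.getValue(Attributes.Name.CLASS_PATH));
        } finally {
          jar.close();
        }
      }
    }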
@@ -79,6 +79,20 @@ public class DiskChecker {
                      (canonDir.mkdir() || canonDir.exists()));
   }

+  /**
+   * Recurse down a directory tree, checking all child directories.
+   * @param dir
+   * @throws DiskErrorException
+   */
+  public static void checkDirs(File dir) throws DiskErrorException {
+    checkDir(dir);
+    for (File child : dir.listFiles()) {
+      if (child.isDirectory()) {
+        checkDirs(child);
+      }
+    }
+  }
+
   /**
    * Create the directory if it doesn't exist and check that dir is readable,
    * writable and executable
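The new checkDirs method walks the whole tree under the given directory, applying the existing single-directory check to every child directory. A small, illustrative caller; the local path and the nested DiskChecker.DiskErrorException type are assumptions based on the surrounding class rather than part of this hunk:

    import java.io.File;

    import org.apache.hadoop.util.DiskChecker;
    import org.apache.hadoop.util.DiskChecker.DiskErrorException;

    public class LocalDirHealthCheck {
      public static void main(String[] args) {
        try {
          // Validates the directory and, recursively, every child directory.
          DiskChecker.checkDirs(new File("/data/hadoop/local"));
          System.out.println("all directories passed the disk check");
        } catch (DiskErrorException e) {
          System.err.println("disk check failed: " + e.getMessage());
        }
      }
    }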
@@ -27,6 +27,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Date;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashSet;
 import java.util.List;
@@ -377,6 +378,19 @@ public class StringUtils {
     return str.trim().split("\\s*,\\s*");
   }

+  /**
+   * Trims all the strings in a Collection<String> and returns a Set<String>.
+   * @param strings
+   * @return
+   */
+  public static Set<String> getTrimmedStrings(Collection<String> strings) {
+    Set<String> trimmedStrings = new HashSet<String>();
+    for (String string: strings) {
+      trimmedStrings.add(string.trim());
+    }
+    return trimmedStrings;
+  }
+
   final public static String[] emptyStringArray = {};
   final public static char COMMA = ',';
   final public static String COMMA_STR = ",";
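The helper added above simply trims each element and collects the results into a HashSet, so values that differ only in surrounding whitespace collapse to one entry. For illustration:

    import java.util.Arrays;
    import java.util.Set;

    import org.apache.hadoop.util.StringUtils;

    public class TrimmedStringsExample {
      public static void main(String[] args) {
        Set<String> trimmed = StringUtils.getTrimmedStrings(
            Arrays.asList(" alpha ", "beta", "beta  "));
        // Prints two entries, "alpha" and "beta", in no particular order.
        System.out.println(trimmed);
      }
    }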
@@ -1054,6 +1054,43 @@ done:
 #endif
 }

+JNIEXPORT void JNICALL
+Java_org_apache_hadoop_io_nativeio_NativeIO_link0(JNIEnv *env,
+  jclass clazz, jstring jsrc, jstring jdst)
+{
+#ifdef UNIX
+  const char *src = NULL, *dst = NULL;
+
+  src = (*env)->GetStringUTFChars(env, jsrc, NULL);
+  if (!src) goto done; // exception was thrown
+  dst = (*env)->GetStringUTFChars(env, jdst, NULL);
+  if (!dst) goto done; // exception was thrown
+  if (link(src, dst)) {
+    throw_ioe(env, errno);
+  }
+
+done:
+  if (src) (*env)->ReleaseStringUTFChars(env, jsrc, src);
+  if (dst) (*env)->ReleaseStringUTFChars(env, jdst, dst);
+#endif
+
+#ifdef WINDOWS
+  LPCTSTR src = NULL, dst = NULL;
+
+  src = (LPCTSTR) (*env)->GetStringChars(env, jsrc, NULL);
+  if (!src) goto done; // exception was thrown
+  dst = (LPCTSTR) (*env)->GetStringChars(env, jdst, NULL);
+  if (!dst) goto done; // exception was thrown
+  if (!CreateHardLink(dst, src, NULL)) {
+    throw_ioe(env, GetLastError());
+  }
+
+done:
+  if (src) (*env)->ReleaseStringChars(env, jsrc, src);
+  if (dst) (*env)->ReleaseStringChars(env, jdst, dst);
+#endif
+}
+
 JNIEXPORT jlong JNICALL
 Java_org_apache_hadoop_io_nativeio_NativeIO_getMemlockLimit0(
   JNIEnv *env, jclass clazz)
@@ -296,9 +296,24 @@ User Commands
 * <<<classpath>>>

    Prints the class path needed to get the Hadoop jar and the required
-   libraries.
+   libraries. If called without arguments, then prints the classpath set up by
+   the command scripts, which is likely to contain wildcards in the classpath
+   entries. Additional options print the classpath after wildcard expansion or
+   write the classpath into the manifest of a jar file. The latter is useful in
+   environments where wildcards cannot be used and the expanded classpath exceeds
+   the maximum supported command line length.

-   Usage: <<<hadoop classpath>>>
+   Usage: <<<hadoop classpath [--glob|--jar <path>|-h|--help]>>>
+
+*-----------------+-----------------------------------------------------------+
+|| COMMAND_OPTION || Description
+*-----------------+-----------------------------------------------------------+
+| --glob          | expand wildcards
+*-----------------+-----------------------------------------------------------+
+| --jar <path>    | write classpath as manifest in jar named <path>
+*-----------------+-----------------------------------------------------------+
+| -h, --help      | print help
+*-----------------+-----------------------------------------------------------+

 Administration Commands
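The options documented above map directly onto the new Classpath utility, so the same behavior can also be exercised programmatically, which is how the TestClasspath added later in this change drives it. A brief sketch with a placeholder jar path:

    import org.apache.hadoop.util.Classpath;

    public class ClasspathExamples {
      public static void main(String[] args) {
        // Print the classpath with wildcards expanded.
        Classpath.main(new String[] { "--glob" });
        // Write the classpath into the manifest of a jar file.
        Classpath.main(new String[] { "--jar", "/tmp/hadoop-classpath.jar" });
      }
    }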
@@ -26,10 +26,10 @@ import javax.crypto.spec.IvParameterSpec;
 import javax.crypto.spec.SecretKeySpec;

 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.junit.BeforeClass;
 import org.junit.Test;

-
 import static org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
@@ -118,8 +118,15 @@ public class TestKeyProviderCryptoExtension {
         new IvParameterSpec(KeyProviderCryptoExtension.EncryptedKeyVersion
             .deriveIV(encryptedKeyIv)));
     final byte[] manualMaterial = cipher.doFinal(encryptedKeyMaterial);
+
+    // Test the createForDecryption factory method
+    EncryptedKeyVersion eek2 =
+        EncryptedKeyVersion.createForDecryption(
+            eek.getEncryptionKeyVersionName(), eek.getEncryptedKeyIv(),
+            eek.getEncryptedKeyVersion().getMaterial());

     // Decrypt it with the API
-    KeyVersion decryptedKey = kpExt.decryptEncryptedKey(eek);
+    KeyVersion decryptedKey = kpExt.decryptEncryptedKey(eek2);
     final byte[] apiMaterial = decryptedKey.getMaterial();

     assertArrayEquals("Wrong key material from decryptEncryptedKey",
@@ -73,7 +73,7 @@ public class TestKeyShell {
   private void deleteKey(KeyShell ks, String keyName) throws Exception {
     int rc;
     outContent.reset();
-    final String[] delArgs = {"delete", keyName, "--provider", jceksProvider};
+    final String[] delArgs = {"delete", keyName, "-provider", jceksProvider};
     rc = ks.run(delArgs);
     assertEquals(0, rc);
     assertTrue(outContent.toString().contains(keyName + " has been " +
@@ -90,8 +90,8 @@ public class TestKeyShell {
   private String listKeys(KeyShell ks, boolean wantMetadata) throws Exception {
     int rc;
     outContent.reset();
-    final String[] listArgs = {"list", "--provider", jceksProvider };
-    final String[] listArgsM = {"list", "--metadata", "--provider", jceksProvider };
+    final String[] listArgs = {"list", "-provider", jceksProvider };
+    final String[] listArgsM = {"list", "-metadata", "-provider", jceksProvider };
     rc = ks.run(wantMetadata ? listArgsM : listArgs);
     assertEquals(0, rc);
     return outContent.toString();
@@ -106,7 +106,7 @@ public class TestKeyShell {
     ks.setConf(new Configuration());

     outContent.reset();
-    final String[] args1 = {"create", keyName, "--provider", jceksProvider};
+    final String[] args1 = {"create", keyName, "-provider", jceksProvider};
     rc = ks.run(args1);
     assertEquals(0, rc);
     assertTrue(outContent.toString().contains(keyName + " has been " +
@@ -121,7 +121,7 @@ public class TestKeyShell {
     assertTrue(listOut.contains("created"));

     outContent.reset();
-    final String[] args2 = {"roll", keyName, "--provider", jceksProvider};
+    final String[] args2 = {"roll", keyName, "-provider", jceksProvider};
     rc = ks.run(args2);
     assertEquals(0, rc);
     assertTrue(outContent.toString().contains("key1 has been successfully " +
@@ -137,8 +137,8 @@ public class TestKeyShell {
   @Test
   public void testKeySuccessfulCreationWithDescription() throws Exception {
     outContent.reset();
-    final String[] args1 = {"create", "key1", "--provider", jceksProvider,
-                            "--description", "someDescription"};
+    final String[] args1 = {"create", "key1", "-provider", jceksProvider,
+                            "-description", "someDescription"};
     int rc = 0;
     KeyShell ks = new KeyShell();
     ks.setConf(new Configuration());
@@ -154,7 +154,7 @@ public class TestKeyShell {

   @Test
   public void testInvalidKeySize() throws Exception {
-    final String[] args1 = {"create", "key1", "--size", "56", "--provider",
+    final String[] args1 = {"create", "key1", "-size", "56", "-provider",
                             jceksProvider};

     int rc = 0;
@@ -167,7 +167,7 @@ public class TestKeyShell {

   @Test
   public void testInvalidCipher() throws Exception {
-    final String[] args1 = {"create", "key1", "--cipher", "LJM", "--provider",
+    final String[] args1 = {"create", "key1", "-cipher", "LJM", "-provider",
                             jceksProvider};

     int rc = 0;
@@ -180,7 +180,7 @@ public class TestKeyShell {

   @Test
   public void testInvalidProvider() throws Exception {
-    final String[] args1 = {"create", "key1", "--cipher", "AES", "--provider",
+    final String[] args1 = {"create", "key1", "-cipher", "AES", "-provider",
                             "sdff://file/tmp/keystore.jceks"};

     int rc = 0;
@@ -194,7 +194,7 @@ public class TestKeyShell {

   @Test
   public void testTransientProviderWarning() throws Exception {
-    final String[] args1 = {"create", "key1", "--cipher", "AES", "--provider",
+    final String[] args1 = {"create", "key1", "-cipher", "AES", "-provider",
                             "user:///"};

     int rc = 0;
@@ -224,8 +224,8 @@ public class TestKeyShell {
   @Test
   public void testFullCipher() throws Exception {
     final String keyName = "key1";
-    final String[] args1 = {"create", keyName, "--cipher", "AES/CBC/pkcs5Padding",
-                            "--provider", jceksProvider};
+    final String[] args1 = {"create", keyName, "-cipher", "AES/CBC/pkcs5Padding",
+                            "-provider", jceksProvider};

     int rc = 0;
     KeyShell ks = new KeyShell();
@@ -245,8 +245,8 @@ public class TestKeyShell {
     ks.setConf(new Configuration());

     /* Simple creation test */
-    final String[] args1 = {"create", "keyattr1", "--provider", jceksProvider,
-                            "--attr", "foo=bar"};
+    final String[] args1 = {"create", "keyattr1", "-provider", jceksProvider,
+                            "-attr", "foo=bar"};
     rc = ks.run(args1);
     assertEquals(0, rc);
     assertTrue(outContent.toString().contains("keyattr1 has been " +
@@ -259,8 +259,8 @@ public class TestKeyShell {

     /* Negative tests: no attribute */
     outContent.reset();
-    final String[] args2 = {"create", "keyattr2", "--provider", jceksProvider,
-                            "--attr", "=bar"};
+    final String[] args2 = {"create", "keyattr2", "-provider", jceksProvider,
+                            "-attr", "=bar"};
     rc = ks.run(args2);
     assertEquals(1, rc);

@@ -288,10 +288,10 @@ public class TestKeyShell {

     /* Test several attrs together... */
     outContent.reset();
-    final String[] args3 = {"create", "keyattr3", "--provider", jceksProvider,
-                            "--attr", "foo = bar",
-                            "--attr", " glarch =baz ",
-                            "--attr", "abc=def"};
+    final String[] args3 = {"create", "keyattr3", "-provider", jceksProvider,
+                            "-attr", "foo = bar",
+                            "-attr", " glarch =baz ",
+                            "-attr", "abc=def"};
     rc = ks.run(args3);
     assertEquals(0, rc);

@@ -304,9 +304,9 @@ public class TestKeyShell {

     /* Negative test - repeated attributes should fail */
     outContent.reset();
-    final String[] args4 = {"create", "keyattr4", "--provider", jceksProvider,
-                            "--attr", "foo=bar",
-                            "--attr", "foo=glarch"};
+    final String[] args4 = {"create", "keyattr4", "-provider", jceksProvider,
+                            "-attr", "foo=bar",
+                            "-attr", "foo=glarch"};
     rc = ks.run(args4);
     assertEquals(1, rc);
@@ -23,6 +23,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
@@ -201,6 +202,8 @@ public class TestHarFileSystem {
     public void removeXAttr(Path path, String name) throws IOException;

     public AclStatus getAclStatus(Path path) throws IOException;
+
+    public void access(Path path, FsAction mode) throws IOException;
   }

   @Test
@@ -17,16 +17,18 @@
  */
 package org.apache.hadoop.security.alias;

-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;

 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.PrintStream;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;

 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.alias.CredentialShell.PasswordReader;
 import org.junit.Before;
 import org.junit.Test;

@@ -45,7 +47,7 @@ public class TestCredShell {
   @Test
   public void testCredentialSuccessfulLifecycle() throws Exception {
     outContent.reset();
-    String[] args1 = {"create", "credential1", "--value", "p@ssw0rd", "--provider",
+    String[] args1 = {"create", "credential1", "-value", "p@ssw0rd", "-provider",
         "jceks://file" + tmpDir + "/credstore.jceks"};
     int rc = 0;
     CredentialShell cs = new CredentialShell();
@@ -56,14 +58,14 @@ public class TestCredShell {
         "created."));

     outContent.reset();
-    String[] args2 = {"list", "--provider",
+    String[] args2 = {"list", "-provider",
         "jceks://file" + tmpDir + "/credstore.jceks"};
     rc = cs.run(args2);
     assertEquals(0, rc);
     assertTrue(outContent.toString().contains("credential1"));

     outContent.reset();
-    String[] args4 = {"delete", "credential1", "--provider",
+    String[] args4 = {"delete", "credential1", "-provider",
         "jceks://file" + tmpDir + "/credstore.jceks"};
     rc = cs.run(args4);
     assertEquals(0, rc);
@@ -71,7 +73,7 @@ public class TestCredShell {
         "deleted."));

     outContent.reset();
-    String[] args5 = {"list", "--provider",
+    String[] args5 = {"list", "-provider",
         "jceks://file" + tmpDir + "/credstore.jceks"};
     rc = cs.run(args5);
     assertEquals(0, rc);
@@ -80,21 +82,21 @@ public class TestCredShell {

   @Test
   public void testInvalidProvider() throws Exception {
-    String[] args1 = {"create", "credential1", "--value", "p@ssw0rd", "--provider",
+    String[] args1 = {"create", "credential1", "-value", "p@ssw0rd", "-provider",
         "sdff://file/tmp/credstore.jceks"};

     int rc = 0;
     CredentialShell cs = new CredentialShell();
     cs.setConf(new Configuration());
     rc = cs.run(args1);
-    assertEquals(-1, rc);
+    assertEquals(1, rc);
     assertTrue(outContent.toString().contains("There are no valid " +
         "CredentialProviders configured."));
   }

   @Test
   public void testTransientProviderWarning() throws Exception {
-    String[] args1 = {"create", "credential1", "--value", "p@ssw0rd", "--provider",
+    String[] args1 = {"create", "credential1", "-value", "p@ssw0rd", "-provider",
         "user:///"};

     int rc = 0;
@@ -105,7 +107,7 @@ public class TestCredShell {
     assertTrue(outContent.toString().contains("WARNING: you are modifying a " +
         "transient provider."));

-    String[] args2 = {"delete", "credential1", "--provider", "user:///"};
+    String[] args2 = {"delete", "credential1", "-provider", "user:///"};
     rc = cs.run(args2);
     assertEquals(outContent.toString(), 0, rc);
     assertTrue(outContent.toString().contains("credential1 has been successfully " +
@@ -122,14 +124,14 @@ public class TestCredShell {
     config.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, "user:///");
     cs.setConf(config);
     rc = cs.run(args1);
-    assertEquals(-1, rc);
+    assertEquals(1, rc);
     assertTrue(outContent.toString().contains("There are no valid " +
         "CredentialProviders configured."));
   }

   @Test
   public void testPromptForCredentialWithEmptyPasswd() throws Exception {
-    String[] args1 = {"create", "credential1", "--provider",
+    String[] args1 = {"create", "credential1", "-provider",
         "jceks://file" + tmpDir + "/credstore.jceks"};
     ArrayList<String> passwords = new ArrayList<String>();
     passwords.add(null);
@@ -139,13 +141,13 @@ public class TestCredShell {
     shell.setConf(new Configuration());
     shell.setPasswordReader(new MockPasswordReader(passwords));
     rc = shell.run(args1);
-    assertEquals(outContent.toString(), -1, rc);
+    assertEquals(outContent.toString(), 1, rc);
     assertTrue(outContent.toString().contains("Passwords don't match"));
   }

   @Test
   public void testPromptForCredential() throws Exception {
-    String[] args1 = {"create", "credential1", "--provider",
+    String[] args1 = {"create", "credential1", "-provider",
         "jceks://file" + tmpDir + "/credstore.jceks"};
     ArrayList<String> passwords = new ArrayList<String>();
     passwords.add("p@ssw0rd");
@@ -159,7 +161,7 @@ public class TestCredShell {
     assertTrue(outContent.toString().contains("credential1 has been successfully " +
         "created."));

-    String[] args2 = {"delete", "credential1", "--provider",
+    String[] args2 = {"delete", "credential1", "-provider",
         "jceks://file" + tmpDir + "/credstore.jceks"};
     rc = shell.run(args2);
     assertEquals(0, rc);
@@ -186,4 +188,21 @@ public class TestCredShell {
       System.out.println(message);
     }
   }
+
+  @Test
+  public void testEmptyArgList() throws Exception {
+    CredentialShell shell = new CredentialShell();
+    shell.setConf(new Configuration());
+    assertEquals(1, shell.init(new String[0]));
+  }
+
+  @Test
+  public void testCommandHelpExitsNormally() throws Exception {
+    for (String cmd : Arrays.asList("create", "list", "delete")) {
+      CredentialShell shell = new CredentialShell();
+      shell.setConf(new Configuration());
+      assertEquals("Expected help argument on " + cmd + " to return 0",
+          0, shell.init(new String[] {cmd, "-help"}));
+    }
+  }
 }
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import static org.junit.Assert.*;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.nio.charset.Charset;
+import java.util.jar.Attributes;
+import java.util.jar.JarFile;
+import java.util.jar.Manifest;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.io.IOUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests covering the classpath command-line utility.
+ */
+public class TestClasspath {
+
+  private static final Log LOG = LogFactory.getLog(TestClasspath.class);
+  private static final File TEST_DIR = new File(
+      System.getProperty("test.build.data", "/tmp"), "TestClasspath");
+  private static final Charset UTF8 = Charset.forName("UTF-8");
+
+  static {
+    ExitUtil.disableSystemExit();
+  }
+
+  private PrintStream oldStdout, oldStderr;
+  private ByteArrayOutputStream stdout, stderr;
+  private PrintStream printStdout, printStderr;
+
+  @Before
+  public void setUp() {
+    assertTrue(FileUtil.fullyDelete(TEST_DIR));
+    assertTrue(TEST_DIR.mkdirs());
+    oldStdout = System.out;
+    oldStderr = System.err;
+
+    stdout = new ByteArrayOutputStream();
+    printStdout = new PrintStream(stdout);
+    System.setOut(printStdout);
+
+    stderr = new ByteArrayOutputStream();
+    printStderr = new PrintStream(stderr);
+    System.setErr(printStderr);
+  }
+
+  @After
+  public void tearDown() {
+    System.setOut(oldStdout);
+    System.setErr(oldStderr);
+    IOUtils.cleanup(LOG, printStdout, printStderr);
+    assertTrue(FileUtil.fullyDelete(TEST_DIR));
+  }
+
+  @Test
+  public void testGlob() {
+    Classpath.main(new String[] { "--glob" });
+    String strOut = new String(stdout.toByteArray(), UTF8);
+    assertEquals(System.getProperty("java.class.path"), strOut.trim());
+    assertTrue(stderr.toByteArray().length == 0);
+  }
+
+  @Test
+  public void testJar() throws IOException {
+    File file = new File(TEST_DIR, "classpath.jar");
+    Classpath.main(new String[] { "--jar", file.getAbsolutePath() });
+    assertTrue(stdout.toByteArray().length == 0);
+    assertTrue(stderr.toByteArray().length == 0);
+    assertTrue(file.exists());
+    assertJar(file);
+  }
+
+  @Test
+  public void testJarReplace() throws IOException {
+    // Run the command twice with the same output jar file, and expect success.
+    testJar();
+    testJar();
+  }
+
+  @Test
+  public void testJarFileMissing() throws IOException {
+    try {
+      Classpath.main(new String[] { "--jar" });
+      fail("expected exit");
+    } catch (ExitUtil.ExitException e) {
+      assertTrue(stdout.toByteArray().length == 0);
+      String strErr = new String(stderr.toByteArray(), UTF8);
+      assertTrue(strErr.contains("requires path of jar"));
+    }
+  }
+
+  @Test
+  public void testHelp() {
+    Classpath.main(new String[] { "--help" });
+    String strOut = new String(stdout.toByteArray(), UTF8);
+    assertTrue(strOut.contains("Prints the classpath"));
+    assertTrue(stderr.toByteArray().length == 0);
+  }
+
+  @Test
+  public void testHelpShort() {
+    Classpath.main(new String[] { "-h" });
+    String strOut = new String(stdout.toByteArray(), UTF8);
+    assertTrue(strOut.contains("Prints the classpath"));
+    assertTrue(stderr.toByteArray().length == 0);
+  }
+
+  @Test
+  public void testUnrecognized() {
+    try {
+      Classpath.main(new String[] { "--notarealoption" });
+      fail("expected exit");
+    } catch (ExitUtil.ExitException e) {
+      assertTrue(stdout.toByteArray().length == 0);
+      String strErr = new String(stderr.toByteArray(), UTF8);
+      assertTrue(strErr.contains("unrecognized option"));
+    }
+  }
+
+  /**
+   * Asserts that the specified file is a jar file with a manifest containing a
+   * non-empty classpath attribute.
+   *
+   * @param file File to check
+   * @throws IOException if there is an I/O error
+   */
+  private static void assertJar(File file) throws IOException {
+    JarFile jarFile = null;
+    try {
+      jarFile = new JarFile(file);
+      Manifest manifest = jarFile.getManifest();
+      assertNotNull(manifest);
+      Attributes mainAttributes = manifest.getMainAttributes();
+      assertNotNull(mainAttributes);
+      assertTrue(mainAttributes.containsKey(Attributes.Name.CLASS_PATH));
+      String classPathAttr = mainAttributes.getValue(Attributes.Name.CLASS_PATH);
+      assertNotNull(classPathAttr);
+      assertFalse(classPathAttr.isEmpty());
+    } finally {
+      // It's too bad JarFile doesn't implement Closeable.
+      if (jarFile != null) {
+        try {
+          jarFile.close();
+        } catch (IOException e) {
+          LOG.warn("exception closing jarFile: " + jarFile, e);
+        }
+      }
+    }
+  }
+}
@@ -20,6 +20,7 @@ package org.apache.hadoop.crypto.key.kms.server;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
@@ -27,7 +28,6 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
-import org.apache.hadoop.util.StringUtils;

 import javax.ws.rs.Consumes;
 import javax.ws.rs.DELETE;
@@ -59,22 +59,25 @@ import java.util.Map;
 @Path(KMSRESTConstants.SERVICE_VERSION)
 @InterfaceAudience.Private
 public class KMS {
-  private static final String CREATE_KEY = "CREATE_KEY";
-  private static final String DELETE_KEY = "DELETE_KEY";
-  private static final String ROLL_NEW_VERSION = "ROLL_NEW_VERSION";
-  private static final String GET_KEYS = "GET_KEYS";
-  private static final String GET_KEYS_METADATA = "GET_KEYS_METADATA";
-  private static final String GET_KEY_VERSION = "GET_KEY_VERSION";
-  private static final String GET_CURRENT_KEY = "GET_CURRENT_KEY";
-  private static final String GET_KEY_VERSIONS = "GET_KEY_VERSIONS";
-  private static final String GET_METADATA = "GET_METADATA";
-  private static final String GENERATE_EEK = "GENERATE_EEK";
-  private static final String DECRYPT_EEK = "DECRYPT_EEK";
+  public static final String CREATE_KEY = "CREATE_KEY";
+  public static final String DELETE_KEY = "DELETE_KEY";
+  public static final String ROLL_NEW_VERSION = "ROLL_NEW_VERSION";
+  public static final String GET_KEYS = "GET_KEYS";
+  public static final String GET_KEYS_METADATA = "GET_KEYS_METADATA";
+  public static final String GET_KEY_VERSIONS = "GET_KEY_VERSIONS";
+  public static final String GET_METADATA = "GET_METADATA";
+  public static final String GET_KEY_VERSION = "GET_KEY_VERSION";
+  public static final String GET_CURRENT_KEY = "GET_CURRENT_KEY";
+  public static final String GENERATE_EEK = "GENERATE_EEK";
+  public static final String DECRYPT_EEK = "DECRYPT_EEK";

   private KeyProviderCryptoExtension provider;
+  private KMSAudit kmsAudit;

   public KMS() throws Exception {
     provider = KMSWebApp.getKeyProvider();
+    kmsAudit= KMSWebApp.getKMSAudit();
   }

   private static Principal getPrincipal(SecurityContext securityContext)
@@ -86,13 +89,26 @@ public class KMS {
     return user;
   }

-  private static void assertAccess(KMSACLs.Type aclType, Principal principal,
+  private static final String UNAUTHORIZED_MSG_WITH_KEY =
+      "User:{0} not allowed to do ''{1}'' on ''{2}''";
+
+  private static final String UNAUTHORIZED_MSG_WITHOUT_KEY =
+      "User:{0} not allowed to do ''{1}''";
+
+  private void assertAccess(KMSACLs.Type aclType, Principal principal,
+      String operation) throws AccessControlException {
+    assertAccess(aclType, principal, operation, null);
+  }
+
+  private void assertAccess(KMSACLs.Type aclType, Principal principal,
       String operation, String key) throws AccessControlException {
     if (!KMSWebApp.getACLs().hasAccess(aclType, principal.getName())) {
       KMSWebApp.getUnauthorizedCallsMeter().mark();
-      KMSAudit.unauthorized(principal, operation, key);
+      kmsAudit.unauthorized(principal, operation, key);
       throw new AuthorizationException(MessageFormat.format(
-          "User:{0} not allowed to do ''{1}'' on ''{2}''",
+          (key != null) ? UNAUTHORIZED_MSG_WITH_KEY
+                        : UNAUTHORIZED_MSG_WITHOUT_KEY,
           principal.getName(), operation, key));
     }
   }
@@ -149,7 +165,7 @@ public class KMS {

     provider.flush();

-    KMSAudit.ok(user, CREATE_KEY, name, "UserProvidedMaterial:" +
+    kmsAudit.ok(user, CREATE_KEY, name, "UserProvidedMaterial:" +
         (material != null) + " Description:" + description);

     if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) {
@@ -175,7 +191,7 @@ public class KMS {
     provider.deleteKey(name);
     provider.flush();

-    KMSAudit.ok(user, DELETE_KEY, name, "");
+    kmsAudit.ok(user, DELETE_KEY, name, "");

     return Response.ok().build();
   }
@@ -203,7 +219,7 @@ public class KMS {

     provider.flush();

-    KMSAudit.ok(user, ROLL_NEW_VERSION, name, "UserProvidedMaterial:" +
+    kmsAudit.ok(user, ROLL_NEW_VERSION, name, "UserProvidedMaterial:" +
         (material != null) + " NewVersion:" + keyVersion.getVersionName());

     if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) {
@@ -222,11 +238,10 @@ public class KMS {
     KMSWebApp.getAdminCallsMeter().mark();
     Principal user = getPrincipal(securityContext);
     String[] keyNames = keyNamesList.toArray(new String[keyNamesList.size()]);
-    String names = StringUtils.arrayToString(keyNames);
-    assertAccess(KMSACLs.Type.GET_METADATA, user, GET_KEYS_METADATA, names);
+    assertAccess(KMSACLs.Type.GET_METADATA, user, GET_KEYS_METADATA);
     KeyProvider.Metadata[] keysMeta = provider.getKeysMetadata(keyNames);
     Object json = KMSServerJSONUtils.toJSON(keyNames, keysMeta);
-    KMSAudit.ok(user, GET_KEYS_METADATA, names, "");
+    kmsAudit.ok(user, GET_KEYS_METADATA, "");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }

@@ -237,9 +252,9 @@ public class KMS {
     throws Exception {
     KMSWebApp.getAdminCallsMeter().mark();
     Principal user = getPrincipal(securityContext);
-    assertAccess(KMSACLs.Type.GET_KEYS, user, GET_KEYS, "*");
+    assertAccess(KMSACLs.Type.GET_KEYS, user, GET_KEYS);
     Object json = provider.getKeys();
-    KMSAudit.ok(user, GET_KEYS, "*", "");
+    kmsAudit.ok(user, GET_KEYS, "");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }

@@ -263,7 +278,7 @@ public class KMS {
     KMSWebApp.getAdminCallsMeter().mark();
     assertAccess(KMSACLs.Type.GET_METADATA, user, GET_METADATA, name);
     Object json = KMSServerJSONUtils.toJSON(name, provider.getMetadata(name));
-    KMSAudit.ok(user, GET_METADATA, name, "");
+    kmsAudit.ok(user, GET_METADATA, name, "");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }

@@ -279,7 +294,7 @@ public class KMS {
     KMSWebApp.getKeyCallsMeter().mark();
     assertAccess(KMSACLs.Type.GET, user, GET_CURRENT_KEY, name);
     Object json = KMSServerJSONUtils.toJSON(provider.getCurrentKey(name));
-    KMSAudit.ok(user, GET_CURRENT_KEY, name, "");
+    kmsAudit.ok(user, GET_CURRENT_KEY, name, "");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }

@@ -292,9 +307,12 @@ public class KMS {
     Principal user = getPrincipal(securityContext);
     KMSClientProvider.checkNotEmpty(versionName, "versionName");
     KMSWebApp.getKeyCallsMeter().mark();
-    assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSION, versionName);
-    Object json = KMSServerJSONUtils.toJSON(provider.getKeyVersion(versionName));
-    KMSAudit.ok(user, GET_KEY_VERSION, versionName, "");
+    KeyVersion keyVersion = provider.getKeyVersion(versionName);
+    assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSION);
+    if (keyVersion != null) {
+      kmsAudit.ok(user, GET_KEY_VERSION, keyVersion.getName(), "");
+    }
+    Object json = KMSServerJSONUtils.toJSON(keyVersion);
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }

@@ -327,7 +345,7 @@ public class KMS {
     } catch (Exception e) {
       throw new IOException(e);
     }
-    KMSAudit.ok(user, GENERATE_EEK, name, "");
+    kmsAudit.ok(user, GENERATE_EEK, name, "");
     retJSON = new ArrayList();
     for (EncryptedKeyVersion edek : retEdeks) {
       ((ArrayList)retJSON).add(KMSServerJSONUtils.toJSON(edek));
@@ -362,7 +380,7 @@ public class KMS {
         (String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD);
     Object retJSON;
     if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) {
-      assertAccess(KMSACLs.Type.DECRYPT_EEK, user, DECRYPT_EEK, versionName);
+      assertAccess(KMSACLs.Type.DECRYPT_EEK, user, DECRYPT_EEK, keyName);
       KMSClientProvider.checkNotNull(ivStr, KMSRESTConstants.IV_FIELD);
       byte[] iv = Base64.decodeBase64(ivStr);
       KMSClientProvider.checkNotNull(encMaterialStr,
@@ -373,7 +391,7 @@ public class KMS {
           new KMSClientProvider.KMSEncryptedKeyVersion(keyName, versionName,
               iv, KeyProviderCryptoExtension.EEK, encMaterial));
       retJSON = KMSServerJSONUtils.toJSON(retKeyVersion);
-      KMSAudit.ok(user, DECRYPT_EEK, versionName, "");
+      kmsAudit.ok(user, DECRYPT_EEK, keyName, "");
     } else {
       throw new IllegalArgumentException("Wrong " + KMSRESTConstants.EEK_OP +
           " value, it must be " + KMSRESTConstants.EEK_GENERATE + " or " +
@@ -396,7 +414,7 @@ public class KMS {
     KMSWebApp.getKeyCallsMeter().mark();
     assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSIONS, name);
     Object json = KMSServerJSONUtils.toJSON(provider.getKeyVersions(name));
-    KMSAudit.ok(user, GET_KEY_VERSIONS, name, "");
+    kmsAudit.ok(user, GET_KEY_VERSIONS, name, "");
|
||||||
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
|
return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -20,43 +20,202 @@ package org.apache.hadoop.crypto.key.kms.server;
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
import org.slf4j.LoggerFactory;
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
import com.google.common.base.Joiner;
|
||||||
|
import com.google.common.base.Strings;
|
||||||
|
import com.google.common.cache.Cache;
|
||||||
|
import com.google.common.cache.CacheBuilder;
|
||||||
|
import com.google.common.cache.RemovalListener;
|
||||||
|
import com.google.common.cache.RemovalNotification;
|
||||||
|
import com.google.common.collect.Sets;
|
||||||
|
import com.google.common.util.concurrent.ThreadFactoryBuilder;
|
||||||
|
|
||||||
import java.security.Principal;
|
import java.security.Principal;
|
||||||
|
import java.util.LinkedList;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Set;
|
||||||
|
import java.util.concurrent.Callable;
|
||||||
|
import java.util.concurrent.ExecutionException;
|
||||||
|
import java.util.concurrent.Executors;
|
||||||
|
import java.util.concurrent.ScheduledExecutorService;
|
||||||
|
import java.util.concurrent.TimeUnit;
|
||||||
|
import java.util.concurrent.atomic.AtomicLong;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Provides convenience methods for audit logging consistently the different
|
* Provides convenience methods for audit logging consistently the different
|
||||||
* types of events.
|
* types of events.
|
||||||
*/
|
*/
|
||||||
public class KMSAudit {
|
public class KMSAudit {
|
||||||
|
|
||||||
|
private static class AuditEvent {
|
||||||
|
private final AtomicLong accessCount = new AtomicLong(-1);
|
||||||
|
private final String keyName;
|
||||||
|
private final String user;
|
||||||
|
private final String op;
|
||||||
|
private final String extraMsg;
|
||||||
|
private final long startTime = System.currentTimeMillis();
|
||||||
|
|
||||||
|
private AuditEvent(String keyName, String user, String op, String msg) {
|
||||||
|
this.keyName = keyName;
|
||||||
|
this.user = user;
|
||||||
|
this.op = op;
|
||||||
|
this.extraMsg = msg;
|
||||||
|
}
|
||||||
|
|
||||||
|
public String getExtraMsg() {
|
||||||
|
return extraMsg;
|
||||||
|
}
|
||||||
|
|
||||||
|
public AtomicLong getAccessCount() {
|
||||||
|
return accessCount;
|
||||||
|
}
|
||||||
|
|
||||||
|
public String getKeyName() {
|
||||||
|
return keyName;
|
||||||
|
}
|
||||||
|
|
||||||
|
public String getUser() {
|
||||||
|
return user;
|
||||||
|
}
|
||||||
|
|
||||||
|
public String getOp() {
|
||||||
|
return op;
|
||||||
|
}
|
||||||
|
|
||||||
|
public long getStartTime() {
|
||||||
|
return startTime;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public static enum OpStatus {
|
||||||
|
OK, UNAUTHORIZED, UNAUTHENTICATED, ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
private static Set<String> AGGREGATE_OPS_WHITELIST = Sets.newHashSet(
|
||||||
|
KMS.GET_KEY_VERSION, KMS.GET_CURRENT_KEY, KMS.DECRYPT_EEK, KMS.GENERATE_EEK
|
||||||
|
);
|
||||||
|
|
||||||
|
private Cache<String, AuditEvent> cache;
|
||||||
|
|
||||||
|
private ScheduledExecutorService executor;
|
||||||
|
|
||||||
public static final String KMS_LOGGER_NAME = "kms-audit";
|
public static final String KMS_LOGGER_NAME = "kms-audit";
|
||||||
|
|
||||||
private static Logger AUDIT_LOG = LoggerFactory.getLogger(KMS_LOGGER_NAME);
|
private static Logger AUDIT_LOG = LoggerFactory.getLogger(KMS_LOGGER_NAME);
|
||||||
|
|
||||||
private static void op(String status, String op, Principal user, String key,
|
KMSAudit(long delay) {
|
||||||
String extraMsg) {
|
cache = CacheBuilder.newBuilder()
|
||||||
AUDIT_LOG.info("Status:{} User:{} Op:{} Name:{}{}", status, user.getName(),
|
.expireAfterWrite(delay, TimeUnit.MILLISECONDS)
|
||||||
op, key, extraMsg);
|
.removalListener(
|
||||||
|
new RemovalListener<String, AuditEvent>() {
|
||||||
|
@Override
|
||||||
|
public void onRemoval(
|
||||||
|
RemovalNotification<String, AuditEvent> entry) {
|
||||||
|
AuditEvent event = entry.getValue();
|
||||||
|
if (event.getAccessCount().get() > 0) {
|
||||||
|
KMSAudit.this.logEvent(event);
|
||||||
|
event.getAccessCount().set(0);
|
||||||
|
KMSAudit.this.cache.put(entry.getKey(), event);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}).build();
|
||||||
|
executor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
|
||||||
|
.setDaemon(true).setNameFormat(KMS_LOGGER_NAME + "_thread").build());
|
||||||
|
executor.scheduleAtFixedRate(new Runnable() {
|
||||||
|
@Override
|
||||||
|
public void run() {
|
||||||
|
cache.cleanUp();
|
||||||
|
}
|
||||||
|
}, delay / 10, delay / 10, TimeUnit.MILLISECONDS);
|
||||||
}
|
}
|
||||||
|
|
||||||
public static void ok(Principal user, String op, String key,
|
private void logEvent(AuditEvent event) {
|
||||||
String extraMsg) {
|
|
||||||
op("OK", op, user, key, extraMsg);
|
|
||||||
}
|
|
||||||
|
|
||||||
public static void unauthorized(Principal user, String op, String key) {
|
|
||||||
op("UNAUTHORIZED", op, user, key, "");
|
|
||||||
}
|
|
||||||
|
|
||||||
public static void error(Principal user, String method, String url,
|
|
||||||
String extraMsg) {
|
|
||||||
AUDIT_LOG.info("Status:ERROR User:{} Method:{} URL:{} Exception:'{}'",
|
|
||||||
user.getName(), method, url, extraMsg);
|
|
||||||
}
|
|
||||||
|
|
||||||
public static void unauthenticated(String remoteHost, String method,
|
|
||||||
String url, String extraMsg) {
|
|
||||||
AUDIT_LOG.info(
|
AUDIT_LOG.info(
|
||||||
"Status:UNAUTHENTICATED RemoteHost:{} Method:{} URL:{} ErrorMsg:'{}'",
|
"OK[op={}, key={}, user={}, accessCount={}, interval={}ms] {}",
|
||||||
remoteHost, method, url, extraMsg);
|
event.getOp(), event.getKeyName(), event.getUser(),
|
||||||
|
event.getAccessCount().get(),
|
||||||
|
(System.currentTimeMillis() - event.getStartTime()),
|
||||||
|
event.getExtraMsg());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private void op(OpStatus opStatus, final String op, final String user,
|
||||||
|
final String key, final String extraMsg) {
|
||||||
|
if (!Strings.isNullOrEmpty(user) && !Strings.isNullOrEmpty(key)
|
||||||
|
&& !Strings.isNullOrEmpty(op)
|
||||||
|
&& AGGREGATE_OPS_WHITELIST.contains(op)) {
|
||||||
|
String cacheKey = createCacheKey(user, key, op);
|
||||||
|
if (opStatus == OpStatus.UNAUTHORIZED) {
|
||||||
|
cache.invalidate(cacheKey);
|
||||||
|
AUDIT_LOG.info("UNAUTHORIZED[op={}, key={}, user={}] {}", op, key, user,
|
||||||
|
extraMsg);
|
||||||
|
} else {
|
||||||
|
try {
|
||||||
|
AuditEvent event = cache.get(cacheKey, new Callable<AuditEvent>() {
|
||||||
|
@Override
|
||||||
|
public AuditEvent call() throws Exception {
|
||||||
|
return new AuditEvent(key, user, op, extraMsg);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
// Log first access (initialized as -1 so
|
||||||
|
// incrementAndGet() == 0 implies first access)
|
||||||
|
if (event.getAccessCount().incrementAndGet() == 0) {
|
||||||
|
event.getAccessCount().incrementAndGet();
|
||||||
|
logEvent(event);
|
||||||
|
}
|
||||||
|
} catch (ExecutionException ex) {
|
||||||
|
throw new RuntimeException(ex);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
List<String> kvs = new LinkedList<String>();
|
||||||
|
if (!Strings.isNullOrEmpty(op)) {
|
||||||
|
kvs.add("op=" + op);
|
||||||
|
}
|
||||||
|
if (!Strings.isNullOrEmpty(key)) {
|
||||||
|
kvs.add("key=" + key);
|
||||||
|
}
|
||||||
|
if (!Strings.isNullOrEmpty(user)) {
|
||||||
|
kvs.add("user=" + user);
|
||||||
|
}
|
||||||
|
if (kvs.size() == 0) {
|
||||||
|
AUDIT_LOG.info("{} {}", opStatus.toString(), extraMsg);
|
||||||
|
} else {
|
||||||
|
String join = Joiner.on(", ").join(kvs);
|
||||||
|
AUDIT_LOG.info("{}[{}] {}", opStatus.toString(), join, extraMsg);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public void ok(Principal user, String op, String key,
|
||||||
|
String extraMsg) {
|
||||||
|
op(OpStatus.OK, op, user.getName(), key, extraMsg);
|
||||||
|
}
|
||||||
|
|
||||||
|
public void ok(Principal user, String op, String extraMsg) {
|
||||||
|
op(OpStatus.OK, op, user.getName(), null, extraMsg);
|
||||||
|
}
|
||||||
|
|
||||||
|
public void unauthorized(Principal user, String op, String key) {
|
||||||
|
op(OpStatus.UNAUTHORIZED, op, user.getName(), key, "");
|
||||||
|
}
|
||||||
|
|
||||||
|
public void error(Principal user, String method, String url,
|
||||||
|
String extraMsg) {
|
||||||
|
op(OpStatus.ERROR, null, user.getName(), null, "Method:'" + method
|
||||||
|
+ "' Exception:'" + extraMsg + "'");
|
||||||
|
}
|
||||||
|
|
||||||
|
public void unauthenticated(String remoteHost, String method,
|
||||||
|
String url, String extraMsg) {
|
||||||
|
op(OpStatus.UNAUTHENTICATED, null, null, null, "RemoteHost:"
|
||||||
|
+ remoteHost + " Method:" + method
|
||||||
|
+ " URL:" + url + " ErrorMsg:'" + extraMsg + "'");
|
||||||
|
}
|
||||||
|
|
||||||
|
private static String createCacheKey(String user, String key, String op) {
|
||||||
|
return user + "#" + key + "#" + op;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void shutdown() {
|
||||||
|
executor.shutdownNow();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
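The new KMSAudit above consolidates repeated (user, key, operation) accesses instead of logging each call. A minimal usage sketch of that behaviour, mirroring TestKMSAudit further down in this change; the KMSAudit, KMS.DECRYPT_EEK and ok()/shutdown() names are from the patch, but the wrapper class, the "alice"/"key1" values and the 1000 ms delay are illustrative, and the sketch assumes it lives in the same org.apache.hadoop.crypto.key.kms.server package because the KMSAudit(long) constructor is package-private:

  import java.security.Principal;

  public class KMSAuditUsageSketch {
    public static void main(String[] args) throws Exception {
      // 1000 ms aggregation window; KMSWebApp reads the real value from
      // KMSConfiguration.KMS_AUDIT_AGGREGATION_DELAY (default 10000 ms).
      KMSAudit audit = new KMSAudit(1000);
      Principal alice = new Principal() {
        @Override
        public String getName() {
          return "alice";
        }
      };
      for (int i = 0; i < 5; i++) {
        // DECRYPT_EEK is in the aggregation whitelist, so repeats are consolidated.
        audit.ok(alice, KMS.DECRYPT_EEK, "key1", "");
      }
      Thread.sleep(1500); // let the cache entry expire so the aggregate is flushed
      // The kms-audit logger then holds roughly:
      //   OK[op=DECRYPT_EEK, key=key1, user=alice, accessCount=1, interval=...ms]
      //   OK[op=DECRYPT_EEK, key=key1, user=alice, accessCount=5, interval=...ms]
      audit.shutdown();
    }
  }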
@ -115,8 +115,10 @@ public class KMSAuthenticationFilter extends AuthenticationFilter {
|
||||||
if (queryString != null) {
|
if (queryString != null) {
|
||||||
requestURL.append("?").append(queryString);
|
requestURL.append("?").append(queryString);
|
||||||
}
|
}
|
||||||
KMSAudit.unauthenticated(request.getRemoteHost(), method,
|
|
||||||
requestURL.toString(), kmsResponse.msg);
|
KMSWebApp.getKMSAudit().unauthenticated(
|
||||||
|
request.getRemoteHost(), method, requestURL.toString(),
|
||||||
|
kmsResponse.msg);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
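With this change authentication failures are reported through the shared KMSAudit instance. Since no op or key is attached, the entry takes the non-aggregated branch of KMSAudit.op() and comes out roughly as follows (host, method, URL and message are illustrative values, not taken from the patch):

  UNAUTHENTICATED RemoteHost:127.0.0.1 Method:GET URL:https://kms-host:16000/kms/v1/keys ErrorMsg:'Authentication required'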
@ -43,12 +43,17 @@ public class KMSConfiguration {
|
||||||
// Timeout for the Current Key cache
|
// Timeout for the Current Key cache
|
||||||
public static final String CURR_KEY_CACHE_TIMEOUT_KEY = CONFIG_PREFIX +
|
public static final String CURR_KEY_CACHE_TIMEOUT_KEY = CONFIG_PREFIX +
|
||||||
"current.key.cache.timeout.ms";
|
"current.key.cache.timeout.ms";
|
||||||
|
// Delay for Audit logs that need aggregation
|
||||||
|
public static final String KMS_AUDIT_AGGREGATION_DELAY = CONFIG_PREFIX +
|
||||||
|
"aggregation.delay.ms";
|
||||||
|
|
||||||
public static final boolean KEY_CACHE_ENABLE_DEFAULT = true;
|
public static final boolean KEY_CACHE_ENABLE_DEFAULT = true;
|
||||||
// 10 mins
|
// 10 mins
|
||||||
public static final long KEY_CACHE_TIMEOUT_DEFAULT = 10 * 60 * 1000;
|
public static final long KEY_CACHE_TIMEOUT_DEFAULT = 10 * 60 * 1000;
|
||||||
// 30 secs
|
// 30 secs
|
||||||
public static final long CURR_KEY_CACHE_TIMEOUT_DEFAULT = 30 * 1000;
|
public static final long CURR_KEY_CACHE_TIMEOUT_DEFAULT = 30 * 1000;
|
||||||
|
// 10 secs
|
||||||
|
public static final long KMS_AUDIT_AGGREGATION_DELAY_DEFAULT = 10000;
|
||||||
|
|
||||||
static Configuration getConfiguration(boolean loadHadoopDefaults,
|
static Configuration getConfiguration(boolean loadHadoopDefaults,
|
||||||
String ... resources) {
|
String ... resources) {
|
||||||
|
|
|
@ -20,9 +20,11 @@ package org.apache.hadoop.crypto.key.kms.server;
|
||||||
import org.apache.hadoop.classification.InterfaceAudience;
|
import org.apache.hadoop.classification.InterfaceAudience;
|
||||||
|
|
||||||
import com.sun.jersey.api.container.ContainerException;
|
import com.sun.jersey.api.container.ContainerException;
|
||||||
|
|
||||||
import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
|
import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
|
||||||
import org.apache.hadoop.security.AccessControlException;
|
import org.apache.hadoop.security.AccessControlException;
|
||||||
import org.apache.hadoop.security.authentication.client.AuthenticationException;
|
import org.apache.hadoop.security.authentication.client.AuthenticationException;
|
||||||
|
import org.apache.hadoop.security.authorize.AuthorizationException;
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
import org.slf4j.LoggerFactory;
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
|
@ -30,6 +32,7 @@ import javax.ws.rs.core.MediaType;
|
||||||
import javax.ws.rs.core.Response;
|
import javax.ws.rs.core.Response;
|
||||||
import javax.ws.rs.ext.ExceptionMapper;
|
import javax.ws.rs.ext.ExceptionMapper;
|
||||||
import javax.ws.rs.ext.Provider;
|
import javax.ws.rs.ext.Provider;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.security.Principal;
|
import java.security.Principal;
|
||||||
import java.util.LinkedHashMap;
|
import java.util.LinkedHashMap;
|
||||||
|
@ -83,6 +86,10 @@ public class KMSExceptionsProvider implements ExceptionMapper<Exception> {
|
||||||
status = Response.Status.FORBIDDEN;
|
status = Response.Status.FORBIDDEN;
|
||||||
// we don't audit here because we did it already when checking access
|
// we don't audit here because we did it already when checking access
|
||||||
doAudit = false;
|
doAudit = false;
|
||||||
|
} else if (throwable instanceof AuthorizationException) {
|
||||||
|
status = Response.Status.UNAUTHORIZED;
|
||||||
|
// we don't audit here because we did it already when checking access
|
||||||
|
doAudit = false;
|
||||||
} else if (throwable instanceof AccessControlException) {
|
} else if (throwable instanceof AccessControlException) {
|
||||||
status = Response.Status.FORBIDDEN;
|
status = Response.Status.FORBIDDEN;
|
||||||
} else if (exception instanceof IOException) {
|
} else if (exception instanceof IOException) {
|
||||||
|
@ -95,7 +102,8 @@ public class KMSExceptionsProvider implements ExceptionMapper<Exception> {
|
||||||
status = Response.Status.INTERNAL_SERVER_ERROR;
|
status = Response.Status.INTERNAL_SERVER_ERROR;
|
||||||
}
|
}
|
||||||
if (doAudit) {
|
if (doAudit) {
|
||||||
KMSAudit.error(KMSMDCFilter.getPrincipal(), KMSMDCFilter.getMethod(),
|
KMSWebApp.getKMSAudit().error(KMSMDCFilter.getPrincipal(),
|
||||||
|
KMSMDCFilter.getMethod(),
|
||||||
KMSMDCFilter.getURL(), getOneLineMessage(exception));
|
KMSMDCFilter.getURL(), getOneLineMessage(exception));
|
||||||
}
|
}
|
||||||
return createResponse(status, throwable);
|
return createResponse(status, throwable);
|
||||||
|
|
|
@ -76,6 +76,7 @@ public class KMSWebApp implements ServletContextListener {
|
||||||
private static Meter decryptEEKCallsMeter;
|
private static Meter decryptEEKCallsMeter;
|
||||||
private static Meter generateEEKCallsMeter;
|
private static Meter generateEEKCallsMeter;
|
||||||
private static Meter invalidCallsMeter;
|
private static Meter invalidCallsMeter;
|
||||||
|
private static KMSAudit kmsAudit;
|
||||||
private static KeyProviderCryptoExtension keyProviderCryptoExtension;
|
private static KeyProviderCryptoExtension keyProviderCryptoExtension;
|
||||||
|
|
||||||
static {
|
static {
|
||||||
|
@ -144,6 +145,11 @@ public class KMSWebApp implements ServletContextListener {
|
||||||
unauthenticatedCallsMeter = metricRegistry.register(
|
unauthenticatedCallsMeter = metricRegistry.register(
|
||||||
UNAUTHENTICATED_CALLS_METER, new Meter());
|
UNAUTHENTICATED_CALLS_METER, new Meter());
|
||||||
|
|
||||||
|
kmsAudit =
|
||||||
|
new KMSAudit(kmsConf.getLong(
|
||||||
|
KMSConfiguration.KMS_AUDIT_AGGREGATION_DELAY,
|
||||||
|
KMSConfiguration.KMS_AUDIT_AGGREGATION_DELAY_DEFAULT));
|
||||||
|
|
||||||
// this is required for the JMXJsonServlet to work properly.
|
// this is required for the JMXJsonServlet to work properly.
|
||||||
// the JMXJsonServlet is behind the authentication filter,
|
// the JMXJsonServlet is behind the authentication filter,
|
||||||
// thus the '*' ACL.
|
// thus the '*' ACL.
|
||||||
|
@ -199,6 +205,7 @@ public class KMSWebApp implements ServletContextListener {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void contextDestroyed(ServletContextEvent sce) {
|
public void contextDestroyed(ServletContextEvent sce) {
|
||||||
|
kmsAudit.shutdown();
|
||||||
acls.stopReloader();
|
acls.stopReloader();
|
||||||
jmxReporter.stop();
|
jmxReporter.stop();
|
||||||
jmxReporter.close();
|
jmxReporter.close();
|
||||||
|
@ -245,4 +252,8 @@ public class KMSWebApp implements ServletContextListener {
|
||||||
public static KeyProviderCryptoExtension getKeyProvider() {
|
public static KeyProviderCryptoExtension getKeyProvider() {
|
||||||
return keyProviderCryptoExtension;
|
return keyProviderCryptoExtension;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public static KMSAudit getKMSAudit() {
|
||||||
|
return kmsAudit;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -104,6 +104,25 @@ Hadoop Key Management Server (KMS) - Documentation Sets ${project.version}
|
||||||
</property>
|
</property>
|
||||||
+---+
|
+---+
|
||||||
|
|
||||||
|
** KMS Aggregated Audit logs
|
||||||
|
|
||||||
|
Audit logs are aggregated for API accesses to the GET_KEY_VERSION,
|
||||||
|
GET_CURRENT_KEY, DECRYPT_EEK, GENERATE_EEK operations.
|
||||||
|
|
||||||
|
Entries are grouped by the combined (user, key, operation) key for a
|
||||||
|
configurable aggregation interval, after which the number of accesses to the
|
||||||
|
specified end-point by the user for the given key is flushed to the audit log.
|
||||||
|
|
||||||
|
The aggregation interval is configured via the following property:
|
||||||
|
|
||||||
|
+---+
|
||||||
|
<property>
|
||||||
|
<name>hadoop.kms.aggregation.delay.ms</name>
|
||||||
|
<value>10000</value>
|
||||||
|
</property>
|
||||||
|
+---+
|
||||||
|
|
||||||
|
|
||||||
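  For reference, an aggregated entry in the kms-audit log then looks roughly as
  follows (the format comes from KMSAudit.logEvent in this same change; the
  values shown are illustrative):

+---+
OK[op=DECRYPT_EEK, key=key1, user=alice, accessCount=6, interval=10043ms]
+---+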
** Start/Stop the KMS
|
** Start/Stop the KMS
|
||||||
|
|
||||||
To start/stop KMS use KMS's bin/kms.sh script. For example:
|
To start/stop KMS use KMS's bin/kms.sh script. For example:
|
||||||
|
|
|
@ -0,0 +1,134 @@
|
||||||
|
/**
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.crypto.key.kms.server;
|
||||||
|
|
||||||
|
import java.io.ByteArrayOutputStream;
|
||||||
|
import java.io.FilterOutputStream;
|
||||||
|
import java.io.OutputStream;
|
||||||
|
import java.io.PrintStream;
|
||||||
|
import java.security.Principal;
|
||||||
|
|
||||||
|
import org.apache.log4j.LogManager;
|
||||||
|
import org.apache.log4j.PropertyConfigurator;
|
||||||
|
import org.junit.After;
|
||||||
|
import org.junit.Assert;
|
||||||
|
import org.junit.Before;
|
||||||
|
import org.junit.Test;
|
||||||
|
import org.mockito.Mockito;
|
||||||
|
|
||||||
|
public class TestKMSAudit {
|
||||||
|
|
||||||
|
private PrintStream originalOut;
|
||||||
|
private ByteArrayOutputStream memOut;
|
||||||
|
private FilterOut filterOut;
|
||||||
|
private PrintStream capturedOut;
|
||||||
|
|
||||||
|
private KMSAudit kmsAudit;
|
||||||
|
|
||||||
|
private static class FilterOut extends FilterOutputStream {
|
||||||
|
public FilterOut(OutputStream out) {
|
||||||
|
super(out);
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setOutputStream(OutputStream out) {
|
||||||
|
this.out = out;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Before
|
||||||
|
public void setUp() {
|
||||||
|
originalOut = System.err;
|
||||||
|
memOut = new ByteArrayOutputStream();
|
||||||
|
filterOut = new FilterOut(memOut);
|
||||||
|
capturedOut = new PrintStream(filterOut);
|
||||||
|
System.setErr(capturedOut);
|
||||||
|
PropertyConfigurator.configure(Thread.currentThread().
|
||||||
|
getContextClassLoader()
|
||||||
|
.getResourceAsStream("log4j-kmsaudit.properties"));
|
||||||
|
this.kmsAudit = new KMSAudit(1000);
|
||||||
|
}
|
||||||
|
|
||||||
|
@After
|
||||||
|
public void cleanUp() {
|
||||||
|
System.setErr(originalOut);
|
||||||
|
LogManager.resetConfiguration();
|
||||||
|
kmsAudit.shutdown();
|
||||||
|
}
|
||||||
|
|
||||||
|
private String getAndResetLogOutput() {
|
||||||
|
capturedOut.flush();
|
||||||
|
String logOutput = new String(memOut.toByteArray());
|
||||||
|
memOut = new ByteArrayOutputStream();
|
||||||
|
filterOut.setOutputStream(memOut);
|
||||||
|
return logOutput;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testAggregation() throws Exception {
|
||||||
|
Principal luser = Mockito.mock(Principal.class);
|
||||||
|
Mockito.when(luser.getName()).thenReturn("luser");
|
||||||
|
kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
|
||||||
|
kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
|
||||||
|
kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
|
||||||
|
kmsAudit.ok(luser, KMS.DELETE_KEY, "k1", "testmsg");
|
||||||
|
kmsAudit.ok(luser, KMS.ROLL_NEW_VERSION, "k1", "testmsg");
|
||||||
|
kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
|
||||||
|
kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
|
||||||
|
kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
|
||||||
|
Thread.sleep(1500);
|
||||||
|
kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
|
||||||
|
Thread.sleep(1500);
|
||||||
|
String out = getAndResetLogOutput();
|
||||||
|
System.out.println(out);
|
||||||
|
Assert.assertTrue(
|
||||||
|
out.matches(
|
||||||
|
"OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg"
|
||||||
|
// Not aggregated !!
|
||||||
|
+ "OK\\[op=DELETE_KEY, key=k1, user=luser\\] testmsg"
|
||||||
|
+ "OK\\[op=ROLL_NEW_VERSION, key=k1, user=luser\\] testmsg"
|
||||||
|
// Aggregated
|
||||||
|
+ "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=6, interval=[^m]{1,4}ms\\] testmsg"
|
||||||
|
+ "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg"));
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testAggregationUnauth() throws Exception {
|
||||||
|
Principal luser = Mockito.mock(Principal.class);
|
||||||
|
Mockito.when(luser.getName()).thenReturn("luser");
|
||||||
|
kmsAudit.unauthorized(luser, KMS.GENERATE_EEK, "k2");
|
||||||
|
Thread.sleep(1000);
|
||||||
|
kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
|
||||||
|
kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
|
||||||
|
kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
|
||||||
|
kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
|
||||||
|
kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
|
||||||
|
kmsAudit.unauthorized(luser, KMS.GENERATE_EEK, "k3");
|
||||||
|
kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
|
||||||
|
Thread.sleep(2000);
|
||||||
|
String out = getAndResetLogOutput();
|
||||||
|
System.out.println(out);
|
||||||
|
Assert.assertTrue(
|
||||||
|
out.matches(
|
||||||
|
"UNAUTHORIZED\\[op=GENERATE_EEK, key=k2, user=luser\\] "
|
||||||
|
+ "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg"
|
||||||
|
+ "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=5, interval=[^m]{1,4}ms\\] testmsg"
|
||||||
|
+ "UNAUTHORIZED\\[op=GENERATE_EEK, key=k3, user=luser\\] "
|
||||||
|
+ "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg"));
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,25 @@
|
||||||
|
#
|
||||||
|
# Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
# or more contributor license agreements. See the NOTICE file
|
||||||
|
# distributed with this work for additional information
|
||||||
|
# regarding copyright ownership. The ASF licenses this file
|
||||||
|
# to you under the Apache License, Version 2.0 (the
|
||||||
|
# "License"); you may not use this file except in compliance
|
||||||
|
# with the License. You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
|
||||||
|
# LOG Appender
|
||||||
|
log4j.appender.kms-audit=org.apache.log4j.ConsoleAppender
|
||||||
|
log4j.appender.kms-audit.Target=System.err
|
||||||
|
log4j.appender.kms-audit.layout=org.apache.log4j.PatternLayout
|
||||||
|
log4j.appender.kms-audit.layout.ConversionPattern=%m
|
||||||
|
|
||||||
|
log4j.rootLogger=INFO, kms-audit
|
|
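One detail of this test-only appender: the ConversionPattern is just %m with no trailing %n, so consecutive audit entries reach the captured System.err stream without line separators, which is why the regular expressions in TestKMSAudit above match the output as one concatenated string. A layout meant for a real deployment would normally add a timestamp and a newline, for example (illustrative, not part of this patch):

  log4j.appender.kms-audit.layout.ConversionPattern=%d{ISO8601} %m%n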
@ -222,6 +222,17 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
writeManager.startAsyncDataSerivce();
|
writeManager.startAsyncDataSerivce();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Checks the type of IOException and maps it to appropriate Nfs3Status code.
|
||||||
|
private int mapErrorStatus(IOException e) {
|
||||||
|
if (e instanceof FileNotFoundException) {
|
||||||
|
return Nfs3Status.NFS3ERR_STALE;
|
||||||
|
} else if (e instanceof AccessControlException) {
|
||||||
|
return Nfs3Status.NFS3ERR_ACCES;
|
||||||
|
} else {
|
||||||
|
return Nfs3Status.NFS3ERR_IO;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/******************************************************
|
/******************************************************
|
||||||
* RPC call handlers
|
* RPC call handlers
|
||||||
******************************************************/
|
******************************************************/
|
||||||
|
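The new mapErrorStatus helper centralizes the instanceof ladders that each handler's catch block used to carry. A self-contained illustration of the same mapping follows; the class name, the java.nio AccessDeniedException stand-in and the literal status values are mine (copied from RFC 1813), while the patch itself uses Hadoop's AccessControlException and the Nfs3Status constants:

  import java.io.FileNotFoundException;
  import java.io.IOException;
  import java.nio.file.AccessDeniedException;

  public class MapErrorStatusSketch {
    // NFSv3 status codes as defined by RFC 1813.
    static final int NFS3ERR_IO = 5, NFS3ERR_ACCES = 13, NFS3ERR_STALE = 70;

    static int mapErrorStatus(IOException e) {
      if (e instanceof FileNotFoundException) {
        return NFS3ERR_STALE;   // handle points at something that no longer exists
      } else if (e instanceof AccessDeniedException) {
        return NFS3ERR_ACCES;   // caller lacks permission
      } else {
        return NFS3ERR_IO;      // everything else is a generic I/O failure
      }
    }

    public static void main(String[] args) {
      System.out.println(mapErrorStatus(new FileNotFoundException("gone")));    // 70
      System.out.println(mapErrorStatus(new AccessDeniedException("/secret"))); // 13
      System.out.println(mapErrorStatus(new IOException("disk error")));        // 5
    }
  }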
@ -236,14 +247,19 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public GETATTR3Response getattr(XDR xdr, RpcInfo info) {
|
public GETATTR3Response getattr(XDR xdr, RpcInfo info) {
|
||||||
|
return getattr(xdr, getSecurityHandler(info), info.remoteAddress());
|
||||||
|
}
|
||||||
|
|
||||||
|
@VisibleForTesting
|
||||||
|
GETATTR3Response getattr(XDR xdr, SecurityHandler securityHandler,
|
||||||
|
SocketAddress remoteAddress) {
|
||||||
GETATTR3Response response = new GETATTR3Response(Nfs3Status.NFS3_OK);
|
GETATTR3Response response = new GETATTR3Response(Nfs3Status.NFS3_OK);
|
||||||
|
|
||||||
if (!checkAccessPrivilege(info, AccessPrivilege.READ_ONLY)) {
|
if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
|
||||||
response.setStatus(Nfs3Status.NFS3ERR_ACCES);
|
response.setStatus(Nfs3Status.NFS3ERR_ACCES);
|
||||||
return response;
|
return response;
|
||||||
}
|
}
|
||||||
|
|
||||||
SecurityHandler securityHandler = getSecurityHandler(info);
|
|
||||||
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
|
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
|
||||||
if (dfsClient == null) {
|
if (dfsClient == null) {
|
||||||
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
|
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
|
||||||
|
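From here on each NFS3 handler gets the same treatment as getattr above: the public entry point keeps its RpcInfo signature and immediately delegates to a @VisibleForTesting overload taking the SecurityHandler and SocketAddress directly, so a unit test can drive the handler without constructing an RpcInfo. A schematic of the pattern with made-up types, not the real Hadoop classes:

  import java.net.InetSocketAddress;
  import java.net.SocketAddress;

  public class HandlerRefactorSketch {
    // Stand-in for the RpcInfo/SecurityHandler pair used in the real code.
    interface RpcInfo { SocketAddress remoteAddress(); String user(); }

    // Public entry point keeps its original shape...
    public String getattr(RpcInfo info) {
      return getattr(info.user(), info.remoteAddress());
    }

    // ...and delegates to a package-private overload that takes exactly what the
    // handler needs, which a test can supply directly (for example via Mockito).
    String getattr(String user, SocketAddress remoteAddress) {
      return "GETATTR by " + user + " from " + remoteAddress;
    }

    public static void main(String[] args) {
      HandlerRefactorSketch s = new HandlerRefactorSketch();
      System.out.println(s.getattr("alice", new InetSocketAddress("127.0.0.1", 2049)));
    }
  }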
@ -280,7 +296,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
}
|
}
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
LOG.info("Can't get file attribute, fileId=" + handle.getFileId(), e);
|
LOG.info("Can't get file attribute, fileId=" + handle.getFileId(), e);
|
||||||
response.setStatus(Nfs3Status.NFS3ERR_IO);
|
int status = mapErrorStatus(e);
|
||||||
|
response.setStatus(status);
|
||||||
return response;
|
return response;
|
||||||
}
|
}
|
||||||
if (attrs == null) {
|
if (attrs == null) {
|
||||||
|
@ -328,8 +345,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public SETATTR3Response setattr(XDR xdr, RpcInfo info) {
|
public SETATTR3Response setattr(XDR xdr, RpcInfo info) {
|
||||||
|
return setattr(xdr, getSecurityHandler(info), info.remoteAddress());
|
||||||
|
}
|
||||||
|
|
||||||
|
@VisibleForTesting
|
||||||
|
SETATTR3Response setattr(XDR xdr, SecurityHandler securityHandler,
|
||||||
|
SocketAddress remoteAddress) {
|
||||||
SETATTR3Response response = new SETATTR3Response(Nfs3Status.NFS3_OK);
|
SETATTR3Response response = new SETATTR3Response(Nfs3Status.NFS3_OK);
|
||||||
SecurityHandler securityHandler = getSecurityHandler(info);
|
|
||||||
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
|
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
|
||||||
if (dfsClient == null) {
|
if (dfsClient == null) {
|
||||||
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
|
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
|
||||||
|
@ -375,7 +397,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
}
|
}
|
||||||
|
|
||||||
// check the write access privilege
|
// check the write access privilege
|
||||||
if (!checkAccessPrivilege(info, AccessPrivilege.READ_WRITE)) {
|
if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
|
||||||
return new SETATTR3Response(Nfs3Status.NFS3ERR_ACCES, new WccData(
|
return new SETATTR3Response(Nfs3Status.NFS3ERR_ACCES, new WccData(
|
||||||
preOpWcc, preOpAttr));
|
preOpWcc, preOpAttr));
|
||||||
}
|
}
|
||||||
|
@ -394,24 +416,27 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
} catch (IOException e1) {
|
} catch (IOException e1) {
|
||||||
LOG.info("Can't get postOpAttr for fileIdPath: " + fileIdPath, e1);
|
LOG.info("Can't get postOpAttr for fileIdPath: " + fileIdPath, e1);
|
||||||
}
|
}
|
||||||
if (e instanceof AccessControlException) {
|
|
||||||
return new SETATTR3Response(Nfs3Status.NFS3ERR_ACCES, wccData);
|
int status = mapErrorStatus(e);
|
||||||
} else {
|
return new SETATTR3Response(status, wccData);
|
||||||
return new SETATTR3Response(Nfs3Status.NFS3ERR_IO, wccData);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public LOOKUP3Response lookup(XDR xdr, RpcInfo info) {
|
public LOOKUP3Response lookup(XDR xdr, RpcInfo info) {
|
||||||
|
return lookup(xdr, getSecurityHandler(info), info.remoteAddress());
|
||||||
|
}
|
||||||
|
|
||||||
|
@VisibleForTesting
|
||||||
|
LOOKUP3Response lookup(XDR xdr, SecurityHandler securityHandler,
|
||||||
|
SocketAddress remoteAddress) {
|
||||||
LOOKUP3Response response = new LOOKUP3Response(Nfs3Status.NFS3_OK);
|
LOOKUP3Response response = new LOOKUP3Response(Nfs3Status.NFS3_OK);
|
||||||
|
|
||||||
if (!checkAccessPrivilege(info, AccessPrivilege.READ_ONLY)) {
|
if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
|
||||||
response.setStatus(Nfs3Status.NFS3ERR_ACCES);
|
response.setStatus(Nfs3Status.NFS3ERR_ACCES);
|
||||||
return response;
|
return response;
|
||||||
}
|
}
|
||||||
|
|
||||||
SecurityHandler securityHandler = getSecurityHandler(info);
|
|
||||||
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
|
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
|
||||||
if (dfsClient == null) {
|
if (dfsClient == null) {
|
||||||
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
|
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
|
||||||
|
@ -460,20 +485,26 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
|
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
LOG.warn("Exception ", e);
|
LOG.warn("Exception ", e);
|
||||||
return new LOOKUP3Response(Nfs3Status.NFS3ERR_IO);
|
int status = mapErrorStatus(e);
|
||||||
|
return new LOOKUP3Response(status);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public ACCESS3Response access(XDR xdr, RpcInfo info) {
|
public ACCESS3Response access(XDR xdr, RpcInfo info) {
|
||||||
|
return access(xdr, getSecurityHandler(info), info.remoteAddress());
|
||||||
|
}
|
||||||
|
|
||||||
|
@VisibleForTesting
|
||||||
|
ACCESS3Response access(XDR xdr, SecurityHandler securityHandler,
|
||||||
|
SocketAddress remoteAddress) {
|
||||||
ACCESS3Response response = new ACCESS3Response(Nfs3Status.NFS3_OK);
|
ACCESS3Response response = new ACCESS3Response(Nfs3Status.NFS3_OK);
|
||||||
|
|
||||||
if (!checkAccessPrivilege(info, AccessPrivilege.READ_ONLY)) {
|
if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
|
||||||
response.setStatus(Nfs3Status.NFS3ERR_ACCES);
|
response.setStatus(Nfs3Status.NFS3ERR_ACCES);
|
||||||
return response;
|
return response;
|
||||||
}
|
}
|
||||||
|
|
||||||
SecurityHandler securityHandler = getSecurityHandler(info);
|
|
||||||
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
|
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
|
||||||
if (dfsClient == null) {
|
if (dfsClient == null) {
|
||||||
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
|
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
|
||||||
|
@ -521,20 +552,26 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
}
|
}
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
LOG.warn("Exception ", e);
|
LOG.warn("Exception ", e);
|
||||||
return new ACCESS3Response(Nfs3Status.NFS3ERR_IO);
|
int status = mapErrorStatus(e);
|
||||||
|
return new ACCESS3Response(status);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public READLINK3Response readlink(XDR xdr, RpcInfo info) {
|
public READLINK3Response readlink(XDR xdr, RpcInfo info) {
|
||||||
|
return readlink(xdr, getSecurityHandler(info), info.remoteAddress());
|
||||||
|
}
|
||||||
|
|
||||||
|
@VisibleForTesting
|
||||||
|
READLINK3Response readlink(XDR xdr, SecurityHandler securityHandler,
|
||||||
|
SocketAddress remoteAddress) {
|
||||||
READLINK3Response response = new READLINK3Response(Nfs3Status.NFS3_OK);
|
READLINK3Response response = new READLINK3Response(Nfs3Status.NFS3_OK);
|
||||||
|
|
||||||
if (!checkAccessPrivilege(info, AccessPrivilege.READ_ONLY)) {
|
if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
|
||||||
response.setStatus(Nfs3Status.NFS3ERR_ACCES);
|
response.setStatus(Nfs3Status.NFS3ERR_ACCES);
|
||||||
return response;
|
return response;
|
||||||
}
|
}
|
||||||
|
|
||||||
SecurityHandler securityHandler = getSecurityHandler(info);
|
|
||||||
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
|
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
|
||||||
if (dfsClient == null) {
|
if (dfsClient == null) {
|
||||||
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
|
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
|
||||||
|
@ -588,20 +625,14 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
|
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
LOG.warn("Readlink error: " + e.getClass(), e);
|
LOG.warn("Readlink error: " + e.getClass(), e);
|
||||||
if (e instanceof FileNotFoundException) {
|
int status = mapErrorStatus(e);
|
||||||
return new READLINK3Response(Nfs3Status.NFS3ERR_STALE);
|
return new READLINK3Response(status);
|
||||||
} else if (e instanceof AccessControlException) {
|
|
||||||
return new READLINK3Response(Nfs3Status.NFS3ERR_ACCES);
|
|
||||||
}
|
|
||||||
return new READLINK3Response(Nfs3Status.NFS3ERR_IO);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public READ3Response read(XDR xdr, RpcInfo info) {
|
public READ3Response read(XDR xdr, RpcInfo info) {
|
||||||
SecurityHandler securityHandler = getSecurityHandler(info);
|
return read(xdr, getSecurityHandler(info), info.remoteAddress());
|
||||||
SocketAddress remoteAddress = info.remoteAddress();
|
|
||||||
return read(xdr, securityHandler, remoteAddress);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@VisibleForTesting
|
@VisibleForTesting
|
||||||
|
@ -725,7 +756,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
LOG.warn("Read error: " + e.getClass() + " offset: " + offset
|
LOG.warn("Read error: " + e.getClass() + " offset: " + offset
|
||||||
+ " count: " + count, e);
|
+ " count: " + count, e);
|
||||||
return new READ3Response(Nfs3Status.NFS3ERR_IO);
|
int status = mapErrorStatus(e);
|
||||||
|
return new READ3Response(status);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -807,8 +839,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
}
|
}
|
||||||
WccAttr attr = preOpAttr == null ? null : Nfs3Utils.getWccAttr(preOpAttr);
|
WccAttr attr = preOpAttr == null ? null : Nfs3Utils.getWccAttr(preOpAttr);
|
||||||
WccData fileWcc = new WccData(attr, postOpAttr);
|
WccData fileWcc = new WccData(attr, postOpAttr);
|
||||||
return new WRITE3Response(Nfs3Status.NFS3ERR_IO, fileWcc, 0,
|
|
||||||
request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
|
int status = mapErrorStatus(e);
|
||||||
|
return new WRITE3Response(status, fileWcc, 0, request.getStableHow(),
|
||||||
|
Nfs3Constant.WRITE_COMMIT_VERF);
|
||||||
}
|
}
|
||||||
|
|
||||||
return null;
|
return null;
|
||||||
|
@ -816,9 +850,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public CREATE3Response create(XDR xdr, RpcInfo info) {
|
public CREATE3Response create(XDR xdr, RpcInfo info) {
|
||||||
SecurityHandler securityHandler = getSecurityHandler(info);
|
return create(xdr, getSecurityHandler(info), info.remoteAddress());
|
||||||
SocketAddress remoteAddress = info.remoteAddress();
|
|
||||||
return create(xdr, securityHandler, remoteAddress);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@VisibleForTesting
|
@VisibleForTesting
|
||||||
|
@ -940,13 +972,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
+ dirHandle.getFileId(), e1);
|
+ dirHandle.getFileId(), e1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (e instanceof AccessControlException) {
|
|
||||||
return new CREATE3Response(Nfs3Status.NFS3ERR_ACCES, fileHandle,
|
int status = mapErrorStatus(e);
|
||||||
postOpObjAttr, dirWcc);
|
return new CREATE3Response(status, fileHandle, postOpObjAttr, dirWcc);
|
||||||
} else {
|
|
||||||
return new CREATE3Response(Nfs3Status.NFS3ERR_IO, fileHandle,
|
|
||||||
postOpObjAttr, dirWcc);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return new CREATE3Response(Nfs3Status.NFS3_OK, fileHandle, postOpObjAttr,
|
return new CREATE3Response(Nfs3Status.NFS3_OK, fileHandle, postOpObjAttr,
|
||||||
|
@ -955,8 +983,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public MKDIR3Response mkdir(XDR xdr, RpcInfo info) {
|
public MKDIR3Response mkdir(XDR xdr, RpcInfo info) {
|
||||||
|
return mkdir(xdr, getSecurityHandler(info), info.remoteAddress());
|
||||||
|
}
|
||||||
|
|
||||||
|
@VisibleForTesting
|
||||||
|
MKDIR3Response mkdir(XDR xdr, SecurityHandler securityHandler,
|
||||||
|
SocketAddress remoteAddress) {
|
||||||
MKDIR3Response response = new MKDIR3Response(Nfs3Status.NFS3_OK);
|
MKDIR3Response response = new MKDIR3Response(Nfs3Status.NFS3_OK);
|
||||||
SecurityHandler securityHandler = getSecurityHandler(info);
|
|
||||||
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
|
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
|
||||||
if (dfsClient == null) {
|
if (dfsClient == null) {
|
||||||
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
|
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
|
||||||
|
@ -992,7 +1025,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
return new MKDIR3Response(Nfs3Status.NFS3ERR_STALE);
|
return new MKDIR3Response(Nfs3Status.NFS3ERR_STALE);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!checkAccessPrivilege(info, AccessPrivilege.READ_WRITE)) {
|
if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
|
||||||
return new MKDIR3Response(Nfs3Status.NFS3ERR_ACCES, null, preOpDirAttr,
|
return new MKDIR3Response(Nfs3Status.NFS3ERR_ACCES, null, preOpDirAttr,
|
||||||
new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), preOpDirAttr));
|
new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), preOpDirAttr));
|
||||||
}
|
}
|
||||||
|
@ -1032,15 +1065,11 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e);
|
LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
|
WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
|
||||||
postOpDirAttr);
|
postOpDirAttr);
|
||||||
if (e instanceof AccessControlException) {
|
int status = mapErrorStatus(e);
|
||||||
return new MKDIR3Response(Nfs3Status.NFS3ERR_PERM, objFileHandle,
|
return new MKDIR3Response(status, objFileHandle, postOpObjAttr, dirWcc);
|
||||||
postOpObjAttr, dirWcc);
|
|
||||||
} else {
|
|
||||||
return new MKDIR3Response(Nfs3Status.NFS3ERR_IO, objFileHandle,
|
|
||||||
postOpObjAttr, dirWcc);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1055,7 +1084,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
}
|
}
|
||||||
|
|
||||||
@VisibleForTesting
|
@VisibleForTesting
|
||||||
REMOVE3Response remove(XDR xdr, SecurityHandler securityHandler, SocketAddress remoteAddress) {
|
REMOVE3Response remove(XDR xdr, SecurityHandler securityHandler,
|
||||||
|
SocketAddress remoteAddress) {
|
||||||
REMOVE3Response response = new REMOVE3Response(Nfs3Status.NFS3_OK);
|
REMOVE3Response response = new REMOVE3Response(Nfs3Status.NFS3_OK);
|
||||||
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
|
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
|
||||||
if (dfsClient == null) {
|
if (dfsClient == null) {
|
||||||
|
@ -1120,20 +1150,23 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e1);
|
LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
|
WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
|
||||||
postOpDirAttr);
|
postOpDirAttr);
|
||||||
if (e instanceof AccessControlException) {
|
int status = mapErrorStatus(e);
|
||||||
return new REMOVE3Response(Nfs3Status.NFS3ERR_PERM, dirWcc);
|
return new REMOVE3Response(status, dirWcc);
|
||||||
} else {
|
|
||||||
return new REMOVE3Response(Nfs3Status.NFS3ERR_IO, dirWcc);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public RMDIR3Response rmdir(XDR xdr, RpcInfo info) {
|
public RMDIR3Response rmdir(XDR xdr, RpcInfo info) {
|
||||||
|
return rmdir(xdr, getSecurityHandler(info), info.remoteAddress());
|
||||||
|
}
|
||||||
|
|
||||||
|
@VisibleForTesting
|
||||||
|
RMDIR3Response rmdir(XDR xdr, SecurityHandler securityHandler,
|
||||||
|
SocketAddress remoteAddress) {
|
||||||
RMDIR3Response response = new RMDIR3Response(Nfs3Status.NFS3_OK);
|
RMDIR3Response response = new RMDIR3Response(Nfs3Status.NFS3_OK);
|
||||||
SecurityHandler securityHandler = getSecurityHandler(info);
|
|
||||||
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
|
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
|
||||||
if (dfsClient == null) {
|
if (dfsClient == null) {
|
||||||
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
|
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
|
||||||
|
@ -1167,7 +1200,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
|
|
||||||
WccData errWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
|
WccData errWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
|
||||||
preOpDirAttr);
|
preOpDirAttr);
|
||||||
if (!checkAccessPrivilege(info, AccessPrivilege.READ_WRITE)) {
|
if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
|
||||||
return new RMDIR3Response(Nfs3Status.NFS3ERR_ACCES, errWcc);
|
return new RMDIR3Response(Nfs3Status.NFS3ERR_ACCES, errWcc);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1202,20 +1235,23 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e1);
|
LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
|
WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
|
||||||
postOpDirAttr);
|
postOpDirAttr);
|
||||||
if (e instanceof AccessControlException) {
|
int status = mapErrorStatus(e);
|
||||||
return new RMDIR3Response(Nfs3Status.NFS3ERR_PERM, dirWcc);
|
return new RMDIR3Response(status, dirWcc);
|
||||||
} else {
|
|
||||||
return new RMDIR3Response(Nfs3Status.NFS3ERR_IO, dirWcc);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public RENAME3Response rename(XDR xdr, RpcInfo info) {
|
public RENAME3Response rename(XDR xdr, RpcInfo info) {
|
||||||
|
return rename(xdr, getSecurityHandler(info), info.remoteAddress());
|
||||||
|
}
|
||||||
|
|
||||||
|
@VisibleForTesting
|
||||||
|
RENAME3Response rename(XDR xdr, SecurityHandler securityHandler,
|
||||||
|
SocketAddress remoteAddress) {
|
||||||
RENAME3Response response = new RENAME3Response(Nfs3Status.NFS3_OK);
|
RENAME3Response response = new RENAME3Response(Nfs3Status.NFS3_OK);
|
||||||
SecurityHandler securityHandler = getSecurityHandler(info);
|
|
||||||
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
|
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
|
||||||
if (dfsClient == null) {
|
if (dfsClient == null) {
|
||||||
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
|
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
|
||||||
|
@ -1259,7 +1295,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
return new RENAME3Response(Nfs3Status.NFS3ERR_STALE);
|
return new RENAME3Response(Nfs3Status.NFS3ERR_STALE);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!checkAccessPrivilege(info, AccessPrivilege.READ_WRITE)) {
|
if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
|
||||||
WccData fromWcc = new WccData(Nfs3Utils.getWccAttr(fromPreOpAttr),
|
WccData fromWcc = new WccData(Nfs3Utils.getWccAttr(fromPreOpAttr),
|
||||||
fromPreOpAttr);
|
fromPreOpAttr);
|
||||||
WccData toWcc = new WccData(Nfs3Utils.getWccAttr(toPreOpAttr),
|
WccData toWcc = new WccData(Nfs3Utils.getWccAttr(toPreOpAttr),
|
||||||
|
@ -1291,25 +1327,27 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
LOG.info("Can't get postOpDirAttr for " + fromDirFileIdPath + " or"
|
LOG.info("Can't get postOpDirAttr for " + fromDirFileIdPath + " or"
|
||||||
+ toDirFileIdPath, e1);
|
+ toDirFileIdPath, e1);
|
||||||
}
|
}
|
||||||
if (e instanceof AccessControlException) {
|
|
||||||
return new RENAME3Response(Nfs3Status.NFS3ERR_PERM, fromDirWcc,
|
int status = mapErrorStatus(e);
|
||||||
toDirWcc);
|
return new RENAME3Response(status, fromDirWcc, toDirWcc);
|
||||||
} else {
|
|
||||||
return new RENAME3Response(Nfs3Status.NFS3ERR_IO, fromDirWcc, toDirWcc);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public SYMLINK3Response symlink(XDR xdr, RpcInfo info) {
|
public SYMLINK3Response symlink(XDR xdr, RpcInfo info) {
|
||||||
|
return symlink(xdr, getSecurityHandler(info), info.remoteAddress());
|
||||||
|
}
|
||||||
|
|
||||||
|
@VisibleForTesting
|
||||||
|
SYMLINK3Response symlink(XDR xdr, SecurityHandler securityHandler,
|
||||||
|
SocketAddress remoteAddress) {
|
||||||
SYMLINK3Response response = new SYMLINK3Response(Nfs3Status.NFS3_OK);
|
SYMLINK3Response response = new SYMLINK3Response(Nfs3Status.NFS3_OK);
|
||||||
|
|
||||||
if (!checkAccessPrivilege(info, AccessPrivilege.READ_WRITE)) {
|
if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
|
||||||
response.setStatus(Nfs3Status.NFS3ERR_ACCES);
|
response.setStatus(Nfs3Status.NFS3ERR_ACCES);
|
||||||
return response;
|
return response;
|
||||||
}
|
}
|
||||||
|
|
||||||
SecurityHandler securityHandler = getSecurityHandler(info);
|
|
||||||
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
|
DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
|
||||||
if (dfsClient == null) {
|
if (dfsClient == null) {
|
||||||
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
|
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
|
||||||
|
@ -1355,7 +1393,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
|
||||||
|
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
LOG.warn("Exception:" + e);
|
LOG.warn("Exception:" + e);
|
-      response.setStatus(Nfs3Status.NFS3ERR_IO);
+      int status = mapErrorStatus(e);
+      response.setStatus(status);
       return response;
     }
   }
@@ -1390,10 +1429,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
   @Override
   public READDIR3Response readdir(XDR xdr, RpcInfo info) {
-    SecurityHandler securityHandler = getSecurityHandler(info);
-    SocketAddress remoteAddress = info.remoteAddress();
-    return readdir(xdr, securityHandler, remoteAddress);
+    return readdir(xdr, getSecurityHandler(info), info.remoteAddress());
   }
 
   public READDIR3Response readdir(XDR xdr, SecurityHandler securityHandler,
       SocketAddress remoteAddress) {
     READDIR3Response response = new READDIR3Response(Nfs3Status.NFS3_OK);
@@ -1501,7 +1539,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       }
     } catch (IOException e) {
       LOG.warn("Exception ", e);
-      return new READDIR3Response(Nfs3Status.NFS3ERR_IO);
+      int status = mapErrorStatus(e);
+      return new READDIR3Response(status);
     }
 
     /**
@@ -1551,9 +1590,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
   @Override
   public READDIRPLUS3Response readdirplus(XDR xdr, RpcInfo info) {
-    SecurityHandler securityHandler = getSecurityHandler(info);
-    SocketAddress remoteAddress = info.remoteAddress();
-    return readdirplus(xdr, securityHandler, remoteAddress);
+    return readdirplus(xdr, getSecurityHandler(info), info.remoteAddress());
   }
 
   @VisibleForTesting
@@ -1664,7 +1701,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       }
     } catch (IOException e) {
       LOG.warn("Exception ", e);
-      return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_IO);
+      int status = mapErrorStatus(e);
+      return new READDIRPLUS3Response(status);
     }
 
     // Set up the dirents in the response
@@ -1723,14 +1761,19 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
   @Override
   public FSSTAT3Response fsstat(XDR xdr, RpcInfo info) {
+    return fsstat(xdr, getSecurityHandler(info), info.remoteAddress());
+  }
+
+  @VisibleForTesting
+  FSSTAT3Response fsstat(XDR xdr, SecurityHandler securityHandler,
+      SocketAddress remoteAddress) {
     FSSTAT3Response response = new FSSTAT3Response(Nfs3Status.NFS3_OK);
 
-    if (!checkAccessPrivilege(info, AccessPrivilege.READ_ONLY)) {
+    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
       response.setStatus(Nfs3Status.NFS3ERR_ACCES);
       return response;
     }
 
-    SecurityHandler securityHandler = getSecurityHandler(info);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -1785,20 +1828,26 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       }
     } catch (IOException e) {
       LOG.warn("Exception ", e);
-      return new FSSTAT3Response(Nfs3Status.NFS3ERR_IO);
+      int status = mapErrorStatus(e);
+      return new FSSTAT3Response(status);
     }
   }
 
   @Override
   public FSINFO3Response fsinfo(XDR xdr, RpcInfo info) {
+    return fsinfo(xdr, getSecurityHandler(info), info.remoteAddress());
+  }
+
+  @VisibleForTesting
+  FSINFO3Response fsinfo(XDR xdr, SecurityHandler securityHandler,
+      SocketAddress remoteAddress) {
     FSINFO3Response response = new FSINFO3Response(Nfs3Status.NFS3_OK);
 
-    if (!checkAccessPrivilege(info, AccessPrivilege.READ_ONLY)) {
+    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
       response.setStatus(Nfs3Status.NFS3ERR_ACCES);
       return response;
     }
 
-    SecurityHandler securityHandler = getSecurityHandler(info);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -1843,20 +1892,26 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           wtmax, wtmax, 1, dtperf, Long.MAX_VALUE, new NfsTime(1), fsProperty);
     } catch (IOException e) {
       LOG.warn("Exception ", e);
-      return new FSINFO3Response(Nfs3Status.NFS3ERR_IO);
+      int status = mapErrorStatus(e);
+      return new FSINFO3Response(status);
     }
   }
 
   @Override
   public PATHCONF3Response pathconf(XDR xdr, RpcInfo info) {
+    return pathconf(xdr, getSecurityHandler(info), info.remoteAddress());
+  }
+
+  @VisibleForTesting
+  PATHCONF3Response pathconf(XDR xdr, SecurityHandler securityHandler,
+      SocketAddress remoteAddress) {
     PATHCONF3Response response = new PATHCONF3Response(Nfs3Status.NFS3_OK);
 
-    if (!checkAccessPrivilege(info, AccessPrivilege.READ_ONLY)) {
+    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_ONLY)) {
       response.setStatus(Nfs3Status.NFS3ERR_ACCES);
       return response;
     }
 
-    SecurityHandler securityHandler = getSecurityHandler(info);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -1890,16 +1945,24 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           HdfsConstants.MAX_PATH_LENGTH, true, false, false, true);
     } catch (IOException e) {
       LOG.warn("Exception ", e);
-      return new PATHCONF3Response(Nfs3Status.NFS3ERR_IO);
+      int status = mapErrorStatus(e);
+      return new PATHCONF3Response(status);
     }
   }
 
   @Override
   public COMMIT3Response commit(XDR xdr, RpcInfo info) {
-    //Channel channel, int xid,
-    //    SecurityHandler securityHandler, InetAddress client) {
-    COMMIT3Response response = new COMMIT3Response(Nfs3Status.NFS3_OK);
     SecurityHandler securityHandler = getSecurityHandler(info);
+    RpcCall rpcCall = (RpcCall) info.header();
+    int xid = rpcCall.getXid();
+    SocketAddress remoteAddress = info.remoteAddress();
+    return commit(xdr, info.channel(), xid, securityHandler, remoteAddress);
+  }
+
+  @VisibleForTesting
+  COMMIT3Response commit(XDR xdr, Channel channel, int xid,
+      SecurityHandler securityHandler, SocketAddress remoteAddress) {
+    COMMIT3Response response = new COMMIT3Response(Nfs3Status.NFS3_OK);
     DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
     if (dfsClient == null) {
       response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
@@ -1930,7 +1993,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       return new COMMIT3Response(Nfs3Status.NFS3ERR_STALE);
     }
 
-    if (!checkAccessPrivilege(info, AccessPrivilege.READ_WRITE)) {
+    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
       return new COMMIT3Response(Nfs3Status.NFS3ERR_ACCES, new WccData(
           Nfs3Utils.getWccAttr(preOpAttr), preOpAttr),
           Nfs3Constant.WRITE_COMMIT_VERF);
@@ -1940,10 +2003,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
           : (request.getOffset() + request.getCount());
 
       // Insert commit as an async request
-      RpcCall rpcCall = (RpcCall) info.header();
-      int xid = rpcCall.getXid();
       writeManager.handleCommit(dfsClient, handle, commitOffset,
-          info.channel(), xid, preOpAttr);
+          channel, xid, preOpAttr);
       return null;
     } catch (IOException e) {
       LOG.warn("Exception ", e);
@@ -1953,9 +2014,11 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       } catch (IOException e1) {
         LOG.info("Can't get postOpAttr for fileId: " + handle.getFileId(), e1);
       }
 
       WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr);
-      return new COMMIT3Response(Nfs3Status.NFS3ERR_IO, fileWcc,
-          Nfs3Constant.WRITE_COMMIT_VERF);
+      int status = mapErrorStatus(e);
+      return new COMMIT3Response(status, fileWcc,
+          Nfs3Constant.WRITE_COMMIT_VERF);
     }
   }
 
@@ -2111,12 +2174,6 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     return nfsproc3 == null || nfsproc3.isIdempotent();
   }
 
-  private boolean checkAccessPrivilege(RpcInfo info,
-      final AccessPrivilege expected) {
-    SocketAddress remoteAddress = info.remoteAddress();
-    return checkAccessPrivilege(remoteAddress, expected);
-  }
-
   private boolean checkAccessPrivilege(SocketAddress remoteAddress,
       final AccessPrivilege expected) {
     // Port monitoring
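The handlers above now funnel IOExceptions through mapErrorStatus(e) instead of always answering NFS3ERR_IO (see the HDFS-6451 entry in the CHANGES hunks below). The helper itself is not shown in this hunk; what follows is only a hedged sketch of what such a mapping can look like, with the specific exception-to-status choices assumed for illustration rather than taken from the patch.

  // Illustrative sketch only; the real mapErrorStatus in RpcProgramNfs3 may
  // cover more exception types than the assumed cases below.
  private static int mapErrorStatusSketch(IOException e) {
    if (e instanceof org.apache.hadoop.security.AccessControlException) {
      return Nfs3Status.NFS3ERR_ACCES;   // permission failure, per HDFS-6451
    } else if (e instanceof java.io.FileNotFoundException) {
      return Nfs3Status.NFS3ERR_NOENT;   // missing file or directory
    } else {
      return Nfs3Status.NFS3ERR_IO;      // fallback: the old blanket status
    }
  }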
@@ -18,19 +18,603 @@
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
 
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+
+import org.jboss.netty.channel.Channel;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.Mockito;
+
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
-import org.junit.Assert;
-import org.junit.Test;
+import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
+import org.apache.hadoop.nfs.nfs3.Nfs3Status;
+import org.apache.hadoop.nfs.nfs3.request.LOOKUP3Request;
+import org.apache.hadoop.nfs.nfs3.request.READ3Request;
+import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
+import org.apache.hadoop.nfs.nfs3.response.ACCESS3Response;
+import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response;
+import org.apache.hadoop.nfs.nfs3.response.CREATE3Response;
+import org.apache.hadoop.nfs.nfs3.response.FSSTAT3Response;
+import org.apache.hadoop.nfs.nfs3.response.FSINFO3Response;
+import org.apache.hadoop.nfs.nfs3.response.GETATTR3Response;
+import org.apache.hadoop.nfs.nfs3.response.LOOKUP3Response;
+import org.apache.hadoop.nfs.nfs3.response.PATHCONF3Response;
+import org.apache.hadoop.nfs.nfs3.response.READ3Response;
+import org.apache.hadoop.nfs.nfs3.response.REMOVE3Response;
+import org.apache.hadoop.nfs.nfs3.response.RMDIR3Response;
+import org.apache.hadoop.nfs.nfs3.response.RENAME3Response;
+import org.apache.hadoop.nfs.nfs3.response.READDIR3Response;
+import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response;
+import org.apache.hadoop.nfs.nfs3.response.READLINK3Response;
+import org.apache.hadoop.nfs.nfs3.response.SETATTR3Response;
+import org.apache.hadoop.nfs.nfs3.response.SYMLINK3Response;
+import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
+import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
+import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.SecurityHandler;
+import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
+import org.apache.hadoop.security.authorize.ProxyUsers;
 
 /**
  * Tests for {@link RpcProgramNfs3}
  */
 public class TestRpcProgramNfs3 {
+  static DistributedFileSystem hdfs;
+  static MiniDFSCluster cluster = null;
+  static NfsConfiguration config = new NfsConfiguration();
+  static NameNode nn;
+  static Nfs3 nfs;
+  static RpcProgramNfs3 nfsd;
+  static SecurityHandler securityHandler;
+  static SecurityHandler securityHandlerUnpriviledged;
+  static String testdir = "/tmp";
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    String currentUser = System.getProperty("user.name");
+
+    config.set("fs.permissions.umask-mode", "u=rwx,g=,o=");
+    config.set(DefaultImpersonationProvider.getTestProvider()
+        .getProxySuperuserGroupConfKey(currentUser), "*");
+    config.set(DefaultImpersonationProvider.getTestProvider()
+        .getProxySuperuserIpConfKey(currentUser), "*");
+    ProxyUsers.refreshSuperUserGroupsConfiguration(config);
+
+    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
+    cluster.waitActive();
+    hdfs = cluster.getFileSystem();
+    nn = cluster.getNameNode();
+
+    // Use ephemeral ports in case tests are running in parallel
+    config.setInt("nfs3.mountd.port", 0);
+    config.setInt("nfs3.server.port", 0);
+
+    // Start NFS with allowed.hosts set to "* rw"
+    config.set("dfs.nfs.exports.allowed.hosts", "* rw");
+    nfs = new Nfs3(config);
+    nfs.startServiceInternal(false);
+    nfsd = (RpcProgramNfs3) nfs.getRpcProgram();
+
+    // Mock SecurityHandler which returns system user.name
+    securityHandler = Mockito.mock(SecurityHandler.class);
+    Mockito.when(securityHandler.getUser()).thenReturn(currentUser);
+
+    // Mock SecurityHandler which returns a dummy username "harry"
+    securityHandlerUnpriviledged = Mockito.mock(SecurityHandler.class);
+    Mockito.when(securityHandlerUnpriviledged.getUser()).thenReturn("harry");
+  }
+
+  @AfterClass
+  public static void shutdown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Before
+  public void createFiles() throws IllegalArgumentException, IOException {
+    hdfs.delete(new Path(testdir), true);
+    hdfs.mkdirs(new Path(testdir));
+    hdfs.mkdirs(new Path(testdir + "/foo"));
+    DFSTestUtil.createFile(hdfs, new Path(testdir + "/bar"), 0, (short) 1, 0);
+  }
+
+  @Test(timeout = 60000)
+  public void testGetattr() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+    XDR xdr_req = new XDR();
+    handle.serialize(xdr_req);
+
+    // Attempt by an unpriviledged user should fail.
+    GETATTR3Response response1 = nfsd.getattr(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    GETATTR3Response response2 = nfsd.getattr(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testSetattr() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    XDR xdr_req = new XDR();
+    FileHandle handle = new FileHandle(dirId);
+    handle.serialize(xdr_req);
+    xdr_req.writeString("bar");
+    SetAttr3 symAttr = new SetAttr3();
+    symAttr.serialize(xdr_req);
+    xdr_req.writeBoolean(false);
+
+    // Attempt by an unpriviledged user should fail.
+    SETATTR3Response response1 = nfsd.setattr(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    SETATTR3Response response2 = nfsd.setattr(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testLookup() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+    LOOKUP3Request lookupReq = new LOOKUP3Request(handle, "bar");
+    XDR xdr_req = new XDR();
+    lookupReq.serialize(xdr_req);
+
+    // Attempt by an unpriviledged user should fail.
+    LOOKUP3Response response1 = nfsd.lookup(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    LOOKUP3Response response2 = nfsd.lookup(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testAccess() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+    XDR xdr_req = new XDR();
+    handle.serialize(xdr_req);
+
+    // Attempt by an unpriviledged user should fail.
+    ACCESS3Response response1 = nfsd.access(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    ACCESS3Response response2 = nfsd.access(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testReadlink() throws Exception {
+    // Create a symlink first.
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    XDR xdr_req = new XDR();
+    FileHandle handle = new FileHandle(dirId);
+    handle.serialize(xdr_req);
+    xdr_req.writeString("fubar");
+    SetAttr3 symAttr = new SetAttr3();
+    symAttr.serialize(xdr_req);
+    xdr_req.writeString("bar");
+
+    SYMLINK3Response response = nfsd.symlink(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response.getStatus());
+
+    // Now perform readlink operations.
+    FileHandle handle2 = response.getObjFileHandle();
+    XDR xdr_req2 = new XDR();
+    handle2.serialize(xdr_req2);
+
+    // Attempt by an unpriviledged user should fail.
+    READLINK3Response response1 = nfsd.readlink(xdr_req2.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    READLINK3Response response2 = nfsd.readlink(xdr_req2.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testRead() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+
+    READ3Request readReq = new READ3Request(handle, 0, 5);
+    XDR xdr_req = new XDR();
+    readReq.serialize(xdr_req);
+
+    // Attempt by an unpriviledged user should fail.
+    /* Hits HDFS-6582. It needs to be fixed first.
+    READ3Response response1 = nfsd.read(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+    */
+
+    // Attempt by a priviledged user should pass.
+    READ3Response response2 = nfsd.read(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testWrite() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+
+    byte[] buffer = new byte[10];
+    for (int i = 0; i < 10; i++) {
+      buffer[i] = (byte) i;
+    }
+
+    WRITE3Request writeReq = new WRITE3Request(handle, 0, 10,
+        WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
+    XDR xdr_req = new XDR();
+    writeReq.serialize(xdr_req);
+
+    // Attempt by an unpriviledged user should fail.
+    WRITE3Response response1 = nfsd.write(xdr_req.asReadOnlyWrap(),
+        null, 1, securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    WRITE3Response response2 = nfsd.write(xdr_req.asReadOnlyWrap(),
+        null, 1, securityHandler,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect response:", null, response2);
+  }
+
+  @Test(timeout = 60000)
+  public void testCreate() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    XDR xdr_req = new XDR();
+    FileHandle handle = new FileHandle(dirId);
+    handle.serialize(xdr_req);
+    xdr_req.writeString("fubar");
+    xdr_req.writeInt(Nfs3Constant.CREATE_UNCHECKED);
+    SetAttr3 symAttr = new SetAttr3();
+    symAttr.serialize(xdr_req);
+
+    // Attempt by an unpriviledged user should fail.
+    CREATE3Response response1 = nfsd.create(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    CREATE3Response response2 = nfsd.create(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testMkdir() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    XDR xdr_req = new XDR();
+    FileHandle handle = new FileHandle(dirId);
+    handle.serialize(xdr_req);
+    xdr_req.writeString("fubar");
+    SetAttr3 symAttr = new SetAttr3();
+    symAttr.serialize(xdr_req);
+    xdr_req.writeString("bar");
+
+    // Attempt to remove by an unpriviledged user should fail.
+    SYMLINK3Response response1 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt to remove by a priviledged user should pass.
+    SYMLINK3Response response2 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testSymlink() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    XDR xdr_req = new XDR();
+    FileHandle handle = new FileHandle(dirId);
+    handle.serialize(xdr_req);
+    xdr_req.writeString("fubar");
+    SetAttr3 symAttr = new SetAttr3();
+    symAttr.serialize(xdr_req);
+    xdr_req.writeString("bar");
+
+    // Attempt by an unpriviledged user should fail.
+    SYMLINK3Response response1 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    SYMLINK3Response response2 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testRemove() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    XDR xdr_req = new XDR();
+    FileHandle handle = new FileHandle(dirId);
+    handle.serialize(xdr_req);
+    xdr_req.writeString("bar");
+
+    // Attempt by an unpriviledged user should fail.
+    REMOVE3Response response1 = nfsd.remove(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    REMOVE3Response response2 = nfsd.remove(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testRmdir() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    XDR xdr_req = new XDR();
+    FileHandle handle = new FileHandle(dirId);
+    handle.serialize(xdr_req);
+    xdr_req.writeString("foo");
+
+    // Attempt by an unpriviledged user should fail.
+    RMDIR3Response response1 = nfsd.rmdir(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    RMDIR3Response response2 = nfsd.rmdir(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testRename() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    XDR xdr_req = new XDR();
+    FileHandle handle = new FileHandle(dirId);
+    handle.serialize(xdr_req);
+    xdr_req.writeString("bar");
+    handle.serialize(xdr_req);
+    xdr_req.writeString("fubar");
+
+    // Attempt by an unpriviledged user should fail.
+    RENAME3Response response1 = nfsd.rename(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    RENAME3Response response2 = nfsd.rename(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testReaddir() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+    XDR xdr_req = new XDR();
+    handle.serialize(xdr_req);
+    xdr_req.writeLongAsHyper(0);
+    xdr_req.writeLongAsHyper(0);
+    xdr_req.writeInt(100);
+
+    // Attempt by an unpriviledged user should fail.
+    READDIR3Response response1 = nfsd.readdir(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    READDIR3Response response2 = nfsd.readdir(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testReaddirplus() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+    XDR xdr_req = new XDR();
+    handle.serialize(xdr_req);
+    xdr_req.writeLongAsHyper(0);
+    xdr_req.writeLongAsHyper(0);
+    xdr_req.writeInt(3);
+    xdr_req.writeInt(2);
+
+    // Attempt by an unpriviledged user should fail.
+    READDIRPLUS3Response response1 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    READDIRPLUS3Response response2 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testFsstat() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+    XDR xdr_req = new XDR();
+    handle.serialize(xdr_req);
+
+    // Attempt by an unpriviledged user should fail.
+    FSSTAT3Response response1 = nfsd.fsstat(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    FSSTAT3Response response2 = nfsd.fsstat(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testFsinfo() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+    XDR xdr_req = new XDR();
+    handle.serialize(xdr_req);
+
+    // Attempt by an unpriviledged user should fail.
+    FSINFO3Response response1 = nfsd.fsinfo(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    FSINFO3Response response2 = nfsd.fsinfo(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testPathconf() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+    XDR xdr_req = new XDR();
+    handle.serialize(xdr_req);
+
+    // Attempt by an unpriviledged user should fail.
+    PATHCONF3Response response1 = nfsd.pathconf(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    PATHCONF3Response response2 = nfsd.pathconf(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testCommit() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+    XDR xdr_req = new XDR();
+    handle.serialize(xdr_req);
+    xdr_req.writeLongAsHyper(0);
+    xdr_req.writeInt(5);
+
+    Channel ch = Mockito.mock(Channel.class);
+
+    // Attempt by an unpriviledged user should fail.
+    COMMIT3Response response1 = nfsd.commit(xdr_req.asReadOnlyWrap(),
+        ch, 1, securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    COMMIT3Response response2 = nfsd.commit(xdr_req.asReadOnlyWrap(),
+        ch, 1, securityHandler,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect COMMIT3Response:", null, response2);
+  }
+
   @Test(timeout=1000)
   public void testIdempotent() {
     Object[][] procedures = {
@@ -130,6 +130,9 @@ Trunk (Unreleased)
     HDFS-6609. Use DirectorySnapshottableFeature to represent a snapshottable
     directory. (Jing Zhao via wheat9)
 
+    HDFS-6482. Use block ID-based block layout on datanodes (James Thomas via
+    Colin Patrick McCabe)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -184,9 +187,6 @@ Trunk (Unreleased)
 
     HDFS-3549. Fix dist tar build fails in hadoop-hdfs-raid project. (Jason Lowe via daryn)
 
-    HDFS-3482. hdfs balancer throws ArrayIndexOutOfBoundsException
-    if option is specified without values. ( Madhukara Phatak via umamahesh)
-
     HDFS-3614. Revert unused MiniDFSCluster constructor from HDFS-3049.
     (acmurthy via eli)
 
@@ -332,6 +332,31 @@ Release 2.6.0 - UNRELEASED
     HDFS-6778. The extended attributes javadoc should simply refer to the
     user docs. (clamb via wang)
 
+    HDFS-6570. add api that enables checking if a user has certain permissions on
+    a file. (Jitendra Pandey via cnauroth)
+
+    HDFS-6441. Add ability to exclude/include specific datanodes while
+    balancing. (Benoy Antony and Yu Li via Arpit Agarwal)
+
+    HDFS-6685. Balancer should preserve storage type of replicas. (szetszwo)
+
+    HDFS-6798. Add test case for incorrect data node condition during
+    balancing. (Benoy Antony via Arpit Agarwal)
+
+    HDFS-6796. Improve the argument check during balancer command line parsing.
+    (Benoy Antony via szetszwo)
+
+    HDFS-6794. Update BlockManager methods to use DatanodeStorageInfo
+    where possible (Arpit Agarwal)
+
+    HDFS-6802. Some tests in TestDFSClientFailover are missing @Test
+    annotation. (Akira Ajisaka via wang)
+
+    HDFS-6788. Improve synchronization in BPOfferService with read write lock.
+    (Yongjun Zhang via wang)
+
+    HDFS-6787. Remove duplicate code in FSDirectory#unprotectedConcat. (Yi Liu via umamahesh)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
@@ -394,6 +419,27 @@ Release 2.6.0 - UNRELEASED
     HDFS-6749. FSNamesystem methods should call resolvePath.
     (Charles Lamb via cnauroth)
 
+    HDFS-4629. Using com.sun.org.apache.xml.internal.serialize.* in
+    XmlEditsVisitor.java is JVM vendor specific. Breaks IBM JAVA.
+    (Amir Sanjar via stevel)
+
+    HDFS-3482. hdfs balancer throws ArrayIndexOutOfBoundsException
+    if option is specified without values. ( Madhukara Phatak via umamahesh)
+
+    HDFS-6797. DataNode logs wrong layoutversion during upgrade. (Benoy Antony
+    via Arpit Agarwal)
+
+    HDFS-6810. StorageReport array is initialized with wrong size in
+    DatanodeDescriptor#getStorageReports. (szetszwo via Arpit Agarwal)
+
+    HDFS-5723. Append failed FINALIZED replica should not be accepted as valid
+    when that block is underconstruction (vinayakumarb)
+
+    HDFS-5185. DN fails to startup if one of the data dir is full. (vinayakumarb)
+
+    HDFS-6451. NFS should not return NFS3ERR_IO for AccessControlException
+    (Abhiraj Butala via brandonli)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -949,6 +995,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6717. JIRA HDFS-5804 breaks default nfs-gateway behavior for unsecured config
     (brandonli)
 
+    HDFS-6768. Fix a few unit tests that use hard-coded port numbers. (Arpit
+    Agarwal)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
 
     HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
@@ -176,6 +176,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>netty</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>xerces</groupId>
+      <artifactId>xercesImpl</artifactId>
+      <scope>compile</scope>
+    </dependency>
   </dependencies>
 
   <build>
@@ -33,6 +33,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.CryptoCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
@@ -456,6 +457,11 @@ public class Hdfs extends AbstractFileSystem {
     dfs.removeXAttr(getUriPath(path), name);
   }
 
+  @Override
+  public void access(Path path, final FsAction mode) throws IOException {
+    dfs.checkAccess(getUriPath(path), mode);
+  }
+
   /**
    * Renew an existing delegation token.
    *
@@ -132,6 +132,7 @@ import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.net.Peer;
@@ -2951,6 +2952,17 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
   }
 
+  public void checkAccess(String src, FsAction mode) throws IOException {
+    checkOpen();
+    try {
+      namenode.checkAccess(src, mode);
+    } catch (RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class,
+          FileNotFoundException.class,
+          UnresolvedPathException.class);
+    }
+  }
+
   @Override // RemotePeerFactory
   public Peer newConnectedPeer(InetSocketAddress addr,
       Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
@@ -381,8 +381,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_DATANODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_HTTP_DEFAULT_PORT;
   public static final String DFS_DATANODE_MAX_RECEIVER_THREADS_KEY = "dfs.datanode.max.transfer.threads";
   public static final int DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT = 4096;
-  public static final String DFS_DATANODE_NUMBLOCKS_KEY = "dfs.datanode.numblocks";
-  public static final int DFS_DATANODE_NUMBLOCKS_DEFAULT = 64;
   public static final String DFS_DATANODE_SCAN_PERIOD_HOURS_KEY = "dfs.datanode.scan.period.hours";
   public static final int DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 0;
   public static final String DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = "dfs.datanode.transferTo.allowed";
@@ -668,4 +666,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY =
       "dfs.datanode.slow.io.warning.threshold.ms";
   public static final long DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 300;
+
+  public static final String DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS_KEY =
+      "dfs.datanode.block.id.layout.upgrade.threads";
+  public static final int DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS = 12;
 }
@@ -59,6 +59,7 @@ import org.apache.hadoop.fs.VolumeId;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@@ -1913,4 +1914,23 @@ public class DistributedFileSystem extends FileSystem {
       }
     }.resolve(this, absF);
   }
+
+  @Override
+  public void access(Path path, final FsAction mode) throws IOException {
+    final Path absF = fixRelativePart(path);
+    new FileSystemLinkResolver<Void>() {
+      @Override
+      public Void doCall(final Path p) throws IOException {
+        dfs.checkAccess(getPathName(p), mode);
+        return null;
+      }
+
+      @Override
+      public Void next(final FileSystem fs, final Path p)
+          throws IOException {
+        fs.access(p, mode);
+        return null;
+      }
+    }.resolve(this, absF);
+  }
 }
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.hdfs;
 
+import java.util.Arrays;
+import java.util.List;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -32,4 +35,11 @@ public enum StorageType {
   SSD;
 
   public static final StorageType DEFAULT = DISK;
+  public static final StorageType[] EMPTY_ARRAY = {};
+
+  private static final StorageType[] VALUES = values();
+
+  public static List<StorageType> asList() {
+    return Arrays.asList(VALUES);
+  }
 }
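The new asList() helper simply exposes the cached values() array; a minimal usage sketch, not part of the patch:

  // Iterate all known storage types without re-allocating values() each time.
  for (StorageType type : StorageType.asList()) {
    System.out.println(type);   // DISK, SSD, ...
  }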
@@ -50,6 +50,9 @@ public class Block implements Writable, Comparable<Block> {
   public static final Pattern metaFilePattern = Pattern
       .compile(BLOCK_FILE_PREFIX + "(-??\\d++)_(\\d++)\\" + METADATA_EXTENSION
           + "$");
+  public static final Pattern metaOrBlockFilePattern = Pattern
+      .compile(BLOCK_FILE_PREFIX + "(-??\\d++)(_(\\d++)\\" + METADATA_EXTENSION
+          + ")?$");
 
   public static boolean isBlockFilename(File f) {
     String name = f.getName();
@@ -65,6 +68,11 @@ public class Block implements Writable, Comparable<Block> {
     return metaFilePattern.matcher(name).matches();
   }
 
+  public static File metaToBlockFile(File metaFile) {
+    return new File(metaFile.getParent(), metaFile.getName().substring(
+        0, metaFile.getName().lastIndexOf('_')));
+  }
+
   /**
    * Get generation stamp from the name of the metafile name
    */
@@ -75,10 +83,10 @@ public class Block implements Writable, Comparable<Block> {
   }
 
   /**
-   * Get the blockId from the name of the metafile name
+   * Get the blockId from the name of the meta or block file
    */
-  public static long getBlockId(String metaFile) {
-    Matcher m = metaFilePattern.matcher(metaFile);
+  public static long getBlockId(String metaOrBlockFile) {
+    Matcher m = metaOrBlockFilePattern.matcher(metaOrBlockFile);
     return m.matches() ? Long.parseLong(m.group(1)) : 0;
   }
 
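metaOrBlockFilePattern makes the metadata suffix optional, so getBlockId now accepts either a block file name or its .meta companion. A small sketch of the expected behavior; the block ID and generation stamp are made-up values:

  // Both names resolve to the same block ID under the new pattern (sketch).
  long fromMeta  = Block.getBlockId("blk_1073741825_1001.meta"); // 1073741825
  long fromBlock = Block.getBlockId("blk_1073741825");           // 1073741825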
@@ -40,6 +40,7 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
@@ -1346,4 +1347,22 @@ public interface ClientProtocol {
    */
   @AtMostOnce
   public void removeXAttr(String src, XAttr xAttr) throws IOException;
+
+  /**
+   * Checks if the user can access a path. The mode specifies which access
+   * checks to perform. If the requested permissions are granted, then the
+   * method returns normally. If access is denied, then the method throws an
+   * {@link AccessControlException}.
+   * In general, applications should avoid using this method, due to the risk of
+   * time-of-check/time-of-use race conditions. The permissions on a file may
+   * change immediately after the access call returns.
+   *
+   * @param path Path to check
+   * @param mode type of access to check
+   * @throws AccessControlException if access is denied
+   * @throws FileNotFoundException if the path does not exist
+   * @throws IOException see specific implementation
+   */
+  @Idempotent
+  public void checkAccess(String path, FsAction mode) throws IOException;
 }
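The javadoc above defines the contract of the new permission probe: it returns normally when access is granted and throws AccessControlException otherwise, with the usual time-of-check/time-of-use caveat. On the client side it surfaces through FileSystem#access, which DistributedFileSystem overrides earlier in this patch. A hedged usage sketch; the path is a placeholder:

  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.permission.FsAction;
  import org.apache.hadoop.security.AccessControlException;

  public class AccessCheckSketch {
    public static void main(String[] args) throws IOException {
      FileSystem fs = FileSystem.get(new Configuration());
      Path p = new Path("/tmp/bar");           // placeholder path
      try {
        fs.access(p, FsAction.READ);           // throws if read access is denied
        // Access looks fine here, modulo the TOCTOU window noted above.
      } catch (AccessControlException ace) {
        System.err.println("read access denied: " + ace.getMessage());
      }
    }
  }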
@@ -175,6 +175,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto;
@@ -325,6 +327,9 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   private static final RemoveXAttrResponseProto
     VOID_REMOVEXATTR_RESPONSE = RemoveXAttrResponseProto.getDefaultInstance();
 
+  private static final CheckAccessResponseProto
+    VOID_CHECKACCESS_RESPONSE = CheckAccessResponseProto.getDefaultInstance();
+
   /**
    * Constructor
    *
@@ -1375,4 +1380,15 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
     return VOID_REMOVEXATTR_RESPONSE;
   }
+
+  @Override
+  public CheckAccessResponseProto checkAccess(RpcController controller,
+      CheckAccessRequestProto req) throws ServiceException {
+    try {
+      server.checkAccess(req.getPath(), PBHelper.convert(req.getMode()));
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return VOID_CHECKACCESS_RESPONSE;
+  }
 }
@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;

@@ -147,6 +148,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSaf
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;

@@ -1400,4 +1402,15 @@ public class ClientNamenodeProtocolTranslatorPB implements
       throw ProtobufHelper.getRemoteException(e);
     }
   }
+
+  @Override
+  public void checkAccess(String path, FsAction mode) throws IOException {
+    CheckAccessRequestProto req = CheckAccessRequestProto.newBuilder()
+        .setPath(path).setMode(PBHelper.convert(mode)).build();
+    try {
+      rpcProxy.checkAccess(null, req);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
 }
@@ -357,15 +357,19 @@ public class PBHelper {
     return BlockWithLocationsProto.newBuilder()
         .setBlock(convert(blk.getBlock()))
         .addAllDatanodeUuids(Arrays.asList(blk.getDatanodeUuids()))
-        .addAllStorageUuids(Arrays.asList(blk.getStorageIDs())).build();
+        .addAllStorageUuids(Arrays.asList(blk.getStorageIDs()))
+        .addAllStorageTypes(convertStorageTypes(blk.getStorageTypes()))
+        .build();
   }
 
   public static BlockWithLocations convert(BlockWithLocationsProto b) {
     final List<String> datanodeUuids = b.getDatanodeUuidsList();
     final List<String> storageUuids = b.getStorageUuidsList();
+    final List<StorageTypeProto> storageTypes = b.getStorageTypesList();
     return new BlockWithLocations(convert(b.getBlock()),
         datanodeUuids.toArray(new String[datanodeUuids.size()]),
-        storageUuids.toArray(new String[storageUuids.size()]));
+        storageUuids.toArray(new String[storageUuids.size()]),
+        convertStorageTypes(storageTypes, storageUuids.size()));
   }
 
   public static BlocksWithLocationsProto convert(BlocksWithLocations blks) {

@@ -2122,11 +2126,11 @@ public class PBHelper {
     return castEnum(v, XATTR_NAMESPACE_VALUES);
   }
 
-  private static FsActionProto convert(FsAction v) {
+  public static FsActionProto convert(FsAction v) {
     return FsActionProto.valueOf(v != null ? v.ordinal() : 0);
   }
 
-  private static FsAction convert(FsActionProto v) {
+  public static FsAction convert(FsActionProto v) {
     return castEnum(v, FSACTION_VALUES);
   }
 
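Since the FsAction converters above become public, other protocol translators can reuse them. A small round-trip sketch follows; the exact package of the generated FsActionProto class is an assumption here, not confirmed by this diff.

    import org.apache.hadoop.fs.permission.FsAction;
    // Assumed location of the generated protobuf enum for FsAction.
    import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto;
    import org.apache.hadoop.hdfs.protocolPB.PBHelper;

    public class FsActionRoundTrip {
      public static void main(String[] args) {
        // Permission bits -> wire enum -> permission bits, via the now-public helpers.
        FsActionProto wire = PBHelper.convert(FsAction.READ_EXECUTE);
        FsAction back = PBHelper.convert(wire);
        System.out.println(wire + " -> " + back);
      }
    }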
(File diff suppressed because it is too large.)
@@ -18,7 +18,11 @@
 package org.apache.hadoop.hdfs.server.balancer;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.util.EnumCounters;
+import org.apache.hadoop.hdfs.util.EnumDoubles;
 
 /**
  * Balancing policy.

@@ -28,31 +32,43 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
  */
 @InterfaceAudience.Private
 abstract class BalancingPolicy {
-  long totalCapacity;
-  long totalUsedSpace;
-  private double avgUtilization;
+  final EnumCounters<StorageType> totalCapacities
+      = new EnumCounters<StorageType>(StorageType.class);
+  final EnumCounters<StorageType> totalUsedSpaces
+      = new EnumCounters<StorageType>(StorageType.class);
+  final EnumDoubles<StorageType> avgUtilizations
+      = new EnumDoubles<StorageType>(StorageType.class);
 
   void reset() {
-    totalCapacity = 0L;
-    totalUsedSpace = 0L;
-    avgUtilization = 0.0;
+    totalCapacities.reset();
+    totalUsedSpaces.reset();
+    avgUtilizations.reset();
   }
 
   /** Get the policy name. */
   abstract String getName();
 
   /** Accumulate used space and capacity. */
-  abstract void accumulateSpaces(DatanodeInfo d);
+  abstract void accumulateSpaces(DatanodeStorageReport r);
 
   void initAvgUtilization() {
-    this.avgUtilization = totalUsedSpace*100.0/totalCapacity;
-  }
-  double getAvgUtilization() {
-    return avgUtilization;
+    for(StorageType t : StorageType.asList()) {
+      final long capacity = totalCapacities.get(t);
+      if (capacity > 0L) {
+        final double avg = totalUsedSpaces.get(t)*100.0/capacity;
+        avgUtilizations.set(t, avg);
+      }
+    }
   }
 
-  /** Return the utilization of a datanode */
-  abstract double getUtilization(DatanodeInfo d);
+  double getAvgUtilization(StorageType t) {
+    return avgUtilizations.get(t);
+  }
+
+  /** @return the utilization of a particular storage type of a datanode;
+   *          or return null if the datanode does not have such storage type.
+   */
+  abstract Double getUtilization(DatanodeStorageReport r, StorageType t);
 
   @Override
   public String toString() {

@@ -84,14 +100,25 @@ abstract class BalancingPolicy {
     }
 
     @Override
-    void accumulateSpaces(DatanodeInfo d) {
-      totalCapacity += d.getCapacity();
-      totalUsedSpace += d.getDfsUsed();
+    void accumulateSpaces(DatanodeStorageReport r) {
+      for(StorageReport s : r.getStorageReports()) {
+        final StorageType t = s.getStorage().getStorageType();
+        totalCapacities.add(t, s.getCapacity());
+        totalUsedSpaces.add(t, s.getDfsUsed());
+      }
     }
 
     @Override
-    double getUtilization(DatanodeInfo d) {
-      return d.getDfsUsed()*100.0/d.getCapacity();
+    Double getUtilization(DatanodeStorageReport r, final StorageType t) {
+      long capacity = 0L;
+      long dfsUsed = 0L;
+      for(StorageReport s : r.getStorageReports()) {
+        if (s.getStorage().getStorageType() == t) {
+          capacity += s.getCapacity();
+          dfsUsed += s.getDfsUsed();
+        }
+      }
+      return capacity == 0L? null: dfsUsed*100.0/capacity;
     }
   }

@@ -108,14 +135,25 @@ abstract class BalancingPolicy {
     }
 
     @Override
-    void accumulateSpaces(DatanodeInfo d) {
-      totalCapacity += d.getCapacity();
-      totalUsedSpace += d.getBlockPoolUsed();
+    void accumulateSpaces(DatanodeStorageReport r) {
+      for(StorageReport s : r.getStorageReports()) {
+        final StorageType t = s.getStorage().getStorageType();
+        totalCapacities.add(t, s.getCapacity());
+        totalUsedSpaces.add(t, s.getBlockPoolUsed());
+      }
     }
 
     @Override
-    double getUtilization(DatanodeInfo d) {
-      return d.getBlockPoolUsed()*100.0/d.getCapacity();
+    Double getUtilization(DatanodeStorageReport r, final StorageType t) {
+      long capacity = 0L;
+      long blockPoolUsed = 0L;
+      for(StorageReport s : r.getStorageReports()) {
+        if (s.getStorage().getStorageType() == t) {
+          capacity += s.getCapacity();
+          blockPoolUsed += s.getBlockPoolUsed();
+        }
+      }
+      return capacity == 0L? null: blockPoolUsed*100.0/capacity;
     }
   }
 }
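To make the per-storage-type bookkeeping above concrete, here is a simplified stand-in that uses a plain EnumMap in place of the HDFS-internal EnumCounters/EnumDoubles and a hypothetical three-value StorageType. It mirrors the avg = used * 100 / capacity computation in initAvgUtilization(); the numbers are made up.

    import java.util.EnumMap;
    import java.util.Map;

    public class UtilizationSketch {
      // Hypothetical stand-in for org.apache.hadoop.hdfs.StorageType.
      enum StorageType { DISK, SSD, ARCHIVE }

      public static void main(String[] args) {
        Map<StorageType, Long> capacity = new EnumMap<StorageType, Long>(StorageType.class);
        Map<StorageType, Long> used = new EnumMap<StorageType, Long>(StorageType.class);

        // Accumulate one storage report per storage type.
        accumulate(capacity, StorageType.DISK, 1000L);
        accumulate(used, StorageType.DISK, 400L);
        accumulate(capacity, StorageType.SSD, 500L);
        accumulate(used, StorageType.SSD, 100L);

        // Average utilization per storage type, as in initAvgUtilization().
        for (StorageType t : StorageType.values()) {
          Long cap = capacity.get(t);
          if (cap != null && cap > 0L) {
            long u = used.containsKey(t) ? used.get(t) : 0L;
            double avg = u * 100.0 / cap;
            System.out.println(t + ": " + avg + "%");   // DISK: 40.0%, SSD: 20.0%
          }
        }
      }

      static void accumulate(Map<StorageType, Long> m, StorageType t, long delta) {
        Long old = m.get(t);
        m.put(t, (old == null ? 0L : old) + delta);
      }
    }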
@@ -1082,6 +1082,7 @@ public class BlockManager {
    * Mark the block belonging to datanode as corrupt
    * @param blk Block to be marked as corrupt
    * @param dn Datanode which holds the corrupt replica
+   * @param storageID if known, null otherwise.
    * @param reason a textual reason why the block should be marked corrupt,
    * for logging purposes
    */

@@ -1098,20 +1099,30 @@ public class BlockManager {
           + blk + " not found");
       return;
     }
-    markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock,
-        blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED),
-        dn, storageID);
-  }
-
-  private void markBlockAsCorrupt(BlockToMarkCorrupt b,
-      DatanodeInfo dn, String storageID) throws IOException {
+
     DatanodeDescriptor node = getDatanodeManager().getDatanode(dn);
     if (node == null) {
-      throw new IOException("Cannot mark " + b
+      throw new IOException("Cannot mark " + blk
           + " as corrupt because datanode " + dn + " (" + dn.getDatanodeUuid()
           + ") does not exist");
     }
 
+    markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock,
+        blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED),
+        storageID == null ? null : node.getStorageInfo(storageID),
+        node);
+  }
+
+  /**
+   *
+   * @param b
+   * @param storageInfo storage that contains the block, if known. null otherwise.
+   * @throws IOException
+   */
+  private void markBlockAsCorrupt(BlockToMarkCorrupt b,
+      DatanodeStorageInfo storageInfo,
+      DatanodeDescriptor node) throws IOException {
     BlockCollection bc = b.corrupted.getBlockCollection();
     if (bc == null) {
       blockLog.info("BLOCK markBlockAsCorrupt: " + b

@@ -1121,7 +1132,9 @@ public class BlockManager {
     }
 
     // Add replica to the data-node if it is not already there
-    node.addBlock(storageID, b.stored);
+    if (storageInfo != null) {
+      storageInfo.addBlock(b.stored);
+    }
 
     // Add this replica to corruptReplicas Map
     corruptReplicas.addToCorruptReplicasMap(b.corrupted, node, b.reason,

@@ -1460,7 +1473,7 @@ public class BlockManager {
    * @throws IOException
    *           if the number of targets < minimum replication.
    * @see BlockPlacementPolicy#chooseTarget(String, int, Node,
-   *      List, boolean, Set, long)
+   *      List, boolean, Set, long, StorageType)
    */
   public DatanodeStorageInfo[] chooseTarget(final String src,
       final int numOfReplicas, final DatanodeDescriptor client,

@@ -1697,7 +1710,7 @@ public class BlockManager {
    * @throws IOException
    */
   public boolean processReport(final DatanodeID nodeID,
-      final DatanodeStorage storage, final String poolId,
+      final DatanodeStorage storage,
       final BlockListAsLongs newReport) throws IOException {
     namesystem.writeLock();
     final long startTime = Time.now(); //after acquiring write lock

@@ -1729,9 +1742,9 @@ public class BlockManager {
       if (storageInfo.numBlocks() == 0) {
         // The first block report can be processed a lot more efficiently than
         // ordinary block reports. This shortens restart times.
-        processFirstBlockReport(node, storage.getStorageID(), newReport);
+        processFirstBlockReport(storageInfo, newReport);
       } else {
-        processReport(node, storage, newReport);
+        processReport(storageInfo, newReport);
       }
 
       // Now that we have an up-to-date block report, we know that any

@@ -1793,9 +1806,8 @@ public class BlockManager {
       }
     }
 
-  private void processReport(final DatanodeDescriptor node,
-      final DatanodeStorage storage,
+  private void processReport(final DatanodeStorageInfo storageInfo,
       final BlockListAsLongs report) throws IOException {
     // Normal case:
     // Modify the (block-->datanode) map, according to the difference
     // between the old and new block report.
@@ -1805,19 +1817,20 @@ public class BlockManager {
     Collection<Block> toInvalidate = new LinkedList<Block>();
     Collection<BlockToMarkCorrupt> toCorrupt = new LinkedList<BlockToMarkCorrupt>();
     Collection<StatefulBlockInfo> toUC = new LinkedList<StatefulBlockInfo>();
-    reportDiff(node, storage, report,
+    reportDiff(storageInfo, report,
         toAdd, toRemove, toInvalidate, toCorrupt, toUC);
 
+    DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
     // Process the blocks on each queue
     for (StatefulBlockInfo b : toUC) {
-      addStoredBlockUnderConstruction(b, node, storage.getStorageID());
+      addStoredBlockUnderConstruction(b, storageInfo);
     }
     for (Block b : toRemove) {
       removeStoredBlock(b, node);
     }
     int numBlocksLogged = 0;
     for (BlockInfo b : toAdd) {
-      addStoredBlock(b, node, storage.getStorageID(), null, numBlocksLogged < maxNumBlocksToLog);
+      addStoredBlock(b, storageInfo, null, numBlocksLogged < maxNumBlocksToLog);
       numBlocksLogged++;
     }
     if (numBlocksLogged > maxNumBlocksToLog) {

@@ -1831,7 +1844,7 @@ public class BlockManager {
       addToInvalidates(b, node);
     }
     for (BlockToMarkCorrupt b : toCorrupt) {
-      markBlockAsCorrupt(b, node, storage.getStorageID());
+      markBlockAsCorrupt(b, storageInfo, node);
     }
   }
 

@@ -1842,16 +1855,16 @@ public class BlockManager {
    * a toRemove list (since there won't be any). It also silently discards
    * any invalid blocks, thereby deferring their processing until
    * the next block report.
-   * @param node - DatanodeDescriptor of the node that sent the report
+   * @param storageInfo - DatanodeStorageInfo that sent the report
    * @param report - the initial block report, to be processed
    * @throws IOException
    */
-  private void processFirstBlockReport(final DatanodeDescriptor node,
-      final String storageID,
+  private void processFirstBlockReport(
+      final DatanodeStorageInfo storageInfo,
       final BlockListAsLongs report) throws IOException {
     if (report == null) return;
     assert (namesystem.hasWriteLock());
-    assert (node.getStorageInfo(storageID).numBlocks() == 0);
+    assert (storageInfo.numBlocks() == 0);
     BlockReportIterator itBR = report.getBlockReportIterator();
 
     while(itBR.hasNext()) {

@@ -1860,7 +1873,7 @@ public class BlockManager {
 
       if (shouldPostponeBlocksFromFuture &&
           namesystem.isGenStampInFuture(iblk)) {
-        queueReportedBlock(node, storageID, iblk, reportedState,
+        queueReportedBlock(storageInfo, iblk, reportedState,
             QUEUE_REASON_FUTURE_GENSTAMP);
         continue;
       }

@@ -1872,15 +1885,16 @@ public class BlockManager {
       // If block is corrupt, mark it and continue to next block.
       BlockUCState ucState = storedBlock.getBlockUCState();
       BlockToMarkCorrupt c = checkReplicaCorrupt(
-          iblk, reportedState, storedBlock, ucState, node);
+          iblk, reportedState, storedBlock, ucState,
+          storageInfo.getDatanodeDescriptor());
       if (c != null) {
         if (shouldPostponeBlocksFromFuture) {
           // In the Standby, we may receive a block report for a file that we
           // just have an out-of-date gen-stamp or state for, for example.
-          queueReportedBlock(node, storageID, iblk, reportedState,
+          queueReportedBlock(storageInfo, iblk, reportedState,
               QUEUE_REASON_CORRUPT_STATE);
         } else {
-          markBlockAsCorrupt(c, node, storageID);
+          markBlockAsCorrupt(c, storageInfo, storageInfo.getDatanodeDescriptor());
         }
         continue;
       }

@@ -1888,7 +1902,7 @@ public class BlockManager {
       // If block is under construction, add this replica to its list
       if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
         ((BlockInfoUnderConstruction)storedBlock).addReplicaIfNotPresent(
-            node.getStorageInfo(storageID), iblk, reportedState);
+            storageInfo, iblk, reportedState);
         // OpenFileBlocks only inside snapshots also will be added to safemode
         // threshold. So we need to update such blocks to safemode
         // refer HDFS-5283

@@ -1901,12 +1915,12 @@ public class BlockManager {
       }
       //add replica if appropriate
       if (reportedState == ReplicaState.FINALIZED) {
-        addStoredBlockImmediate(storedBlock, node, storageID);
+        addStoredBlockImmediate(storedBlock, storageInfo);
       }
     }
   }
 
-  private void reportDiff(DatanodeDescriptor dn, DatanodeStorage storage,
+  private void reportDiff(DatanodeStorageInfo storageInfo,
       BlockListAsLongs newReport,
       Collection<BlockInfo> toAdd,      // add to DatanodeDescriptor
       Collection<Block> toRemove,       // remove from DatanodeDescriptor

@@ -1914,8 +1928,6 @@ public class BlockManager {
       Collection<BlockToMarkCorrupt> toCorrupt, // add to corrupt replicas list
       Collection<StatefulBlockInfo> toUC) { // add to under-construction list
 
-    final DatanodeStorageInfo storageInfo = dn.getStorageInfo(storage.getStorageID());
-
     // place a delimiter in the list which separates blocks
     // that have been reported from those that have not
     BlockInfo delimiter = new BlockInfo(new Block(), 1);

@@ -1932,7 +1944,7 @@ public class BlockManager {
     while(itBR.hasNext()) {
       Block iblk = itBR.next();
       ReplicaState iState = itBR.getCurrentReplicaState();
-      BlockInfo storedBlock = processReportedBlock(dn, storage.getStorageID(),
+      BlockInfo storedBlock = processReportedBlock(storageInfo,
           iblk, iState, toAdd, toInvalidate, toCorrupt, toUC);
 
       // move block to the head of the list

@@ -1969,7 +1981,7 @@ public class BlockManager {
    *      BlockInfoUnderConstruction's list of replicas.</li>
    * </ol>
    *
-   * @param dn descriptor for the datanode that made the report
+   * @param storageInfo DatanodeStorageInfo that sent the report.
   * @param block reported block replica
   * @param reportedState reported replica state
   * @param toAdd add to DatanodeDescriptor
@@ -1981,14 +1993,16 @@ public class BlockManager {
    * @return the up-to-date stored block, if it should be kept.
    *         Otherwise, null.
    */
-  private BlockInfo processReportedBlock(final DatanodeDescriptor dn,
-      final String storageID,
+  private BlockInfo processReportedBlock(
+      final DatanodeStorageInfo storageInfo,
       final Block block, final ReplicaState reportedState,
       final Collection<BlockInfo> toAdd,
       final Collection<Block> toInvalidate,
       final Collection<BlockToMarkCorrupt> toCorrupt,
       final Collection<StatefulBlockInfo> toUC) {
+
+    DatanodeDescriptor dn = storageInfo.getDatanodeDescriptor();
 
     if(LOG.isDebugEnabled()) {
       LOG.debug("Reported block " + block
           + " on " + dn + " size " + block.getNumBytes()

@@ -1997,7 +2011,7 @@ public class BlockManager {
 
     if (shouldPostponeBlocksFromFuture &&
         namesystem.isGenStampInFuture(block)) {
-      queueReportedBlock(dn, storageID, block, reportedState,
+      queueReportedBlock(storageInfo, block, reportedState,
           QUEUE_REASON_FUTURE_GENSTAMP);
       return null;
     }

@@ -2037,7 +2051,7 @@ public class BlockManager {
         // TODO: Pretty confident this should be s/storedBlock/block below,
         // since we should be postponing the info of the reported block, not
        // the stored block. See HDFS-6289 for more context.
-        queueReportedBlock(dn, storageID, storedBlock, reportedState,
+        queueReportedBlock(storageInfo, storedBlock, reportedState,
            QUEUE_REASON_CORRUPT_STATE);
      } else {
        toCorrupt.add(c);

@@ -2066,17 +2080,17 @@ public class BlockManager {
    * standby node. @see PendingDataNodeMessages.
    * @param reason a textual reason to report in the debug logs
    */
-  private void queueReportedBlock(DatanodeDescriptor dn, String storageID, Block block,
+  private void queueReportedBlock(DatanodeStorageInfo storageInfo, Block block,
       ReplicaState reportedState, String reason) {
     assert shouldPostponeBlocksFromFuture;
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("Queueing reported block " + block +
           " in state " + reportedState +
-          " from datanode " + dn + " for later processing " +
-          "because " + reason + ".");
+          " from datanode " + storageInfo.getDatanodeDescriptor() +
+          " for later processing because " + reason + ".");
     }
-    pendingDNMessages.enqueueReportedBlock(dn, storageID, block, reportedState);
+    pendingDNMessages.enqueueReportedBlock(storageInfo, block, reportedState);
   }
 
   /**

@@ -2099,7 +2113,7 @@ public class BlockManager {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Processing previouly queued message " + rbi);
       }
-      processAndHandleReportedBlock(rbi.getNode(), rbi.getStorageID(),
+      processAndHandleReportedBlock(rbi.getStorageInfo(),
           rbi.getBlock(), rbi.getReportedState(), null);
     }
   }

@@ -2156,6 +2170,16 @@ public class BlockManager {
       } else {
         return null; // not corrupt
       }
+    case UNDER_CONSTRUCTION:
+      if (storedBlock.getGenerationStamp() > reported.getGenerationStamp()) {
+        final long reportedGS = reported.getGenerationStamp();
+        return new BlockToMarkCorrupt(storedBlock, reportedGS, "block is "
+            + ucState + " and reported state " + reportedState
+            + ", But reported genstamp " + reportedGS
+            + " does not match genstamp in block map "
+            + storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
+      }
+      return null;
     default:
       return null;
     }

@@ -2219,19 +2243,20 @@ public class BlockManager {
   }
 
   void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock,
-      DatanodeDescriptor node, String storageID) throws IOException {
+      DatanodeStorageInfo storageInfo) throws IOException {
     BlockInfoUnderConstruction block = ucBlock.storedBlock;
-    block.addReplicaIfNotPresent(node.getStorageInfo(storageID),
-        ucBlock.reportedBlock, ucBlock.reportedState);
+    block.addReplicaIfNotPresent(
+        storageInfo, ucBlock.reportedBlock, ucBlock.reportedState);
 
-    if (ucBlock.reportedState == ReplicaState.FINALIZED && block.findDatanode(node) < 0) {
-      addStoredBlock(block, node, storageID, null, true);
+    if (ucBlock.reportedState == ReplicaState.FINALIZED &&
+        block.findDatanode(storageInfo.getDatanodeDescriptor()) < 0) {
+      addStoredBlock(block, storageInfo, null, true);
     }
   }
 
   /**
    * Faster version of
-   * {@link #addStoredBlock(BlockInfo, DatanodeDescriptor, String, DatanodeDescriptor, boolean)}
+   * {@link #addStoredBlock(BlockInfo, DatanodeStorageInfo, DatanodeDescriptor, boolean)}
    * , intended for use with initial block report at startup. If not in startup
    * safe mode, will call standard addStoredBlock(). Assumes this method is
    * called "immediately" so there is no need to refresh the storedBlock from

@@ -2242,17 +2267,17 @@ public class BlockManager {
    * @throws IOException
    */
   private void addStoredBlockImmediate(BlockInfo storedBlock,
-      DatanodeDescriptor node, String storageID)
+      DatanodeStorageInfo storageInfo)
   throws IOException {
     assert (storedBlock != null && namesystem.hasWriteLock());
     if (!namesystem.isInStartupSafeMode()
         || namesystem.isPopulatingReplQueues()) {
-      addStoredBlock(storedBlock, node, storageID, null, false);
+      addStoredBlock(storedBlock, storageInfo, null, false);
       return;
     }
 
     // just add it
-    node.addBlock(storageID, storedBlock);
+    storageInfo.addBlock(storedBlock);
 
     // Now check for completion of blocks and safe block count
     int numCurrentReplica = countLiveNodes(storedBlock);

@@ -2274,13 +2299,13 @@ public class BlockManager {
    * @return the block that is stored in blockMap.
    */
   private Block addStoredBlock(final BlockInfo block,
-                               DatanodeDescriptor node,
-                               String storageID,
+                               DatanodeStorageInfo storageInfo,
                                DatanodeDescriptor delNodeHint,
                                boolean logEveryBlock)
   throws IOException {
     assert block != null && namesystem.hasWriteLock();
     BlockInfo storedBlock;
+    DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
     if (block instanceof BlockInfoUnderConstruction) {
       //refresh our copy in case the block got completed in another thread
       storedBlock = blocksMap.getStoredBlock(block);

@@ -2300,7 +2325,7 @@ public class BlockManager {
     assert bc != null : "Block must belong to a file";
 
     // add block to the datanode
-    boolean added = node.addBlock(storageID, storedBlock);
+    boolean added = storageInfo.addBlock(storedBlock);
 
     int curReplicaDelta;
     if (added) {
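The new UNDER_CONSTRUCTION branch above reduces to a generation-stamp comparison. A self-contained sketch of that decision with plain longs (illustrative names, not the HDFS block classes):

    public class GenStampCheck {
      // A replica reported for an under-construction block is treated as corrupt
      // when the NameNode's stored generation stamp is newer than the reported one.
      static boolean isStaleReportedReplica(long storedGenStamp, long reportedGenStamp) {
        return storedGenStamp > reportedGenStamp;
      }

      public static void main(String[] args) {
        System.out.println(isStaleReportedReplica(1005L, 1002L));  // true  -> mark corrupt
        System.out.println(isStaleReportedReplica(1005L, 1005L));  // false -> keep replica
      }
    }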
@@ -2829,12 +2854,15 @@ public class BlockManager {
     } else {
       final String[] datanodeUuids = new String[locations.size()];
       final String[] storageIDs = new String[datanodeUuids.length];
+      final StorageType[] storageTypes = new StorageType[datanodeUuids.length];
       for(int i = 0; i < locations.size(); i++) {
         final DatanodeStorageInfo s = locations.get(i);
         datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid();
         storageIDs[i] = s.getStorageID();
+        storageTypes[i] = s.getStorageType();
       }
-      results.add(new BlockWithLocations(block, datanodeUuids, storageIDs));
+      results.add(new BlockWithLocations(block, datanodeUuids, storageIDs,
+          storageTypes));
       return block.getNumBytes();
     }
   }

@@ -2843,8 +2871,9 @@ public class BlockManager {
    * The given node is reporting that it received a certain block.
    */
   @VisibleForTesting
-  void addBlock(DatanodeDescriptor node, String storageID, Block block, String delHint)
+  void addBlock(DatanodeStorageInfo storageInfo, Block block, String delHint)
       throws IOException {
+    DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
     // Decrement number of blocks scheduled to this datanode.
     // for a retry request (of DatanodeProtocol#blockReceivedAndDeleted with
     // RECEIVED_BLOCK), we currently also decrease the approximate number.

@@ -2864,12 +2893,12 @@ public class BlockManager {
     // Modify the blocks->datanode map and node's map.
     //
     pendingReplications.decrement(block, node);
-    processAndHandleReportedBlock(node, storageID, block, ReplicaState.FINALIZED,
+    processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED,
         delHintNode);
   }
 
-  private void processAndHandleReportedBlock(DatanodeDescriptor node,
-      String storageID, Block block,
+  private void processAndHandleReportedBlock(
+      DatanodeStorageInfo storageInfo, Block block,
       ReplicaState reportedState, DatanodeDescriptor delHintNode)
       throws IOException {
     // blockReceived reports a finalized block

@@ -2877,7 +2906,9 @@ public class BlockManager {
     Collection<Block> toInvalidate = new LinkedList<Block>();
     Collection<BlockToMarkCorrupt> toCorrupt = new LinkedList<BlockToMarkCorrupt>();
     Collection<StatefulBlockInfo> toUC = new LinkedList<StatefulBlockInfo>();
-    processReportedBlock(node, storageID, block, reportedState,
+    final DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
+
+    processReportedBlock(storageInfo, block, reportedState,
         toAdd, toInvalidate, toCorrupt, toUC);
     // the block is only in one of the to-do lists
     // if it is in none then data-node already has it

@@ -2885,11 +2916,11 @@ public class BlockManager {
       : "The block should be only in one of the lists.";
 
     for (StatefulBlockInfo b : toUC) {
-      addStoredBlockUnderConstruction(b, node, storageID);
+      addStoredBlockUnderConstruction(b, storageInfo);
     }
     long numBlocksLogged = 0;
     for (BlockInfo b : toAdd) {
-      addStoredBlock(b, node, storageID, delHintNode, numBlocksLogged < maxNumBlocksToLog);
+      addStoredBlock(b, storageInfo, delHintNode, numBlocksLogged < maxNumBlocksToLog);
       numBlocksLogged++;
     }
     if (numBlocksLogged > maxNumBlocksToLog) {

@@ -2903,7 +2934,7 @@ public class BlockManager {
       addToInvalidates(b, node);
     }
     for (BlockToMarkCorrupt b : toCorrupt) {
-      markBlockAsCorrupt(b, node, storageID);
+      markBlockAsCorrupt(b, storageInfo, node);
     }
   }
 

@@ -2930,13 +2961,15 @@ public class BlockManager {
           "Got incremental block report from unregistered or dead node");
     }
 
-    if (node.getStorageInfo(srdb.getStorage().getStorageID()) == null) {
+    DatanodeStorageInfo storageInfo =
+        node.getStorageInfo(srdb.getStorage().getStorageID());
+    if (storageInfo == null) {
       // The DataNode is reporting an unknown storage. Usually the NN learns
       // about new storages from heartbeats but during NN restart we may
       // receive a block report or incremental report before the heartbeat.
       // We must handle this for protocol compatibility. This issue was
       // uncovered by HDFS-6094.
-      node.updateStorage(srdb.getStorage());
+      storageInfo = node.updateStorage(srdb.getStorage());
     }
 
     for (ReceivedDeletedBlockInfo rdbi : srdb.getBlocks()) {

@@ -2946,14 +2979,13 @@ public class BlockManager {
         deleted++;
         break;
       case RECEIVED_BLOCK:
-        addBlock(node, srdb.getStorage().getStorageID(),
-            rdbi.getBlock(), rdbi.getDelHints());
+        addBlock(storageInfo, rdbi.getBlock(), rdbi.getDelHints());
         received++;
         break;
       case RECEIVING_BLOCK:
         receiving++;
-        processAndHandleReportedBlock(node, srdb.getStorage().getStorageID(),
-            rdbi.getBlock(), ReplicaState.RBW, null);
+        processAndHandleReportedBlock(storageInfo, rdbi.getBlock(),
+            ReplicaState.RBW, null);
         break;
       default:
         String msg =
@@ -260,8 +260,8 @@ public class DatanodeDescriptor extends DatanodeInfo {
   }
 
   public StorageReport[] getStorageReports() {
-    final StorageReport[] reports = new StorageReport[storageMap.size()];
     final DatanodeStorageInfo[] infos = getStorageInfos();
+    final StorageReport[] reports = new StorageReport[infos.length];
     for(int i = 0; i < infos.length; i++) {
       reports[i] = infos[i].toStorageReport();
     }
@@ -207,7 +207,7 @@ public class DatanodeStorageInfo {
     return blockPoolUsed;
   }
 
-  boolean addBlock(BlockInfo b) {
+  public boolean addBlock(BlockInfo b) {
     if(!b.addStorage(this))
       return false;
     // add to the head of the data-node list
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 
 /**
  * In the Standby Node, we can receive messages about blocks

@@ -41,14 +42,12 @@ class PendingDataNodeMessages {
 
   static class ReportedBlockInfo {
     private final Block block;
-    private final DatanodeDescriptor dn;
-    private final String storageID;
+    private final DatanodeStorageInfo storageInfo;
     private final ReplicaState reportedState;
 
-    ReportedBlockInfo(DatanodeDescriptor dn, String storageID, Block block,
+    ReportedBlockInfo(DatanodeStorageInfo storageInfo, Block block,
         ReplicaState reportedState) {
-      this.dn = dn;
-      this.storageID = storageID;
+      this.storageInfo = storageInfo;
      this.block = block;
      this.reportedState = reportedState;
    }

@@ -57,21 +56,18 @@ class PendingDataNodeMessages {
       return block;
     }
 
-    DatanodeDescriptor getNode() {
-      return dn;
-    }
-
-    String getStorageID() {
-      return storageID;
-    }
-
     ReplicaState getReportedState() {
       return reportedState;
     }
+
+    DatanodeStorageInfo getStorageInfo() {
+      return storageInfo;
+    }
 
     @Override
     public String toString() {
-      return "ReportedBlockInfo [block=" + block + ", dn=" + dn
+      return "ReportedBlockInfo [block=" + block + ", dn="
+          + storageInfo.getDatanodeDescriptor()
           + ", reportedState=" + reportedState + "]";
     }
   }

@@ -87,7 +83,7 @@ class PendingDataNodeMessages {
       Queue<ReportedBlockInfo> oldQueue = entry.getValue();
       while (!oldQueue.isEmpty()) {
         ReportedBlockInfo rbi = oldQueue.remove();
-        if (!rbi.getNode().equals(dn)) {
+        if (!rbi.getStorageInfo().getDatanodeDescriptor().equals(dn)) {
           newQueue.add(rbi);
         } else {
           count--;

@@ -97,11 +93,11 @@ class PendingDataNodeMessages {
     }
   }
 
-  void enqueueReportedBlock(DatanodeDescriptor dn, String storageID, Block block,
+  void enqueueReportedBlock(DatanodeStorageInfo storageInfo, Block block,
       ReplicaState reportedState) {
     block = new Block(block);
     getBlockQueue(block).add(
-        new ReportedBlockInfo(dn, storageID, block, reportedState));
+        new ReportedBlockInfo(storageInfo, block, reportedState));
     count++;
   }
 
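The class above keys queued reports by block and drains them later on the standby. A generic sketch of that enqueue pattern with plain Java collections (an illustrative Report type stands in for ReportedBlockInfo; method names here are not the HDFS API):

    import java.util.ArrayDeque;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.Queue;

    public class PendingMessagesSketch {
      // Illustrative stand-in for ReportedBlockInfo: just the reporting storage id.
      static class Report {
        final String storage;
        Report(String storage) { this.storage = storage; }
      }

      private final Map<Long, Queue<Report>> queueByBlockId =
          new HashMap<Long, Queue<Report>>();
      private int count = 0;

      // Mirrors enqueueReportedBlock: look up (or create) the per-block queue and append.
      void enqueue(long blockId, Report r) {
        Queue<Report> q = queueByBlockId.get(blockId);
        if (q == null) {
          q = new ArrayDeque<Report>();
          queueByBlockId.put(blockId, q);
        }
        q.add(r);
        count++;
      }

      // Drain everything queued for a block when the NameNode is ready to process it.
      Queue<Report> take(long blockId) {
        Queue<Report> q = queueByBlockId.remove(blockId);
        if (q != null) {
          count -= q.size();
        }
        return q;
      }

      public static void main(String[] args) {
        PendingMessagesSketch p = new PendingMessagesSketch();
        p.enqueue(42L, new Report("storage-1"));
        p.enqueue(42L, new Report("storage-2"));
        System.out.println(p.take(42L).size());   // 2
      }
    }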
@@ -21,6 +21,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;

@@ -38,6 +39,8 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 /**
  * One instance per block-pool/namespace on the DN, which handles the

@@ -91,6 +94,28 @@ class BPOfferService {
    */
   private long lastActiveClaimTxId = -1;
 
+  private final ReentrantReadWriteLock mReadWriteLock =
+      new ReentrantReadWriteLock();
+  private final Lock mReadLock = mReadWriteLock.readLock();
+  private final Lock mWriteLock = mReadWriteLock.writeLock();
+
+  // utility methods to acquire and release read lock and write lock
+  void readLock() {
+    mReadLock.lock();
+  }
+
+  void readUnlock() {
+    mReadLock.unlock();
+  }
+
+  void writeLock() {
+    mWriteLock.lock();
+  }
+
+  void writeUnlock() {
+    mWriteLock.unlock();
+  }
+
   BPOfferService(List<InetSocketAddress> nnAddrs, DataNode dn) {
     Preconditions.checkArgument(!nnAddrs.isEmpty(),
         "Must pass at least one NN.");

@@ -136,13 +161,18 @@ class BPOfferService {
     return false;
   }
 
-  synchronized String getBlockPoolId() {
-    if (bpNSInfo != null) {
-      return bpNSInfo.getBlockPoolID();
-    } else {
-      LOG.warn("Block pool ID needed, but service not yet registered with NN",
-          new Exception("trace"));
-      return null;
+  String getBlockPoolId() {
+    readLock();
+    try {
+      if (bpNSInfo != null) {
+        return bpNSInfo.getBlockPoolID();
+      } else {
+        LOG.warn("Block pool ID needed, but service not yet registered with NN",
+            new Exception("trace"));
+        return null;
+      }
+    } finally {
+      readUnlock();
     }
   }
 

@@ -150,27 +180,37 @@ class BPOfferService {
     return getNamespaceInfo() != null;
   }
 
-  synchronized NamespaceInfo getNamespaceInfo() {
-    return bpNSInfo;
+  NamespaceInfo getNamespaceInfo() {
+    readLock();
+    try {
+      return bpNSInfo;
+    } finally {
+      readUnlock();
+    }
   }
 
   @Override
-  public synchronized String toString() {
-    if (bpNSInfo == null) {
-      // If we haven't yet connected to our NN, we don't yet know our
-      // own block pool ID.
-      // If _none_ of the block pools have connected yet, we don't even
-      // know the DatanodeID ID of this DN.
-      String datanodeUuid = dn.getDatanodeUuid();
-
-      if (datanodeUuid == null || datanodeUuid.isEmpty()) {
-        datanodeUuid = "unassigned";
-      }
-      return "Block pool <registering> (Datanode Uuid " + datanodeUuid + ")";
-    } else {
-      return "Block pool " + getBlockPoolId() +
-          " (Datanode Uuid " + dn.getDatanodeUuid() +
-          ")";
+  public String toString() {
+    readLock();
+    try {
+      if (bpNSInfo == null) {
+        // If we haven't yet connected to our NN, we don't yet know our
+        // own block pool ID.
+        // If _none_ of the block pools have connected yet, we don't even
+        // know the DatanodeID ID of this DN.
+        String datanodeUuid = dn.getDatanodeUuid();
+
+        if (datanodeUuid == null || datanodeUuid.isEmpty()) {
+          datanodeUuid = "unassigned";
+        }
+        return "Block pool <registering> (Datanode Uuid " + datanodeUuid + ")";
+      } else {
+        return "Block pool " + getBlockPoolId() +
+            " (Datanode Uuid " + dn.getDatanodeUuid() +
+            ")";
+      }
+    } finally {
+      readUnlock();
     }
   }
 
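The locking change above swaps method-level synchronized for an explicit ReentrantReadWriteLock, so concurrent readers of the namespace info no longer serialize behind each other. The general idiom, shown here independently of BPOfferService with an illustrative shared field, is standard java.util.concurrent usage:

    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class ReadWriteLockIdiom {
      private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
      private final Lock readLock = rwLock.readLock();
      private final Lock writeLock = rwLock.writeLock();
      private String state;   // illustrative shared field

      // Many threads may read concurrently as long as no writer holds the lock.
      public String getState() {
        readLock.lock();
        try {
          return state;
        } finally {
          readLock.unlock();
        }
      }

      // Writers get exclusive access; the finally block guarantees release.
      public void setState(String newState) {
        writeLock.lock();
        try {
          state = newState;
        } finally {
          writeLock.unlock();
        }
      }
    }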
@@ -266,32 +306,37 @@ class BPOfferService {
    * verifies that this namespace matches (eg to prevent a misconfiguration
    * where a StandbyNode from a different cluster is specified)
    */
-  synchronized void verifyAndSetNamespaceInfo(NamespaceInfo nsInfo) throws IOException {
-    if (this.bpNSInfo == null) {
-      this.bpNSInfo = nsInfo;
-      boolean success = false;
-
-      // Now that we know the namespace ID, etc, we can pass this to the DN.
-      // The DN can now initialize its local storage if we are the
-      // first BP to handshake, etc.
-      try {
-        dn.initBlockPool(this);
-        success = true;
-      } finally {
-        if (!success) {
-          // The datanode failed to initialize the BP. We need to reset
-          // the namespace info so that other BPService actors still have
-          // a chance to set it, and re-initialize the datanode.
-          this.bpNSInfo = null;
+  void verifyAndSetNamespaceInfo(NamespaceInfo nsInfo) throws IOException {
+    writeLock();
+    try {
+      if (this.bpNSInfo == null) {
+        this.bpNSInfo = nsInfo;
+        boolean success = false;
+
+        // Now that we know the namespace ID, etc, we can pass this to the DN.
+        // The DN can now initialize its local storage if we are the
+        // first BP to handshake, etc.
+        try {
+          dn.initBlockPool(this);
+          success = true;
+        } finally {
+          if (!success) {
+            // The datanode failed to initialize the BP. We need to reset
+            // the namespace info so that other BPService actors still have
+            // a chance to set it, and re-initialize the datanode.
+            this.bpNSInfo = null;
+          }
         }
+      } else {
+        checkNSEquality(bpNSInfo.getBlockPoolID(), nsInfo.getBlockPoolID(),
+            "Blockpool ID");
+        checkNSEquality(bpNSInfo.getNamespaceID(), nsInfo.getNamespaceID(),
+            "Namespace ID");
+        checkNSEquality(bpNSInfo.getClusterID(), nsInfo.getClusterID(),
+            "Cluster ID");
       }
-    } else {
-      checkNSEquality(bpNSInfo.getBlockPoolID(), nsInfo.getBlockPoolID(),
-          "Blockpool ID");
-      checkNSEquality(bpNSInfo.getNamespaceID(), nsInfo.getNamespaceID(),
-          "Namespace ID");
-      checkNSEquality(bpNSInfo.getClusterID(), nsInfo.getClusterID(),
-          "Cluster ID");
+    } finally {
+      writeUnlock();
     }
   }
 

@@ -300,22 +345,27 @@ class BPOfferService {
    * NN, it calls this function to verify that the NN it connected to
    * is consistent with other NNs serving the block-pool.
    */
-  synchronized void registrationSucceeded(BPServiceActor bpServiceActor,
+  void registrationSucceeded(BPServiceActor bpServiceActor,
       DatanodeRegistration reg) throws IOException {
-    if (bpRegistration != null) {
-      checkNSEquality(bpRegistration.getStorageInfo().getNamespaceID(),
-          reg.getStorageInfo().getNamespaceID(), "namespace ID");
-      checkNSEquality(bpRegistration.getStorageInfo().getClusterID(),
-          reg.getStorageInfo().getClusterID(), "cluster ID");
-    } else {
-      bpRegistration = reg;
-    }
+    writeLock();
+    try {
+      if (bpRegistration != null) {
+        checkNSEquality(bpRegistration.getStorageInfo().getNamespaceID(),
+            reg.getStorageInfo().getNamespaceID(), "namespace ID");
+        checkNSEquality(bpRegistration.getStorageInfo().getClusterID(),
+            reg.getStorageInfo().getClusterID(), "cluster ID");
+      } else {
+        bpRegistration = reg;
+      }
 
-    dn.bpRegistrationSucceeded(bpRegistration, getBlockPoolId());
-    // Add the initial block token secret keys to the DN's secret manager.
-    if (dn.isBlockTokenEnabled) {
-      dn.blockPoolTokenSecretManager.addKeys(getBlockPoolId(),
-          reg.getExportedKeys());
+      dn.bpRegistrationSucceeded(bpRegistration, getBlockPoolId());
+      // Add the initial block token secret keys to the DN's secret manager.
+      if (dn.isBlockTokenEnabled) {
+        dn.blockPoolTokenSecretManager.addKeys(getBlockPoolId(),
+            reg.getExportedKeys());
+      }
+    } finally {
+      writeUnlock();
     }
   }
 

@@ -333,25 +383,35 @@ class BPOfferService {
     }
   }
 
-  synchronized DatanodeRegistration createRegistration() {
-    Preconditions.checkState(bpNSInfo != null,
-        "getRegistration() can only be called after initial handshake");
-    return dn.createBPRegistration(bpNSInfo);
+  DatanodeRegistration createRegistration() {
+    writeLock();
+    try {
+      Preconditions.checkState(bpNSInfo != null,
+          "getRegistration() can only be called after initial handshake");
+      return dn.createBPRegistration(bpNSInfo);
+    } finally {
+      writeUnlock();
+    }
   }
 
   /**
||||||
* Called when an actor shuts down. If this is the last actor
|
* Called when an actor shuts down. If this is the last actor
|
||||||
* to shut down, shuts down the whole blockpool in the DN.
|
* to shut down, shuts down the whole blockpool in the DN.
|
||||||
*/
|
*/
|
||||||
synchronized void shutdownActor(BPServiceActor actor) {
|
void shutdownActor(BPServiceActor actor) {
|
||||||
if (bpServiceToActive == actor) {
|
writeLock();
|
||||||
bpServiceToActive = null;
|
try {
|
||||||
}
|
if (bpServiceToActive == actor) {
|
||||||
|
bpServiceToActive = null;
|
||||||
|
}
|
||||||
|
|
||||||
bpServices.remove(actor);
|
bpServices.remove(actor);
|
||||||
|
|
||||||
if (bpServices.isEmpty()) {
|
if (bpServices.isEmpty()) {
|
||||||
dn.shutdownBlockPool(this);
|
dn.shutdownBlockPool(this);
|
||||||
|
}
|
||||||
|
} finally {
|
||||||
|
writeUnlock();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -392,11 +452,16 @@ class BPOfferService {
|
||||||
* @return a proxy to the active NN, or null if the BPOS has not
|
* @return a proxy to the active NN, or null if the BPOS has not
|
||||||
* acknowledged any NN as active yet.
|
* acknowledged any NN as active yet.
|
||||||
*/
|
*/
|
||||||
synchronized DatanodeProtocolClientSideTranslatorPB getActiveNN() {
|
DatanodeProtocolClientSideTranslatorPB getActiveNN() {
|
||||||
if (bpServiceToActive != null) {
|
readLock();
|
||||||
return bpServiceToActive.bpNamenode;
|
try {
|
||||||
} else {
|
if (bpServiceToActive != null) {
|
||||||
return null;
|
return bpServiceToActive.bpNamenode;
|
||||||
|
} else {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
} finally {
|
||||||
|
readUnlock();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -424,45 +489,50 @@ class BPOfferService {
|
||||||
* @param actor the actor which received the heartbeat
|
* @param actor the actor which received the heartbeat
|
||||||
* @param nnHaState the HA-related heartbeat contents
|
* @param nnHaState the HA-related heartbeat contents
|
||||||
*/
|
*/
|
||||||
synchronized void updateActorStatesFromHeartbeat(
|
void updateActorStatesFromHeartbeat(
|
||||||
BPServiceActor actor,
|
BPServiceActor actor,
|
||||||
NNHAStatusHeartbeat nnHaState) {
|
NNHAStatusHeartbeat nnHaState) {
|
||||||
final long txid = nnHaState.getTxId();
|
writeLock();
|
||||||
|
try {
|
||||||
|
final long txid = nnHaState.getTxId();
|
||||||
|
|
||||||
final boolean nnClaimsActive =
|
final boolean nnClaimsActive =
|
||||||
nnHaState.getState() == HAServiceState.ACTIVE;
|
nnHaState.getState() == HAServiceState.ACTIVE;
|
||||||
final boolean bposThinksActive = bpServiceToActive == actor;
|
final boolean bposThinksActive = bpServiceToActive == actor;
|
||||||
final boolean isMoreRecentClaim = txid > lastActiveClaimTxId;
|
final boolean isMoreRecentClaim = txid > lastActiveClaimTxId;
|
||||||
|
|
||||||
if (nnClaimsActive && !bposThinksActive) {
|
if (nnClaimsActive && !bposThinksActive) {
|
||||||
LOG.info("Namenode " + actor + " trying to claim ACTIVE state with " +
|
LOG.info("Namenode " + actor + " trying to claim ACTIVE state with " +
|
||||||
"txid=" + txid);
|
"txid=" + txid);
|
||||||
if (!isMoreRecentClaim) {
|
if (!isMoreRecentClaim) {
|
||||||
// Split-brain scenario - an NN is trying to claim active
|
// Split-brain scenario - an NN is trying to claim active
|
||||||
// state when a different NN has already claimed it with a higher
|
// state when a different NN has already claimed it with a higher
|
||||||
// txid.
|
// txid.
|
||||||
LOG.warn("NN " + actor + " tried to claim ACTIVE state at txid=" +
|
LOG.warn("NN " + actor + " tried to claim ACTIVE state at txid=" +
|
||||||
txid + " but there was already a more recent claim at txid=" +
|
txid + " but there was already a more recent claim at txid=" +
|
||||||
lastActiveClaimTxId);
|
lastActiveClaimTxId);
|
||||||
return;
|
return;
|
||||||
} else {
|
|
||||||
if (bpServiceToActive == null) {
|
|
||||||
LOG.info("Acknowledging ACTIVE Namenode " + actor);
|
|
||||||
} else {
|
} else {
|
||||||
LOG.info("Namenode " + actor + " taking over ACTIVE state from " +
|
if (bpServiceToActive == null) {
|
||||||
bpServiceToActive + " at higher txid=" + txid);
|
LOG.info("Acknowledging ACTIVE Namenode " + actor);
|
||||||
|
} else {
|
||||||
|
LOG.info("Namenode " + actor + " taking over ACTIVE state from " +
|
||||||
|
bpServiceToActive + " at higher txid=" + txid);
|
||||||
|
}
|
||||||
|
bpServiceToActive = actor;
|
||||||
}
|
}
|
||||||
bpServiceToActive = actor;
|
} else if (!nnClaimsActive && bposThinksActive) {
|
||||||
|
LOG.info("Namenode " + actor + " relinquishing ACTIVE state with " +
|
||||||
|
"txid=" + nnHaState.getTxId());
|
||||||
|
bpServiceToActive = null;
|
||||||
}
|
}
|
||||||
} else if (!nnClaimsActive && bposThinksActive) {
|
|
||||||
LOG.info("Namenode " + actor + " relinquishing ACTIVE state with " +
|
|
||||||
"txid=" + nnHaState.getTxId());
|
|
||||||
bpServiceToActive = null;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (bpServiceToActive == actor) {
|
if (bpServiceToActive == actor) {
|
||||||
assert txid >= lastActiveClaimTxId;
|
assert txid >= lastActiveClaimTxId;
|
||||||
lastActiveClaimTxId = txid;
|
lastActiveClaimTxId = txid;
|
||||||
|
}
|
||||||
|
} finally {
|
||||||
|
writeUnlock();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -533,12 +603,15 @@ class BPOfferService {
|
||||||
actor.reRegister();
|
actor.reRegister();
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
synchronized (this) {
|
writeLock();
|
||||||
|
try {
|
||||||
if (actor == bpServiceToActive) {
|
if (actor == bpServiceToActive) {
|
||||||
return processCommandFromActive(cmd, actor);
|
return processCommandFromActive(cmd, actor);
|
||||||
} else {
|
} else {
|
||||||
return processCommandFromStandby(cmd, actor);
|
return processCommandFromStandby(cmd, actor);
|
||||||
}
|
}
|
||||||
|
} finally {
|
||||||
|
writeUnlock();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
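The hunks above replace method-level synchronized with explicit readLock()/writeLock() calls around try/finally blocks. As an illustration only (the patch itself does not show how the lock is declared), a minimal sketch of such helper methods over a ReentrantReadWriteLock; the field and class names here are assumptions, not the real BPOfferService members:

import java.util.concurrent.locks.ReentrantReadWriteLock;

class RwLockSketch {
  // Assumed field; the real class declares and names its own lock.
  private final ReentrantReadWriteLock mLock = new ReentrantReadWriteLock();

  void readLock()    { mLock.readLock().lock(); }
  void readUnlock()  { mLock.readLock().unlock(); }
  void writeLock()   { mLock.writeLock().lock(); }
  void writeUnlock() { mLock.writeLock().unlock(); }

  // Callers follow the lock()/try/finally/unlock() shape used in the hunks above.
  String readExample(String state) {
    readLock();
    try {
      return state;  // several readers may hold the read lock concurrently
    } finally {
      readUnlock();
    }
  }
}

The design choice is that read-mostly accessors (getNamespaceInfo, toString, getActiveNN) no longer serialize against each other, while mutators still take the exclusive write lock.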
@@ -152,7 +152,7 @@ public class BlockPoolSliceStorage extends Storage {
     // During startup some of them can upgrade or roll back
     // while others could be up-to-date for the regular startup.
     for (int idx = 0; idx < getNumStorageDirs(); idx++) {
-      doTransition(getStorageDir(idx), nsInfo, startOpt);
+      doTransition(datanode, getStorageDir(idx), nsInfo, startOpt);
       assert getCTime() == nsInfo.getCTime()
           : "Data-node and name-node CTimes must be the same.";
     }
@@ -242,7 +242,7 @@ public class BlockPoolSliceStorage extends Storage {
    * @param startOpt startup option
    * @throws IOException
    */
-  private void doTransition(StorageDirectory sd,
+  private void doTransition(DataNode datanode, StorageDirectory sd,
       NamespaceInfo nsInfo, StartupOption startOpt) throws IOException {
     if (startOpt == StartupOption.ROLLBACK) {
       doRollback(sd, nsInfo); // rollback if applicable
@@ -275,7 +275,7 @@ public class BlockPoolSliceStorage extends Storage {
     }
     if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION
         || this.cTime < nsInfo.getCTime()) {
-      doUpgrade(sd, nsInfo); // upgrade
+      doUpgrade(datanode, sd, nsInfo); // upgrade
       return;
     }
     // layoutVersion == LAYOUT_VERSION && this.cTime > nsInfo.cTime
@@ -304,7 +304,8 @@ public class BlockPoolSliceStorage extends Storage {
    * @param nsInfo Namespace Info from the namenode
    * @throws IOException on error
    */
-  void doUpgrade(StorageDirectory bpSd, NamespaceInfo nsInfo) throws IOException {
+  void doUpgrade(DataNode datanode, StorageDirectory bpSd, NamespaceInfo nsInfo)
+      throws IOException {
     // Upgrading is applicable only to release with federation or after
     if (!DataNodeLayoutVersion.supports(
         LayoutVersion.Feature.FEDERATION, layoutVersion)) {
@@ -312,7 +313,7 @@ public class BlockPoolSliceStorage extends Storage {
     }
     LOG.info("Upgrading block pool storage directory " + bpSd.getRoot()
         + ".\n old LV = " + this.getLayoutVersion() + "; old CTime = "
-        + this.getCTime() + ".\n new LV = " + nsInfo.getLayoutVersion()
+        + this.getCTime() + ".\n new LV = " + HdfsConstants.DATANODE_LAYOUT_VERSION
         + "; new CTime = " + nsInfo.getCTime());
     // get <SD>/previous directory
     String dnRoot = getDataNodeStorageRoot(bpSd.getRoot().getCanonicalPath());
@@ -340,7 +341,7 @@ public class BlockPoolSliceStorage extends Storage {
     rename(bpCurDir, bpTmpDir);
 
     // 3. Create new <SD>/current with block files hardlinks and VERSION
-    linkAllBlocks(bpTmpDir, bpCurDir);
+    linkAllBlocks(datanode, bpTmpDir, bpCurDir);
     this.layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION;
     assert this.namespaceID == nsInfo.getNamespaceID()
         : "Data-node and name-node layout versions must be the same.";
@@ -517,14 +518,15 @@ public class BlockPoolSliceStorage extends Storage {
    * @param toDir the current data directory
    * @throws IOException if error occurs during hardlink
    */
-  private void linkAllBlocks(File fromDir, File toDir) throws IOException {
+  private void linkAllBlocks(DataNode datanode, File fromDir, File toDir)
+      throws IOException {
     // do the link
     int diskLayoutVersion = this.getLayoutVersion();
     // hardlink finalized blocks in tmpDir
     HardLink hardLink = new HardLink();
-    DataStorage.linkBlocks(new File(fromDir, DataStorage.STORAGE_DIR_FINALIZED),
+    DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_FINALIZED),
         new File(toDir,DataStorage.STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
-    DataStorage.linkBlocks(new File(fromDir, DataStorage.STORAGE_DIR_RBW),
+    DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_RBW),
         new File(toDir, DataStorage.STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
     LOG.info( hardLink.linkStats.report() );
   }
@@ -253,7 +253,7 @@ class BlockReceiver implements Closeable {
 
       if (cause != null) { // possible disk error
         ioe = cause;
-        datanode.checkDiskError();
+        datanode.checkDiskErrorAsync();
       }
 
       throw ioe;
@@ -329,7 +329,7 @@ class BlockReceiver implements Closeable {
     }
     // disk check
     if(ioe != null) {
-      datanode.checkDiskError();
+      datanode.checkDiskErrorAsync();
       throw ioe;
     }
   }
@@ -639,7 +639,7 @@ class BlockReceiver implements Closeable {
           manageWriterOsCache(offsetInBlock);
         }
       } catch (IOException iex) {
-        datanode.checkDiskError();
+        datanode.checkDiskErrorAsync();
         throw iex;
       }
     }
@@ -1208,7 +1208,7 @@ class BlockReceiver implements Closeable {
         } catch (IOException e) {
           LOG.warn("IOException in BlockReceiver.run(): ", e);
           if (running) {
-            datanode.checkDiskError();
+            datanode.checkDiskErrorAsync();
             LOG.info(myString, e);
             running = false;
             if (!Thread.interrupted()) { // failure not caused by interruption
@@ -1075,6 +1075,11 @@ public class DataNode extends Configured
     // In the case that this is the first block pool to connect, initialize
     // the dataset, block scanners, etc.
     initStorage(nsInfo);
+
+    // Exclude failed disks before initializing the block pools to avoid startup
+    // failures.
+    checkDiskError();
+
     initPeriodicScanners(conf);
 
     data.addBlockPool(nsInfo.getBlockPoolID(), conf);
@@ -1510,9 +1515,9 @@ public class DataNode extends Configured
 
 
   /**
-   * Check if there is a disk failure and if so, handle the error
+   * Check if there is a disk failure asynchronously and if so, handle the error
    */
-  public void checkDiskError() {
+  public void checkDiskErrorAsync() {
     synchronized(checkDiskErrorMutex) {
       checkDiskErrorFlag = true;
       if(checkDiskErrorThread == null) {
@@ -1821,7 +1826,7 @@ public class DataNode extends Configured
       LOG.warn(bpReg + ":Failed to transfer " + b + " to " +
           targets[0] + " got ", ie);
       // check if there are any disk problem
-      checkDiskError();
+      checkDiskErrorAsync();
     } finally {
       xmitsInProgress.getAndDecrement();
       IOUtils.closeStream(blockSender);
@@ -2760,6 +2765,17 @@ public class DataNode extends Configured
     return shortCircuitRegistry;
   }
 
+  /**
+   * Check the disk error
+   */
+  private void checkDiskError() {
+    try {
+      data.checkDataDir();
+    } catch (DiskErrorException de) {
+      handleDiskError(de.getMessage());
+    }
+  }
+
   /**
    * Starts a new thread which will check for disk error check request
    * every 5 sec
@@ -2776,9 +2792,7 @@ public class DataNode extends Configured
             }
             if(tempFlag) {
               try {
-                data.checkDataDir();
-              } catch (DiskErrorException de) {
-                handleDiskError(de.getMessage());
+                checkDiskError();
               } catch (Exception e) {
                 LOG.warn("Unexpected exception occurred while checking disk error " + e);
                 checkDiskErrorThread = null;
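The DataNode hunks above split the old blocking checkDiskError() into an asynchronous request (checkDiskErrorAsync sets a flag and wakes a checker thread) and a private blocking checkDiskError() used by that thread and at block-pool init. A minimal, self-contained sketch of that flag-plus-background-thread pattern; the field names and 5-second poll interval here are illustrative assumptions, not the DataNode's exact members:

import java.util.concurrent.atomic.AtomicBoolean;

class AsyncDiskCheckSketch {
  private final AtomicBoolean checkRequested = new AtomicBoolean(false);
  private final Object mutex = new Object();
  private Thread checkerThread;

  // Non-blocking, safe to call from I/O paths (analogous to checkDiskErrorAsync()).
  void requestCheck() {
    checkRequested.set(true);
    synchronized (mutex) {
      if (checkerThread == null) {
        checkerThread = new Thread(this::run, "disk-checker");
        checkerThread.setDaemon(true);
        checkerThread.start();
      }
    }
  }

  private void run() {
    while (!Thread.currentThread().isInterrupted()) {
      if (checkRequested.getAndSet(false)) {
        blockingCheck();  // stand-in for the blocking data-directory scan
      }
      try {
        Thread.sleep(5000);  // assumed poll interval
      } catch (InterruptedException e) {
        return;
      }
    }
  }

  private void blockingCheck() { /* walk data dirs, drop failed volumes */ }
}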
@@ -62,7 +62,10 @@ public class DataNodeLayoutVersion {
    * </ul>
    */
   public static enum Feature implements LayoutFeature {
-    FIRST_LAYOUT(-55, -53, "First datanode layout", false);
+    FIRST_LAYOUT(-55, -53, "First datanode layout", false),
+    BLOCKID_BASED_LAYOUT(-56,
+        "The block ID of a finalized block uniquely determines its position " +
+        "in the directory structure");
 
     private final FeatureInfo info;
 
@ -18,13 +18,19 @@
|
||||||
|
|
||||||
package org.apache.hadoop.hdfs.server.datanode;
|
package org.apache.hadoop.hdfs.server.datanode;
|
||||||
|
|
||||||
|
import com.google.common.collect.Lists;
|
||||||
|
import com.google.common.util.concurrent.Futures;
|
||||||
import org.apache.hadoop.classification.InterfaceAudience;
|
import org.apache.hadoop.classification.InterfaceAudience;
|
||||||
import org.apache.hadoop.conf.Configuration;
|
import org.apache.hadoop.conf.Configuration;
|
||||||
import org.apache.hadoop.fs.FileSystem;
|
import org.apache.hadoop.fs.FileSystem;
|
||||||
import org.apache.hadoop.fs.*;
|
import org.apache.hadoop.fs.FileUtil;
|
||||||
|
import org.apache.hadoop.fs.HardLink;
|
||||||
|
import org.apache.hadoop.fs.LocalFileSystem;
|
||||||
|
import org.apache.hadoop.fs.Path;
|
||||||
import org.apache.hadoop.fs.permission.FsPermission;
|
import org.apache.hadoop.fs.permission.FsPermission;
|
||||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.Block;
|
||||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||||
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
|
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
|
||||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
|
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
|
||||||
|
@ -35,13 +41,30 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo;
|
||||||
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
|
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
|
||||||
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
|
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
|
||||||
import org.apache.hadoop.io.IOUtils;
|
import org.apache.hadoop.io.IOUtils;
|
||||||
|
import org.apache.hadoop.io.nativeio.NativeIO;
|
||||||
import org.apache.hadoop.util.Daemon;
|
import org.apache.hadoop.util.Daemon;
|
||||||
import org.apache.hadoop.util.DiskChecker;
|
import org.apache.hadoop.util.DiskChecker;
|
||||||
|
|
||||||
import java.io.*;
|
import java.io.File;
|
||||||
|
import java.io.FileInputStream;
|
||||||
|
import java.io.FileOutputStream;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.io.RandomAccessFile;
|
||||||
import java.nio.channels.FileLock;
|
import java.nio.channels.FileLock;
|
||||||
import java.util.*;
|
import java.util.ArrayList;
|
||||||
|
import java.util.Collection;
|
||||||
|
import java.util.Collections;
|
||||||
|
import java.util.HashMap;
|
||||||
|
import java.util.Iterator;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.Properties;
|
||||||
|
import java.util.Set;
|
||||||
|
import java.util.concurrent.Callable;
|
||||||
import java.util.concurrent.ConcurrentHashMap;
|
import java.util.concurrent.ConcurrentHashMap;
|
||||||
|
import java.util.concurrent.ExecutorService;
|
||||||
|
import java.util.concurrent.Executors;
|
||||||
|
import java.util.concurrent.Future;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Data storage information file.
|
* Data storage information file.
|
||||||
|
@ -261,6 +284,7 @@ public class DataStorage extends Storage {
|
||||||
STORAGE_DIR_CURRENT));
|
STORAGE_DIR_CURRENT));
|
||||||
bpDataDirs.add(bpRoot);
|
bpDataDirs.add(bpRoot);
|
||||||
}
|
}
|
||||||
|
|
||||||
// mkdir for the list of BlockPoolStorage
|
// mkdir for the list of BlockPoolStorage
|
||||||
makeBlockPoolDataDir(bpDataDirs, null);
|
makeBlockPoolDataDir(bpDataDirs, null);
|
||||||
BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(
|
BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(
|
||||||
|
@ -488,7 +512,7 @@ public class DataStorage extends Storage {
|
||||||
|
|
||||||
// do upgrade
|
// do upgrade
|
||||||
if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION) {
|
if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION) {
|
||||||
doUpgrade(sd, nsInfo); // upgrade
|
doUpgrade(datanode, sd, nsInfo); // upgrade
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -523,7 +547,8 @@ public class DataStorage extends Storage {
|
||||||
* @param sd storage directory
|
* @param sd storage directory
|
||||||
* @throws IOException on error
|
* @throws IOException on error
|
||||||
*/
|
*/
|
||||||
void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
|
void doUpgrade(DataNode datanode, StorageDirectory sd, NamespaceInfo nsInfo)
|
||||||
|
throws IOException {
|
||||||
// If the existing on-disk layout version supportes federation, simply
|
// If the existing on-disk layout version supportes federation, simply
|
||||||
// update its layout version.
|
// update its layout version.
|
||||||
if (DataNodeLayoutVersion.supports(
|
if (DataNodeLayoutVersion.supports(
|
||||||
|
@ -568,7 +593,8 @@ public class DataStorage extends Storage {
|
||||||
BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(nsInfo.getNamespaceID(),
|
BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(nsInfo.getNamespaceID(),
|
||||||
nsInfo.getBlockPoolID(), nsInfo.getCTime(), nsInfo.getClusterID());
|
nsInfo.getBlockPoolID(), nsInfo.getCTime(), nsInfo.getClusterID());
|
||||||
bpStorage.format(curDir, nsInfo);
|
bpStorage.format(curDir, nsInfo);
|
||||||
linkAllBlocks(tmpDir, bbwDir, new File(curBpDir, STORAGE_DIR_CURRENT));
|
linkAllBlocks(datanode, tmpDir, bbwDir, new File(curBpDir,
|
||||||
|
STORAGE_DIR_CURRENT));
|
||||||
|
|
||||||
// 4. Write version file under <SD>/current
|
// 4. Write version file under <SD>/current
|
||||||
layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION;
|
layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION;
|
||||||
|
@ -746,22 +772,22 @@ public class DataStorage extends Storage {
|
||||||
*
|
*
|
||||||
* @throws IOException If error occurs during hardlink
|
* @throws IOException If error occurs during hardlink
|
||||||
*/
|
*/
|
||||||
private void linkAllBlocks(File fromDir, File fromBbwDir, File toDir)
|
private void linkAllBlocks(DataNode datanode, File fromDir, File fromBbwDir,
|
||||||
throws IOException {
|
File toDir) throws IOException {
|
||||||
HardLink hardLink = new HardLink();
|
HardLink hardLink = new HardLink();
|
||||||
// do the link
|
// do the link
|
||||||
int diskLayoutVersion = this.getLayoutVersion();
|
int diskLayoutVersion = this.getLayoutVersion();
|
||||||
if (DataNodeLayoutVersion.supports(
|
if (DataNodeLayoutVersion.supports(
|
||||||
LayoutVersion.Feature.APPEND_RBW_DIR, diskLayoutVersion)) {
|
LayoutVersion.Feature.APPEND_RBW_DIR, diskLayoutVersion)) {
|
||||||
// hardlink finalized blocks in tmpDir/finalized
|
// hardlink finalized blocks in tmpDir/finalized
|
||||||
linkBlocks(new File(fromDir, STORAGE_DIR_FINALIZED),
|
linkBlocks(datanode, new File(fromDir, STORAGE_DIR_FINALIZED),
|
||||||
new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
|
new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
|
||||||
// hardlink rbw blocks in tmpDir/rbw
|
// hardlink rbw blocks in tmpDir/rbw
|
||||||
linkBlocks(new File(fromDir, STORAGE_DIR_RBW),
|
linkBlocks(datanode, new File(fromDir, STORAGE_DIR_RBW),
|
||||||
new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
|
new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
|
||||||
} else { // pre-RBW version
|
} else { // pre-RBW version
|
||||||
// hardlink finalized blocks in tmpDir
|
// hardlink finalized blocks in tmpDir
|
||||||
linkBlocks(fromDir, new File(toDir, STORAGE_DIR_FINALIZED),
|
linkBlocks(datanode, fromDir, new File(toDir, STORAGE_DIR_FINALIZED),
|
||||||
diskLayoutVersion, hardLink);
|
diskLayoutVersion, hardLink);
|
||||||
if (fromBbwDir.exists()) {
|
if (fromBbwDir.exists()) {
|
||||||
/*
|
/*
|
||||||
|
@ -770,15 +796,67 @@ public class DataStorage extends Storage {
|
||||||
* NOT underneath the 'current' directory in those releases. See
|
* NOT underneath the 'current' directory in those releases. See
|
||||||
* HDFS-3731 for details.
|
* HDFS-3731 for details.
|
||||||
*/
|
*/
|
||||||
linkBlocks(fromBbwDir,
|
linkBlocks(datanode, fromBbwDir,
|
||||||
new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
|
new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
LOG.info( hardLink.linkStats.report() );
|
LOG.info( hardLink.linkStats.report() );
|
||||||
}
|
}
|
||||||
|
|
||||||
static void linkBlocks(File from, File to, int oldLV, HardLink hl)
|
private static class LinkArgs {
|
||||||
throws IOException {
|
public File src;
|
||||||
|
public File dst;
|
||||||
|
|
||||||
|
public LinkArgs(File src, File dst) {
|
||||||
|
this.src = src;
|
||||||
|
this.dst = dst;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void linkBlocks(DataNode datanode, File from, File to, int oldLV,
|
||||||
|
HardLink hl) throws IOException {
|
||||||
|
boolean upgradeToIdBasedLayout = false;
|
||||||
|
// If we are upgrading from a version older than the one where we introduced
|
||||||
|
// block ID-based layout AND we're working with the finalized directory,
|
||||||
|
// we'll need to upgrade from the old flat layout to the block ID-based one
|
||||||
|
if (oldLV > DataNodeLayoutVersion.Feature.BLOCKID_BASED_LAYOUT.getInfo().
|
||||||
|
getLayoutVersion() && to.getName().equals(STORAGE_DIR_FINALIZED)) {
|
||||||
|
upgradeToIdBasedLayout = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
final List<LinkArgs> idBasedLayoutSingleLinks = Lists.newArrayList();
|
||||||
|
linkBlocksHelper(from, to, oldLV, hl, upgradeToIdBasedLayout, to,
|
||||||
|
idBasedLayoutSingleLinks);
|
||||||
|
int numLinkWorkers = datanode.getConf().getInt(
|
||||||
|
DFSConfigKeys.DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS_KEY,
|
||||||
|
DFSConfigKeys.DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS);
|
||||||
|
ExecutorService linkWorkers = Executors.newFixedThreadPool(numLinkWorkers);
|
||||||
|
final int step = idBasedLayoutSingleLinks.size() / numLinkWorkers + 1;
|
||||||
|
List<Future<Void>> futures = Lists.newArrayList();
|
||||||
|
for (int i = 0; i < idBasedLayoutSingleLinks.size(); i += step) {
|
||||||
|
final int iCopy = i;
|
||||||
|
futures.add(linkWorkers.submit(new Callable<Void>() {
|
||||||
|
@Override
|
||||||
|
public Void call() throws IOException {
|
||||||
|
int upperBound = Math.min(iCopy + step,
|
||||||
|
idBasedLayoutSingleLinks.size());
|
||||||
|
for (int j = iCopy; j < upperBound; j++) {
|
||||||
|
LinkArgs cur = idBasedLayoutSingleLinks.get(j);
|
||||||
|
NativeIO.link(cur.src, cur.dst);
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
linkWorkers.shutdown();
|
||||||
|
for (Future<Void> f : futures) {
|
||||||
|
Futures.get(f, IOException.class);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void linkBlocksHelper(File from, File to, int oldLV, HardLink hl,
|
||||||
|
boolean upgradeToIdBasedLayout, File blockRoot,
|
||||||
|
List<LinkArgs> idBasedLayoutSingleLinks) throws IOException {
|
||||||
if (!from.exists()) {
|
if (!from.exists()) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
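The new linkBlocks() above collects every single-file hard link into a list, then farms the list out to a fixed thread pool in contiguous chunks of size step. A stripped-down sketch of that chunking idea, with generic Runnable work items standing in for the Hadoop-specific LinkArgs/NativeIO.link pieces:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

class ChunkedWorkSketch {
  static void runInChunks(final List<Runnable> work, int workers) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(workers);
    final int step = work.size() / workers + 1;       // same chunk-size formula as above
    List<Future<Void>> futures = new ArrayList<Future<Void>>();
    for (int i = 0; i < work.size(); i += step) {
      final int start = i;
      futures.add(pool.submit(new Callable<Void>() {
        @Override
        public Void call() {
          int end = Math.min(start + step, work.size());
          for (int j = start; j < end; j++) {
            work.get(j).run();                        // e.g. one hard link per item
          }
          return null;
        }
      }));
    }
    pool.shutdown();
    for (Future<Void> f : futures) {
      f.get();                                        // propagate the first failure
    }
  }
}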
@@ -805,9 +883,6 @@ public class DataStorage extends Storage {
     // from is a directory
     hl.linkStats.countDirs++;
-
-    if (!to.mkdirs())
-      throw new IOException("Cannot create directory " + to);
 
     String[] blockNames = from.list(new java.io.FilenameFilter() {
       @Override
       public boolean accept(File dir, String name) {
@@ -815,12 +890,36 @@ public class DataStorage extends Storage {
       }
     });
 
+    // If we are upgrading to block ID-based layout, we don't want to recreate
+    // any subdirs from the source that contain blocks, since we have a new
+    // directory structure
+    if (!upgradeToIdBasedLayout || !to.getName().startsWith(
+        BLOCK_SUBDIR_PREFIX)) {
+      if (!to.mkdirs())
+        throw new IOException("Cannot create directory " + to);
+    }
+
     // Block files just need hard links with the same file names
     // but a different directory
     if (blockNames.length > 0) {
-      HardLink.createHardLinkMult(from, blockNames, to);
-      hl.linkStats.countMultLinks++;
-      hl.linkStats.countFilesMultLinks += blockNames.length;
+      if (upgradeToIdBasedLayout) {
+        for (String blockName : blockNames) {
+          long blockId = Block.getBlockId(blockName);
+          File blockLocation = DatanodeUtil.idToBlockDir(blockRoot, blockId);
+          if (!blockLocation.exists()) {
+            if (!blockLocation.mkdirs()) {
+              throw new IOException("Failed to mkdirs " + blockLocation);
+            }
+          }
+          idBasedLayoutSingleLinks.add(new LinkArgs(new File(from, blockName),
+              new File(blockLocation, blockName)));
+          hl.linkStats.countSingleLinks++;
+        }
+      } else {
+        HardLink.createHardLinkMult(from, blockNames, to);
+        hl.linkStats.countMultLinks++;
+        hl.linkStats.countFilesMultLinks += blockNames.length;
+      }
     } else {
       hl.linkStats.countEmptyDirs++;
     }
@@ -834,8 +933,9 @@ public class DataStorage extends Storage {
       }
     });
     for(int i = 0; i < otherNames.length; i++)
-      linkBlocks(new File(from, otherNames[i]),
-          new File(to, otherNames[i]), oldLV, hl);
+      linkBlocksHelper(new File(from, otherNames[i]),
+          new File(to, otherNames[i]), oldLV, hl, upgradeToIdBasedLayout,
+          blockRoot, idBasedLayoutSingleLinks);
   }
 
   /**
@@ -30,6 +30,8 @@ public class DatanodeUtil {
 
   public static final String DISK_ERROR = "Possible disk error: ";
 
+  private static final String SEP = System.getProperty("file.separator");
+
   /** Get the cause of an I/O exception if caused by a possible disk error
    * @param ioe an I/O exception
    * @return cause if the I/O exception is caused by a possible disk error;
@@ -78,4 +80,38 @@ public class DatanodeUtil {
   public static File getUnlinkTmpFile(File f) {
     return new File(f.getParentFile(), f.getName()+UNLINK_BLOCK_SUFFIX);
   }
+
+  /**
+   * Checks whether there are any files anywhere in the directory tree rooted
+   * at dir (directories don't count as files). dir must exist
+   * @return true if there are no files
+   * @throws IOException if unable to list subdirectories
+   */
+  public static boolean dirNoFilesRecursive(File dir) throws IOException {
+    File[] contents = dir.listFiles();
+    if (contents == null) {
+      throw new IOException("Cannot list contents of " + dir);
+    }
+    for (File f : contents) {
+      if (!f.isDirectory() || (f.isDirectory() && !dirNoFilesRecursive(f))) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  /**
+   * Get the directory where a finalized block with this ID should be stored.
+   * Do not attempt to create the directory.
+   * @param root the root directory where finalized blocks are stored
+   * @param blockId
+   * @return
+   */
+  public static File idToBlockDir(File root, long blockId) {
+    int d1 = (int)((blockId >> 16) & 0xff);
+    int d2 = (int)((blockId >> 8) & 0xff);
+    String path = DataStorage.BLOCK_SUBDIR_PREFIX + d1 + SEP +
+        DataStorage.BLOCK_SUBDIR_PREFIX + d2;
+    return new File(root, path);
+  }
 }
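idToBlockDir() above derives a two-level directory from bits 8-23 of the block ID, so each volume gets at most 256 x 256 leaf directories and a block's location can be recomputed from its ID alone. A small worked example of the same arithmetic; the "subdir" prefix is hard-coded here purely for illustration:

import java.io.File;

class BlockDirSketch {
  static File idToBlockDir(File root, long blockId) {
    int d1 = (int) ((blockId >> 16) & 0xff);   // bits 16-23
    int d2 = (int) ((blockId >> 8) & 0xff);    // bits 8-15
    return new File(root, "subdir" + d1 + File.separator + "subdir" + d2);
  }

  public static void main(String[] args) {
    // blockId 0x12345678: d1 = 0x34 = 52, d2 = 0x56 = 86
    // -> <root>/subdir52/subdir86
    System.out.println(idToBlockDir(new File("/data/current/finalized"), 0x12345678L));
  }
}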
@@ -54,10 +54,10 @@ abstract public class ReplicaInfo extends Block implements Replica {
   private File baseDir;
 
   /**
-   * Ints representing the sub directory path from base dir to the directory
-   * containing this replica.
+   * Whether or not this replica's parent directory includes subdirs, in which
+   * case we can generate them based on the replica's block ID
    */
-  private int[] subDirs;
+  private boolean hasSubdirs;
 
   private static final Map<String, File> internedBaseDirs = new HashMap<String, File>();
 
@@ -151,18 +151,8 @@ abstract public class ReplicaInfo extends Block implements Replica {
    * @return the parent directory path where this replica is located
    */
   File getDir() {
-    if (subDirs == null) {
-      return null;
-    }
-
-    StringBuilder sb = new StringBuilder();
-    for (int i : subDirs) {
-      sb.append(DataStorage.BLOCK_SUBDIR_PREFIX);
-      sb.append(i);
-      sb.append("/");
-    }
-    File ret = new File(baseDir, sb.toString());
-    return ret;
+    return hasSubdirs ? DatanodeUtil.idToBlockDir(baseDir,
+        getBlockId()) : baseDir;
   }
 
   /**
@@ -175,54 +165,46 @@ abstract public class ReplicaInfo extends Block implements Replica {
 
   private void setDirInternal(File dir) {
     if (dir == null) {
-      subDirs = null;
       baseDir = null;
       return;
     }
 
-    ReplicaDirInfo replicaDirInfo = parseSubDirs(dir);
-    this.subDirs = replicaDirInfo.subDirs;
+    ReplicaDirInfo dirInfo = parseBaseDir(dir);
+    this.hasSubdirs = dirInfo.hasSubidrs;
 
     synchronized (internedBaseDirs) {
-      if (!internedBaseDirs.containsKey(replicaDirInfo.baseDirPath)) {
+      if (!internedBaseDirs.containsKey(dirInfo.baseDirPath)) {
         // Create a new String path of this file and make a brand new File object
         // to guarantee we drop the reference to the underlying char[] storage.
-        File baseDir = new File(replicaDirInfo.baseDirPath);
-        internedBaseDirs.put(replicaDirInfo.baseDirPath, baseDir);
+        File baseDir = new File(dirInfo.baseDirPath);
+        internedBaseDirs.put(dirInfo.baseDirPath, baseDir);
       }
-      this.baseDir = internedBaseDirs.get(replicaDirInfo.baseDirPath);
+      this.baseDir = internedBaseDirs.get(dirInfo.baseDirPath);
     }
   }
 
   @VisibleForTesting
   public static class ReplicaDirInfo {
-    @VisibleForTesting
     public String baseDirPath;
+    public boolean hasSubidrs;
 
-    @VisibleForTesting
-    public int[] subDirs;
+    public ReplicaDirInfo (String baseDirPath, boolean hasSubidrs) {
+      this.baseDirPath = baseDirPath;
+      this.hasSubidrs = hasSubidrs;
+    }
   }
 
   @VisibleForTesting
-  public static ReplicaDirInfo parseSubDirs(File dir) {
-    ReplicaDirInfo ret = new ReplicaDirInfo();
-
+  public static ReplicaDirInfo parseBaseDir(File dir) {
+
     File currentDir = dir;
-    List<Integer> subDirList = new ArrayList<Integer>();
+    boolean hasSubdirs = false;
     while (currentDir.getName().startsWith(DataStorage.BLOCK_SUBDIR_PREFIX)) {
-      // Prepend the integer into the list.
-      subDirList.add(0, Integer.parseInt(currentDir.getName().replaceFirst(
-          DataStorage.BLOCK_SUBDIR_PREFIX, "")));
+      hasSubdirs = true;
       currentDir = currentDir.getParentFile();
     }
-    ret.subDirs = new int[subDirList.size()];
-    for (int i = 0; i < subDirList.size(); i++) {
-      ret.subDirs[i] = subDirList.get(i);
-    }
 
-    ret.baseDirPath = currentDir.getAbsolutePath();
-
-    return ret;
+    return new ReplicaDirInfo(currentDir.getAbsolutePath(), hasSubdirs);
   }
 
   /**
@@ -59,7 +59,8 @@ class BlockPoolSlice {
   private final String bpid;
   private final FsVolumeImpl volume; // volume to which this BlockPool belongs to
   private final File currentDir; // StorageDirectory/current/bpid/current
-  private final LDir finalizedDir; // directory store Finalized replica
+  // directory where finalized replicas are stored
+  private final File finalizedDir;
   private final File rbwDir; // directory store RBW replica
   private final File tmpDir; // directory store Temporary replica
   private static final String DU_CACHE_FILE = "dfsUsed";
@@ -82,8 +83,13 @@ class BlockPoolSlice {
     this.bpid = bpid;
     this.volume = volume;
     this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
-    final File finalizedDir = new File(
+    this.finalizedDir = new File(
         currentDir, DataStorage.STORAGE_DIR_FINALIZED);
+    if (!this.finalizedDir.exists()) {
+      if (!this.finalizedDir.mkdirs()) {
+        throw new IOException("Failed to mkdirs " + this.finalizedDir);
+      }
+    }
 
     // Files that were being written when the datanode was last shutdown
     // are now moved back to the data directory. It is possible that
@@ -95,10 +101,6 @@ class BlockPoolSlice {
       FileUtil.fullyDelete(tmpDir);
     }
     this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
-    final int maxBlocksPerDir = conf.getInt(
-        DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_KEY,
-        DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_DEFAULT);
-    this.finalizedDir = new LDir(finalizedDir, maxBlocksPerDir);
     if (!rbwDir.mkdirs()) { // create rbw directory if not exist
       if (!rbwDir.isDirectory()) {
         throw new IOException("Mkdirs failed to create " + rbwDir.toString());
@@ -131,7 +133,7 @@ class BlockPoolSlice {
   }
 
   File getFinalizedDir() {
-    return finalizedDir.dir;
+    return finalizedDir;
   }
 
   File getRbwDir() {
@@ -239,25 +241,56 @@ class BlockPoolSlice {
   }
 
   File addBlock(Block b, File f) throws IOException {
-    File blockFile = finalizedDir.addBlock(b, f);
+    File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
+    if (!blockDir.exists()) {
+      if (!blockDir.mkdirs()) {
+        throw new IOException("Failed to mkdirs " + blockDir);
+      }
+    }
+    File blockFile = FsDatasetImpl.moveBlockFiles(b, f, blockDir);
     File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp());
     dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length());
     return blockFile;
   }
 
   void checkDirs() throws DiskErrorException {
-    finalizedDir.checkDirTree();
+    DiskChecker.checkDirs(finalizedDir);
     DiskChecker.checkDir(tmpDir);
     DiskChecker.checkDir(rbwDir);
   }
 
   void getVolumeMap(ReplicaMap volumeMap) throws IOException {
     // add finalized replicas
-    finalizedDir.getVolumeMap(bpid, volumeMap, volume);
+    addToReplicasMap(volumeMap, finalizedDir, true);
     // add rbw replicas
     addToReplicasMap(volumeMap, rbwDir, false);
   }
 
+  /**
+   * Recover an unlinked tmp file on datanode restart. If the original block
+   * does not exist, then the tmp file is renamed to be the
+   * original file name and the original name is returned; otherwise the tmp
+   * file is deleted and null is returned.
+   */
+  File recoverTempUnlinkedBlock(File unlinkedTmp) throws IOException {
+    File blockFile = FsDatasetUtil.getOrigFile(unlinkedTmp);
+    if (blockFile.exists()) {
+      // If the original block file still exists, then no recovery is needed.
+      if (!unlinkedTmp.delete()) {
+        throw new IOException("Unable to cleanup unlinked tmp file " +
+            unlinkedTmp);
+      }
+      return null;
+    } else {
+      if (!unlinkedTmp.renameTo(blockFile)) {
+        throw new IOException("Unable to rename unlinked tmp file " +
+            unlinkedTmp);
+      }
+      return blockFile;
+    }
+  }
+
+
   /**
    * Add replicas under the given directory to the volume map
    * @param volumeMap the replicas map
@@ -267,23 +300,34 @@ class BlockPoolSlice {
    */
   void addToReplicasMap(ReplicaMap volumeMap, File dir, boolean isFinalized
       ) throws IOException {
-    File blockFiles[] = FileUtil.listFiles(dir);
-    for (File blockFile : blockFiles) {
-      if (!Block.isBlockFilename(blockFile))
+    File files[] = FileUtil.listFiles(dir);
+    for (File file : files) {
+      if (file.isDirectory()) {
+        addToReplicasMap(volumeMap, file, isFinalized);
+      }
+
+      if (isFinalized && FsDatasetUtil.isUnlinkTmpFile(file)) {
+        file = recoverTempUnlinkedBlock(file);
+        if (file == null) { // the original block still exists, so we cover it
+          // in another iteration and can continue here
+          continue;
+        }
+      }
+      if (!Block.isBlockFilename(file))
         continue;
 
       long genStamp = FsDatasetUtil.getGenerationStampFromFile(
-          blockFiles, blockFile);
-      long blockId = Block.filename2id(blockFile.getName());
+          files, file);
+      long blockId = Block.filename2id(file.getName());
       ReplicaInfo newReplica = null;
       if (isFinalized) {
         newReplica = new FinalizedReplica(blockId,
-            blockFile.length(), genStamp, volume, blockFile.getParentFile());
+            file.length(), genStamp, volume, file.getParentFile());
       } else {
 
         boolean loadRwr = true;
-        File restartMeta = new File(blockFile.getParent() +
-          File.pathSeparator + "." + blockFile.getName() + ".restart");
+        File restartMeta = new File(file.getParent() +
+          File.pathSeparator + "." + file.getName() + ".restart");
         Scanner sc = null;
         try {
           sc = new Scanner(restartMeta);
@@ -291,8 +335,8 @@ class BlockPoolSlice {
           if (sc.hasNextLong() && (sc.nextLong() > Time.now())) {
             // It didn't expire. Load the replica as a RBW.
             newReplica = new ReplicaBeingWritten(blockId,
-                validateIntegrityAndSetLength(blockFile, genStamp),
-                genStamp, volume, blockFile.getParentFile(), null);
+                validateIntegrityAndSetLength(file, genStamp),
+                genStamp, volume, file.getParentFile(), null);
             loadRwr = false;
           }
           sc.close();
@@ -301,7 +345,7 @@ class BlockPoolSlice {
                 restartMeta.getPath());
           }
         } catch (FileNotFoundException fnfe) {
-          // nothing to do here
+          // nothing to do hereFile dir =
         } finally {
           if (sc != null) {
             sc.close();
@@ -310,15 +354,15 @@ class BlockPoolSlice {
         // Restart meta doesn't exist or expired.
         if (loadRwr) {
           newReplica = new ReplicaWaitingToBeRecovered(blockId,
-              validateIntegrityAndSetLength(blockFile, genStamp),
-              genStamp, volume, blockFile.getParentFile());
+              validateIntegrityAndSetLength(file, genStamp),
+              genStamp, volume, file.getParentFile());
         }
       }
 
       ReplicaInfo oldReplica = volumeMap.add(bpid, newReplica);
       if (oldReplica != null) {
         FsDatasetImpl.LOG.warn("Two block files with the same block id exist " +
-            "on disk: " + oldReplica.getBlockFile() + " and " + blockFile );
+            "on disk: " + oldReplica.getBlockFile() + " and " + file );
       }
     }
   }
@@ -405,10 +449,6 @@ class BlockPoolSlice {
       }
     }
 
-  void clearPath(File f) {
-    finalizedDir.clearPath(f);
-  }
-
   @Override
   public String toString() {
     return currentDir.getAbsolutePath();
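With the block ID-based layout, finalized replicas can sit two subdirectory levels below finalized/, so the volume-map scan above recurses into directories before testing file names. A compact sketch of that recursive walk; the file-name predicate and the handler interface are illustrative stand-ins for Block.isBlockFilename and the replica-map update, not the real APIs:

import java.io.File;

class ReplicaScanSketch {
  interface BlockHandler { void accept(File blockFile); }

  static void scan(File dir, BlockHandler handler) {
    File[] files = dir.listFiles();
    if (files == null) {
      return;                       // unreadable directory; the real code surfaces an error
    }
    for (File f : files) {
      if (f.isDirectory()) {
        scan(f, handler);           // descend into subdir<d1>/subdir<d2>
      } else if (f.getName().startsWith("blk_") && !f.getName().endsWith(".meta")) {
        handler.accept(f);          // assumed block-file naming convention
      }
    }
  }
}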
@@ -1151,7 +1151,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
         return f;
 
       // if file is not null, but doesn't exist - possibly disk failed
-      datanode.checkDiskError();
+      datanode.checkDiskErrorAsync();
     }
 
     if (LOG.isDebugEnabled()) {
@@ -1224,13 +1224,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
             + ". Parent not found for file " + f);
         continue;
       }
-      ReplicaState replicaState = info.getState();
-      if (replicaState == ReplicaState.FINALIZED ||
-          (replicaState == ReplicaState.RUR &&
-              ((ReplicaUnderRecovery)info).getOriginalReplica().getState() ==
-                  ReplicaState.FINALIZED)) {
-        v.clearPath(bpid, parent);
-      }
       volumeMap.remove(bpid, invalidBlks[i]);
     }
 
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
+import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -236,10 +237,6 @@ class FsVolumeImpl implements FsVolumeSpi {
     bp.addToReplicasMap(volumeMap, dir, isFinalized);
   }
 
-  void clearPath(String bpid, File f) throws IOException {
-    getBlockPoolSlice(bpid).clearPath(f);
-  }
-
   @Override
   public String toString() {
     return currentDir.getAbsolutePath();
@@ -274,7 +271,8 @@ class FsVolumeImpl implements FsVolumeSpi {
     File finalizedDir = new File(bpCurrentDir,
         DataStorage.STORAGE_DIR_FINALIZED);
     File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
-    if (finalizedDir.exists() && FileUtil.list(finalizedDir).length != 0) {
+    if (finalizedDir.exists() && !DatanodeUtil.dirNoFilesRecursive(
+        finalizedDir)) {
       return false;
     }
     if (rbwDir.exists() && FileUtil.list(rbwDir).length != 0) {
@@ -301,7 +299,8 @@ class FsVolumeImpl implements FsVolumeSpi {
       if (!rbwDir.delete()) {
         throw new IOException("Failed to delete " + rbwDir);
       }
-      if (!finalizedDir.delete()) {
+      if (!DatanodeUtil.dirNoFilesRecursive(finalizedDir) ||
+          !FileUtil.fullyDelete(finalizedDir)) {
         throw new IOException("Failed to delete " + finalizedDir);
       }
       FileUtil.fullyDelete(tmpDir);
@@ -1,228 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements. See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership. The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License. You may obtain a copy of the License at
-*
-* http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.datanode.DataStorage;
-import org.apache.hadoop.util.DiskChecker;
-import org.apache.hadoop.util.DiskChecker.DiskErrorException;
-
-/**
-* A node type that can be built into a tree reflecting the
-* hierarchy of replicas on the local disk.
-*/
-class LDir {
-final File dir;
-final int maxBlocksPerDir;
-
-private int numBlocks = 0;
-private LDir[] children = null;
-private int lastChildIdx = 0;
-
-LDir(File dir, int maxBlocksPerDir) throws IOException {
-this.dir = dir;
-this.maxBlocksPerDir = maxBlocksPerDir;
-
-if (!dir.exists()) {
-if (!dir.mkdirs()) {
-throw new IOException("Failed to mkdirs " + dir);
-}
-} else {
-File[] files = FileUtil.listFiles(dir);
-List<LDir> dirList = new ArrayList<LDir>();
-for (int idx = 0; idx < files.length; idx++) {
-if (files[idx].isDirectory()) {
-dirList.add(new LDir(files[idx], maxBlocksPerDir));
-} else if (Block.isBlockFilename(files[idx])) {
-numBlocks++;
-}
-}
-if (dirList.size() > 0) {
-children = dirList.toArray(new LDir[dirList.size()]);
-}
-}
-}
-
-File addBlock(Block b, File src) throws IOException {
-//First try without creating subdirectories
-File file = addBlock(b, src, false, false);
-return (file != null) ? file : addBlock(b, src, true, true);
-}
-
-private File addBlock(Block b, File src, boolean createOk, boolean resetIdx
-) throws IOException {
-if (numBlocks < maxBlocksPerDir) {
-final File dest = FsDatasetImpl.moveBlockFiles(b, src, dir);
-numBlocks += 1;
-return dest;
-}
-
-if (lastChildIdx < 0 && resetIdx) {
-//reset so that all children will be checked
-lastChildIdx = DFSUtil.getRandom().nextInt(children.length);
-}
-
-if (lastChildIdx >= 0 && children != null) {
-//Check if any child-tree has room for a block.
-for (int i=0; i < children.length; i++) {
-int idx = (lastChildIdx + i)%children.length;
-File file = children[idx].addBlock(b, src, false, resetIdx);
-if (file != null) {
-lastChildIdx = idx;
-return file;
-}
-}
-lastChildIdx = -1;
-}
-
-if (!createOk) {
-return null;
-}
-
-if (children == null || children.length == 0) {
-children = new LDir[maxBlocksPerDir];
-for (int idx = 0; idx < maxBlocksPerDir; idx++) {
-final File sub = new File(dir, DataStorage.BLOCK_SUBDIR_PREFIX+idx);
-children[idx] = new LDir(sub, maxBlocksPerDir);
-}
-}
-
-//now pick a child randomly for creating a new set of subdirs.
-lastChildIdx = DFSUtil.getRandom().nextInt(children.length);
-return children[ lastChildIdx ].addBlock(b, src, true, false);
-}
-
-void getVolumeMap(String bpid, ReplicaMap volumeMap, FsVolumeImpl volume
-) throws IOException {
-if (children != null) {
-for (int i = 0; i < children.length; i++) {
-children[i].getVolumeMap(bpid, volumeMap, volume);
-}
-}
-
-recoverTempUnlinkedBlock();
-volume.addToReplicasMap(bpid, volumeMap, dir, true);
-}
-
-/**
-* Recover unlinked tmp files on datanode restart. If the original block
-* does not exist, then the tmp file is renamed to be the
-* original file name; otherwise the tmp file is deleted.
-*/
-private void recoverTempUnlinkedBlock() throws IOException {
-File files[] = FileUtil.listFiles(dir);
-for (File file : files) {
-if (!FsDatasetUtil.isUnlinkTmpFile(file)) {
-continue;
-}
-File blockFile = FsDatasetUtil.getOrigFile(file);
-if (blockFile.exists()) {
-// If the original block file still exists, then no recovery is needed.
-if (!file.delete()) {
-throw new IOException("Unable to cleanup unlinked tmp file " + file);
-}
-} else {
-if (!file.renameTo(blockFile)) {
-throw new IOException("Unable to cleanup detached file " + file);
-}
-}
-}
-}
-
-/**
-* check if a data diretory is healthy
-* @throws DiskErrorException
-*/
-void checkDirTree() throws DiskErrorException {
-DiskChecker.checkDir(dir);
-
-if (children != null) {
-for (int i = 0; i < children.length; i++) {
-children[i].checkDirTree();
-}
-}
-}
-
-void clearPath(File f) {
-String root = dir.getAbsolutePath();
-String dir = f.getAbsolutePath();
-if (dir.startsWith(root)) {
-String[] dirNames = dir.substring(root.length()).
-split(File.separator + DataStorage.BLOCK_SUBDIR_PREFIX);
-if (clearPath(f, dirNames, 1))
-return;
-}
-clearPath(f, null, -1);
-}
-
-/**
-* dirNames is an array of string integers derived from
-* usual directory structure data/subdirN/subdirXY/subdirM ...
-* If dirName array is non-null, we only check the child at
-* the children[dirNames[idx]]. This avoids iterating over
-* children in common case. If directory structure changes
-* in later versions, we need to revisit this.
-*/
-private boolean clearPath(File f, String[] dirNames, int idx) {
-if ((dirNames == null || idx == dirNames.length) &&
-dir.compareTo(f) == 0) {
-numBlocks--;
-return true;
-}
-
-if (dirNames != null) {
-//guess the child index from the directory name
-if (idx > (dirNames.length - 1) || children == null) {
-return false;
-}
-int childIdx;
-try {
-childIdx = Integer.parseInt(dirNames[idx]);
-} catch (NumberFormatException ignored) {
-// layout changed? we could print a warning.
-return false;
-}
-return (childIdx >= 0 && childIdx < children.length) ?
-children[childIdx].clearPath(f, dirNames, idx+1) : false;
-}
-
-//guesses failed. back to blind iteration.
-if (children != null) {
-for(int i=0; i < children.length; i++) {
-if (children[i].clearPath(f, null, -1)){
-return true;
-}
-}
-}
-return false;
-}
-
-@Override
-public String toString() {
-return "FSDir{dir=" + dir + ", children="
-+ (children == null ? null : Arrays.asList(children)) + "}";
-}
-}
@@ -1103,9 +1103,6 @@ public class FSDirectory implements Closeable {
 count++;
 }

-// update inodeMap
-removeFromInodeMap(Arrays.asList(allSrcInodes));
-
 trgInode.setModificationTime(timestamp, trgLatestSnapshot);
 trgParent.updateModificationTime(timestamp, trgLatestSnapshot);
 // update quota on the parent directory ('count' files removed, 0 space)

@@ -4585,8 +4585,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 // Otherwise fsck will report these blocks as MISSING, especially if the
 // blocksReceived from Datanodes take a long time to arrive.
 for (int i = 0; i < trimmedTargets.size(); i++) {
-trimmedTargets.get(i).addBlock(
-trimmedStorages.get(i), storedBlock);
+DatanodeStorageInfo storageInfo =
+trimmedTargets.get(i).getStorageInfo(trimmedStorages.get(i));
+if (storageInfo != null) {
+storageInfo.addBlock(storedBlock);
+}
 }
 }

@@ -6066,7 +6069,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 }

 public void processIncrementalBlockReport(final DatanodeID nodeID,
-final String poolId, final StorageReceivedDeletedBlocks srdb)
+final StorageReceivedDeletedBlocks srdb)
 throws IOException {
 writeLock();
 try {

@@ -8824,6 +8827,29 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 }
 }

+void checkAccess(String src, FsAction mode) throws AccessControlException,
+FileNotFoundException, UnresolvedLinkException, IOException {
+checkOperation(OperationCategory.READ);
+byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+readLock();
+try {
+checkOperation(OperationCategory.READ);
+src = FSDirectory.resolvePath(src, pathComponents, dir);
+if (dir.getINode(src) == null) {
+throw new FileNotFoundException("Path not found");
+}
+if (isPermissionEnabled) {
+FSPermissionChecker pc = getPermissionChecker();
+checkPathAccess(pc, src, mode);
+}
+} catch (AccessControlException e) {
+logAuditEvent(false, "checkAccess", src);
+throw e;
+} finally {
+readUnlock();
+}
+}
+
 /**
 * Default AuditLogger implementation; used when no access logger is
 * defined in the config file. It can also be explicitly listed in the

@@ -55,6 +55,7 @@ import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.ha.HAServiceStatus;
 import org.apache.hadoop.ha.HealthCheckFailedException;

@@ -1067,7 +1068,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
 // for the same node and storage, so the value returned by the last
 // call of this loop is the final updated value for noStaleStorage.
 //
-noStaleStorages = bm.processReport(nodeReg, r.getStorage(), poolId, blocks);
+noStaleStorages = bm.processReport(nodeReg, r.getStorage(), blocks);
 metrics.incrStorageBlockReportOps();
 }

@@ -1103,7 +1104,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
 +" blocks.");
 }
 for(StorageReceivedDeletedBlocks r : receivedAndDeletedBlocks) {
-namesystem.processIncrementalBlockReport(nodeReg, poolId, r);
+namesystem.processIncrementalBlockReport(nodeReg, r);
 }
 }

@@ -1458,5 +1459,10 @@ class NameNodeRpcServer implements NamenodeProtocols {
 public void removeXAttr(String src, XAttr xAttr) throws IOException {
 namesystem.removeXAttr(src, xAttr);
 }

+@Override
+public void checkAccess(String path, FsAction mode) throws IOException {
+namesystem.checkAccess(path, mode);
+}
 }

@@ -57,6 +57,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

@@ -112,6 +113,7 @@ import org.apache.hadoop.hdfs.web.resources.XAttrEncodingParam;
 import org.apache.hadoop.hdfs.web.resources.XAttrNameParam;
 import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam;
 import org.apache.hadoop.hdfs.web.resources.XAttrValueParam;
+import org.apache.hadoop.hdfs.web.resources.FsActionParam;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.Server;

@@ -755,10 +757,12 @@ public class NamenodeWebHdfsMethods {
 @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT)
 final XAttrEncodingParam xattrEncoding,
 @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
-final ExcludeDatanodesParam excludeDatanodes
+final ExcludeDatanodesParam excludeDatanodes,
+@QueryParam(FsActionParam.NAME) @DefaultValue(FsActionParam.DEFAULT)
+final FsActionParam fsAction
 ) throws IOException, InterruptedException {
 return get(ugi, delegation, username, doAsUser, ROOT, op, offset, length,
-renewer, bufferSize, xattrNames, xattrEncoding, excludeDatanodes);
+renewer, bufferSize, xattrNames, xattrEncoding, excludeDatanodes, fsAction);
 }

 /** Handle HTTP GET request. */

@@ -789,11 +793,13 @@ public class NamenodeWebHdfsMethods {
 @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT)
 final XAttrEncodingParam xattrEncoding,
 @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
-final ExcludeDatanodesParam excludeDatanodes
+final ExcludeDatanodesParam excludeDatanodes,
+@QueryParam(FsActionParam.NAME) @DefaultValue(FsActionParam.DEFAULT)
+final FsActionParam fsAction
 ) throws IOException, InterruptedException {

 init(ugi, delegation, username, doAsUser, path, op, offset, length,
-renewer, bufferSize, xattrEncoding, excludeDatanodes);
+renewer, bufferSize, xattrEncoding, excludeDatanodes, fsAction);

 return ugi.doAs(new PrivilegedExceptionAction<Response>() {
 @Override

@@ -801,7 +807,7 @@ public class NamenodeWebHdfsMethods {
 try {
 return get(ugi, delegation, username, doAsUser,
 path.getAbsolutePath(), op, offset, length, renewer, bufferSize,
-xattrNames, xattrEncoding, excludeDatanodes);
+xattrNames, xattrEncoding, excludeDatanodes, fsAction);
 } finally {
 reset();
 }

@@ -822,7 +828,8 @@ public class NamenodeWebHdfsMethods {
 final BufferSizeParam bufferSize,
 final List<XAttrNameParam> xattrNames,
 final XAttrEncodingParam xattrEncoding,
-final ExcludeDatanodesParam excludeDatanodes
+final ExcludeDatanodesParam excludeDatanodes,
+final FsActionParam fsAction
 ) throws IOException, URISyntaxException {
 final NameNode namenode = (NameNode)context.getAttribute("name.node");
 final NamenodeProtocols np = getRPCServer(namenode);

@@ -919,6 +926,10 @@ public class NamenodeWebHdfsMethods {
 final String js = JsonUtil.toJsonString(xAttrs);
 return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
 }
+case CHECKACCESS: {
+np.checkAccess(fullpath, FsAction.getFsAction(fsAction.getValue()));
+return Response.ok().build();
+}
 default:
 throw new UnsupportedOperationException(op + " is not supported");
 }

@@ -17,10 +17,9 @@
 */
 package org.apache.hadoop.hdfs.server.protocol;

-import java.util.Arrays;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;

 /**

@@ -39,12 +38,15 @@ public class BlocksWithLocations {
 final Block block;
 final String[] datanodeUuids;
 final String[] storageIDs;
+final StorageType[] storageTypes;

 /** constructor */
-public BlockWithLocations(Block block, String[] datanodeUuids, String[] storageIDs) {
+public BlockWithLocations(Block block, String[] datanodeUuids,
+String[] storageIDs, StorageType[] storageTypes) {
 this.block = block;
 this.datanodeUuids = datanodeUuids;
 this.storageIDs = storageIDs;
+this.storageTypes = storageTypes;
 }

 /** get the block */

@@ -62,6 +64,11 @@ public class BlocksWithLocations {
 return storageIDs;
 }

+/** @return the storage types */
+public StorageType[] getStorageTypes() {
+return storageTypes;
+}
+
 @Override
 public String toString() {
 final StringBuilder b = new StringBuilder();

@@ -70,12 +77,18 @@ public class BlocksWithLocations {
 return b.append("[]").toString();
 }

-b.append(storageIDs[0]).append('@').append(datanodeUuids[0]);
+appendString(0, b.append("["));
 for(int i = 1; i < datanodeUuids.length; i++) {
-b.append(", ").append(storageIDs[i]).append("@").append(datanodeUuids[i]);
+appendString(i, b.append(","));
 }
 return b.append("]").toString();
 }

+private StringBuilder appendString(int i, StringBuilder b) {
+return b.append("[").append(storageTypes[i]).append("]")
+.append(storageIDs[i])
+.append("@").append(datanodeUuids[i]);
+}
 }

 private final BlockWithLocations[] blocks;

@@ -29,8 +29,8 @@ import org.xml.sax.ContentHandler;
 import org.xml.sax.SAXException;
 import org.xml.sax.helpers.AttributesImpl;

-import com.sun.org.apache.xml.internal.serialize.OutputFormat;
-import com.sun.org.apache.xml.internal.serialize.XMLSerializer;
+import org.apache.xml.serialize.OutputFormat;
+import org.apache.xml.serialize.XMLSerializer;

 /**
 * An XmlEditsVisitor walks over an EditLog structure and writes out

@@ -37,7 +37,7 @@ import com.google.common.base.Preconditions;
 public class EnumCounters<E extends Enum<E>> {
 /** The class of the enum. */
 private final Class<E> enumClass;
-/** The counter array, counters[i] corresponds to the enumConstants[i]. */
+/** An array of longs corresponding to the enum type. */
 private final long[] counters;

 /**

@@ -75,6 +75,13 @@ public class EnumCounters<E extends Enum<E>> {
 }
 }

+/** Reset all counters to zero. */
+public final void reset() {
+for(int i = 0; i < counters.length; i++) {
+this.counters[i] = 0L;
+}
+}
+
 /** Add the given value to counter e. */
 public final void add(final E e, final long value) {
 counters[e.ordinal()] += value;
@@ -0,0 +1,128 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.hdfs.util;
+
+import java.util.Arrays;
+
+import com.google.common.base.Preconditions;
+
+/**
+* Similar to {@link EnumCounters} except that the value type is double.
+*
+* @param <E> the enum type
+*/
+public class EnumDoubles<E extends Enum<E>> {
+/** The class of the enum. */
+private final Class<E> enumClass;
+/** An array of doubles corresponding to the enum type. */
+private final double[] doubles;
+
+/**
+* Construct doubles for the given enum constants.
+* @param enumClass the enum class.
+*/
+public EnumDoubles(final Class<E> enumClass) {
+final E[] enumConstants = enumClass.getEnumConstants();
+Preconditions.checkNotNull(enumConstants);
+this.enumClass = enumClass;
+this.doubles = new double[enumConstants.length];
+}
+
+/** @return the value corresponding to e. */
+public final double get(final E e) {
+return doubles[e.ordinal()];
+}
+
+/** Negate all values. */
+public final void negation() {
+for(int i = 0; i < doubles.length; i++) {
+doubles[i] = -doubles[i];
+}
+}
+
+/** Set e to the given value. */
+public final void set(final E e, final double value) {
+doubles[e.ordinal()] = value;
+}
+
+/** Set the values of this object to that object. */
+public final void set(final EnumDoubles<E> that) {
+for(int i = 0; i < doubles.length; i++) {
+this.doubles[i] = that.doubles[i];
+}
+}
+
+/** Reset all values to zero. */
+public final void reset() {
+for(int i = 0; i < doubles.length; i++) {
+this.doubles[i] = 0.0;
+}
+}
+
+/** Add the given value to e. */
+public final void add(final E e, final double value) {
+doubles[e.ordinal()] += value;
+}
+
+/** Add the values of that object to this. */
+public final void add(final EnumDoubles<E> that) {
+for(int i = 0; i < doubles.length; i++) {
+this.doubles[i] += that.doubles[i];
+}
+}
+
+/** Subtract the given value from e. */
+public final void subtract(final E e, final double value) {
+doubles[e.ordinal()] -= value;
+}
+
+/** Subtract the values of this object from that object. */
+public final void subtract(final EnumDoubles<E> that) {
+for(int i = 0; i < doubles.length; i++) {
+this.doubles[i] -= that.doubles[i];
+}
+}
+
+@Override
+public boolean equals(Object obj) {
+if (obj == this) {
+return true;
+} else if (obj == null || !(obj instanceof EnumDoubles)) {
+return false;
+}
+final EnumDoubles<?> that = (EnumDoubles<?>)obj;
+return this.enumClass == that.enumClass
+&& Arrays.equals(this.doubles, that.doubles);
+}
+
+@Override
+public int hashCode() {
+return Arrays.hashCode(doubles);
+}
+
+@Override
+public String toString() {
+final E[] enumConstants = enumClass.getEnumConstants();
+final StringBuilder b = new StringBuilder();
+for(int i = 0; i < doubles.length; i++) {
+final String name = enumConstants[i].name();
+b.append(name).append("=").append(doubles[i]).append(", ");
+}
+return b.substring(0, b.length() - 2);
+}
+}
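For illustration only, a minimal usage sketch of the EnumDoubles utility added above; it is not part of the patch. The Quota enum and the wrapper class name are hypothetical, and the sketch sits in org.apache.hadoop.hdfs.util only because EnumDoubles is package-private.

    package org.apache.hadoop.hdfs.util;   // EnumDoubles is package-private

    public class EnumDoublesExample {
      // Hypothetical enum used only to exercise the API.
      enum Quota { NAMESPACE, DISKSPACE }

      public static void main(String[] args) {
        EnumDoubles<Quota> usage = new EnumDoubles<Quota>(Quota.class);
        usage.set(Quota.NAMESPACE, 3.0);        // overwrite one slot
        usage.add(Quota.DISKSPACE, 1.5);        // accumulate into another slot
        usage.subtract(Quota.DISKSPACE, 0.5);
        System.out.println(usage);              // prints "NAMESPACE=3.0, DISKSPACE=1.0"
        usage.reset();                          // every slot back to 0.0
      }
    }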
@@ -54,6 +54,7 @@ import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;

@@ -1356,6 +1357,12 @@ public class WebHdfsFileSystem extends FileSystem
 }.run();
 }

+@Override
+public void access(final Path path, final FsAction mode) throws IOException {
+final HttpOpParam.Op op = GetOpParam.Op.CHECKACCESS;
+new FsPathRunner(op, path, new FsActionParam(mode)).run();
+}
+
 @Override
 public ContentSummary getContentSummary(final Path p) throws IOException {
 statistics.incrementReadOps(1);

@@ -0,0 +1,58 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.hdfs.web.resources;
+
+import org.apache.hadoop.fs.permission.FsAction;
+
+import java.util.regex.Pattern;
+
+/** {@link FsAction} Parameter */
+public class FsActionParam extends StringParam {
+
+/** Parameter name. */
+public static final String NAME = "fsaction";
+
+/** Default parameter value. */
+public static final String DEFAULT = NULL;
+
+private static String FS_ACTION_PATTERN = "[rwx-]{3}";
+
+private static final Domain DOMAIN = new Domain(NAME,
+Pattern.compile(FS_ACTION_PATTERN));
+
+/**
+* Constructor.
+* @param str a string representation of the parameter value.
+*/
+public FsActionParam(final String str) {
+super(DOMAIN, str == null || str.equals(DEFAULT)? null: str);
+}
+
+/**
+* Constructor.
+* @param value the parameter value.
+*/
+public FsActionParam(final FsAction value) {
+super(DOMAIN, value == null? null: value.SYMBOL);
+}
+
+@Override
+public String getName() {
+return NAME;
+}
+}
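For illustration only, a minimal client-side sketch of the access check wired up above (FileSystem.access, which WebHdfsFileSystem now maps to the CHECKACCESS op and the fsaction parameter); it is not part of the patch, and the class name, path and permission are placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.security.AccessControlException;

    public class CheckAccessExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Any FileSystem works; a webhdfs:// default URI exercises the new CHECKACCESS op.
        FileSystem fs = FileSystem.get(conf);
        Path p = new Path("/user/alice/data");          // placeholder path
        try {
          fs.access(p, FsAction.READ_WRITE);            // throws if the caller lacks rw-
          System.out.println("caller may read and write " + p);
        } catch (AccessControlException ace) {
          System.out.println("permission denied: " + ace.getMessage());
        }
      }
    }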
@@ -39,7 +39,9 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
 GETXATTRS(false, HttpURLConnection.HTTP_OK),
 LISTXATTRS(false, HttpURLConnection.HTTP_OK),

-NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
+NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED),
+
+CHECKACCESS(false, HttpURLConnection.HTTP_OK);

 final boolean redirect;
 final int expectedHttpResponseCode;

@@ -656,6 +656,14 @@ message DeleteSnapshotRequestProto {
 message DeleteSnapshotResponseProto { // void response
 }

+message CheckAccessRequestProto {
+required string path = 1;
+required AclEntryProto.FsActionProto mode = 2;
+}
+
+message CheckAccessResponseProto { // void response
+}
+
 service ClientNamenodeProtocol {
 rpc getBlockLocations(GetBlockLocationsRequestProto)
 returns(GetBlockLocationsResponseProto);

@@ -785,6 +793,8 @@ service ClientNamenodeProtocol {
 returns(ListXAttrsResponseProto);
 rpc removeXAttr(RemoveXAttrRequestProto)
 returns(RemoveXAttrResponseProto);
+rpc checkAccess(CheckAccessRequestProto)
+returns(CheckAccessResponseProto);
 rpc createEncryptionZone(CreateEncryptionZoneRequestProto)
 returns(CreateEncryptionZoneResponseProto);
 rpc listEncryptionZones(ListEncryptionZonesRequestProto)

@@ -424,6 +424,7 @@ message BlockWithLocationsProto {
 required BlockProto block = 1; // Block
 repeated string datanodeUuids = 2; // Datanodes with replicas of the block
 repeated string storageUuids = 3; // Storages with replicas of the block
+repeated StorageTypeProto storageTypes = 4;
 }

 /**

@@ -2052,6 +2052,14 @@
 </description>
 </property>

+<property>
+<name>dfs.datanode.block.id.layout.upgrade.threads</name>
+<value>12</value>
+<description>The number of threads to use when creating hard links from
+current to previous blocks during upgrade of a DataNode to block ID-based
+block layout (see HDFS-6482 for details on the layout).</description>
+</property>
+
 <property>
 <name>dfs.namenode.list.encryption.zones.num.responses</name>
 <value>100</value>

@@ -47,18 +47,21 @@ HDFS NFS Gateway
 The NFS-gateway uses proxy user to proxy all the users accessing the NFS mounts.
 In non-secure mode, the user running the gateway is the proxy user, while in secure mode the
 user in Kerberos keytab is the proxy user. Suppose the proxy user is 'nfsserver'
-and users belonging to the groups 'nfs-users1'
-and 'nfs-users2' use the NFS mounts, then in core-site.xml of the NameNode, the following
+and users belonging to the groups 'users-group1'
+and 'users-group2' use the NFS mounts, then in core-site.xml of the NameNode, the following
 two properities must be set and only NameNode needs restart after the configuration change
 (NOTE: replace the string 'nfsserver' with the proxy user name in your cluster):

 ----
 <property>
 <name>hadoop.proxyuser.nfsserver.groups</name>
-<value>nfs-users1,nfs-users2</value>
+<value>root,users-group1,users-group2</value>
 <description>
-The 'nfsserver' user is allowed to proxy all members of the 'nfs-users1' and
-'nfs-users2' groups. Set this to '*' to allow nfsserver user to proxy any group.
+The 'nfsserver' user is allowed to proxy all members of the 'users-group1' and
+'users-group2' groups. Note that in most cases you will need to include the
+group "root" because the user "root" (which usually belonges to "root" group) will
+generally be the user that initially executes the mount on the NFS client system.
+Set this to '*' to allow nfsserver user to proxy any group.
 </description>
 </property>
 ----

@@ -82,6 +82,9 @@ WebHDFS REST API
 * {{{List all XAttrs}<<<LISTXATTRS>>>}}
 (see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.listXAttrs)

+* {{{Check access}<<<CHECKACCESS>>>}}
+(see {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.access)
+
 * HTTP PUT

 * {{{Create and Write to a File}<<<CREATE>>>}}

@@ -927,6 +930,28 @@ Transfer-Encoding: chunked
 {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getAclStatus


+** {Check access}
+
+* Submit a HTTP GET request.
+
++---------------------------------
+curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CHECKACCESS
+&fsaction=<FSACTION>
++---------------------------------
+
+The client receives a response with zero content length:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Length: 0
++---------------------------------
+
+[]
+
+See also:
+{{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.access
+
+
 * {Extended Attributes(XAttrs) Operations}

 ** {Set XAttr}

@@ -2166,6 +2191,25 @@ var tokenProperties =
 {{Proxy Users}}


+** {Fs Action}
+
+*----------------+-------------------------------------------------------------------+
+|| Name | <<<fsaction>>> |
+*----------------+-------------------------------------------------------------------+
+|| Description | File system operation read/write/execute |
+*----------------+-------------------------------------------------------------------+
+|| Type | String |
+*----------------+-------------------------------------------------------------------+
+|| Default Value | null (an invalid value) |
+*----------------+-------------------------------------------------------------------+
+|| Valid Values | Strings matching regex pattern \"[rwx-]\{3\}\" |
+*----------------+-------------------------------------------------------------------+
+|| Syntax | \"[rwx-]\{3\}\" |
+*----------------+-------------------------------------------------------------------+
+
+See also:
+{{{Check access}<<<CHECKACCESS>>>}},
+
 ** {Group}

 *----------------+-------------------------------------------------------------------+
@ -47,7 +47,6 @@ import org.mockito.Mockito;
|
||||||
public class TestGenericRefresh {
|
public class TestGenericRefresh {
|
||||||
private static MiniDFSCluster cluster;
|
private static MiniDFSCluster cluster;
|
||||||
private static Configuration config;
|
private static Configuration config;
|
||||||
private static final int NNPort = 54222;
|
|
||||||
|
|
||||||
private static RefreshHandler firstHandler;
|
private static RefreshHandler firstHandler;
|
||||||
private static RefreshHandler secondHandler;
|
private static RefreshHandler secondHandler;
|
||||||
|
@ -57,8 +56,8 @@ public class TestGenericRefresh {
|
||||||
config = new Configuration();
|
config = new Configuration();
|
||||||
config.set("hadoop.security.authorization", "true");
|
config.set("hadoop.security.authorization", "true");
|
||||||
|
|
||||||
FileSystem.setDefaultUri(config, "hdfs://localhost:" + NNPort);
|
FileSystem.setDefaultUri(config, "hdfs://localhost:0");
|
||||||
cluster = new MiniDFSCluster.Builder(config).nameNodePort(NNPort).build();
|
cluster = new MiniDFSCluster.Builder(config).build();
|
||||||
cluster.waitActive();
|
cluster.waitActive();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -103,7 +102,8 @@ public class TestGenericRefresh {
|
||||||
@Test
|
@Test
|
||||||
public void testInvalidIdentifier() throws Exception {
|
public void testInvalidIdentifier() throws Exception {
|
||||||
DFSAdmin admin = new DFSAdmin(config);
|
DFSAdmin admin = new DFSAdmin(config);
|
||||||
String [] args = new String[]{"-refresh", "localhost:" + NNPort, "unregisteredIdentity"};
|
String [] args = new String[]{"-refresh", "localhost:" +
|
||||||
|
cluster.getNameNodePort(), "unregisteredIdentity"};
|
||||||
int exitCode = admin.run(args);
|
int exitCode = admin.run(args);
|
||||||
assertEquals("DFSAdmin should fail due to no handler registered", -1, exitCode);
|
assertEquals("DFSAdmin should fail due to no handler registered", -1, exitCode);
|
||||||
}
|
}
|
||||||
|
@ -111,7 +111,8 @@ public class TestGenericRefresh {
|
||||||
@Test
|
@Test
|
||||||
public void testValidIdentifier() throws Exception {
|
public void testValidIdentifier() throws Exception {
|
||||||
DFSAdmin admin = new DFSAdmin(config);
|
DFSAdmin admin = new DFSAdmin(config);
|
||||||
String[] args = new String[]{"-refresh", "localhost:" + NNPort, "firstHandler"};
|
String[] args = new String[]{"-refresh",
|
||||||
|
"localhost:" + cluster.getNameNodePort(), "firstHandler"};
|
||||||
int exitCode = admin.run(args);
|
int exitCode = admin.run(args);
|
||||||
assertEquals("DFSAdmin should succeed", 0, exitCode);
|
assertEquals("DFSAdmin should succeed", 0, exitCode);
|
||||||
|
|
||||||
|
@ -124,11 +125,13 @@ public class TestGenericRefresh {
|
||||||
@Test
|
@Test
|
||||||
public void testVariableArgs() throws Exception {
|
public void testVariableArgs() throws Exception {
|
||||||
DFSAdmin admin = new DFSAdmin(config);
|
DFSAdmin admin = new DFSAdmin(config);
|
||||||
String[] args = new String[]{"-refresh", "localhost:" + NNPort, "secondHandler", "one"};
|
String[] args = new String[]{"-refresh", "localhost:" +
|
||||||
|
cluster.getNameNodePort(), "secondHandler", "one"};
|
||||||
int exitCode = admin.run(args);
|
int exitCode = admin.run(args);
|
||||||
assertEquals("DFSAdmin should return 2", 2, exitCode);
|
assertEquals("DFSAdmin should return 2", 2, exitCode);
|
||||||
|
|
||||||
exitCode = admin.run(new String[]{"-refresh", "localhost:" + NNPort, "secondHandler", "one", "two"});
|
exitCode = admin.run(new String[]{"-refresh", "localhost:" +
|
||||||
|
cluster.getNameNodePort(), "secondHandler", "one", "two"});
|
||||||
assertEquals("DFSAdmin should now return 3", 3, exitCode);
|
assertEquals("DFSAdmin should now return 3", 3, exitCode);
|
||||||
|
|
||||||
Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one"});
|
Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one"});
|
||||||
|
@ -141,7 +144,8 @@ public class TestGenericRefresh {
|
||||||
|
|
||||||
// And now this should fail
|
// And now this should fail
|
||||||
DFSAdmin admin = new DFSAdmin(config);
|
DFSAdmin admin = new DFSAdmin(config);
|
||||||
String[] args = new String[]{"-refresh", "localhost:" + NNPort, "firstHandler"};
|
String[] args = new String[]{"-refresh", "localhost:" +
|
||||||
|
cluster.getNameNodePort(), "firstHandler"};
|
||||||
int exitCode = admin.run(args);
|
int exitCode = admin.run(args);
|
||||||
assertEquals("DFSAdmin should return -1", -1, exitCode);
|
assertEquals("DFSAdmin should return -1", -1, exitCode);
|
||||||
}
|
}
|
||||||
|
@ -161,7 +165,8 @@ public class TestGenericRefresh {
|
||||||
|
|
||||||
// this should trigger both
|
// this should trigger both
|
||||||
DFSAdmin admin = new DFSAdmin(config);
|
DFSAdmin admin = new DFSAdmin(config);
|
||||||
String[] args = new String[]{"-refresh", "localhost:" + NNPort, "sharedId", "one"};
|
String[] args = new String[]{"-refresh", "localhost:" +
|
||||||
|
cluster.getNameNodePort(), "sharedId", "one"};
|
||||||
int exitCode = admin.run(args);
|
int exitCode = admin.run(args);
|
||||||
assertEquals(-1, exitCode); // -1 because one of the responses is unregistered
|
assertEquals(-1, exitCode); // -1 because one of the responses is unregistered
|
||||||
|
|
||||||
|
@ -189,7 +194,8 @@ public class TestGenericRefresh {
|
||||||
|
|
||||||
// We refresh both
|
// We refresh both
|
||||||
DFSAdmin admin = new DFSAdmin(config);
|
DFSAdmin admin = new DFSAdmin(config);
|
||||||
String[] args = new String[]{"-refresh", "localhost:" + NNPort, "shared"};
|
String[] args = new String[]{"-refresh", "localhost:" +
|
||||||
|
cluster.getNameNodePort(), "shared"};
|
||||||
int exitCode = admin.run(args);
|
int exitCode = admin.run(args);
|
||||||
assertEquals(-1, exitCode); // We get -1 because of our logic for melding non-zero return codes
|
assertEquals(-1, exitCode); // We get -1 because of our logic for melding non-zero return codes
|
||||||
|
|
||||||
|
@ -215,7 +221,8 @@ public class TestGenericRefresh {
|
||||||
RefreshRegistry.defaultRegistry().register("exceptional", otherExceptionalHandler);
|
RefreshRegistry.defaultRegistry().register("exceptional", otherExceptionalHandler);
|
||||||
|
|
||||||
DFSAdmin admin = new DFSAdmin(config);
|
DFSAdmin admin = new DFSAdmin(config);
|
||||||
String[] args = new String[]{"-refresh", "localhost:" + NNPort, "exceptional"};
|
String[] args = new String[]{"-refresh", "localhost:" +
|
||||||
|
cluster.getNameNodePort(), "exceptional"};
|
||||||
int exitCode = admin.run(args);
|
int exitCode = admin.run(args);
|
||||||
assertEquals(-1, exitCode); // Exceptions result in a -1
|
assertEquals(-1, exitCode); // Exceptions result in a -1
|
||||||
|
|
||||||
|
|
|
@ -24,6 +24,8 @@ import static org.junit.Assert.assertTrue;
|
||||||
import static org.junit.Assert.fail;
|
import static org.junit.Assert.fail;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
|
import java.net.BindException;
|
||||||
|
import java.util.Random;
|
||||||
import java.util.concurrent.BlockingQueue;
|
import java.util.concurrent.BlockingQueue;
|
||||||
import java.util.concurrent.LinkedBlockingQueue;
|
import java.util.concurrent.LinkedBlockingQueue;
|
||||||
|
|
||||||
|
@ -42,24 +44,42 @@ public class TestRefreshCallQueue {
|
||||||
private FileSystem fs;
|
private FileSystem fs;
|
||||||
static int mockQueueConstructions;
|
static int mockQueueConstructions;
|
||||||
static int mockQueuePuts;
|
static int mockQueuePuts;
|
||||||
private static final int NNPort = 54222;
|
private String callQueueConfigKey = "";
|
||||||
private static String CALLQUEUE_CONFIG_KEY = "ipc." + NNPort + ".callqueue.impl";
|
private final Random rand = new Random();
|
||||||
|
|
||||||
@Before
|
@Before
|
||||||
public void setUp() throws Exception {
|
public void setUp() throws Exception {
|
||||||
// We want to count additional events, so we reset here
|
// We want to count additional events, so we reset here
|
||||||
mockQueueConstructions = 0;
|
mockQueueConstructions = 0;
|
||||||
mockQueuePuts = 0;
|
mockQueuePuts = 0;
|
||||||
|
int portRetries = 5;
|
||||||
|
int nnPort;
|
||||||
|
|
||||||
config = new Configuration();
|
for (; portRetries > 0; --portRetries) {
|
||||||
config.setClass(CALLQUEUE_CONFIG_KEY,
|
// Pick a random port in the range [30000,60000).
|
||||||
MockCallQueue.class, BlockingQueue.class);
|
nnPort = 30000 + rand.nextInt(30000);
|
||||||
config.set("hadoop.security.authorization", "true");
|
config = new Configuration();
|
||||||
|
callQueueConfigKey = "ipc." + nnPort + ".callqueue.impl";
|
||||||
|
config.setClass(callQueueConfigKey,
|
||||||
|
MockCallQueue.class, BlockingQueue.class);
|
||||||
|
config.set("hadoop.security.authorization", "true");
|
||||||
|
|
||||||
FileSystem.setDefaultUri(config, "hdfs://localhost:" + NNPort);
|
FileSystem.setDefaultUri(config, "hdfs://localhost:" + nnPort);
|
||||||
fs = FileSystem.get(config);
|
fs = FileSystem.get(config);
|
||||||
cluster = new MiniDFSCluster.Builder(config).nameNodePort(NNPort).build();
|
|
||||||
cluster.waitActive();
|
try {
|
||||||
|
cluster = new MiniDFSCluster.Builder(config).nameNodePort(nnPort).build();
|
||||||
|
cluster.waitActive();
|
||||||
|
break;
|
||||||
|
} catch (BindException be) {
|
||||||
|
// Retry with a different port number.
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (portRetries == 0) {
|
||||||
|
// Bail if we get very unlucky with our choice of ports.
|
||||||
|
fail("Failed to pick an ephemeral port for the NameNode RPC server.");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@After
|
@After
|
||||||
|
|
|
@ -2353,8 +2353,8 @@ public class MiniDFSCluster {
|
||||||
* @return data file corresponding to the block
|
* @return data file corresponding to the block
|
||||||
*/
|
*/
|
||||||
public static File getBlockFile(File storageDir, ExtendedBlock blk) {
|
public static File getBlockFile(File storageDir, ExtendedBlock blk) {
|
||||||
return new File(getFinalizedDir(storageDir, blk.getBlockPoolId()),
|
return new File(DatanodeUtil.idToBlockDir(getFinalizedDir(storageDir,
|
||||||
blk.getBlockName());
|
blk.getBlockPoolId()), blk.getBlockId()), blk.getBlockName());
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -2364,10 +2364,32 @@ public class MiniDFSCluster {
|
||||||
* @return metadata file corresponding to the block
|
* @return metadata file corresponding to the block
|
||||||
*/
|
*/
|
||||||
public static File getBlockMetadataFile(File storageDir, ExtendedBlock blk) {
|
public static File getBlockMetadataFile(File storageDir, ExtendedBlock blk) {
|
||||||
return new File(getFinalizedDir(storageDir, blk.getBlockPoolId()),
|
return new File(DatanodeUtil.idToBlockDir(getFinalizedDir(storageDir,
|
||||||
blk.getBlockName() + "_" + blk.getGenerationStamp() +
|
blk.getBlockPoolId()), blk.getBlockId()), blk.getBlockName() + "_" +
|
||||||
Block.METADATA_EXTENSION);
|
blk.getGenerationStamp() + Block.METADATA_EXTENSION);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Return all block metadata files in given directory (recursive search)
|
||||||
|
*/
|
||||||
|
public static List<File> getAllBlockMetadataFiles(File storageDir) {
|
||||||
|
List<File> results = new ArrayList<File>();
|
||||||
|
File[] files = storageDir.listFiles();
|
||||||
|
if (files == null) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
for (File f : files) {
|
||||||
|
if (f.getName().startsWith("blk_") && f.getName().endsWith(
|
||||||
|
Block.METADATA_EXTENSION)) {
|
||||||
|
results.add(f);
|
||||||
|
} else if (f.isDirectory()) {
|
||||||
|
List<File> subdirResults = getAllBlockMetadataFiles(f);
|
||||||
|
if (subdirResults != null) {
|
||||||
|
results.addAll(subdirResults);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return results;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
@@ -52,6 +52,7 @@ import org.apache.hadoop.io.retry.DefaultFailoverProxyProvider;
 import org.apache.hadoop.io.retry.FailoverProxyProvider;
 import org.apache.hadoop.net.ConnectTimeoutException;
 import org.apache.hadoop.net.StandardSocketFactory;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;

@@ -89,6 +90,11 @@ public class TestDFSClientFailover {
     cluster.shutdown();
   }

+  @After
+  public void clearConfig() {
+    SecurityUtil.setTokenServiceUseIp(true);
+  }
+
   /**
    * Make sure that client failover works when an active NN dies and the standby
    * takes over.

@@ -323,6 +329,7 @@ public class TestDFSClientFailover {
   /**
    * Test to verify legacy proxy providers are correctly wrapped.
    */
+  @Test
   public void testWrappedFailoverProxyProvider() throws Exception {
     // setup the config with the dummy provider class
     Configuration config = new HdfsConfiguration(conf);

@@ -332,6 +339,9 @@ public class TestDFSClientFailover {
         DummyLegacyFailoverProxyProvider.class.getName());
     Path p = new Path("hdfs://" + logicalName + "/");

+    // not to use IP address for token service
+    SecurityUtil.setTokenServiceUseIp(false);
+
     // Logical URI should be used.
     assertTrue("Legacy proxy providers should use logical URI.",
         HAUtil.useLogicalUri(config, p.toUri()));

@@ -340,6 +350,7 @@ public class TestDFSClientFailover {
   /**
    * Test to verify IPFailoverProxyProvider is not requiring logical URI.
    */
+  @Test
   public void testIPFailoverProxyProviderLogicalUri() throws Exception {
     // setup the config with the IP failover proxy provider class
     Configuration config = new HdfsConfiguration(conf);
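The TestDFSClientFailover hunks above toggle the process-wide SecurityUtil.setTokenServiceUseIp flag inside one test and add an @After hook that sets it back to true so later tests are unaffected. The sketch below shows the same JUnit 4 pattern with a hypothetical GlobalFlag standing in for the static setting; it is not the Hadoop code itself.

    import static org.junit.Assert.assertFalse;

    import org.junit.After;
    import org.junit.Test;

    public class RestoreGlobalStateTest {
      // Stand-in for a static, process-wide setting such as
      // "use IP addresses for the token service".
      static class GlobalFlag {
        static volatile boolean useIp = true;
      }

      @After
      public void clearConfig() {
        // Runs even when the test fails, so the default is always restored.
        GlobalFlag.useIp = true;
      }

      @Test
      public void testWithLogicalUri() {
        GlobalFlag.useIp = false; // test-specific override
        assertFalse(GlobalFlag.useIp);
      }
    }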
@@ -79,8 +79,8 @@ public class TestDFSFinalize {
     File dnCurDirs[] = new File[dataNodeDirs.length];
     for (int i = 0; i < dataNodeDirs.length; i++) {
       dnCurDirs[i] = new File(dataNodeDirs[i],"current");
-      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i]),
-          UpgradeUtilities.checksumMasterDataNodeContents());
+      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i],
+          false), UpgradeUtilities.checksumMasterDataNodeContents());
     }
     for (int i = 0; i < nameNodeDirs.length; i++) {
       assertFalse(new File(nameNodeDirs[i],"previous").isDirectory());

@@ -96,8 +96,9 @@ public class TestDFSFinalize {
       assertFalse(new File(bpRoot,"previous").isDirectory());

       File bpCurFinalizeDir = new File(bpRoot,"current/"+DataStorage.STORAGE_DIR_FINALIZED);
-      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurFinalizeDir),
-          UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
+      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
+          bpCurFinalizeDir, true),
+          UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
     }
   }
 }
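From here on, many of the upgrade tests pass a new boolean to UpgradeUtilities.checksumContents. Judging only from the call sites (the flag is true for finalized block directories and false elsewhere), it appears to request a recursive walk, which matters now that block files sit in nested subdirectory trees; treat that reading as an assumption. A generic recursive checksum helper, not the Hadoop utility itself, might look like this:

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.util.Arrays;
    import java.util.zip.CRC32;

    public class ChecksumSketch {
      // Checksums the contents of every file directly under dir; when
      // recursive is true, subdirectories are folded in as well.
      static long checksumContents(File dir, boolean recursive) throws IOException {
        CRC32 crc = new CRC32();
        File[] entries = dir.listFiles();
        if (entries == null) {
          return crc.getValue();
        }
        Arrays.sort(entries); // stable order keeps the checksum reproducible
        for (File entry : entries) {
          if (entry.isDirectory()) {
            if (recursive) {
              crc.update(Long.toString(checksumContents(entry, true)).getBytes());
            }
          } else {
            try (FileInputStream in = new FileInputStream(entry)) {
              byte[] buf = new byte[8192];
              int n;
              while ((n = in.read(buf)) > 0) {
                crc.update(buf, 0, n);
              }
            }
          }
        }
        return crc.getValue();
      }
    }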
@@ -20,8 +20,11 @@ package org.apache.hadoop.hdfs;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;

+import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;

@@ -36,6 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
@@ -421,6 +425,79 @@ public class TestDFSPermission {
     }
   }

+  @Test
+  public void testAccessOwner() throws IOException, InterruptedException {
+    FileSystem rootFs = FileSystem.get(conf);
+    Path p1 = new Path("/p1");
+    rootFs.mkdirs(p1);
+    rootFs.setOwner(p1, USER1_NAME, GROUP1_NAME);
+    fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return FileSystem.get(conf);
+      }
+    });
+    fs.setPermission(p1, new FsPermission((short) 0444));
+    fs.access(p1, FsAction.READ);
+    try {
+      fs.access(p1, FsAction.WRITE);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+
+    Path badPath = new Path("/bad/bad");
+    try {
+      fs.access(badPath, FsAction.READ);
+      fail("The access call should have failed");
+    } catch (FileNotFoundException e) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testAccessGroupMember() throws IOException, InterruptedException {
+    FileSystem rootFs = FileSystem.get(conf);
+    Path p2 = new Path("/p2");
+    rootFs.mkdirs(p2);
+    rootFs.setOwner(p2, UserGroupInformation.getCurrentUser().getShortUserName(), GROUP1_NAME);
+    rootFs.setPermission(p2, new FsPermission((short) 0740));
+    fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return FileSystem.get(conf);
+      }
+    });
+    fs.access(p2, FsAction.READ);
+    try {
+      fs.access(p2, FsAction.EXECUTE);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testAccessOthers() throws IOException, InterruptedException {
+    FileSystem rootFs = FileSystem.get(conf);
+    Path p3 = new Path("/p3");
+    rootFs.mkdirs(p3);
+    rootFs.setPermission(p3, new FsPermission((short) 0774));
+    fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return FileSystem.get(conf);
+      }
+    });
+    fs.access(p3, FsAction.READ);
+    try {
+      fs.access(p3, FsAction.READ_WRITE);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+  }
+
   /* Check if namenode performs permission checking correctly
    * for the given user for operations mkdir, open, setReplication,
    * getFileInfo, isDirectory, exists, getContentLength, list, rename,
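The three new TestDFSPermission cases exercise FileSystem#access(Path, FsAction): the call returns normally when the requested access is allowed, throws AccessControlException when it is not, and throws FileNotFoundException when the path does not exist. A minimal caller-side sketch of that contract follows; the path and configuration are placeholders.

    import java.io.FileNotFoundException;
    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.security.AccessControlException;

    public class AccessCheckSketch {
      // Probe write permission without attempting the write itself.
      public static boolean canWrite(FileSystem fs, Path path) throws IOException {
        try {
          fs.access(path, FsAction.WRITE);
          return true;
        } catch (AccessControlException e) {
          return false; // permission denied for the current user
        } catch (FileNotFoundException e) {
          return false; // path does not exist
        }
      }

      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        System.out.println(canWrite(fs, new Path("/p1")));
      }
    }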
@@ -81,7 +81,7 @@ public class TestDFSRollback {
         break;
       case DATA_NODE:
         assertEquals(
-            UpgradeUtilities.checksumContents(nodeType, curDir),
+            UpgradeUtilities.checksumContents(nodeType, curDir, false),
             UpgradeUtilities.checksumMasterDataNodeContents());
         break;
       }
@@ -239,7 +239,7 @@ public class TestDFSStorageStateRecovery {
         assertTrue(new File(baseDirs[i],"previous").isDirectory());
         assertEquals(
             UpgradeUtilities.checksumContents(
-                NAME_NODE, new File(baseDirs[i],"previous")),
+                NAME_NODE, new File(baseDirs[i],"previous"), false),
             UpgradeUtilities.checksumMasterNameNodeContents());
       }
     }

@@ -259,7 +259,8 @@ public class TestDFSStorageStateRecovery {
     if (currentShouldExist) {
       for (int i = 0; i < baseDirs.length; i++) {
         assertEquals(
-            UpgradeUtilities.checksumContents(DATA_NODE, new File(baseDirs[i],"current")),
+            UpgradeUtilities.checksumContents(DATA_NODE,
+                new File(baseDirs[i],"current"), false),
             UpgradeUtilities.checksumMasterDataNodeContents());
       }
     }

@@ -267,7 +268,8 @@ public class TestDFSStorageStateRecovery {
       for (int i = 0; i < baseDirs.length; i++) {
         assertTrue(new File(baseDirs[i],"previous").isDirectory());
         assertEquals(
-            UpgradeUtilities.checksumContents(DATA_NODE, new File(baseDirs[i],"previous")),
+            UpgradeUtilities.checksumContents(DATA_NODE,
+                new File(baseDirs[i],"previous"), false),
             UpgradeUtilities.checksumMasterDataNodeContents());
       }
     }

@@ -290,8 +292,8 @@ public class TestDFSStorageStateRecovery {
     if (currentShouldExist) {
       for (int i = 0; i < baseDirs.length; i++) {
         File bpCurDir = new File(baseDirs[i], Storage.STORAGE_DIR_CURRENT);
-        assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurDir),
-            UpgradeUtilities.checksumMasterBlockPoolContents());
+        assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurDir,
+            false), UpgradeUtilities.checksumMasterBlockPoolContents());
       }
     }
     if (previousShouldExist) {

@@ -299,8 +301,8 @@ public class TestDFSStorageStateRecovery {
         File bpPrevDir = new File(baseDirs[i], Storage.STORAGE_DIR_PREVIOUS);
         assertTrue(bpPrevDir.isDirectory());
         assertEquals(
-            UpgradeUtilities.checksumContents(DATA_NODE, bpPrevDir),
-            UpgradeUtilities.checksumMasterBlockPoolContents());
+            UpgradeUtilities.checksumContents(DATA_NODE, bpPrevDir,
+            false), UpgradeUtilities.checksumMasterBlockPoolContents());
       }
     }
   }
@@ -100,7 +100,7 @@ public class TestDFSUpgrade {

       File previous = new File(baseDir, "previous");
       assertExists(previous);
-      assertEquals(UpgradeUtilities.checksumContents(NAME_NODE, previous),
+      assertEquals(UpgradeUtilities.checksumContents(NAME_NODE, previous, false),
           UpgradeUtilities.checksumMasterNameNodeContents());
     }
   }

@@ -114,23 +114,25 @@ public class TestDFSUpgrade {
   void checkDataNode(String[] baseDirs, String bpid) throws IOException {
     for (int i = 0; i < baseDirs.length; i++) {
       File current = new File(baseDirs[i], "current/" + bpid + "/current");
-      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, current),
+      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, current, false),
           UpgradeUtilities.checksumMasterDataNodeContents());

       // block files are placed under <sd>/current/<bpid>/current/finalized
       File currentFinalized =
           MiniDFSCluster.getFinalizedDir(new File(baseDirs[i]), bpid);
-      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, currentFinalized),
+      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
+          currentFinalized, true),
           UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());

       File previous = new File(baseDirs[i], "current/" + bpid + "/previous");
       assertTrue(previous.isDirectory());
-      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previous),
+      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previous, false),
           UpgradeUtilities.checksumMasterDataNodeContents());

       File previousFinalized =
           new File(baseDirs[i], "current/" + bpid + "/previous"+"/finalized");
-      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previousFinalized),
+      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
+          previousFinalized, true),
           UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());

     }
@@ -24,6 +24,7 @@ import static org.junit.Assert.fail;

 import java.io.BufferedReader;
 import java.io.File;
+import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.FileReader;
 import java.io.IOException;

@@ -80,7 +81,7 @@ public class TestDFSUpgradeFromImage {
     long checksum;
   }

-  private static final Configuration upgradeConf;
+  static final Configuration upgradeConf;

   static {
     upgradeConf = new HdfsConfiguration();

@@ -95,7 +96,7 @@ public class TestDFSUpgradeFromImage {

   boolean printChecksum = false;

-  private void unpackStorage(String tarFileName)
+  void unpackStorage(String tarFileName, String referenceName)
       throws IOException {
     String tarFile = System.getProperty("test.cache.data", "build/test/cache")
         + "/" + tarFileName;

@@ -110,7 +111,7 @@ public class TestDFSUpgradeFromImage {

     BufferedReader reader = new BufferedReader(new FileReader(
         System.getProperty("test.cache.data", "build/test/cache")
-        + "/" + HADOOP_DFS_DIR_TXT));
+        + "/" + referenceName));
     String line;
     while ( (line = reader.readLine()) != null ) {

@@ -285,7 +286,7 @@ public class TestDFSUpgradeFromImage {
    */
   @Test
   public void testUpgradeFromRel22Image() throws IOException {
-    unpackStorage(HADOOP22_IMAGE);
+    unpackStorage(HADOOP22_IMAGE, HADOOP_DFS_DIR_TXT);
     upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).
         numDataNodes(4));
   }

@@ -296,7 +297,7 @@ public class TestDFSUpgradeFromImage {
    */
   @Test
   public void testUpgradeFromCorruptRel22Image() throws IOException {
-    unpackStorage(HADOOP22_IMAGE);
+    unpackStorage(HADOOP22_IMAGE, HADOOP_DFS_DIR_TXT);

     // Overwrite the md5 stored in the VERSION files
     File baseDir = new File(MiniDFSCluster.getBaseDirectory());

@@ -333,7 +334,7 @@ public class TestDFSUpgradeFromImage {
    */
   @Test
   public void testUpgradeFromRel1ReservedImage() throws Exception {
-    unpackStorage(HADOOP1_RESERVED_IMAGE);
+    unpackStorage(HADOOP1_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT);
     MiniDFSCluster cluster = null;
     // Try it once without setting the upgrade flag to ensure it fails
     final Configuration conf = new Configuration();

@@ -403,7 +404,7 @@ public class TestDFSUpgradeFromImage {
    */
   @Test
   public void testUpgradeFromRel023ReservedImage() throws Exception {
-    unpackStorage(HADOOP023_RESERVED_IMAGE);
+    unpackStorage(HADOOP023_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT);
     MiniDFSCluster cluster = null;
     // Try it once without setting the upgrade flag to ensure it fails
     final Configuration conf = new Configuration();

@@ -468,7 +469,7 @@ public class TestDFSUpgradeFromImage {
    */
   @Test
   public void testUpgradeFromRel2ReservedImage() throws Exception {
-    unpackStorage(HADOOP2_RESERVED_IMAGE);
+    unpackStorage(HADOOP2_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT);
     MiniDFSCluster cluster = null;
     // Try it once without setting the upgrade flag to ensure it fails
     final Configuration conf = new Configuration();

@@ -572,7 +573,7 @@ public class TestDFSUpgradeFromImage {
     } while (dirList.hasMore());
   }

-  private void upgradeAndVerify(MiniDFSCluster.Builder bld)
+  void upgradeAndVerify(MiniDFSCluster.Builder bld)
       throws IOException {
     MiniDFSCluster cluster = null;
     try {

@@ -601,7 +602,7 @@ public class TestDFSUpgradeFromImage {
    */
   @Test
   public void testUpgradeFromRel1BBWImage() throws IOException {
-    unpackStorage(HADOOP1_BBW_IMAGE);
+    unpackStorage(HADOOP1_BBW_IMAGE, HADOOP_DFS_DIR_TXT);
     Configuration conf = new Configuration(upgradeConf);
     conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
         System.getProperty("test.build.data") + File.separator +
@@ -445,19 +445,14 @@ public class TestDatanodeBlockScanner {

   @Test
   public void testReplicaInfoParsing() throws Exception {
-    testReplicaInfoParsingSingle(BASE_PATH, new int[0]);
-    testReplicaInfoParsingSingle(BASE_PATH + "/subdir1", new int[]{1});
-    testReplicaInfoParsingSingle(BASE_PATH + "/subdir43", new int[]{43});
-    testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir2/subdir3", new int[]{1, 2, 3});
-    testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir2/subdir43", new int[]{1, 2, 43});
-    testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir23/subdir3", new int[]{1, 23, 3});
-    testReplicaInfoParsingSingle(BASE_PATH + "/subdir13/subdir2/subdir3", new int[]{13, 2, 3});
+    testReplicaInfoParsingSingle(BASE_PATH);
+    testReplicaInfoParsingSingle(BASE_PATH + "/subdir1");
+    testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir2/subdir3");
   }

-  private static void testReplicaInfoParsingSingle(String subDirPath, int[] expectedSubDirs) {
+  private static void testReplicaInfoParsingSingle(String subDirPath) {
     File testFile = new File(subDirPath);
-    assertArrayEquals(expectedSubDirs, ReplicaInfo.parseSubDirs(testFile).subDirs);
-    assertEquals(BASE_PATH, ReplicaInfo.parseSubDirs(testFile).baseDirPath);
+    assertEquals(BASE_PATH, ReplicaInfo.parseBaseDir(testFile).baseDirPath);
   }

   @Test
@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+
+public class TestDatanodeLayoutUpgrade {
+  private static final String HADOOP_DATANODE_DIR_TXT =
+      "hadoop-datanode-dir.txt";
+  private static final String HADOOP24_DATANODE = "hadoop-24-datanode-dir.tgz";
+
+  @Test
+  // Upgrade from LDir-based layout to block ID-based layout -- change described
+  // in HDFS-6482
+  public void testUpgradeToIdBasedLayout() throws IOException {
+    TestDFSUpgradeFromImage upgrade = new TestDFSUpgradeFromImage();
+    upgrade.unpackStorage(HADOOP24_DATANODE, HADOOP_DATANODE_DIR_TXT);
+    Configuration conf = new Configuration(TestDFSUpgradeFromImage.upgradeConf);
+    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+        System.getProperty("test.build.data") + File.separator +
+        "dfs" + File.separator + "data");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        System.getProperty("test.build.data") + File.separator +
+        "dfs" + File.separator + "name");
+    upgrade.upgradeAndVerify(new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .manageDataDfsDirs(false).manageNameDfsDirs(false));
+  }
+}
Some files were not shown because too many files have changed in this diff.