Merge branch 'trunk' into HADOOP-12756
commit 60f66a9306
@@ -44,7 +44,7 @@
-->
<Match>
<Class name="org.apache.hadoop.ipc.Client$Connection" />
<Field name="out" />
<Field name="ipcStreams" />
<Bug pattern="IS2_INCONSISTENT_SYNC" />
</Match>
<!--

@@ -341,13 +341,7 @@
<Method name="removeRenewAction" />
<Bug pattern="BC_UNCONFIRMED_CAST" />
</Match>

<!-- Inconsistent synchronization flagged by findbugs is not valid. -->
<Match>
<Class name="org.apache.hadoop.ipc.Client$Connection" />
<Field name="in" />
<Bug pattern="IS2_INCONSISTENT_SYNC" />
</Match>

<!--
The switch condition for INITIATE is expected to fallthru to RESPONSE
to process initial sasl response token included in the INITIATE

@@ -161,10 +161,6 @@ function hadoopcmd_case
fi
;;
esac

# Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
}

# This script runs the hadoop core commands.

@@ -194,6 +190,8 @@ fi
HADOOP_SUBCMD=$1
shift

hadoop_verify_user "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"

HADOOP_SUBCMD_ARGS=("$@")

if declare -f hadoop_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then

@@ -203,15 +201,20 @@ else
hadoopcmd_case "${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
fi

hadoop_verify_user "${HADOOP_SUBCMD}"
hadoop_add_client_opts

if [[ ${HADOOP_WORKER_MODE} = true ]]; then
hadoop_common_worker_mode_execute "${HADOOP_COMMON_HOME}/bin/hadoop" "${HADOOP_USER_PARAMS[@]}"
exit $?
fi

hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"

if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
HADOOP_SECURE_USER="${HADOOP_SUBCMD_SECUREUSER}"

hadoop_subcommand_secure_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"

hadoop_verify_secure_prereq
hadoop_setup_secure_service
priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"

@@ -306,6 +306,13 @@ function hadoop_bootstrap
HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"}
HADOOP_TOOLS_LIB_JARS_DIR=${HADOOP_TOOLS_LIB_JARS_DIR:-"${HADOOP_TOOLS_DIR}/lib"}

# by default, whatever we are about to run doesn't support
# daemonization
HADOOP_SUBCMD_SUPPORTDAEMONIZATION=false

# shellcheck disable=SC2034
HADOOP_SUBCMD_SECURESERVICE=false

# usage output set to zero
hadoop_reset_usage

@@ -1230,6 +1237,20 @@ function hadoop_translate_cygwin_path
fi
}

## @description Adds the HADOOP_CLIENT_OPTS variable to
## @description HADOOP_OPTS if HADOOP_SUBCMD_SUPPORTDAEMONIZATION is false
## @audience public
## @stability stable
## @replaceable yes
function hadoop_add_client_opts
{
if [[ "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" = false
|| -z "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" ]]; then
hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
fi
}

## @description Finish configuring Hadoop specific system properties
## @description prior to executing Java
## @audience private
@@ -1963,17 +1984,130 @@ function hadoop_secure_daemon_handler
## @return will exit on failure conditions
function hadoop_verify_user
{
local command=$1
local uservar="HADOOP_${command}_USER"
declare program=$1
declare command=$2
declare uprogram
declare ucommand
declare uvar

if [[ -n ${!uservar} ]]; then
if [[ ${!uservar} != "${USER}" ]]; then
hadoop_error "ERROR: ${command} can only be executed by ${!uservar}."
if [[ -z "${BASH_VERSINFO[0]}" ]] \
|| [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then
uprogram=$(echo "${program}" | tr '[:lower:]' '[:upper:]')
ucommand=$(echo "${command}" | tr '[:lower:]' '[:upper:]')
else
uprogram=${program^^}
ucommand=${command^^}
fi

uvar="${uprogram}_${ucommand}_USER"

if [[ -n ${!uvar} ]]; then
if [[ ${!uvar} != "${USER}" ]]; then
hadoop_error "ERROR: ${command} can only be executed by ${!uvar}."
exit 1
fi
fi
}
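As a hedged illustration of the check above — assuming the stock `hdfs` launcher ends up calling `hadoop_verify_user "hdfs" "namenode"` — the `(program)_(command)_USER` variable gates who may run the subcommand; the account name is an example only:

```bash
# Example only: gate the namenode subcommand to the "hdfs" account.
export HDFS_NAMENODE_USER=hdfs

# Run as any other user: hadoop_verify_user prints
#   "ERROR: namenode can only be executed by hdfs."
# and exits 1. Run as "hdfs" (or with the variable unset), the command proceeds.
hdfs namenode
```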

## @description Add custom (program)_(command)_OPTS to HADOOP_OPTS.
## @description Also handles the deprecated cases from pre-3.x.
## @audience public
## @stability stable
## @replaceable yes
## @param program
## @param subcommand
## @return will exit on failure conditions
function hadoop_subcommand_opts
{
declare program=$1
declare command=$2
declare uvar
declare depvar
declare uprogram
declare ucommand

if [[ -z "${program}" || -z "${command}" ]]; then
return 1
fi

# bash 4 and up have built-in ways to upper and lower
# case the contents of vars. This is faster than
# calling tr.

if [[ -z "${BASH_VERSINFO[0]}" ]] \
|| [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then
uprogram=$(echo "${program}" | tr '[:lower:]' '[:upper:]')
ucommand=$(echo "${command}" | tr '[:lower:]' '[:upper:]')
else
uprogram=${program^^}
ucommand=${command^^}
fi

uvar="${uprogram}_${ucommand}_OPTS"

# Let's handle all of the deprecation cases early
# HADOOP_NAMENODE_OPTS -> HDFS_NAMENODE_OPTS

depvar="HADOOP_${ucommand}_OPTS"

if [[ "${depvar}" != "${uvar}" ]]; then
if [[ -n "${!depvar}" ]]; then
hadoop_deprecate_envvar "${depvar}" "${uvar}"
fi
fi

if [[ -n ${!uvar} ]]; then
hadoop_debug "Appending ${uvar} onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${!uvar}"
return 0
fi
}
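A hedged walk-through of one concrete resolution performed by the function above; the exported value is illustrative, and the behaviour of `hadoop_deprecate_envvar` (not shown in this diff) is only summarized:

```bash
# hadoop_subcommand_opts "hdfs" "namenode" resolves:
#   uvar   = HDFS_NAMENODE_OPTS     (preferred 3.x-style name)
#   depvar = HADOOP_NAMENODE_OPTS   (pre-3.x name, deprecated)
export HADOOP_NAMENODE_OPTS="-Xmx4g"      # illustrative old-style setting
hadoop_subcommand_opts "hdfs" "namenode"  # warns about the deprecated name, then
                                          # appends the resolved _OPTS to HADOOP_OPTS
echo "${HADOOP_OPTS}"                     # ... -Xmx4g
```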

## @description Add custom (program)_(command)_SECURE_EXTRA_OPTS to HADOOP_OPTS.
## @description This *does not* handle the pre-3.x deprecated cases
## @audience public
## @stability stable
## @replaceable yes
## @param program
## @param subcommand
## @return will exit on failure conditions
function hadoop_subcommand_secure_opts
{
declare program=$1
declare command=$2
declare uvar
declare uprogram
declare ucommand

if [[ -z "${program}" || -z "${command}" ]]; then
return 1
fi

# bash 4 and up have built-in ways to upper and lower
# case the contents of vars. This is faster than
# calling tr.

if [[ -z "${BASH_VERSINFO[0]}" ]] \
|| [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then
uprogram=$(echo "${program}" | tr '[:lower:]' '[:upper:]')
ucommand=$(echo "${command}" | tr '[:lower:]' '[:upper:]')
else
uprogram=${program^^}
ucommand=${command^^}
fi

# HDFS_DATANODE_SECURE_EXTRA_OPTS
# HDFS_NFS3_SECURE_EXTRA_OPTS
# ...
uvar="${uprogram}_${ucommand}_SECURE_EXTRA_OPTS"

if [[ -n ${!uvar} ]]; then
hadoop_debug "Appending ${uvar} onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${!uvar}"
return 0
fi
}

## @description Perform the 'hadoop classpath', etc subcommand with the given
## @description parameters
## @audience private

@@ -294,16 +294,16 @@ esac
# and therefore may override any similar flags set in HADOOP_OPTS
#
# a) Set JMX options
# export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=1026"
# export HDFS_NAMENODE_OPTS="-Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.port=1026"
#
# b) Set garbage collection logs
# export HADOOP_NAMENODE_OPTS="${HADOOP_GC_SETTINGS} -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
# export HDFS_NAMENODE_OPTS="${HADOOP_GC_SETTINGS} -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
#
# c) ... or set them directly
# export HADOOP_NAMENODE_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"
# export HDFS_NAMENODE_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xloggc:${HADOOP_LOG_DIR}/gc-rm.log-$(date +'%Y%m%d%H%M')"

# this is the default:
# export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
# export HDFS_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"

###
# SecondaryNameNode specific parameters

@@ -313,7 +313,7 @@ esac
# and therefore may override any similar flags set in HADOOP_OPTS
#
# This is the default:
# export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"
# export HDFS_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,RFAS"

###
# DataNode specific parameters

@@ -323,7 +323,7 @@ esac
# and therefore may override any similar flags set in HADOOP_OPTS
#
# This is the default:
# export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS"
# export HDFS_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS"

# On secure datanodes, user to run the datanode as after dropping privileges.
# This **MUST** be uncommented to enable secure HDFS if using privileged ports

@@ -336,7 +336,7 @@ esac
# Supplemental options for secure datanodes
# By default, Hadoop uses jsvc which needs to know to launch a
# server jvm.
# export HADOOP_DN_SECURE_EXTRA_OPTS="-jvm server"
# export HDFS_DATANODE_SECURE_EXTRA_OPTS="-jvm server"

# Where datanode log files are stored in the secure data environment.
# This will replace the hadoop.log.dir Java property in secure mode.

@@ -352,18 +352,18 @@ esac
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HADOOP_NFS3_OPTS=""
# export HDFS_NFS3_OPTS=""

# Specify the JVM options to be used when starting the Hadoop portmapper.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HADOOP_PORTMAP_OPTS="-Xmx512m"
# export HDFS_PORTMAP_OPTS="-Xmx512m"

# Supplemental options for privileged gateways
# By default, Hadoop uses jsvc which needs to know to launch a
# server jvm.
# export HADOOP_NFS3_SECURE_EXTRA_OPTS="-jvm server"
# export HDFS_NFS3_SECURE_EXTRA_OPTS="-jvm server"

# On privileged gateways, user to run the gateway as after dropping privileges
# This will replace the hadoop.id.str Java property in secure mode.

@@ -376,7 +376,7 @@ esac
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HADOOP_ZKFC_OPTS=""
# export HDFS_ZKFC_OPTS=""

###
# QuorumJournalNode specific parameters

@@ -385,7 +385,7 @@ esac
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HADOOP_JOURNALNODE_OPTS=""
# export HDFS_JOURNALNODE_OPTS=""

###
# HDFS Balancer specific parameters

@@ -394,7 +394,7 @@ esac
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HADOOP_BALANCER_OPTS=""
# export HDFS_BALANCER_OPTS=""

###
# HDFS Mover specific parameters

@@ -403,7 +403,7 @@ esac
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#
# export HADOOP_MOVER_OPTS=""
# export HDFS_MOVER_OPTS=""

###
# Advanced Users Only!

@@ -417,6 +417,7 @@ esac
#
# To prevent accidents, shell commands can be (superficially) locked
# to only allow certain users to execute certain subcommands.
# It uses the format of (command)_(subcommand)_USER.
#
# For example, to limit who can execute the namenode command,
# export HADOOP_namenode_USER=hdfs
# export HDFS_NAMENODE_USER=hdfs

@@ -24,11 +24,14 @@ import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.HttpHeaders;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.http.HttpServer2;

import com.google.common.annotations.VisibleForTesting;

/**
* A servlet to print out the running configuration data.
*/

@@ -37,9 +40,8 @@ import org.apache.hadoop.http.HttpServer2;
public class ConfServlet extends HttpServlet {
private static final long serialVersionUID = 1L;

private static final String FORMAT_JSON = "json";
private static final String FORMAT_XML = "xml";
private static final String FORMAT_PARAM = "format";
protected static final String FORMAT_JSON = "json";
protected static final String FORMAT_XML = "xml";

/**
* Return the Configuration of the daemon hosting this servlet.

@@ -61,11 +63,7 @@ public class ConfServlet extends HttpServlet {
return;
}

String format = request.getParameter(FORMAT_PARAM);
if (null == format) {
format = FORMAT_XML;
}

String format = parseAccecptHeader(request);
if (FORMAT_XML.equals(format)) {
response.setContentType("text/xml; charset=utf-8");
} else if (FORMAT_JSON.equals(format)) {

@@ -81,6 +79,13 @@ public class ConfServlet extends HttpServlet {
out.close();
}

@VisibleForTesting
static String parseAccecptHeader(HttpServletRequest request) {
String format = request.getHeader(HttpHeaders.ACCEPT);
return format != null && format.contains(FORMAT_JSON) ?
FORMAT_JSON : FORMAT_XML;
}

/**
* Guts of the servlet - extracted for easy testing.
*/

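A quick, hedged way to exercise the Accept-header negotiation added above against a running daemon's `/conf` endpoint; the host and port below are placeholders:

```bash
# Placeholder address; substitute a real daemon web UI (e.g. a NameNode).
curl -H 'Accept: application/json' http://nn.example.com:9870/conf   # JSON output
curl -H 'Accept: application/xml'  http://nn.example.com:9870/conf   # XML (the default)
```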
@@ -76,6 +76,7 @@ public class CryptoOutputStream extends FilterOutputStream implements
private final byte[] key;
private final byte[] initIV;
private byte[] iv;
private boolean closeOutputStream;

public CryptoOutputStream(OutputStream out, CryptoCodec codec,
int bufferSize, byte[] key, byte[] iv) throws IOException {

@@ -85,6 +86,13 @@ public class CryptoOutputStream extends FilterOutputStream implements
public CryptoOutputStream(OutputStream out, CryptoCodec codec,
int bufferSize, byte[] key, byte[] iv, long streamOffset)
throws IOException {
this(out, codec, bufferSize, key, iv, streamOffset, true);
}

public CryptoOutputStream(OutputStream out, CryptoCodec codec,
int bufferSize, byte[] key, byte[] iv, long streamOffset,
boolean closeOutputStream)
throws IOException {
super(out);
CryptoStreamUtils.checkCodec(codec);
this.bufferSize = CryptoStreamUtils.checkBufferSize(codec, bufferSize);

@@ -95,6 +103,7 @@ public class CryptoOutputStream extends FilterOutputStream implements
inBuffer = ByteBuffer.allocateDirect(this.bufferSize);
outBuffer = ByteBuffer.allocateDirect(this.bufferSize);
this.streamOffset = streamOffset;
this.closeOutputStream = closeOutputStream;
try {
encryptor = codec.createEncryptor();
} catch (GeneralSecurityException e) {

@@ -110,8 +119,14 @@ public class CryptoOutputStream extends FilterOutputStream implements

public CryptoOutputStream(OutputStream out, CryptoCodec codec,
byte[] key, byte[] iv, long streamOffset) throws IOException {
this(out, codec, key, iv, streamOffset, true);
}

public CryptoOutputStream(OutputStream out, CryptoCodec codec,
byte[] key, byte[] iv, long streamOffset, boolean closeOutputStream)
throws IOException {
this(out, codec, CryptoStreamUtils.getBufferSize(codec.getConf()),
key, iv, streamOffset);
key, iv, streamOffset, closeOutputStream);
}

public OutputStream getWrappedStream() {

@@ -221,7 +236,10 @@ public class CryptoOutputStream extends FilterOutputStream implements
return;
}
try {
super.close();
flush();
if (closeOutputStream) {
super.close();
}
freeBuffers();
} finally {
closed = true;

@@ -78,12 +78,20 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
/** Default value for IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE */
public static final int IPC_SERVER_RPC_READ_CONNECTION_QUEUE_SIZE_DEFAULT =
100;

/** Max request size a server will accept. */
public static final String IPC_MAXIMUM_DATA_LENGTH =
"ipc.maximum.data.length";

/** Default value for IPC_MAXIMUM_DATA_LENGTH. */
public static final int IPC_MAXIMUM_DATA_LENGTH_DEFAULT = 64 * 1024 * 1024;

/** Max response size a client will accept. */
public static final String IPC_MAXIMUM_RESPONSE_LENGTH =
"ipc.maximum.response.length";
/** Default value for IPC_MAXIMUM_RESPONSE_LENGTH. */
public static final int IPC_MAXIMUM_RESPONSE_LENGTH_DEFAULT =
128 * 1024 * 1024;

/** How many calls per handler are allowed in the queue. */
public static final String IPC_SERVER_HANDLER_QUEUE_SIZE_KEY =
"ipc.server.handler.queue.size";

@@ -19,6 +19,9 @@
package org.apache.hadoop.fs;

import java.io.IOException;
import java.io.InvalidObjectException;
import java.io.ObjectInputValidation;
import java.io.Serializable;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.regex.Pattern;

@@ -37,7 +40,7 @@ import org.apache.hadoop.conf.Configuration;
@Stringable
@InterfaceAudience.Public
@InterfaceStability.Stable
public class Path implements Comparable {
public class Path implements Comparable, Serializable, ObjectInputValidation {

/**
* The directory separator, a slash.

@@ -66,6 +69,8 @@ public class Path implements Comparable {
private static final Pattern HAS_DRIVE_LETTER_SPECIFIER =
Pattern.compile("^/?[a-zA-Z]:");

private static final long serialVersionUID = 0xad00f;

private URI uri; // a hierarchical uri

/**

@@ -565,4 +570,17 @@ public class Path implements Comparable {
}
return new Path(newUri);
}

/**
* Validate the contents of a deserialized Path, so as
* to defend against malicious object streams.
* @throws InvalidObjectException if there's no URI
*/
@Override
public void validateObject() throws InvalidObjectException {
if (uri == null) {
throw new InvalidObjectException("No URI in deserialized Path");
}

}
}

@@ -28,8 +28,14 @@ public class CryptoFSDataOutputStream extends FSDataOutputStream {

public CryptoFSDataOutputStream(FSDataOutputStream out, CryptoCodec codec,
int bufferSize, byte[] key, byte[] iv) throws IOException {
this(out, codec, bufferSize, key, iv, true);
}

public CryptoFSDataOutputStream(FSDataOutputStream out, CryptoCodec codec,
int bufferSize, byte[] key, byte[] iv, boolean closeOutputStream)
throws IOException {
super(new CryptoOutputStream(out, codec, bufferSize, key, iv,
out.getPos()), null, out.getPos());
out.getPos(), closeOutputStream), null, out.getPos());
this.fsOut = out;
}

@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;

@@ -413,8 +414,8 @@ public class Client implements AutoCloseable {
private SaslRpcClient saslRpcClient;

private Socket socket = null; // connected socket
private DataInputStream in;
private DataOutputStream out;
private IpcStreams ipcStreams;
private final int maxResponseLength;
private final int rpcTimeout;
private int maxIdleTime; //connections will be culled if it was idle for
//maxIdleTime msecs

@@ -426,8 +427,8 @@ public class Client implements AutoCloseable {
private final boolean doPing; //do we need to send ping message
private final int pingInterval; // how often sends ping to the server
private final int soTimeout; // used by ipc ping and rpc timeout
private ResponseBuffer pingRequest; // ping message

private byte[] pingRequest; // ping message

// currently active calls
private Hashtable<Integer, Call> calls = new Hashtable<Integer, Call>();
private AtomicLong lastActivity = new AtomicLong();// last I/O activity time

@@ -446,6 +447,9 @@ public class Client implements AutoCloseable {
0,
new UnknownHostException());
}
this.maxResponseLength = remoteId.conf.getInt(
CommonConfigurationKeys.IPC_MAXIMUM_RESPONSE_LENGTH,
CommonConfigurationKeys.IPC_MAXIMUM_RESPONSE_LENGTH_DEFAULT);
this.rpcTimeout = remoteId.getRpcTimeout();
this.maxIdleTime = remoteId.getMaxIdleTime();
this.connectionRetryPolicy = remoteId.connectionRetryPolicy;

@@ -456,12 +460,13 @@ public class Client implements AutoCloseable {
this.doPing = remoteId.getDoPing();
if (doPing) {
// construct a RPC header with the callId as the ping callId
pingRequest = new ResponseBuffer();
ResponseBuffer buf = new ResponseBuffer();
RpcRequestHeaderProto pingHeader = ProtoUtil
.makeRpcRequestHeader(RpcKind.RPC_PROTOCOL_BUFFER,
OperationProto.RPC_FINAL_PACKET, PING_CALL_ID,
RpcConstants.INVALID_RETRY_COUNT, clientId);
pingHeader.writeDelimitedTo(pingRequest);
pingHeader.writeDelimitedTo(buf);
pingRequest = buf.toByteArray();
}
this.pingInterval = remoteId.getPingInterval();
if (rpcTimeout > 0) {

@@ -596,15 +601,15 @@ public class Client implements AutoCloseable {
}
return false;
}

private synchronized AuthMethod setupSaslConnection(final InputStream in2,
final OutputStream out2) throws IOException {

private synchronized AuthMethod setupSaslConnection(IpcStreams streams)
throws IOException {
// Do not use Client.conf here! We must use ConnectionId.conf, since the
// Client object is cached and shared between all RPC clients, even those
// for separate services.
saslRpcClient = new SaslRpcClient(remoteId.getTicket(),
remoteId.getProtocol(), remoteId.getAddress(), remoteId.conf);
return saslRpcClient.saslConnect(in2, out2);
return saslRpcClient.saslConnect(streams);
}

/**

@@ -770,12 +775,9 @@ public class Client implements AutoCloseable {
Random rand = null;
while (true) {
setupConnection();
InputStream inStream = NetUtils.getInputStream(socket);
OutputStream outStream = NetUtils.getOutputStream(socket);
writeConnectionHeader(outStream);
ipcStreams = new IpcStreams(socket, maxResponseLength);
writeConnectionHeader(ipcStreams);
if (authProtocol == AuthProtocol.SASL) {
final InputStream in2 = inStream;
final OutputStream out2 = outStream;
UserGroupInformation ticket = remoteId.getTicket();
if (ticket.getRealUser() != null) {
ticket = ticket.getRealUser();

@@ -786,7 +788,7 @@ public class Client implements AutoCloseable {
@Override
public AuthMethod run()
throws IOException, InterruptedException {
return setupSaslConnection(in2, out2);
return setupSaslConnection(ipcStreams);
}
});
} catch (IOException ex) {

@@ -805,8 +807,7 @@ public class Client implements AutoCloseable {
}
if (authMethod != AuthMethod.SIMPLE) {
// Sasl connect is successful. Let's set up Sasl i/o streams.
inStream = saslRpcClient.getInputStream(inStream);
outStream = saslRpcClient.getOutputStream(outStream);
ipcStreams.setSaslClient(saslRpcClient);
// for testing
remoteId.saslQop =
(String)saslRpcClient.getNegotiatedProperty(Sasl.QOP);

@@ -825,18 +826,11 @@ public class Client implements AutoCloseable {
}
}
}

if (doPing) {
inStream = new PingInputStream(inStream);
}
this.in = new DataInputStream(new BufferedInputStream(inStream));

// SASL may have already buffered the stream
if (!(outStream instanceof BufferedOutputStream)) {
outStream = new BufferedOutputStream(outStream);
if (doPing) {
ipcStreams.setInputStream(new PingInputStream(ipcStreams.in));
}
this.out = new DataOutputStream(outStream);

writeConnectionContext(remoteId, authMethod);

// update last activity time

@@ -950,17 +944,28 @@ public class Client implements AutoCloseable {
* | AuthProtocol (1 byte) |
* +----------------------------------+
*/
private void writeConnectionHeader(OutputStream outStream)
private void writeConnectionHeader(IpcStreams streams)
throws IOException {
DataOutputStream out = new DataOutputStream(new BufferedOutputStream(outStream));
// Write out the header, version and authentication method
out.write(RpcConstants.HEADER.array());
out.write(RpcConstants.CURRENT_VERSION);
out.write(serviceClass);
out.write(authProtocol.callId);
out.flush();
// Write out the header, version and authentication method.
// The output stream is buffered but we must not flush it yet. The
// connection setup protocol requires the client to send multiple
// messages before reading a response.
//
// insecure: send header+context+call, read
// secure : send header+negotiate, read, (sasl), context+call, read
//
// The client must flush only when it's prepared to read. Otherwise
// "broken pipe" exceptions occur if the server closes the connection
// before all messages are sent.
final DataOutputStream out = streams.out;
synchronized (out) {
out.write(RpcConstants.HEADER.array());
out.write(RpcConstants.CURRENT_VERSION);
out.write(serviceClass);
out.write(authProtocol.callId);
}
}

/* Write the connection context header for each connection
* Out is not synchronized because only the first thread does this.
*/

@@ -976,12 +981,17 @@ public class Client implements AutoCloseable {
.makeRpcRequestHeader(RpcKind.RPC_PROTOCOL_BUFFER,
OperationProto.RPC_FINAL_PACKET, CONNECTION_CONTEXT_CALL_ID,
RpcConstants.INVALID_RETRY_COUNT, clientId);
// do not flush. the context and first ipc call request must be sent
// together to avoid possibility of broken pipes upon authz failure.
// see writeConnectionHeader
final ResponseBuffer buf = new ResponseBuffer();
connectionContextHeader.writeDelimitedTo(buf);
message.writeDelimitedTo(buf);
buf.writeTo(out);
synchronized (ipcStreams.out) {
ipcStreams.sendRequest(buf.toByteArray());
}
}

/* wait till someone signals us to start reading RPC response or
* it is idle too long, it is marked as to be closed,
* or the client is marked as not running.

@@ -1024,9 +1034,9 @@ public class Client implements AutoCloseable {
long curTime = Time.now();
if ( curTime - lastActivity.get() >= pingInterval) {
lastActivity.set(curTime);
synchronized (out) {
pingRequest.writeTo(out);
out.flush();
synchronized (ipcStreams.out) {
ipcStreams.sendRequest(pingRequest);
ipcStreams.flush();
}
}
}

@@ -1092,15 +1102,16 @@ public class Client implements AutoCloseable {
@Override
public void run() {
try {
synchronized (Connection.this.out) {
synchronized (ipcStreams.out) {
if (shouldCloseConnection.get()) {
return;
}
if (LOG.isDebugEnabled()) {
LOG.debug(getName() + " sending #" + call.id);
}
buf.writeTo(out); // RpcRequestHeader + RpcRequest
out.flush();
// RpcRequestHeader + RpcRequest
ipcStreams.sendRequest(buf.toByteArray());
ipcStreams.flush();
}
} catch (IOException e) {
// exception at this point would leave the connection in an

@@ -1141,10 +1152,7 @@ public class Client implements AutoCloseable {
touch();

try {
int totalLen = in.readInt();
ByteBuffer bb = ByteBuffer.allocate(totalLen);
in.readFully(bb.array());

ByteBuffer bb = ipcStreams.readResponse();
RpcWritable.Buffer packet = RpcWritable.Buffer.wrap(bb);
RpcResponseHeaderProto header =
packet.getValue(RpcResponseHeaderProto.getDefaultInstance());

@@ -1209,8 +1217,7 @@ public class Client implements AutoCloseable {
connections.remove(remoteId, this);

// close the streams and therefore the socket
IOUtils.closeStream(out);
IOUtils.closeStream(in);
IOUtils.closeStream(ipcStreams);
disposeSasl();

// clean up all calls

@@ -1739,4 +1746,75 @@ public class Client implements AutoCloseable {
public void close() throws Exception {
stop();
}

/** Manages the input and output streams for an IPC connection.
* Only exposed for use by SaslRpcClient.
*/
@InterfaceAudience.Private
public static class IpcStreams implements Closeable, Flushable {
private DataInputStream in;
public DataOutputStream out;
private int maxResponseLength;
private boolean firstResponse = true;

IpcStreams(Socket socket, int maxResponseLength) throws IOException {
this.maxResponseLength = maxResponseLength;
setInputStream(
new BufferedInputStream(NetUtils.getInputStream(socket)));
setOutputStream(
new BufferedOutputStream(NetUtils.getOutputStream(socket)));
}

void setSaslClient(SaslRpcClient client) throws IOException {
setInputStream(client.getInputStream(in));
setOutputStream(client.getOutputStream(out));
}

private void setInputStream(InputStream is) {
this.in = (is instanceof DataInputStream)
? (DataInputStream)is : new DataInputStream(is);
}

private void setOutputStream(OutputStream os) {
this.out = (os instanceof DataOutputStream)
? (DataOutputStream)os : new DataOutputStream(os);
}

public ByteBuffer readResponse() throws IOException {
int length = in.readInt();
if (firstResponse) {
firstResponse = false;
// pre-rpcv9 exception, almost certainly a version mismatch.
if (length == -1) {
in.readInt(); // ignore fatal/error status, it's fatal for us.
throw new RemoteException(WritableUtils.readString(in),
WritableUtils.readString(in));
}
}
if (length <= 0) {
throw new RpcException("RPC response has invalid length");
}
if (maxResponseLength > 0 && length > maxResponseLength) {
throw new RpcException("RPC response exceeds maximum data length");
}
ByteBuffer bb = ByteBuffer.allocate(length);
in.readFully(bb.array());
return bb;
}

public void sendRequest(byte[] buf) throws IOException {
out.write(buf);
}

@Override
public void flush() throws IOException {
out.flush();
}

@Override
public void close() {
IOUtils.closeStream(out);
IOUtils.closeStream(in);
}
}
}

@@ -60,7 +60,7 @@ public class ProtobufRpcEngine implements RpcEngine {
private static final ThreadLocal<AsyncGet<Message, Exception>>
ASYNC_RETURN_MESSAGE = new ThreadLocal<>();

static { // Register the rpcRequest deserializer for ProtobufRpcEngine
static { // Register the rpcRequest deserializer for WritableRpcEngine
org.apache.hadoop.ipc.Server.registerProtocolEngine(
RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcProtobufRequest.class,
new Server.ProtoBufRpcInvoker());

@@ -194,8 +194,7 @@ public class ProtobufRpcEngine implements RpcEngine {
}

if (args.length != 2) { // RpcController + Message
throw new ServiceException(
"Too many or few parameters for request. Method: ["
throw new ServiceException("Too many parameters for request. Method: ["
+ method.getName() + "]" + ", Expected: 2, Actual: "
+ args.length);
}

@@ -18,8 +18,6 @@

package org.apache.hadoop.ipc;

import java.io.IOException;
import java.io.InterruptedIOException;
import java.lang.reflect.Field;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;

@@ -28,6 +26,7 @@ import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.net.NoRouteToHostException;
import java.net.SocketTimeoutException;
import java.io.*;
import java.io.Closeable;
import java.util.ArrayList;
import java.util.Arrays;

@@ -38,12 +37,11 @@ import java.util.concurrent.atomic.AtomicBoolean;

import javax.net.SocketFactory;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.*;

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.*;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.ipc.Client.ConnectionId;
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolInfoService;

@@ -56,6 +54,7 @@ import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Time;

@@ -88,7 +87,7 @@ public class RPC {
RPC_WRITABLE ((short) 2), // Use WritableRpcEngine
RPC_PROTOCOL_BUFFER ((short) 3); // Use ProtobufRpcEngine
final static short MAX_INDEX = RPC_PROTOCOL_BUFFER.value; // used for array size
private final short value;
public final short value; //TODO make it private

RpcKind(short val) {
this.value = val;

@@ -208,7 +207,7 @@ public class RPC {
RpcEngine engine = PROTOCOL_ENGINES.get(protocol);
if (engine == null) {
Class<?> impl = conf.getClass(ENGINE_PROP+"."+protocol.getName(),
ProtobufRpcEngine.class);
WritableRpcEngine.class);
engine = (RpcEngine)ReflectionUtils.newInstance(impl, conf);
PROTOCOL_ENGINES.put(protocol, engine);
}

@@ -237,14 +237,14 @@ public abstract class Server {
static class RpcKindMapValue {
final Class<? extends Writable> rpcRequestWrapperClass;
final RpcInvoker rpcInvoker;

RpcKindMapValue (Class<? extends Writable> rpcRequestWrapperClass,
RpcInvoker rpcInvoker) {
this.rpcInvoker = rpcInvoker;
this.rpcRequestWrapperClass = rpcRequestWrapperClass;
}
}
static Map<RPC.RpcKind, RpcKindMapValue> rpcKindMap = new HashMap<>(4);
static Map<RPC.RpcKind, RpcKindMapValue> rpcKindMap = new
HashMap<RPC.RpcKind, RpcKindMapValue>(4);

@@ -18,11 +18,9 @@

package org.apache.hadoop.security;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.FilterInputStream;
import java.io.FilterOutputStream;
import java.io.IOException;

@@ -53,6 +51,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.GlobPattern;
import org.apache.hadoop.ipc.Client.IpcStreams;
import org.apache.hadoop.ipc.RPC.RpcKind;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.ResponseBuffer;

@@ -353,24 +352,16 @@ public class SaslRpcClient {
* @return AuthMethod used to negotiate the connection
* @throws IOException
*/
public AuthMethod saslConnect(InputStream inS, OutputStream outS)
throws IOException {
DataInputStream inStream = new DataInputStream(new BufferedInputStream(inS));
DataOutputStream outStream = new DataOutputStream(new BufferedOutputStream(
outS));

public AuthMethod saslConnect(IpcStreams ipcStreams) throws IOException {
// redefined if/when a SASL negotiation starts, can be queried if the
// negotiation fails
authMethod = AuthMethod.SIMPLE;

sendSaslMessage(outStream, negotiateRequest);

sendSaslMessage(ipcStreams.out, negotiateRequest);
// loop until sasl is complete or a rpc error occurs
boolean done = false;
do {
int rpcLen = inStream.readInt();
ByteBuffer bb = ByteBuffer.allocate(rpcLen);
inStream.readFully(bb.array());
ByteBuffer bb = ipcStreams.readResponse();

RpcWritable.Buffer saslPacket = RpcWritable.Buffer.wrap(bb);
RpcResponseHeaderProto header =

@@ -447,7 +438,7 @@ public class SaslRpcClient {
}
}
if (response != null) {
sendSaslMessage(outStream, response.build());
sendSaslMessage(ipcStreams.out, response.build());
}
} while (!done);
return authMethod;

@@ -461,8 +452,10 @@ public class SaslRpcClient {
ResponseBuffer buf = new ResponseBuffer();
saslHeader.writeDelimitedTo(buf);
message.writeDelimitedTo(buf);
buf.writeTo(out);
out.flush();
synchronized (out) {
buf.writeTo(out);
out.flush();
}
}

/**

@@ -632,9 +632,24 @@ public class UserGroupInformation {
* @param subject the user's subject
*/
UserGroupInformation(Subject subject) {
this(subject, false);
}

/**
* Create a UGI from the given subject.
* @param subject the subject
* @param externalKeyTab if the subject's keytab is managed by the user.
* Setting this to true will prevent UGI from attempting
* to login the keytab, or to renew it.
*/
private UserGroupInformation(Subject subject, final boolean externalKeyTab) {
this.subject = subject;
this.user = subject.getPrincipals(User.class).iterator().next();
this.isKeytab = KerberosUtil.hasKerberosKeyTab(subject);
if (externalKeyTab) {
this.isKeytab = false;
} else {
this.isKeytab = KerberosUtil.hasKerberosKeyTab(subject);
}
this.isKrbTkt = KerberosUtil.hasKerberosTicket(subject);
}

@@ -715,7 +730,7 @@ public class UserGroupInformation {
*
* @param user The principal name to load from the ticket
* cache
* @param ticketCache the path to the ticket cache file
* @param ticketCachePath the path to the ticket cache file
*
* @throws IOException if the kerberos login fails
*/

@@ -775,7 +790,7 @@ public class UserGroupInformation {
/**
* Create a UserGroupInformation from a Subject with Kerberos principal.
*
* @param subject The KerberosPrincipal to use in UGI
* @param user The KerberosPrincipal to use in UGI
*
* @throws IOException if the kerberos login fails
*/

@@ -850,10 +865,11 @@ public class UserGroupInformation {
newLoginContext(authenticationMethod.getLoginAppName(),
subject, new HadoopConfiguration());
login.login();
UserGroupInformation realUser = new UserGroupInformation(subject);
LOG.debug("Assuming keytab is managed externally since logged in from"
+ " subject.");
UserGroupInformation realUser = new UserGroupInformation(subject, true);
realUser.setLogin(login);
realUser.setAuthenticationMethod(authenticationMethod);
realUser = new UserGroupInformation(login.getSubject());
// If the HADOOP_PROXY_USER environment variable or property
// is specified, create a proxy user as the logged in user.
String proxyUser = System.getenv(HADOOP_PROXY_USER);

@@ -17,22 +17,34 @@
*/
package org.apache.hadoop.util;

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

import com.google.common.annotations.VisibleForTesting;

/**
* This is a wrap class of a ReentrantLock. Extending AutoCloseable
* interface such that the users can use a try-with-resource syntax.
*/
public class AutoCloseableLock implements AutoCloseable {

private final ReentrantLock lock;
private final Lock lock;

/**
* Creates an instance of {@code AutoCloseableLock}, initializes
* the underlying {@code ReentrantLock} object.
* the underlying lock instance with a new {@code ReentrantLock}.
*/
public AutoCloseableLock() {
this.lock = new ReentrantLock();
this(new ReentrantLock());
}

/**
* Wrap provided Lock instance.
* @param lock Lock instance to wrap in AutoCloseable API.
*/
public AutoCloseableLock(Lock lock) {
this.lock = lock;
}

/**

@@ -86,7 +98,7 @@ public class AutoCloseableLock implements AutoCloseable {

/**
* A wrapper method that makes a call to {@code tryLock()} of
* the underlying {@code ReentrantLock} object.
* the underlying {@code Lock} object.
*
* If the lock is not held by another thread, acquires the lock, set the
* hold count to one and returns {@code true}.

@@ -116,7 +128,19 @@ public class AutoCloseableLock implements AutoCloseable {
* @return {@code true} if any thread holds this lock and
* {@code false} otherwise
*/
public boolean isLocked() {
return lock.isLocked();
@VisibleForTesting
boolean isLocked() {
if (lock instanceof ReentrantLock) {
return ((ReentrantLock)lock).isLocked();
}
throw new UnsupportedOperationException();
}

/**
* See {@link ReentrantLock#newCondition()}.
* @return the Condition object
*/
public Condition newCondition() {
return lock.newCondition();
}
}

@@ -304,7 +304,7 @@ public class DataChecksum implements Checksum {
bytesPerChecksum, checksums.array(), crcsOffset, fileName, basePos);
return;
}
if (NativeCrc32.isAvailable()) {
if (NativeCrc32.isAvailable() && data.isDirect()) {
NativeCrc32.verifyChunkedSums(bytesPerChecksum, type.id, checksums, data,
fileName, basePos);
} else {

@@ -106,7 +106,6 @@ public class NodeHealthScriptRunner extends AbstractService {
shexec.execute();
} catch (ExitCodeException e) {
// ignore the exit code of the script
exceptionStackTrace = StringUtils.stringifyException(e);
status = HealthCheckerExitStatus.FAILED_WITH_EXIT_CODE;
// On Windows, we will not hit the Stream closed IOException
// thrown by stdout buffered reader for timeout event.

@@ -163,7 +162,7 @@ public class NodeHealthScriptRunner extends AbstractService {
setHealthStatus(false, exceptionStackTrace);
break;
case FAILED_WITH_EXIT_CODE:
setHealthStatus(false, exceptionStackTrace);
setHealthStatus(true, "", now);
break;
case FAILED:
setHealthStatus(false, shexec.getOutput());

@@ -1313,10 +1313,19 @@
<name>ipc.maximum.data.length</name>
<value>67108864</value>
<description>This indicates the maximum IPC message length (bytes) that can be
accepted by the server. Messages larger than this value are rejected by
server immediately. This setting should rarely need to be changed. It merits
investigating whether the cause of long RPC messages can be fixed instead,
e.g. by splitting into smaller messages.
accepted by the server. Messages larger than this value are rejected by the server
immediately to avoid possible OOMs. This setting should rarely need to be
changed.
</description>
</property>

<property>
<name>ipc.maximum.response.length</name>
<value>134217728</value>
<description>This indicates the maximum IPC message length (bytes) that can be
accepted by the client. Messages larger than this value are rejected
immediately to avoid possible OOMs. This setting should rarely need to be
changed. Set to 0 to disable.
</description>
</property>

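A hedged example of adjusting the new client-side limit for a single invocation; the value is illustrative and assumes the command honors generic `-D` options:

```bash
# Raise the client-side response cap to 256 MB for one command (illustrative value).
hadoop fs -D ipc.maximum.response.length=268435456 -ls /user/large-listing

# Or disable the check entirely, mirroring "Set to 0 to disable" above.
hadoop fs -D ipc.maximum.response.length=0 -ls /user/large-listing
```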
@@ -64,17 +64,17 @@ Administrators can configure individual daemons using the configuration options

| Daemon | Environment Variable |
|:---- |:---- |
| NameNode | HADOOP\_NAMENODE\_OPTS |
| DataNode | HADOOP\_DATANODE\_OPTS |
| Secondary NameNode | HADOOP\_SECONDARYNAMENODE\_OPTS |
| NameNode | HDFS\_NAMENODE\_OPTS |
| DataNode | HDFS\_DATANODE\_OPTS |
| Secondary NameNode | HDFS\_SECONDARYNAMENODE\_OPTS |
| ResourceManager | YARN\_RESOURCEMANAGER\_OPTS |
| NodeManager | YARN\_NODEMANAGER\_OPTS |
| WebAppProxy | YARN\_PROXYSERVER\_OPTS |
| Map Reduce Job History Server | HADOOP\_JOB\_HISTORYSERVER\_OPTS |
| Map Reduce Job History Server | MAPRED\_HISTORYSERVER\_OPTS |

For example, To configure Namenode to use parallelGC, the following statement should be added in hadoop-env.sh :
For example, To configure Namenode to use parallelGC and a 4GB Java Heap, the following statement should be added in hadoop-env.sh :

export HADOOP_NAMENODE_OPTS="-XX:+UseParallelGC"
export HDFS_NAMENODE_OPTS="-XX:+UseParallelGC -Xmx4g"

See `etc/hadoop/hadoop-env.sh` for other examples.

@@ -91,13 +91,6 @@ It is also traditional to configure `HADOOP_HOME` in the system-wide shell envir
HADOOP_HOME=/path/to/hadoop
export HADOOP_HOME

| Daemon | Environment Variable |
|:---- |:---- |
| ResourceManager | YARN\_RESOURCEMANAGER\_HEAPSIZE |
| NodeManager | YARN\_NODEMANAGER\_HEAPSIZE |
| WebAppProxy | YARN\_PROXYSERVER\_HEAPSIZE |
| Map Reduce Job History Server | HADOOP\_JOB\_HISTORYSERVER\_HEAPSIZE |

### Configuring the Hadoop Daemons

This section deals with important parameters to be specified in the given configuration files:

@@ -24,7 +24,7 @@ Apache Hadoop has many environment variables that control various aspects of the

### `HADOOP_CLIENT_OPTS`

This environment variable is used for almost all end-user operations. It can be used to set any Java options as well as any Apache Hadoop options via a system property definition. For example:
This environment variable is used for all end-user, non-daemon operations. It can be used to set any Java options as well as any Apache Hadoop options via a system property definition. For example:

```bash
HADOOP_CLIENT_OPTS="-Xmx1g -Dhadoop.socks.server=localhost:4000" hadoop fs -ls /tmp

@@ -32,6 +32,18 @@ HADOOP_CLIENT_OPTS="-Xmx1g -Dhadoop.socks.server=localhost:4000" hadoop fs -ls /

will increase the memory and send this command via a SOCKS proxy server.

### `(command)_(subcommand)_OPTS`

It is also possible to set options on a per subcommand basis. This allows for one to create special options for particular cases. The first part of the pattern is the command being used, but all uppercase. The second part of the command is the subcommand being used. Then finally followed by the string `_OPTS`.

For example, to configure `mapred distcp` to use a 2GB heap, one would use:

```bash
MAPRED_DISTCP_OPTS="-Xmx2g"
```

These options will appear *after* `HADOOP_CLIENT_OPTS` during execution and will generally take precedence.
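A brief usage sketch of that precedence; the heap sizes and paths are illustrative only:

```bash
# Both variables are set; MAPRED_DISTCP_OPTS is appended later on the java
# command line for this subcommand, so its -Xmx generally wins.
HADOOP_CLIENT_OPTS="-Xmx1g" MAPRED_DISTCP_OPTS="-Xmx2g" \
  mapred distcp hdfs://nn1.example.com/src hdfs://nn2.example.com/dst
```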

### `HADOOP_CLASSPATH`

NOTE: Site-wide settings should be configured via a shellprofile entry and permanent user-wide settings should be configured via ${HOME}/.hadooprc using the `hadoop_add_classpath` function. See below for more information.

@@ -56,6 +68,8 @@ For example:
#

HADOOP_CLIENT_OPTS="-Xmx1g"
MAPRED_DISTCP_OPTS="-Xmx2g"
HADOOP_DISTCP_OPTS="-Xmx2g"
```

The `.hadoop-env` file can also be used to extend functionality and teach Apache Hadoop new tricks. For example, to run hadoop commands accessing the server referenced in the environment variable `${HADOOP_SERVER}`, the following in the `.hadoop-env` will do just that:

@@ -71,11 +85,23 @@ One word of warning: not all of Unix Shell API routines are available or work c

## Administrator Environment

There are many environment variables that impact how the system operates. By far, the most important are the series of `_OPTS` variables that control how daemons work. These variables should contain all of the relevant settings for those daemons.
In addition to the various XML files, there are two key capabilities for administrators to configure Apache Hadoop when using the Unix Shell:

More, detailed information is contained in `hadoop-env.sh` and the other env.sh files.
* Many environment variables that impact how the system operates. This guide will only highlight some key ones. There is generally more information in the various `*-env.sh` files.

Advanced administrators may wish to supplement or do some platform-specific fixes to the existing scripts. In some systems, this means copying the errant script or creating a custom build with these changes. Apache Hadoop provides the capabilities to do function overrides so that the existing code base may be changed in place without all of that work. Replacing functions is covered later under the Shell API documentation.
* Supplement or do some platform-specific changes to the existing scripts. Apache Hadoop provides the capabilities to do function overrides so that the existing code base may be changed in place without all of that work. Replacing functions is covered later under the Shell API documentation.

### `(command)_(subcommand)_OPTS`

By far, the most important are the series of `_OPTS` variables that control how daemons work. These variables should contain all of the relevant settings for those daemons.

Similar to the user commands above, all daemons will honor the `(command)_(subcommand)_OPTS` pattern. It is generally recommended that these be set in `hadoop-env.sh` to guarantee that the system will know which settings it should use on restart. Unlike user-facing subcommands, daemons will *NOT* honor `HADOOP_CLIENT_OPTS`.

In addition, daemons that run in an extra security mode also support `(command)_(subcommand)_SECURE_EXTRA_OPTS`. These options are *supplemental* to the generic `*_OPTS` and will appear after, therefore generally taking precedence.
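As a sketch of the recommendation above, a `hadoop-env.sh` fragment might look like the following; the flags are placeholders drawn from the examples earlier in this change, not a recommended configuration:

```bash
# Illustrative hadoop-env.sh entries (values are placeholders).
export HDFS_NAMENODE_OPTS="-Xms4g -Xmx4g -Dhadoop.security.logger=INFO,RFAS"
export HDFS_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS"
# Supplemental options applied only when the datanode runs in its secure mode.
export HDFS_DATANODE_SECURE_EXTRA_OPTS="-jvm server"
```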

### `(command)_(subcommand)_USER`

Apache Hadoop provides a way to do a user check per-subcommand. While this method is easily circumvented and should not be considered a security-feature, it does provide a mechanism by which to prevent accidents. For example, setting `HDFS_NAMENODE_USER=hdfs` will make the `hdfs namenode` and `hdfs --daemon start namenode` commands verify that the user running the commands is the hdfs user by checking the `USER` environment variable. This also works for non-daemons. Setting `HADOOP_DISTCP_USER=jane` will verify that `USER` is set to `jane` before being allowed to execute the `hadoop distcp` command.
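A short sketch of wiring those checks into `hadoop-env.sh`, reusing the accounts named in the paragraph above:

```bash
# Illustrative hadoop-env.sh entries; accounts are the examples from the text.
export HDFS_NAMENODE_USER=hdfs    # gates "hdfs namenode" and "hdfs --daemon start namenode"
export HADOOP_DISTCP_USER=jane    # gates the non-daemon "hadoop distcp" command
```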

## Developer and Advanced Administrator Environment

@@ -16,21 +16,29 @@
* limitations under the License.
*/

package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
package org.apache.hadoop.cli.util;

import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configurable;
import java.util.StringTokenizer;

/**
* A pluggable object for altering the weights of apps in the fair scheduler,
* which is used for example by {@link NewAppWeightBooster} to give higher
* weight to new jobs so that short jobs finish faster.
* Comparator for the Command line tests.
*
* This comparator searches for an exact line as 'expected'
* in the string 'actual' and returns true if found
*
* May implement {@link Configurable} to access configuration parameters.
*/
@Private
@Unstable
public interface WeightAdjuster {
public double adjustWeight(FSAppAttempt app, double curWeight);
public class ExactLineComparator extends ComparatorBase {

@Override
public boolean compare(String actual, String expected) {
boolean success = false;
StringTokenizer tokenizer = new StringTokenizer(actual, "\n\r");
while (tokenizer.hasMoreTokens() && !success) {
String actualToken = tokenizer.nextToken();
success = actualToken.equals(expected);
}

return success;
}

}
@ -19,7 +19,11 @@ package org.apache.hadoop.conf;
|
|||
|
||||
import java.io.StringWriter;
|
||||
import java.io.StringReader;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import javax.servlet.http.HttpServletRequest;
|
||||
import javax.ws.rs.core.HttpHeaders;
|
||||
import javax.xml.parsers.DocumentBuilder;
|
||||
import javax.xml.parsers.DocumentBuilderFactory;
|
||||
|
||||
|
@ -32,6 +36,7 @@ import org.xml.sax.InputSource;
|
|||
|
||||
import junit.framework.TestCase;
|
||||
import org.junit.Test;
|
||||
import org.mockito.Mockito;
|
||||
|
||||
/**
|
||||
* Basic test case that the ConfServlet can write configuration
|
||||
|
@ -47,6 +52,25 @@ public class TestConfServlet extends TestCase {
|
|||
return testConf;
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testParseHeaders() throws Exception {
|
||||
HashMap<String, String> verifyMap = new HashMap<String, String>();
|
||||
verifyMap.put("text/plain", ConfServlet.FORMAT_XML);
|
||||
verifyMap.put(null, ConfServlet.FORMAT_XML);
|
||||
verifyMap.put("text/xml", ConfServlet.FORMAT_XML);
|
||||
verifyMap.put("application/xml", ConfServlet.FORMAT_XML);
|
||||
verifyMap.put("application/json", ConfServlet.FORMAT_JSON);
|
||||
|
||||
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
|
||||
for(String contentTypeExpected : verifyMap.keySet()) {
|
||||
String contenTypeActual = verifyMap.get(contentTypeExpected);
|
||||
Mockito.when(request.getHeader(HttpHeaders.ACCEPT))
|
||||
.thenReturn(contentTypeExpected);
|
||||
assertEquals(contenTypeActual,
|
||||
ConfServlet.parseAccecptHeader(request));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
@SuppressWarnings("unchecked")
|
||||
public void testWriteJson() throws Exception {
|
||||
|
|
|
@ -0,0 +1,57 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.crypto;
|
||||
|
||||
import java.io.OutputStream;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
import static org.mockito.Mockito.*;
|
||||
|
||||
/**
|
||||
* To test proper closing of underlying stream of CryptoOutputStream.
|
||||
*/
|
||||
public class TestCryptoOutputStreamClosing {
|
||||
private static CryptoCodec codec;
|
||||
|
||||
@BeforeClass
|
||||
public static void init() throws Exception {
|
||||
codec = CryptoCodec.getInstance(new Configuration());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testOutputStreamClosing() throws Exception {
|
||||
OutputStream outputStream = mock(OutputStream.class);
|
||||
CryptoOutputStream cos = new CryptoOutputStream(outputStream, codec,
|
||||
new byte[16], new byte[16], 0L, true);
|
||||
cos.close();
|
||||
verify(outputStream).close();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testOutputStreamNotClosing() throws Exception {
|
||||
OutputStream outputStream = mock(OutputStream.class);
|
||||
CryptoOutputStream cos = new CryptoOutputStream(outputStream, codec,
|
||||
new byte[16], new byte[16], 0L, false);
|
||||
cos.close();
|
||||
verify(outputStream, never()).close();
|
||||
}
|
||||
|
||||
}
|
|
@ -18,24 +18,23 @@
|
|||
package org.apache.hadoop.fs;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.permission.*;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.apache.log4j.Level;
|
||||
import org.apache.hadoop.util.Shell;
|
||||
|
||||
import java.io.*;
|
||||
import java.util.*;
|
||||
|
||||
import org.apache.log4j.Level;
|
||||
import org.junit.Test;
|
||||
import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.StringTokenizer;
|
||||
|
||||
import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
|
||||
import static org.hamcrest.CoreMatchers.is;
|
||||
import static org.hamcrest.CoreMatchers.not;
|
||||
import static org.junit.Assert.assertThat;
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
/**
|
||||
* This class tests the local file system via the FileSystem abstraction.
|
||||
|
@ -60,18 +59,22 @@ public class TestLocalFileSystemPermission {
|
|||
return f;
|
||||
}
|
||||
|
||||
private Path writeFile(FileSystem fs, String name, FsPermission perm) throws IOException {
|
||||
private Path writeFile(FileSystem fs, String name, FsPermission perm)
|
||||
throws IOException {
|
||||
Path f = new Path(TEST_PATH_PREFIX + name);
|
||||
FSDataOutputStream stm = fs.create(f, perm, true, 2048, (short)1, 32 * 1024 * 1024, null);
|
||||
FSDataOutputStream stm = fs.create(f, perm, true, 2048, (short)1,
|
||||
32 * 1024 * 1024, null);
|
||||
stm.writeBytes("42\n");
|
||||
stm.close();
|
||||
return f;
|
||||
}
|
||||
|
||||
private void cleanup(FileSystem fs, Path name) throws IOException {
|
||||
assertTrue(fs.exists(name));
|
||||
fs.delete(name, true);
|
||||
assertTrue(!fs.exists(name));
|
||||
if (name!=null) {
|
||||
assertTrue(fs.exists(name));
|
||||
fs.delete(name, true);
|
||||
assertFalse(fs.exists(name));
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -82,39 +85,33 @@ public class TestLocalFileSystemPermission {
|
|||
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "044");
|
||||
Path dir = new Path(TEST_PATH_PREFIX + "dir");
|
||||
localfs.mkdirs(dir);
|
||||
Path dir1 = new Path(TEST_PATH_PREFIX + "dir1");
|
||||
Path dir2 = new Path(TEST_PATH_PREFIX + "dir2");
|
||||
|
||||
try {
|
||||
FsPermission initialPermission = getPermission(localfs, dir);
|
||||
assertEquals(
|
||||
FsPermission.getDirDefault().applyUMask(FsPermission.getUMask(conf)),
|
||||
FsPermission.getDirDefault()
|
||||
.applyUMask(FsPermission.getUMask(conf)),
|
||||
initialPermission);
|
||||
} catch(Exception e) {
|
||||
LOGGER.error("Cannot run test", e);
|
||||
return;
|
||||
}
|
||||
|
||||
FsPermission perm = new FsPermission((short)0755);
|
||||
Path dir1 = new Path(TEST_PATH_PREFIX + "dir1");
|
||||
localfs.mkdirs(dir1, perm);
|
||||
try {
|
||||
FsPermission initialPermission = getPermission(localfs, dir1);
|
||||
assertEquals(perm.applyUMask(FsPermission.getUMask(conf)), initialPermission);
|
||||
} catch(Exception e) {
|
||||
LOGGER.error("Cannot run test", e);
|
||||
return;
|
||||
}
|
||||
FsPermission perm = new FsPermission((short)0755);
|
||||
|
||||
Path dir2 = new Path(TEST_PATH_PREFIX + "dir2");
|
||||
localfs.mkdirs(dir2);
|
||||
try {
|
||||
FsPermission initialPermission = getPermission(localfs, dir2);
|
||||
localfs.mkdirs(dir1, perm);
|
||||
|
||||
initialPermission = getPermission(localfs, dir1);
|
||||
assertEquals(perm.applyUMask(FsPermission.getUMask(conf)),
|
||||
initialPermission);
|
||||
|
||||
localfs.mkdirs(dir2);
|
||||
|
||||
initialPermission = getPermission(localfs, dir2);
|
||||
Path copyPath = new Path(TEST_PATH_PREFIX + "dir_copy");
|
||||
localfs.rename(dir2, copyPath);
|
||||
FsPermission copyPermission = getPermission(localfs, copyPath);
|
||||
assertEquals(copyPermission, initialPermission);
|
||||
assertEquals(initialPermission, copyPermission);
|
||||
dir2 = copyPath;
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("Cannot run test", e);
|
||||
return;
|
||||
|
||||
} finally {
|
||||
cleanup(localfs, dir);
|
||||
cleanup(localfs, dir1);
|
||||
|
@ -124,52 +121,42 @@ public class TestLocalFileSystemPermission {
|
|||
}
|
||||
}
|
||||
|
||||
/** Test LocalFileSystem.setPermission */
|
||||
/** Test LocalFileSystem.setPermission. */
|
||||
@Test
|
||||
public void testLocalFSsetPermission() throws IOException {
|
||||
assumeNotWindows();
|
||||
Configuration conf = new Configuration();
|
||||
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "044");
|
||||
LocalFileSystem localfs = FileSystem.getLocal(conf);
|
||||
Path f = null;
|
||||
Path f1 = null;
|
||||
Path f2 = null;
|
||||
String filename = "foo";
|
||||
Path f = writeFile(localfs, filename);
|
||||
String filename1 = "foo1";
|
||||
String filename2 = "foo2";
|
||||
FsPermission perm = new FsPermission((short)0755);
|
||||
|
||||
try {
|
||||
f = writeFile(localfs, filename);
|
||||
f1 = writeFile(localfs, filename1, perm);
|
||||
f2 = writeFile(localfs, filename2);
|
||||
|
||||
FsPermission initialPermission = getPermission(localfs, f);
|
||||
assertEquals(
|
||||
FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)),
|
||||
initialPermission);
|
||||
} catch(Exception e) {
|
||||
LOGGER.error("Cannot run test", e);
|
||||
return;
|
||||
}
|
||||
|
||||
String filename1 = "foo1";
|
||||
FsPermission perm = new FsPermission((short)0755);
|
||||
Path f1 = writeFile(localfs, filename1, perm);
|
||||
try {
|
||||
FsPermission initialPermission = getPermission(localfs, f1);
|
||||
initialPermission = getPermission(localfs, f1);
|
||||
assertEquals(
|
||||
perm.applyUMask(FsPermission.getUMask(conf)), initialPermission);
|
||||
} catch(Exception e) {
|
||||
LOGGER.error("Cannot run test", e);
|
||||
return;
|
||||
}
|
||||
|
||||
String filename2 = "foo2";
|
||||
Path f2 = writeFile(localfs, filename2);
|
||||
try {
|
||||
FsPermission initialPermission = getPermission(localfs, f2);
|
||||
initialPermission = getPermission(localfs, f2);
|
||||
Path copyPath = new Path(TEST_PATH_PREFIX + "/foo_copy");
|
||||
localfs.rename(f2, copyPath);
|
||||
FsPermission copyPermission = getPermission(localfs, copyPath);
|
||||
assertEquals(copyPermission, initialPermission);
|
||||
f2 = copyPath;
|
||||
} catch (Exception e) {
|
||||
LOGGER.error("Cannot run test", e);
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
// create files and manipulate them.
|
||||
FsPermission all = new FsPermission((short)0777);
|
||||
FsPermission none = new FsPermission((short)0);
|
||||
|
@ -179,8 +166,7 @@ public class TestLocalFileSystemPermission {
|
|||
|
||||
localfs.setPermission(f, all);
|
||||
assertEquals(all, getPermission(localfs, f));
|
||||
}
|
||||
finally {
|
||||
} finally {
|
||||
cleanup(localfs, f);
|
||||
cleanup(localfs, f1);
|
||||
if (localfs.exists(f2)) {
|
||||
|
@ -196,33 +182,19 @@ public class TestLocalFileSystemPermission {
|
|||
/** Test LocalFileSystem.setOwner. */
|
||||
@Test
|
||||
public void testLocalFSsetOwner() throws IOException {
|
||||
if (Path.WINDOWS) {
|
||||
LOGGER.info("Cannot run test for Windows");
|
||||
return;
|
||||
}
|
||||
assumeNotWindows();
|
||||
|
||||
Configuration conf = new Configuration();
|
||||
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "044");
|
||||
LocalFileSystem localfs = FileSystem.getLocal(conf);
|
||||
String filename = "bar";
|
||||
Path f = writeFile(localfs, filename);
|
||||
List<String> groups = null;
|
||||
List<String> groups;
|
||||
try {
|
||||
groups = getGroups();
|
||||
LOGGER.info("{}: {}", filename, getPermission(localfs, f));
|
||||
}
|
||||
catch(IOException e) {
|
||||
LOGGER.error("Cannot run test", e);
|
||||
return;
|
||||
}
|
||||
if (groups == null || groups.size() < 1) {
|
||||
LOGGER.error("Cannot run test: need at least one group. groups={}",
|
||||
groups);
|
||||
return;
|
||||
}
|
||||
|
||||
// create files and manipulate them.
|
||||
try {
|
||||
// create files and manipulate them.
|
||||
String g0 = groups.get(0);
|
||||
localfs.setOwner(f, null, g0);
|
||||
assertEquals(g0, getGroup(localfs, f));
|
||||
|
@ -235,8 +207,9 @@ public class TestLocalFileSystemPermission {
|
|||
LOGGER.info("Not testing changing the group since user " +
|
||||
"belongs to only one group.");
|
||||
}
|
||||
}
|
||||
finally {cleanup(localfs, f);}
|
||||
} finally {
|
||||
cleanup(localfs, f);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -250,10 +223,7 @@ public class TestLocalFileSystemPermission {
|
|||
*/
|
||||
@Test
|
||||
public void testSetUmaskInRealTime() throws Exception {
|
||||
if (Path.WINDOWS) {
|
||||
LOGGER.info("Cannot run test for Windows");
|
||||
return;
|
||||
}
|
||||
assumeNotWindows();
|
||||
|
||||
LocalFileSystem localfs = FileSystem.getLocal(new Configuration());
|
||||
Configuration conf = localfs.getConf();
|
||||
|
@ -289,9 +259,10 @@ public class TestLocalFileSystemPermission {
|
|||
}
|
||||
|
||||
static List<String> getGroups() throws IOException {
|
||||
List<String> a = new ArrayList<String>();
|
||||
List<String> a = new ArrayList<>();
|
||||
String s = Shell.execCommand(Shell.getGroupsCommand());
|
||||
for(StringTokenizer t = new StringTokenizer(s); t.hasMoreTokens(); ) {
|
||||
StringTokenizer t = new StringTokenizer(s);
|
||||
while (t.hasMoreTokens()) {
|
||||
a.add(t.nextToken());
|
||||
}
|
||||
return a;
|
||||
|
|
|
@ -17,9 +17,14 @@
|
|||
*/
|
||||
|
||||
package org.apache.hadoop.fs;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
|
||||
import java.io.ByteArrayInputStream;
|
||||
import java.io.ByteArrayOutputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.ObjectInputStream;
|
||||
import java.io.ObjectOutputStream;
|
||||
import java.net.URI;
|
||||
import java.net.URISyntaxException;
|
||||
import java.util.Arrays;
|
||||
|
@ -506,4 +511,19 @@ public class TestPath {
|
|||
assertFalse(Path.isWindowsAbsolutePath("C:test", false));
|
||||
assertFalse(Path.isWindowsAbsolutePath("/C:test", true));
|
||||
}
|
||||
|
||||
@Test(timeout = 30000)
|
||||
public void testSerDeser() throws Throwable {
|
||||
Path source = new Path("hdfs://localhost:4040/scratch");
|
||||
ByteArrayOutputStream baos = new ByteArrayOutputStream(256);
|
||||
try(ObjectOutputStream oos = new ObjectOutputStream(baos)) {
|
||||
oos.writeObject(source);
|
||||
}
|
||||
ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
|
||||
try (ObjectInputStream ois = new ObjectInputStream(bais)) {
|
||||
Path deser = (Path) ois.readObject();
|
||||
Assert.assertEquals(source, deser);
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
|
|
@ -17,8 +17,13 @@
|
|||
*/
|
||||
package org.apache.hadoop.ipc;
|
||||
|
||||
import com.google.common.base.Joiner;
|
||||
import com.google.protobuf.BlockingService;
|
||||
import java.io.IOException;
|
||||
import java.lang.management.ManagementFactory;
|
||||
import java.lang.management.ThreadMXBean;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.security.PrivilegedExceptionAction;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
import org.apache.commons.cli.CommandLine;
|
||||
import org.apache.commons.cli.CommandLineParser;
|
||||
import org.apache.commons.cli.GnuParser;
|
||||
|
@ -29,6 +34,7 @@ import org.apache.commons.cli.ParseException;
|
|||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.CommonConfigurationKeys;
|
||||
import org.apache.hadoop.ipc.RPC.Server;
|
||||
import org.apache.hadoop.ipc.TestRPC.TestProtocol;
|
||||
import org.apache.hadoop.ipc.protobuf.TestProtos.EchoRequestProto;
|
||||
import org.apache.hadoop.ipc.protobuf.TestProtos.EchoResponseProto;
|
||||
import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
|
||||
|
@ -39,12 +45,8 @@ import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
|
|||
import org.apache.hadoop.util.Tool;
|
||||
import org.apache.hadoop.util.ToolRunner;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.lang.management.ManagementFactory;
|
||||
import java.lang.management.ThreadMXBean;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.security.PrivilegedExceptionAction;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import com.google.common.base.Joiner;
|
||||
import com.google.protobuf.BlockingService;
|
||||
|
||||
/**
|
||||
* Benchmark for protobuf RPC.
|
||||
|
@ -66,7 +68,7 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
|
|||
public int secondsToRun = 15;
|
||||
private int msgSize = 1024;
|
||||
public Class<? extends RpcEngine> rpcEngine =
|
||||
ProtobufRpcEngine.class;
|
||||
WritableRpcEngine.class;
|
||||
|
||||
private MyOptions(String args[]) {
|
||||
try {
|
||||
|
@ -133,7 +135,7 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
|
|||
|
||||
opts.addOption(
|
||||
OptionBuilder.withLongOpt("engine").hasArg(true)
|
||||
.withArgName("protobuf")
|
||||
.withArgName("writable|protobuf")
|
||||
.withDescription("engine to use")
|
||||
.create('e'));
|
||||
|
||||
|
@ -182,6 +184,8 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
|
|||
String eng = line.getOptionValue('e');
|
||||
if ("protobuf".equals(eng)) {
|
||||
rpcEngine = ProtobufRpcEngine.class;
|
||||
} else if ("writable".equals(eng)) {
|
||||
rpcEngine = WritableRpcEngine.class;
|
||||
} else {
|
||||
throw new ParseException("invalid engine: " + eng);
|
||||
}
|
||||
|
@ -233,6 +237,11 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
|
|||
server = new RPC.Builder(conf).setProtocol(TestRpcService.class)
|
||||
.setInstance(service).setBindAddress(opts.host).setPort(opts.getPort())
|
||||
.setNumHandlers(opts.serverThreads).setVerbose(false).build();
|
||||
} else if (opts.rpcEngine == WritableRpcEngine.class) {
|
||||
server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
|
||||
.setInstance(new TestRPC.TestImpl()).setBindAddress(opts.host)
|
||||
.setPort(opts.getPort()).setNumHandlers(opts.serverThreads)
|
||||
.setVerbose(false).build();
|
||||
} else {
|
||||
throw new RuntimeException("Bad engine: " + opts.rpcEngine);
|
||||
}
|
||||
|
@ -390,6 +399,15 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
|
|||
return responseProto.getMessage();
|
||||
}
|
||||
};
|
||||
} else if (opts.rpcEngine == WritableRpcEngine.class) {
|
||||
final TestProtocol proxy = RPC.getProxy(
|
||||
TestProtocol.class, TestProtocol.versionID, addr, conf);
|
||||
return new RpcServiceWrapper() {
|
||||
@Override
|
||||
public String doEcho(String msg) throws Exception {
|
||||
return proxy.echo(msg);
|
||||
}
|
||||
};
|
||||
} else {
|
||||
throw new RuntimeException("unsupported engine: " + opts.rpcEngine);
|
||||
}
|
||||
|
|
|
@ -40,6 +40,7 @@ import java.io.OutputStream;
|
|||
import java.lang.reflect.Method;
|
||||
import java.lang.reflect.Proxy;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.net.ServerSocket;
|
||||
import java.net.Socket;
|
||||
import java.net.SocketTimeoutException;
|
||||
import java.util.ArrayList;
|
||||
|
@ -49,6 +50,8 @@ import java.util.Random;
|
|||
import java.util.concurrent.BrokenBarrierException;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.CyclicBarrier;
|
||||
import java.util.concurrent.ExecutorService;
|
||||
import java.util.concurrent.Executors;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
@ -76,6 +79,9 @@ import org.apache.hadoop.ipc.Server.Connection;
|
|||
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto;
|
||||
import org.apache.hadoop.net.ConnectTimeoutException;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.apache.hadoop.security.SecurityUtil;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
|
||||
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
|
@ -112,6 +118,8 @@ public class TestIPC {
|
|||
public void setupConf() {
|
||||
conf = new Configuration();
|
||||
Client.setPingInterval(conf, PING_INTERVAL);
|
||||
// tests may enable security, so disable before each test
|
||||
UserGroupInformation.setConfiguration(conf);
|
||||
}
|
||||
|
||||
static final Random RANDOM = new Random();
|
||||
|
@ -123,8 +131,8 @@ public class TestIPC {
|
|||
|
||||
static ConnectionId getConnectionId(InetSocketAddress addr, int rpcTimeout,
|
||||
Configuration conf) throws IOException {
|
||||
return ConnectionId.getConnectionId(addr, null, null, rpcTimeout, null,
|
||||
conf);
|
||||
return ConnectionId.getConnectionId(addr, null,
|
||||
UserGroupInformation.getCurrentUser(), rpcTimeout, null, conf);
|
||||
}
|
||||
|
||||
static Writable call(Client client, InetSocketAddress addr,
|
||||
|
@ -1402,6 +1410,80 @@ public class TestIPC {
|
|||
client.stop();
|
||||
}
|
||||
|
||||
@Test(timeout=4000)
|
||||
public void testInsecureVersionMismatch() throws IOException {
|
||||
checkVersionMismatch();
|
||||
}
|
||||
|
||||
@Test(timeout=4000)
|
||||
public void testSecureVersionMismatch() throws IOException {
|
||||
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
|
||||
UserGroupInformation.setConfiguration(conf);
|
||||
checkVersionMismatch();
|
||||
}
|
||||
|
||||
private void checkVersionMismatch() throws IOException {
|
||||
try (final ServerSocket listenSocket = new ServerSocket()) {
|
||||
listenSocket.bind(null);
|
||||
InetSocketAddress addr =
|
||||
(InetSocketAddress) listenSocket.getLocalSocketAddress();
|
||||
|
||||
// open a socket that accepts a client and immediately returns
|
||||
// a version mismatch exception.
|
||||
ExecutorService executor = Executors.newSingleThreadExecutor();
|
||||
executor.submit(new Runnable(){
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
Socket socket = listenSocket.accept();
|
||||
socket.getOutputStream().write(
|
||||
NetworkTraces.RESPONSE_TO_HADOOP_0_20_3_RPC);
|
||||
socket.close();
|
||||
} catch (Throwable t) {
|
||||
// ignore.
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
try {
|
||||
Client client = new Client(LongWritable.class, conf);
|
||||
call(client, 0, addr, conf);
|
||||
} catch (RemoteException re) {
|
||||
Assert.assertEquals(RPC.VersionMismatch.class.getName(),
|
||||
re.getClassName());
|
||||
Assert.assertEquals(NetworkTraces.HADOOP0_20_ERROR_MSG,
|
||||
re.getMessage());
|
||||
return;
|
||||
}
|
||||
Assert.fail("didn't get version mismatch");
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRpcResponseLimit() throws Throwable {
|
||||
Server server = new TestServer(1, false);
|
||||
InetSocketAddress addr = NetUtils.getConnectAddress(server);
|
||||
server.start();
|
||||
|
||||
conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_RESPONSE_LENGTH, 0);
|
||||
Client client = new Client(LongWritable.class, conf);
|
||||
call(client, 0, addr, conf);
|
||||
|
||||
conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_RESPONSE_LENGTH, 4);
|
||||
client = new Client(LongWritable.class, conf);
|
||||
try {
|
||||
call(client, 0, addr, conf);
|
||||
} catch (IOException ioe) {
|
||||
Throwable t = ioe.getCause();
|
||||
Assert.assertNotNull(t);
|
||||
Assert.assertEquals(RpcException.class, t.getClass());
|
||||
Assert.assertEquals("RPC response exceeds maximum data length",
|
||||
t.getMessage());
|
||||
return;
|
||||
}
|
||||
Assert.fail("didn't get limit exceeded");
|
||||
}
|
||||
|
||||
private void doIpcVersionTest(
|
||||
byte[] requestData,
|
||||
byte[] expectedResponse) throws IOException {
|
||||
|
|
|
@ -17,28 +17,252 @@
|
|||
*/
|
||||
package org.apache.hadoop.ipc;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
|
||||
import org.junit.Assert;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.junit.After;
|
||||
import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos.TestProtobufRpcProto;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.junit.Before;
|
||||
import org.junit.After;
|
||||
import org.junit.Test;
|
||||
import com.google.protobuf.BlockingService;
|
||||
|
||||
public class TestMultipleProtocolServer extends TestRpcBase {
|
||||
|
||||
private static InetSocketAddress addr;
|
||||
private static RPC.Server server;
|
||||
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
super.setupConf();
|
||||
|
||||
server = setupTestServer(conf, 2);
|
||||
private static Configuration conf = new Configuration();
|
||||
|
||||
|
||||
@ProtocolInfo(protocolName="Foo")
|
||||
interface Foo0 extends VersionedProtocol {
|
||||
public static final long versionID = 0L;
|
||||
String ping() throws IOException;
|
||||
|
||||
}
|
||||
|
||||
@ProtocolInfo(protocolName="Foo")
|
||||
interface Foo1 extends VersionedProtocol {
|
||||
public static final long versionID = 1L;
|
||||
String ping() throws IOException;
|
||||
String ping2() throws IOException;
|
||||
}
|
||||
|
||||
@ProtocolInfo(protocolName="Foo")
|
||||
interface FooUnimplemented extends VersionedProtocol {
|
||||
public static final long versionID = 2L;
|
||||
String ping() throws IOException;
|
||||
}
|
||||
|
||||
interface Mixin extends VersionedProtocol{
|
||||
public static final long versionID = 0L;
|
||||
void hello() throws IOException;
|
||||
}
|
||||
|
||||
interface Bar extends Mixin {
|
||||
public static final long versionID = 0L;
|
||||
int echo(int i) throws IOException;
|
||||
}
|
||||
|
||||
class Foo0Impl implements Foo0 {
|
||||
|
||||
@Override
|
||||
public long getProtocolVersion(String protocol, long clientVersion)
|
||||
throws IOException {
|
||||
return Foo0.versionID;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
@Override
|
||||
public ProtocolSignature getProtocolSignature(String protocol,
|
||||
long clientVersion, int clientMethodsHash) throws IOException {
|
||||
Class<? extends VersionedProtocol> inter;
|
||||
try {
|
||||
inter = (Class<? extends VersionedProtocol>)getClass().
|
||||
getGenericInterfaces()[0];
|
||||
} catch (Exception e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
|
||||
getProtocolVersion(protocol, clientVersion), inter);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String ping() {
|
||||
return "Foo0";
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
class Foo1Impl implements Foo1 {
|
||||
|
||||
@Override
|
||||
public long getProtocolVersion(String protocol, long clientVersion)
|
||||
throws IOException {
|
||||
return Foo1.versionID;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
@Override
|
||||
public ProtocolSignature getProtocolSignature(String protocol,
|
||||
long clientVersion, int clientMethodsHash) throws IOException {
|
||||
Class<? extends VersionedProtocol> inter;
|
||||
try {
|
||||
inter = (Class<? extends VersionedProtocol>)getClass().
|
||||
getGenericInterfaces()[0];
|
||||
} catch (Exception e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
|
||||
getProtocolVersion(protocol, clientVersion), inter);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String ping() {
|
||||
return "Foo1";
|
||||
}
|
||||
|
||||
@Override
|
||||
public String ping2() {
|
||||
return "Foo1";
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
class BarImpl implements Bar {
|
||||
|
||||
@Override
|
||||
public long getProtocolVersion(String protocol, long clientVersion)
|
||||
throws IOException {
|
||||
return Bar.versionID;
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
@Override
|
||||
public ProtocolSignature getProtocolSignature(String protocol,
|
||||
long clientVersion, int clientMethodsHash) throws IOException {
|
||||
Class<? extends VersionedProtocol> inter;
|
||||
try {
|
||||
inter = (Class<? extends VersionedProtocol>)getClass().
|
||||
getGenericInterfaces()[0];
|
||||
} catch (Exception e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
return ProtocolSignature.getProtocolSignature(clientMethodsHash,
|
||||
getProtocolVersion(protocol, clientVersion), inter);
|
||||
}
|
||||
|
||||
@Override
|
||||
public int echo(int i) {
|
||||
return i;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void hello() {
|
||||
|
||||
|
||||
}
|
||||
}
|
||||
@Before
|
||||
public void setUp() throws Exception {
|
||||
// create a server with two handlers
|
||||
server = new RPC.Builder(conf).setProtocol(Foo0.class)
|
||||
.setInstance(new Foo0Impl()).setBindAddress(ADDRESS).setPort(0)
|
||||
.setNumHandlers(2).setVerbose(false).build();
|
||||
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Foo1.class, new Foo1Impl());
|
||||
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Bar.class, new BarImpl());
|
||||
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, Mixin.class, new BarImpl());
|
||||
|
||||
|
||||
// Add Protobuf server
|
||||
// Create server side implementation
|
||||
PBServerImpl pbServerImpl = new PBServerImpl();
|
||||
BlockingService service = TestProtobufRpcProto
|
||||
.newReflectiveBlockingService(pbServerImpl);
|
||||
server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, TestRpcService.class,
|
||||
service);
|
||||
server.start();
|
||||
addr = NetUtils.getConnectAddress(server);
|
||||
}
|
||||
|
||||
@After
|
||||
public void tearDown() throws Exception {
|
||||
server.stop();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void test1() throws IOException {
|
||||
ProtocolProxy<?> proxy;
|
||||
proxy = RPC.getProtocolProxy(Foo0.class, Foo0.versionID, addr, conf);
|
||||
|
||||
Foo0 foo0 = (Foo0)proxy.getProxy();
|
||||
Assert.assertEquals("Foo0", foo0.ping());
|
||||
|
||||
|
||||
proxy = RPC.getProtocolProxy(Foo1.class, Foo1.versionID, addr, conf);
|
||||
|
||||
|
||||
Foo1 foo1 = (Foo1)proxy.getProxy();
|
||||
Assert.assertEquals("Foo1", foo1.ping());
|
||||
Assert.assertEquals("Foo1", foo1.ping());
|
||||
|
||||
|
||||
proxy = RPC.getProtocolProxy(Bar.class, Foo1.versionID, addr, conf);
|
||||
|
||||
|
||||
Bar bar = (Bar)proxy.getProxy();
|
||||
Assert.assertEquals(99, bar.echo(99));
|
||||
|
||||
// Now test Mixin class method
|
||||
|
||||
Mixin mixin = bar;
|
||||
mixin.hello();
|
||||
}
|
||||
|
||||
|
||||
// Server does not implement the FooUnimplemented version of protocol Foo.
|
||||
// See that calls to it fail.
|
||||
@Test(expected=IOException.class)
|
||||
public void testNonExistingProtocol() throws IOException {
|
||||
ProtocolProxy<?> proxy;
|
||||
proxy = RPC.getProtocolProxy(FooUnimplemented.class,
|
||||
FooUnimplemented.versionID, addr, conf);
|
||||
|
||||
FooUnimplemented foo = (FooUnimplemented)proxy.getProxy();
|
||||
foo.ping();
|
||||
}
|
||||
|
||||
/**
|
||||
* getProtocolVersion of an unimplemented version should return highest version
|
||||
* Similarly getProtocolSignature should work.
|
||||
* @throws IOException
|
||||
*/
|
||||
@Test
|
||||
public void testNonExistingProtocol2() throws IOException {
|
||||
ProtocolProxy<?> proxy;
|
||||
proxy = RPC.getProtocolProxy(FooUnimplemented.class,
|
||||
FooUnimplemented.versionID, addr, conf);
|
||||
|
||||
FooUnimplemented foo = (FooUnimplemented)proxy.getProxy();
|
||||
Assert.assertEquals(Foo1.versionID,
|
||||
foo.getProtocolVersion(RPC.getProtocolName(FooUnimplemented.class),
|
||||
FooUnimplemented.versionID));
|
||||
foo.getProtocolSignature(RPC.getProtocolName(FooUnimplemented.class),
|
||||
FooUnimplemented.versionID, 0);
|
||||
}
|
||||
|
||||
@Test(expected=IOException.class)
|
||||
public void testIncorrectServerCreation() throws IOException {
|
||||
new RPC.Builder(conf).setProtocol(Foo1.class).setInstance(new Foo0Impl())
|
||||
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(2).setVerbose(false)
|
||||
.build();
|
||||
}
|
||||
|
||||
// Now test a PB service - a server hosts both PB and Writable Rpcs.
|
||||
@Test
|
||||
public void testPBService() throws Exception {
|
||||
|
|
|
@ -25,6 +25,19 @@ import org.junit.Test;
|
|||
|
||||
public class TestRPCCallBenchmark {
|
||||
|
||||
@Test(timeout=20000)
|
||||
public void testBenchmarkWithWritable() throws Exception {
|
||||
int rc = ToolRunner.run(new RPCCallBenchmark(),
|
||||
new String[] {
|
||||
"--clientThreads", "30",
|
||||
"--serverThreads", "30",
|
||||
"--time", "5",
|
||||
"--serverReaderThreads", "4",
|
||||
"--messageSize", "1024",
|
||||
"--engine", "writable"});
|
||||
assertEquals(0, rc);
|
||||
}
|
||||
|
||||
@Test(timeout=20000)
|
||||
public void testBenchmarkWithProto() throws Exception {
|
||||
int rc = ToolRunner.run(new RPCCallBenchmark(),
|
||||
|
|
|
@ -18,19 +18,27 @@
|
|||
|
||||
package org.apache.hadoop.ipc;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.lang.reflect.Method;
|
||||
import java.net.InetSocketAddress;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import org.junit.Assert;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureRequestProto;
|
||||
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureResponseProto;
|
||||
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolSignatureProto;
|
||||
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcErrorCodeProto;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.junit.After;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
/** Unit test for supporting method-name based compatible RPCs. */
|
||||
public class TestRPCCompatibility {
|
||||
|
@ -41,7 +49,7 @@ public class TestRPCCompatibility {
|
|||
|
||||
public static final Log LOG =
|
||||
LogFactory.getLog(TestRPCCompatibility.class);
|
||||
|
||||
|
||||
private static Configuration conf = new Configuration();
|
||||
|
||||
public interface TestProtocol0 extends VersionedProtocol {
|
||||
|
@ -112,21 +120,6 @@ public class TestRPCCompatibility {
|
|||
@Before
|
||||
public void setUp() {
|
||||
ProtocolSignature.resetCache();
|
||||
|
||||
RPC.setProtocolEngine(conf,
|
||||
TestProtocol0.class, ProtobufRpcEngine.class);
|
||||
|
||||
RPC.setProtocolEngine(conf,
|
||||
TestProtocol1.class, ProtobufRpcEngine.class);
|
||||
|
||||
RPC.setProtocolEngine(conf,
|
||||
TestProtocol2.class, ProtobufRpcEngine.class);
|
||||
|
||||
RPC.setProtocolEngine(conf,
|
||||
TestProtocol3.class, ProtobufRpcEngine.class);
|
||||
|
||||
RPC.setProtocolEngine(conf,
|
||||
TestProtocol4.class, ProtobufRpcEngine.class);
|
||||
}
|
||||
|
||||
@After
|
||||
|
@ -140,7 +133,117 @@ public class TestRPCCompatibility {
|
|||
server = null;
|
||||
}
|
||||
}
|
||||
|
||||
@Test // old client vs new server
|
||||
public void testVersion0ClientVersion1Server() throws Exception {
|
||||
// create a server with two handlers
|
||||
TestImpl1 impl = new TestImpl1();
|
||||
server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
|
||||
.setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
|
||||
.setVerbose(false).build();
|
||||
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
|
||||
server.start();
|
||||
addr = NetUtils.getConnectAddress(server);
|
||||
|
||||
proxy = RPC.getProtocolProxy(
|
||||
TestProtocol0.class, TestProtocol0.versionID, addr, conf);
|
||||
|
||||
TestProtocol0 proxy0 = (TestProtocol0)proxy.getProxy();
|
||||
proxy0.ping();
|
||||
}
|
||||
|
||||
@Test // old client vs new server
|
||||
public void testVersion1ClientVersion0Server() throws Exception {
|
||||
// create a server with two handlers
|
||||
server = new RPC.Builder(conf).setProtocol(TestProtocol0.class)
|
||||
.setInstance(new TestImpl0()).setBindAddress(ADDRESS).setPort(0)
|
||||
.setNumHandlers(2).setVerbose(false).build();
|
||||
server.start();
|
||||
addr = NetUtils.getConnectAddress(server);
|
||||
|
||||
proxy = RPC.getProtocolProxy(
|
||||
TestProtocol1.class, TestProtocol1.versionID, addr, conf);
|
||||
|
||||
TestProtocol1 proxy1 = (TestProtocol1)proxy.getProxy();
|
||||
proxy1.ping();
|
||||
try {
|
||||
proxy1.echo("hello");
|
||||
fail("Echo should fail");
|
||||
} catch(IOException e) {
|
||||
}
|
||||
}
|
||||
|
||||
private class Version2Client {
|
||||
|
||||
private TestProtocol2 proxy2;
|
||||
private ProtocolProxy<TestProtocol2> serverInfo;
|
||||
|
||||
private Version2Client() throws IOException {
|
||||
serverInfo = RPC.getProtocolProxy(
|
||||
TestProtocol2.class, TestProtocol2.versionID, addr, conf);
|
||||
proxy2 = serverInfo.getProxy();
|
||||
}
|
||||
|
||||
public int echo(int value) throws IOException, NumberFormatException {
|
||||
if (serverInfo.isMethodSupported("echo", int.class)) {
|
||||
System.out.println("echo int is supported");
|
||||
return -value; // use version 3 echo long
|
||||
} else { // server is version 2
|
||||
System.out.println("echo int is NOT supported");
|
||||
return Integer.parseInt(proxy2.echo(String.valueOf(value)));
|
||||
}
|
||||
}
|
||||
|
||||
public String echo(String value) throws IOException {
|
||||
return proxy2.echo(value);
|
||||
}
|
||||
|
||||
public void ping() throws IOException {
|
||||
proxy2.ping();
|
||||
}
|
||||
}
|
||||
|
||||
@Test // Compatible new client & old server
|
||||
public void testVersion2ClientVersion1Server() throws Exception {
|
||||
// create a server with two handlers
|
||||
TestImpl1 impl = new TestImpl1();
|
||||
server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
|
||||
.setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
|
||||
.setVerbose(false).build();
|
||||
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
|
||||
server.start();
|
||||
addr = NetUtils.getConnectAddress(server);
|
||||
|
||||
|
||||
Version2Client client = new Version2Client();
|
||||
client.ping();
|
||||
assertEquals("hello", client.echo("hello"));
|
||||
|
||||
// echo(int) is not supported by server, so returning 3
|
||||
// This verifies that echo(int) and echo(String)'s hash codes are different
|
||||
assertEquals(3, client.echo(3));
|
||||
}
|
||||
|
||||
@Test // equal version client and server
|
||||
public void testVersion2ClientVersion2Server() throws Exception {
|
||||
// create a server with two handlers
|
||||
TestImpl2 impl = new TestImpl2();
|
||||
server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
|
||||
.setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
|
||||
.setVerbose(false).build();
|
||||
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
|
||||
server.start();
|
||||
addr = NetUtils.getConnectAddress(server);
|
||||
|
||||
Version2Client client = new Version2Client();
|
||||
|
||||
client.ping();
|
||||
assertEquals("hello", client.echo("hello"));
|
||||
|
||||
// now that echo(int) is supported by the server, echo(int) should return -3
|
||||
assertEquals(-3, client.echo(3));
|
||||
}
|
||||
|
||||
public interface TestProtocol3 {
|
||||
int echo(String value);
|
||||
int echo(int value);
|
||||
|
@ -194,4 +297,97 @@ public class TestRPCCompatibility {
|
|||
@Override
|
||||
int echo(int value) throws IOException;
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testVersionMismatch() throws IOException {
|
||||
server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
|
||||
.setInstance(new TestImpl2()).setBindAddress(ADDRESS).setPort(0)
|
||||
.setNumHandlers(2).setVerbose(false).build();
|
||||
server.start();
|
||||
addr = NetUtils.getConnectAddress(server);
|
||||
|
||||
TestProtocol4 proxy = RPC.getProxy(TestProtocol4.class,
|
||||
TestProtocol4.versionID, addr, conf);
|
||||
try {
|
||||
proxy.echo(21);
|
||||
fail("The call must throw VersionMismatch exception");
|
||||
} catch (RemoteException ex) {
|
||||
Assert.assertEquals(RPC.VersionMismatch.class.getName(),
|
||||
ex.getClassName());
|
||||
Assert.assertTrue(ex.getErrorCode().equals(
|
||||
RpcErrorCodeProto.ERROR_RPC_VERSION_MISMATCH));
|
||||
} catch (IOException ex) {
|
||||
fail("Expected version mismatch but got " + ex);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testIsMethodSupported() throws IOException {
|
||||
server = new RPC.Builder(conf).setProtocol(TestProtocol2.class)
|
||||
.setInstance(new TestImpl2()).setBindAddress(ADDRESS).setPort(0)
|
||||
.setNumHandlers(2).setVerbose(false).build();
|
||||
server.start();
|
||||
addr = NetUtils.getConnectAddress(server);
|
||||
|
||||
TestProtocol2 proxy = RPC.getProxy(TestProtocol2.class,
|
||||
TestProtocol2.versionID, addr, conf);
|
||||
boolean supported = RpcClientUtil.isMethodSupported(proxy,
|
||||
TestProtocol2.class, RPC.RpcKind.RPC_WRITABLE,
|
||||
RPC.getProtocolVersion(TestProtocol2.class), "echo");
|
||||
Assert.assertTrue(supported);
|
||||
supported = RpcClientUtil.isMethodSupported(proxy,
|
||||
TestProtocol2.class, RPC.RpcKind.RPC_PROTOCOL_BUFFER,
|
||||
RPC.getProtocolVersion(TestProtocol2.class), "echo");
|
||||
Assert.assertFalse(supported);
|
||||
}
|
||||
|
||||
/**
|
||||
* Verify that ProtocolMetaInfoServerSideTranslatorPB correctly looks up
|
||||
* the server registry to extract protocol signatures and versions.
|
||||
*/
|
||||
@Test
|
||||
public void testProtocolMetaInfoSSTranslatorPB() throws Exception {
|
||||
TestImpl1 impl = new TestImpl1();
|
||||
server = new RPC.Builder(conf).setProtocol(TestProtocol1.class)
|
||||
.setInstance(impl).setBindAddress(ADDRESS).setPort(0).setNumHandlers(2)
|
||||
.setVerbose(false).build();
|
||||
server.addProtocol(RPC.RpcKind.RPC_WRITABLE, TestProtocol0.class, impl);
|
||||
server.start();
|
||||
|
||||
ProtocolMetaInfoServerSideTranslatorPB xlator =
|
||||
new ProtocolMetaInfoServerSideTranslatorPB(server);
|
||||
|
||||
GetProtocolSignatureResponseProto resp = xlator.getProtocolSignature(
|
||||
null,
|
||||
createGetProtocolSigRequestProto(TestProtocol1.class,
|
||||
RPC.RpcKind.RPC_PROTOCOL_BUFFER));
|
||||
//No signatures should be found
|
||||
Assert.assertEquals(0, resp.getProtocolSignatureCount());
|
||||
resp = xlator.getProtocolSignature(
|
||||
null,
|
||||
createGetProtocolSigRequestProto(TestProtocol1.class,
|
||||
RPC.RpcKind.RPC_WRITABLE));
|
||||
Assert.assertEquals(1, resp.getProtocolSignatureCount());
|
||||
ProtocolSignatureProto sig = resp.getProtocolSignatureList().get(0);
|
||||
Assert.assertEquals(TestProtocol1.versionID, sig.getVersion());
|
||||
boolean found = false;
|
||||
int expected = ProtocolSignature.getFingerprint(TestProtocol1.class
|
||||
.getMethod("echo", String.class));
|
||||
for (int m : sig.getMethodsList()) {
|
||||
if (expected == m) {
|
||||
found = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
Assert.assertTrue(found);
|
||||
}
|
||||
|
||||
private GetProtocolSignatureRequestProto createGetProtocolSigRequestProto(
|
||||
Class<?> protocol, RPC.RpcKind rpcKind) {
|
||||
GetProtocolSignatureRequestProto.Builder builder =
|
||||
GetProtocolSignatureRequestProto.newBuilder();
|
||||
builder.setProtocol(protocol.getName());
|
||||
builder.setRpcKind(rpcKind.toString());
|
||||
return builder.build();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -18,6 +18,8 @@
|
|||
package org.apache.hadoop.ipc;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.*;
|
||||
import org.apache.hadoop.ipc.TestRPC.TestProtocol;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
import org.slf4j.Logger;
|
||||
|
@ -28,13 +30,11 @@ import java.net.ConnectException;
|
|||
import java.net.InetSocketAddress;
|
||||
import java.nio.channels.ClosedByInterruptException;
|
||||
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY;
|
||||
|
||||
/**
|
||||
* tests that the proxy can be interrupted
|
||||
*/
|
||||
public class TestRPCWaitForProxy extends TestRpcBase {
|
||||
public class TestRPCWaitForProxy extends Assert {
|
||||
private static final String ADDRESS = "0.0.0.0";
|
||||
private static final Logger
|
||||
LOG = LoggerFactory.getLogger(TestRPCWaitForProxy.class);
|
||||
|
||||
|
@ -46,15 +46,14 @@ public class TestRPCWaitForProxy extends TestRpcBase {
|
|||
*
|
||||
* @throws Throwable any exception other than that which was expected
|
||||
*/
|
||||
@Test(timeout = 50000)
|
||||
@Test(timeout = 10000)
|
||||
public void testWaitForProxy() throws Throwable {
|
||||
RpcThread worker = new RpcThread(0);
|
||||
worker.start();
|
||||
worker.join();
|
||||
Throwable caught = worker.getCaught();
|
||||
Throwable cause = caught.getCause();
|
||||
Assert.assertNotNull("No exception was raised", cause);
|
||||
if (!(cause instanceof ConnectException)) {
|
||||
assertNotNull("No exception was raised", caught);
|
||||
if (!(caught instanceof ConnectException)) {
|
||||
throw caught;
|
||||
}
|
||||
}
|
||||
|
@ -70,11 +69,11 @@ public class TestRPCWaitForProxy extends TestRpcBase {
|
|||
RpcThread worker = new RpcThread(100);
|
||||
worker.start();
|
||||
Thread.sleep(1000);
|
||||
Assert.assertTrue("worker hasn't started", worker.waitStarted);
|
||||
assertTrue("worker hasn't started", worker.waitStarted);
|
||||
worker.interrupt();
|
||||
worker.join();
|
||||
Throwable caught = worker.getCaught();
|
||||
Assert.assertNotNull("No exception was raised", caught);
|
||||
assertNotNull("No exception was raised", caught);
|
||||
// looking for the root cause here, which can be wrapped
|
||||
// as part of the NetUtils work. Having this test look
|
||||
// a the type of exception there would be brittle to improvements
|
||||
|
@ -83,8 +82,6 @@ public class TestRPCWaitForProxy extends TestRpcBase {
|
|||
if (cause == null) {
|
||||
// no inner cause, use outer exception as root cause.
|
||||
cause = caught;
|
||||
} else if (cause.getCause() != null) {
|
||||
cause = cause.getCause();
|
||||
}
|
||||
if (!(cause instanceof InterruptedIOException)
|
||||
&& !(cause instanceof ClosedByInterruptException)) {
|
||||
|
@ -115,16 +112,12 @@ public class TestRPCWaitForProxy extends TestRpcBase {
|
|||
IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
|
||||
connectRetries);
|
||||
waitStarted = true;
|
||||
|
||||
short invalidPort = 20;
|
||||
InetSocketAddress invalidAddress = new InetSocketAddress(ADDRESS,
|
||||
invalidPort);
|
||||
TestRpcBase.TestRpcService proxy = RPC.getProxy(
|
||||
TestRpcBase.TestRpcService.class,
|
||||
1L, invalidAddress, conf);
|
||||
// Test echo method
|
||||
proxy.echo(null, newEchoRequest("hello"));
|
||||
|
||||
TestProtocol proxy = RPC.waitForProxy(TestProtocol.class,
|
||||
TestProtocol.versionID,
|
||||
new InetSocketAddress(ADDRESS, 20),
|
||||
config,
|
||||
15000L);
|
||||
proxy.echo("");
|
||||
} catch (Throwable throwable) {
|
||||
caught = throwable;
|
||||
}
|
||||
|
|
|
@ -112,8 +112,7 @@ public class TestRpcBase {
|
|||
return setupTestServer(builder);
|
||||
}
|
||||
|
||||
protected static RPC.Server setupTestServer(
|
||||
RPC.Builder builder) throws IOException {
|
||||
protected static RPC.Server setupTestServer(RPC.Builder builder) throws IOException {
|
||||
RPC.Server server = builder.build();
|
||||
|
||||
server.start();
|
||||
|
@ -176,21 +175,17 @@ public class TestRpcBase {
|
|||
public TestTokenIdentifier() {
|
||||
this(new Text(), new Text());
|
||||
}
|
||||
|
||||
public TestTokenIdentifier(Text tokenid) {
|
||||
this(tokenid, new Text());
|
||||
}
|
||||
|
||||
public TestTokenIdentifier(Text tokenid, Text realUser) {
|
||||
this.tokenid = tokenid == null ? new Text() : tokenid;
|
||||
this.realUser = realUser == null ? new Text() : realUser;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Text getKind() {
|
||||
return KIND_NAME;
|
||||
}
|
||||
|
||||
@Override
|
||||
public UserGroupInformation getUser() {
|
||||
if (realUser.toString().isEmpty()) {
|
||||
|
@ -208,7 +203,6 @@ public class TestRpcBase {
|
|||
tokenid.readFields(in);
|
||||
realUser.readFields(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void write(DataOutput out) throws IOException {
|
||||
tokenid.write(out);
|
||||
|
@ -240,7 +234,7 @@ public class TestRpcBase {
|
|||
@SuppressWarnings("unchecked")
|
||||
@Override
|
||||
public Token<TestTokenIdentifier> selectToken(Text service,
|
||||
Collection<Token<? extends TokenIdentifier>> tokens) {
|
||||
Collection<Token<? extends TokenIdentifier>> tokens) {
|
||||
if (service == null) {
|
||||
return null;
|
||||
}
|
||||
|
@ -394,17 +388,19 @@ public class TestRpcBase {
|
|||
}
|
||||
|
||||
@Override
|
||||
public TestProtos.UserResponseProto getAuthUser(
|
||||
public TestProtos.AuthUserResponseProto getAuthUser(
|
||||
RpcController controller, TestProtos.EmptyRequestProto request)
|
||||
throws ServiceException {
|
||||
UserGroupInformation authUser;
|
||||
UserGroupInformation authUser = null;
|
||||
try {
|
||||
authUser = UserGroupInformation.getCurrentUser();
|
||||
} catch (IOException e) {
|
||||
throw new ServiceException(e);
|
||||
}
|
||||
|
||||
return newUserResponse(authUser.getUserName());
|
||||
return TestProtos.AuthUserResponseProto.newBuilder()
|
||||
.setAuthUser(authUser.getUserName())
|
||||
.build();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -436,34 +432,6 @@ public class TestRpcBase {
|
|||
|
||||
return TestProtos.EmptyResponseProto.newBuilder().build();
|
||||
}
|
||||
|
||||
@Override
|
||||
public TestProtos.UserResponseProto getCurrentUser(
|
||||
RpcController controller,
|
||||
TestProtos.EmptyRequestProto request) throws ServiceException {
|
||||
String user;
|
||||
try {
|
||||
user = UserGroupInformation.getCurrentUser().toString();
|
||||
} catch (IOException e) {
|
||||
throw new ServiceException("Failed to get current user", e);
|
||||
}
|
||||
|
||||
return newUserResponse(user);
|
||||
}
|
||||
|
||||
@Override
|
||||
public TestProtos.UserResponseProto getServerRemoteUser(
|
||||
RpcController controller,
|
||||
TestProtos.EmptyRequestProto request) throws ServiceException {
|
||||
String serverRemoteUser = Server.getRemoteUser().toString();
|
||||
return newUserResponse(serverRemoteUser);
|
||||
}
|
||||
|
||||
private TestProtos.UserResponseProto newUserResponse(String user) {
|
||||
return TestProtos.UserResponseProto.newBuilder()
|
||||
.setUser(user)
|
||||
.build();
|
||||
}
|
||||
}
|
||||
|
||||
protected static TestProtos.EmptyRequestProto newEmptyRequest() {
|
||||
|
@ -510,4 +478,8 @@ public class TestRpcBase {
|
|||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
protected static String convert(TestProtos.AuthUserResponseProto response) {
|
||||
return response.getAuthUser();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -45,55 +45,30 @@ import org.junit.runner.RunWith;
|
|||
import org.junit.runners.Parameterized;
|
||||
import org.junit.runners.Parameterized.Parameters;
|
||||
|
||||
import javax.security.auth.callback.Callback;
|
||||
import javax.security.auth.callback.CallbackHandler;
|
||||
import javax.security.auth.callback.NameCallback;
|
||||
import javax.security.auth.callback.PasswordCallback;
|
||||
import javax.security.auth.callback.UnsupportedCallbackException;
|
||||
import javax.security.sasl.AuthorizeCallback;
|
||||
import javax.security.sasl.Sasl;
|
||||
import javax.security.sasl.SaslClient;
|
||||
import javax.security.sasl.SaslException;
|
||||
import javax.security.sasl.SaslServer;
|
||||
import javax.security.auth.callback.*;
|
||||
import javax.security.sasl.*;
|
||||
import java.io.IOException;
|
||||
import java.lang.annotation.Annotation;
|
||||
import java.net.InetAddress;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.security.PrivilegedExceptionAction;
|
||||
import java.security.Security;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.ExecutorService;
|
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Pattern;

import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.KERBEROS;
import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.SIMPLE;
import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.TOKEN;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.*;
import static org.junit.Assert.*;

/** Unit tests for using Sasl over RPC. */
@RunWith(Parameterized.class)
public class TestSaslRPC extends TestRpcBase {
@Parameters
public static Collection<Object[]> data() {
Collection<Object[]> params = new ArrayList<>();
Collection<Object[]> params = new ArrayList<Object[]>();
for (QualityOfProtection qop : QualityOfProtection.values()) {
params.add(new Object[]{ new QualityOfProtection[]{qop},qop, null });
}

@@ -139,7 +114,7 @@ public class TestSaslRPC extends TestRpcBase {
NONE(),
VALID(),
INVALID(),
OTHER()
OTHER();
}

@BeforeClass

@@ -255,7 +230,7 @@ public class TestSaslRPC extends TestRpcBase {
final Server server = setupTestServer(conf, 5, sm);
doDigestRpc(server, sm);
} finally {
SecurityUtil.setSecurityInfoProviders();
SecurityUtil.setSecurityInfoProviders(new SecurityInfo[0]);
}
}

@@ -284,7 +259,7 @@ public class TestSaslRPC extends TestRpcBase {
addr = NetUtils.getConnectAddress(server);
TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
.getUserName()));
Token<TestTokenIdentifier> token = new Token<>(tokenId, sm);
Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId, sm);
SecurityUtil.setTokenService(token, addr);
current.addToken(token);

@@ -321,8 +296,8 @@ public class TestSaslRPC extends TestRpcBase {

// set doPing to true
newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
ConnectionId remoteId = ConnectionId.getConnectionId(
new InetSocketAddress(0), TestRpcService.class, null, 0, null, newConf);
ConnectionId remoteId = ConnectionId.getConnectionId(new InetSocketAddress(0),
TestRpcService.class, null, 0, null, newConf);
assertEquals(CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT,
remoteId.getPingInterval());
// set doPing to false

@@ -831,13 +806,13 @@ public class TestSaslRPC extends TestRpcBase {
final TestTokenSecretManager sm = new TestTokenSecretManager();
boolean useSecretManager = (serverAuth != SIMPLE);
if (enableSecretManager != null) {
useSecretManager &= enableSecretManager;
useSecretManager &= enableSecretManager.booleanValue();
}
if (forceSecretManager != null) {
useSecretManager |= forceSecretManager;
useSecretManager |= forceSecretManager.booleanValue();
}
final SecretManager<?> serverSm = useSecretManager ? sm : null;

Server server = serverUgi.doAs(new PrivilegedExceptionAction<Server>() {
@Override
public Server run() throws IOException {

@@ -892,13 +867,13 @@ public class TestSaslRPC extends TestRpcBase {
proxy.ping(null, newEmptyRequest());
// make sure the other side thinks we are who we said we are!!!
assertEquals(clientUgi.getUserName(),
proxy.getAuthUser(null, newEmptyRequest()).getUser());
convert(proxy.getAuthUser(null, newEmptyRequest())));
AuthMethod authMethod =
convert(proxy.getAuthMethod(null, newEmptyRequest()));
// verify sasl completed with correct QOP
assertEquals((authMethod != SIMPLE) ? expectedQop.saslQop : null,
RPC.getConnectionIdForProxy(proxy).getSaslQop());
return authMethod != null ? authMethod.toString() : null;
RPC.getConnectionIdForProxy(proxy).getSaslQop());
return authMethod.toString();
} catch (ServiceException se) {
if (se.getCause() instanceof RemoteException) {
throw (RemoteException) se.getCause();

@@ -923,18 +898,21 @@ public class TestSaslRPC extends TestRpcBase {
String actual) {
assertEquals(expect.toString(), actual);
}

private static void assertAuthEquals(Pattern expect, String actual) {

private static void assertAuthEquals(Pattern expect,
String actual) {
// this allows us to see the regexp and the value it didn't match
if (!expect.matcher(actual).matches()) {
fail(); // it failed
assertEquals(expect, actual); // it failed
} else {
assertTrue(true); // it matched
}
}
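In the new variant of assertAuthEquals above, a failed regex match is reported through assertEquals(expect, actual) instead of a bare fail(), so the JUnit failure message carries both the pattern and the offending string. A minimal, hypothetical JUnit 4 helper showing the same trick in isolation (class and method names are ours, not part of this patch):

  import static org.junit.Assert.assertEquals;

  import java.util.regex.Pattern;

  public final class RegexAsserts {
    private RegexAsserts() {}

    /** Fail with a message that shows both the pattern and the value. */
    public static void assertMatches(Pattern expected, String actual) {
      if (!expected.matcher(actual).matches()) {
        // Intentionally compares a Pattern with a String: it always fails,
        // but the failure text prints the expected pattern and the actual value.
        assertEquals(expected, actual);
      }
    }
  }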

/*
* Class used to test overriding QOP values using SaslPropertiesResolver
*/
static class AuthSaslPropertiesResolver extends SaslPropertiesResolver {
static class AuthSaslPropertiesResolver extends SaslPropertiesResolver{

@Override
public Map<String, String> getServerProperties(InetAddress address) {

@@ -943,7 +921,7 @@ public class TestSaslRPC extends TestRpcBase {
return newPropertes;
}
}

public static void main(String[] args) throws Exception {
System.out.println("Testing Kerberos authentication over RPC");
if (args.length != 2) {
@@ -17,35 +17,40 @@
*/
package org.apache.hadoop.security;

import com.google.protobuf.ServiceException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.TestRpcBase;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.Token;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.NetworkInterface;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Enumeration;

import org.junit.Assert;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtocolSignature;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenInfo;
import org.junit.Before;
import org.junit.Test;
import org.apache.hadoop.ipc.TestRpcBase.TestTokenSecretManager;
import org.apache.hadoop.ipc.TestRpcBase.TestTokenIdentifier;
import org.apache.hadoop.ipc.TestRpcBase.TestTokenSelector;
import org.apache.commons.logging.*;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

/**
* Test do as effective user.
*
*/
public class TestDoAsEffectiveUser extends TestRpcBase {
public class TestDoAsEffectiveUser {
final private static String REAL_USER_NAME = "realUser1@HADOOP.APACHE.ORG";
final private static String REAL_USER_SHORT_NAME = "realUser1";
final private static String PROXY_USER_NAME = "proxyUser";

@@ -53,8 +58,8 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
final private static String GROUP2_NAME = "group2";
final private static String[] GROUP_NAMES = new String[] { GROUP1_NAME,
GROUP2_NAME };

private TestRpcService client;
private static final String ADDRESS = "0.0.0.0";
private TestProtocol proxy;
private static final Configuration masterConf = new Configuration();

@@ -77,7 +82,7 @@ public class TestDoAsEffectiveUser extends TestRpcBase {

private void configureSuperUserIPAddresses(Configuration conf,
String superUserShortName) throws IOException {
ArrayList<String> ipList = new ArrayList<>();
ArrayList<String> ipList = new ArrayList<String>();
Enumeration<NetworkInterface> netInterfaceList = NetworkInterface
.getNetworkInterfaces();
while (netInterfaceList.hasMoreElements()) {

@@ -125,19 +130,50 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
curUGI.toString());
}

private void checkRemoteUgi(final UserGroupInformation ugi,
final Configuration conf) throws Exception {
@TokenInfo(TestTokenSelector.class)
public interface TestProtocol extends VersionedProtocol {
public static final long versionID = 1L;

String aMethod() throws IOException;
String getServerRemoteUser() throws IOException;
}

public class TestImpl implements TestProtocol {

@Override
public String aMethod() throws IOException {
return UserGroupInformation.getCurrentUser().toString();
}

@Override
public String getServerRemoteUser() throws IOException {
return Server.getRemoteUser().toString();
}

@Override
public long getProtocolVersion(String protocol, long clientVersion)
throws IOException {
return TestProtocol.versionID;
}

@Override
public ProtocolSignature getProtocolSignature(String protocol,
long clientVersion, int clientMethodsHash) throws IOException {
return new ProtocolSignature(TestProtocol.versionID, null);
}
}

private void checkRemoteUgi(final Server server,
final UserGroupInformation ugi, final Configuration conf)
throws Exception {
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws ServiceException {
client = getClient(addr, conf);
String currentUser = client.getCurrentUser(null,
newEmptyRequest()).getUser();
String serverRemoteUser = client.getServerRemoteUser(null,
newEmptyRequest()).getUser();

Assert.assertEquals(ugi.toString(), currentUser);
Assert.assertEquals(ugi.toString(), serverRemoteUser);
public Void run() throws IOException {
proxy = RPC.getProxy(
TestProtocol.class, TestProtocol.versionID,
NetUtils.getConnectAddress(server), conf);
Assert.assertEquals(ugi.toString(), proxy.aMethod());
Assert.assertEquals(ugi.toString(), proxy.getServerRemoteUser());
return null;
}
});

@@ -149,27 +185,29 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
// Set RPC engine to protobuf RPC engine
RPC.setProtocolEngine(conf, TestRpcService.class,
ProtobufRpcEngine.class);
UserGroupInformation.setConfiguration(conf);
final Server server = setupTestServer(conf, 5);
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(5).setVerbose(true).build();

refreshConf(conf);
try {
server.start();

UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
checkRemoteUgi(realUserUgi, conf);
checkRemoteUgi(server, realUserUgi, conf);

UserGroupInformation proxyUserUgi =
UserGroupInformation.createProxyUserForTesting(
UserGroupInformation proxyUserUgi = UserGroupInformation.createProxyUserForTesting(
PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
checkRemoteUgi(proxyUserUgi, conf);
checkRemoteUgi(server, proxyUserUgi, conf);
} catch (Exception e) {
e.printStackTrace();
Assert.fail();
} finally {
stop(server, client);
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}

@@ -180,25 +218,29 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
"group1");
RPC.setProtocolEngine(conf, TestRpcService.class,
ProtobufRpcEngine.class);
UserGroupInformation.setConfiguration(conf);
final Server server = setupTestServer(conf, 5);
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();

refreshConf(conf);
try {
server.start();

UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);
checkRemoteUgi(realUserUgi, conf);
checkRemoteUgi(server, realUserUgi, conf);

UserGroupInformation proxyUserUgi = UserGroupInformation
.createProxyUserForTesting(PROXY_USER_NAME, realUserUgi, GROUP_NAMES);
checkRemoteUgi(proxyUserUgi, conf);
checkRemoteUgi(server, proxyUserUgi, conf);
} catch (Exception e) {
e.printStackTrace();
Assert.fail();
} finally {
stop(server, client);
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}

@@ -214,14 +256,17 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
"group1");
RPC.setProtocolEngine(conf, TestRpcService.class,
ProtobufRpcEngine.class);
UserGroupInformation.setConfiguration(conf);
final Server server = setupTestServer(conf, 5);
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();

refreshConf(conf);

try {
server.start();

final InetSocketAddress addr = NetUtils.getConnectAddress(server);

UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);

@@ -230,10 +275,11 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
String retVal = proxyUserUgi
.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws ServiceException {
client = getClient(addr, conf);
return client.getCurrentUser(null,
newEmptyRequest()).getUser();
public String run() throws IOException {
proxy = RPC.getProxy(TestProtocol.class,
TestProtocol.versionID, addr, conf);
String ret = proxy.aMethod();
return ret;
}
});

@@ -241,7 +287,10 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
} catch (Exception e) {
e.printStackTrace();
} finally {
stop(server, client);
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}

@@ -250,14 +299,17 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
final Configuration conf = new Configuration();
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME), "group1");
RPC.setProtocolEngine(conf, TestRpcService.class,
ProtobufRpcEngine.class);
UserGroupInformation.setConfiguration(conf);
final Server server = setupTestServer(conf, 2);
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();

refreshConf(conf);

try {
server.start();

final InetSocketAddress addr = NetUtils.getConnectAddress(server);

UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);

@@ -266,10 +318,11 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
String retVal = proxyUserUgi
.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws ServiceException {
client = getClient(addr, conf);
return client.getCurrentUser(null,
newEmptyRequest()).getUser();
public String run() throws IOException {
proxy = RPC.getProxy(TestProtocol.class,
TestProtocol.versionID, addr, conf);
String ret = proxy.aMethod();
return ret;
}
});

@@ -277,7 +330,10 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
} catch (Exception e) {
e.printStackTrace();
} finally {
stop(server, client);
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}

@@ -285,12 +341,15 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
public void testRealUserGroupNotSpecified() throws IOException {
final Configuration conf = new Configuration();
configureSuperUserIPAddresses(conf, REAL_USER_SHORT_NAME);
RPC.setProtocolEngine(conf, TestRpcService.class,
ProtobufRpcEngine.class);
UserGroupInformation.setConfiguration(conf);
final Server server = setupTestServer(conf, 2);
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();

try {
server.start();

final InetSocketAddress addr = NetUtils.getConnectAddress(server);

UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);

@@ -299,10 +358,11 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
String retVal = proxyUserUgi
.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws ServiceException {
client = getClient(addr, conf);
return client.getCurrentUser(null,
newEmptyRequest()).getUser();
public String run() throws IOException {
proxy = (TestProtocol) RPC.getProxy(TestProtocol.class,
TestProtocol.versionID, addr, conf);
String ret = proxy.aMethod();
return ret;
}
});

@@ -310,7 +370,10 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
} catch (Exception e) {
e.printStackTrace();
} finally {
stop(server, client);
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}

@@ -321,14 +384,17 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
conf.setStrings(DefaultImpersonationProvider.getTestProvider().
getProxySuperuserGroupConfKey(REAL_USER_SHORT_NAME),
"group3");
RPC.setProtocolEngine(conf, TestRpcService.class,
ProtobufRpcEngine.class);
UserGroupInformation.setConfiguration(conf);
final Server server = setupTestServer(conf, 2);
Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(2).setVerbose(false).build();

refreshConf(conf);

try {
server.start();

final InetSocketAddress addr = NetUtils.getConnectAddress(server);

UserGroupInformation realUserUgi = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);

@@ -337,10 +403,11 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
String retVal = proxyUserUgi
.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws ServiceException {
client = getClient(addr, conf);
return client.getCurrentUser(null,
newEmptyRequest()).getUser();
public String run() throws IOException {
proxy = RPC.getProxy(TestProtocol.class,
TestProtocol.versionID, addr, conf);
String ret = proxy.aMethod();
return ret;
}
});

@@ -348,7 +415,10 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
} catch (Exception e) {
e.printStackTrace();
} finally {
stop(server, client);
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}

@@ -362,17 +432,20 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
final Configuration conf = new Configuration(masterConf);
TestTokenSecretManager sm = new TestTokenSecretManager();
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
RPC.setProtocolEngine(conf, TestRpcService.class,
ProtobufRpcEngine.class);
UserGroupInformation.setConfiguration(conf);
final Server server = setupTestServer(conf, 5, sm);
final Server server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();

server.start();

final UserGroupInformation current = UserGroupInformation
.createRemoteUser(REAL_USER_NAME);

final InetSocketAddress addr = NetUtils.getConnectAddress(server);
TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
.getUserName()), new Text("SomeSuperUser"));
Token<TestTokenIdentifier> token = new Token<>(tokenId,
Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
sm);
SecurityUtil.setTokenService(token, addr);
UserGroupInformation proxyUserUgi = UserGroupInformation

@@ -380,19 +453,23 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
proxyUserUgi.addToken(token);

refreshConf(conf);

String retVal = proxyUserUgi.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws Exception {
try {
client = getClient(addr, conf);
return client.getCurrentUser(null,
newEmptyRequest()).getUser();
proxy = RPC.getProxy(TestProtocol.class,
TestProtocol.versionID, addr, conf);
String ret = proxy.aMethod();
return ret;
} catch (Exception e) {
e.printStackTrace();
throw e;
} finally {
stop(server, client);
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
});

@@ -409,34 +486,42 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
TestTokenSecretManager sm = new TestTokenSecretManager();
final Configuration newConf = new Configuration(masterConf);
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, newConf);
// Set RPC engine to protobuf RPC engine
RPC.setProtocolEngine(newConf, TestRpcService.class,
ProtobufRpcEngine.class);
UserGroupInformation.setConfiguration(newConf);
final Server server = setupTestServer(newConf, 5, sm);
final Server server = new RPC.Builder(newConf)
.setProtocol(TestProtocol.class).setInstance(new TestImpl())
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
.setSecretManager(sm).build();

server.start();

final UserGroupInformation current = UserGroupInformation
.createUserForTesting(REAL_USER_NAME, GROUP_NAMES);

refreshConf(newConf);

final InetSocketAddress addr = NetUtils.getConnectAddress(server);
TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
.getUserName()), new Text("SomeSuperUser"));
Token<TestTokenIdentifier> token = new Token<>(tokenId, sm);
Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
sm);
SecurityUtil.setTokenService(token, addr);
current.addToken(token);
String retVal = current.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws Exception {
try {
client = getClient(addr, newConf);
return client.getCurrentUser(null,
newEmptyRequest()).getUser();
proxy = RPC.getProxy(TestProtocol.class,
TestProtocol.versionID, addr, newConf);
String ret = proxy.aMethod();
return ret;
} catch (Exception e) {
e.printStackTrace();
throw e;
} finally {
stop(server, client);
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
}
}
});
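All of the TestDoAsEffectiveUser cases above revolve around the same proxy-user pattern: a connection authenticated as a real user issues RPCs on behalf of an effective user inside a doAs block, and the server-side impersonation ACLs decide whether that is allowed. A minimal sketch of that pattern, assuming a Hadoop client classpath; the class name, principal, and user strings here are purely illustrative and not from this patch:

  import java.security.PrivilegedExceptionAction;

  import org.apache.hadoop.security.UserGroupInformation;

  public class ProxyUserSketch {
    public static void main(String[] args) throws Exception {
      // "realUser" is the authenticated caller; "proxyUser" is the effective
      // user the server should see, subject to the hadoop.proxyuser.* ACLs.
      UserGroupInformation realUser =
          UserGroupInformation.createRemoteUser("realUser1@EXAMPLE.COM");
      UserGroupInformation proxyUser =
          UserGroupInformation.createProxyUser("proxyUser", realUser);

      String effective = proxyUser.doAs(new PrivilegedExceptionAction<String>() {
        @Override
        public String run() throws Exception {
          // Any RPC issued here is made as proxyUser (effective user),
          // carried over a connection authenticated as realUser.
          return UserGroupInformation.getCurrentUser().toString();
        }
      });
      // Typically prints something like "proxyUser (auth:PROXY) via realUser1@EXAMPLE.COM ..."
      System.out.println(effective);
    }
  }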
@@ -20,7 +20,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.TestRpcBase.TestTokenIdentifier;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;

@@ -29,14 +28,11 @@ import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.*;

import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.kerberos.KeyTab;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.LoginContext;

@@ -54,22 +50,9 @@ import java.util.Set;

import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.assertCounterGt;
import static org.apache.hadoop.test.MetricsAsserts.assertGaugeGt;
import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
import static org.apache.hadoop.test.MetricsAsserts.getDoubleGauge;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.apache.hadoop.ipc.TestSaslRPC.*;
import static org.apache.hadoop.test.MetricsAsserts.*;
import static org.junit.Assert.*;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

@@ -126,7 +109,7 @@ public class TestUserGroupInformation {
UserGroupInformation.setLoginUser(null);
}

@Test(timeout = 30000)
@Test (timeout = 30000)
public void testSimpleLogin() throws IOException {
tryLoginAuthenticationMethod(AuthenticationMethod.SIMPLE, true);
}

@@ -1030,4 +1013,27 @@ public class TestUserGroupInformation {
assertTrue(credsugiTokens.contains(token1));
assertTrue(credsugiTokens.contains(token2));
}

@Test
public void testCheckTGTAfterLoginFromSubject() throws Exception {
// security on, default is remove default realm
SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
UserGroupInformation.setConfiguration(conf);

// Login from a pre-set subject with a keytab
final Subject subject = new Subject();
KeyTab keytab = KeyTab.getInstance();
subject.getPrivateCredentials().add(keytab);
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws IOException {
UserGroupInformation.loginUserFromSubject(subject);
// this should not throw.
UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
return null;
}
});

}
}
@@ -91,7 +91,6 @@ public class TestNodeHealthScriptRunner {
public void testNodeHealthScript() throws Exception {
String errorScript = "echo ERROR\n echo \"Tracker not healthy\"";
String normalScript = "echo \"I am all fine\"";
String failWithExitCodeScript = "echo \"Not healthy\"; exit -1";
String timeOutScript =
Shell.WINDOWS ? "@echo off\nping -n 4 127.0.0.1 >nul\necho \"I am fine\""
: "sleep 4\necho \"I am fine\"";

@@ -125,12 +124,6 @@ public class TestNodeHealthScriptRunner {
nodeHealthScriptRunner.isHealthy());
Assert.assertEquals("", nodeHealthScriptRunner.getHealthReport());

// Script which fails with exit code.
writeNodeHealthScriptFile(failWithExitCodeScript, true);
timerTask.run();
Assert.assertFalse("Node health status reported healthy",
nodeHealthScriptRunner.isHealthy());

// Timeout script.
writeNodeHealthScriptFile(timeOutScript, true);
timerTask.run();
@@ -88,6 +88,6 @@ message AuthMethodResponseProto {
required string mechanismName = 2;
}

message UserResponseProto {
required string user = 1;
message AuthUserResponseProto {
required string authUser = 1;
}

@@ -40,11 +40,9 @@ service TestProtobufRpcProto {
rpc exchange(ExchangeRequestProto) returns (ExchangeResponseProto);
rpc sleep(SleepRequestProto) returns (EmptyResponseProto);
rpc getAuthMethod(EmptyRequestProto) returns (AuthMethodResponseProto);
rpc getAuthUser(EmptyRequestProto) returns (UserResponseProto);
rpc getAuthUser(EmptyRequestProto) returns (AuthUserResponseProto);
rpc echoPostponed(EchoRequestProto) returns (EchoResponseProto);
rpc sendPostponed(EmptyRequestProto) returns (EmptyResponseProto);
rpc getCurrentUser(EmptyRequestProto) returns (UserResponseProto);
rpc getServerRemoteUser(EmptyRequestProto) returns (UserResponseProto);
}

service TestProtobufRpc2Proto {
@@ -0,0 +1,40 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

load hadoop-functions_test_helper

@test "hadoop_subcommand_opts (daemonization false)" {
HADOOP_OPTS="1"
HADOOP_CLIENT_OPTS="2"
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="false"
hadoop_add_client_opts
[ "${HADOOP_OPTS}" = "1 2" ]
}

@test "hadoop_subcommand_opts (daemonization true)" {
HADOOP_OPTS="1"
HADOOP_CLIENT_OPTS="2"
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
hadoop_add_client_opts
[ "${HADOOP_OPTS}" = "1" ]
}

@test "hadoop_subcommand_opts (daemonization empty)" {
HADOOP_OPTS="1"
HADOOP_CLIENT_OPTS="2"
unset HADOOP_SUBCMD_SUPPORTDAEMONIZATION
hadoop_add_client_opts
[ "${HADOOP_OPTS}" = "1 2" ]
}
@@ -0,0 +1,68 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

load hadoop-functions_test_helper

@test "hadoop_subcommand_opts (missing param)" {
HADOOP_OPTS="x"
run hadoop_subcommand_opts testvar
[ "${status}" = "1" ]
}

@test "hadoop_subcommand_opts (simple not exist)" {
HADOOP_OPTS="x"
hadoop_subcommand_opts hadoop subcommand
[ "${HADOOP_OPTS}" = "x" ]
}

@test "hadoop_subcommand_opts (hadoop simple exist)" {
HADOOP_OPTS="x"
HADOOP_TEST_OPTS="y"
hadoop_subcommand_opts hadoop test
echo "${HADOOP_OPTS}"
[ "${HADOOP_OPTS}" = "x y" ]
}

@test "hadoop_subcommand_opts (hadoop complex exist)" {
HADOOP_OPTS="x"
HADOOP_TEST_OPTS="y z"
hadoop_subcommand_opts hadoop test
echo "${HADOOP_OPTS}"
[ "${HADOOP_OPTS}" = "x y z" ]
}

@test "hadoop_subcommand_opts (hdfs simple exist)" {
HADOOP_OPTS="x"
HDFS_TEST_OPTS="y"
hadoop_subcommand_opts hdfs test
echo "${HADOOP_OPTS}"
[ "${HADOOP_OPTS}" = "x y" ]
}

@test "hadoop_subcommand_opts (yarn simple exist)" {
HADOOP_OPTS="x"
YARN_TEST_OPTS="y"
hadoop_subcommand_opts yarn test
echo "${HADOOP_OPTS}"
[ "${HADOOP_OPTS}" = "x y" ]
}

@test "hadoop_subcommand_opts (deprecation case)" {
HADOOP_OPTS="x"
HADOOP_NAMENODE_OPTS="y"
hadoop_subcommand_opts hdfs namenode
echo "${HADOOP_OPTS}"
[ "${HADOOP_OPTS}" = "x y" ]
}
@@ -0,0 +1,52 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

load hadoop-functions_test_helper

@test "hadoop_subcommand_secure_opts (missing param)" {
HADOOP_OPTS="x"
run hadoop_subcommand_secure_opts testvar
[ "${status}" = "1" ]
}

@test "hadoop_subcommand_secure_opts (simple not exist)" {
HADOOP_OPTS="x"
hadoop_subcommand_secure_opts hadoop subcommand
[ "${HADOOP_OPTS}" = "x" ]
}

@test "hadoop_subcommand_secure_opts (hadoop simple exist)" {
HADOOP_OPTS="x"
HADOOP_TEST_SECURE_EXTRA_OPTS="y"
hadoop_subcommand_secure_opts hadoop test
echo "${HADOOP_OPTS}"
[ "${HADOOP_OPTS}" = "x y" ]
}

@test "hadoop_subcommand_secure_opts (hadoop complex exist)" {
HADOOP_OPTS="x"
HADOOP_TEST_SECURE_EXTRA_OPTS="y z"
hadoop_subcommand_secure_opts hadoop test
echo "${HADOOP_OPTS}"
[ "${HADOOP_OPTS}" = "x y z" ]
}

@test "hadoop_subcommand_secure_opts (hdfs simple exist)" {
HADOOP_OPTS="x"
HDFS_TEST_SECURE_EXTRA_OPTS="y"
hadoop_subcommand_secure_opts hdfs test
echo "${HADOOP_OPTS}"
[ "${HADOOP_OPTS}" = "x y" ]
}
@@ -0,0 +1,53 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

load hadoop-functions_test_helper

@test "hadoop_verify_user (hadoop: no setting)" {
run hadoop_verify_user hadoop test
[ "${status}" = "0" ]
}

@test "hadoop_verify_user (yarn: no setting)" {
run hadoop_verify_user yarn test
[ "${status}" = "0" ]
}

@test "hadoop_verify_user (hadoop: allow)" {
HADOOP_TEST_USER=${USER}
run hadoop_verify_user hadoop test
[ "${status}" = "0" ]
}

@test "hadoop_verify_user (yarn: allow)" {
YARN_TEST_USER=${USER}
run hadoop_verify_user yarn test
[ "${status}" = "0" ]
}

# colon isn't a valid username, so let's use it
# this should fail regardless of who the user is
# that is running the test code
@test "hadoop_verify_user (hadoop: disallow)" {
HADOOP_TEST_USER=:
run hadoop_verify_user hadoop test
[ "${status}" = "1" ]
}

@test "hadoop_verify_user (yarn: disallow)" {
YARN_TEST_USER=:
run hadoop_verify_user yarn test
[ "${status}" = "1" ]
}
@@ -533,7 +533,8 @@ public class DFSInputStream extends FSInputStream
* Open a DataInputStream to a DataNode so that it can be read from.
* We get block ID and the IDs of the destinations at startup, from the namenode.
*/
private synchronized DatanodeInfo blockSeekTo(long target) throws IOException {
private synchronized DatanodeInfo blockSeekTo(long target)
throws IOException {
if (target >= getFileLength()) {
throw new IOException("Attempted to read past end of file");
}

@@ -962,14 +963,14 @@ public class DFSInputStream extends FSInputStream
}

protected void fetchBlockByteRange(LocatedBlock block, long start, long end,
byte[] buf, int offset, CorruptedBlocks corruptedBlocks)
ByteBuffer buf, CorruptedBlocks corruptedBlocks)
throws IOException {
block = refreshLocatedBlock(block);
while (true) {
DNAddrPair addressPair = chooseDataNode(block, null);
try {
actualGetFromOneDataNode(addressPair, block, start, end,
buf, offset, corruptedBlocks);
buf, corruptedBlocks);
return;
} catch (IOException e) {
checkInterrupted(e); // check if the read has been interrupted

@@ -988,12 +989,10 @@ public class DFSInputStream extends FSInputStream
return new Callable<ByteBuffer>() {
@Override
public ByteBuffer call() throws Exception {
byte[] buf = bb.array();
int offset = bb.position();
try (TraceScope ignored = dfsClient.getTracer().
newScope("hedgedRead" + hedgedReadId, parentSpanId)) {
actualGetFromOneDataNode(datanode, block, start, end, buf,
offset, corruptedBlocks);
actualGetFromOneDataNode(datanode, block, start, end, bb,
corruptedBlocks);
return bb;
}
}

@@ -1007,13 +1006,12 @@ public class DFSInputStream extends FSInputStream
* @param block the located block containing the requested data
* @param startInBlk the startInBlk offset of the block
* @param endInBlk the endInBlk offset of the block
* @param buf the given byte array into which the data is read
* @param offset the offset in buf
* @param buf the given byte buffer into which the data is read
* @param corruptedBlocks map recording list of datanodes with corrupted
* block replica
*/
void actualGetFromOneDataNode(final DNAddrPair datanode, LocatedBlock block,
final long startInBlk, final long endInBlk, byte[] buf, int offset,
final long startInBlk, final long endInBlk, ByteBuffer buf,
CorruptedBlocks corruptedBlocks)
throws IOException {
DFSClientFaultInjector.get().startFetchFromDatanode();

@@ -1031,7 +1029,22 @@ public class DFSInputStream extends FSInputStream
DFSClientFaultInjector.get().fetchFromDatanodeException();
reader = getBlockReader(block, startInBlk, len, datanode.addr,
datanode.storageType, datanode.info);
int nread = reader.readAll(buf, offset, len);

//Behave exactly as the readAll() call
ByteBuffer tmp = buf.duplicate();
tmp.limit(tmp.position() + len);
tmp = tmp.slice();
int nread = 0;
int ret;
while (true) {
ret = reader.read(tmp);
if (ret <= 0) {
break;
}
nread += ret;
}
buf.position(buf.position() + nread);

IOUtilsClient.updateReadStatistics(readStatistics, nread, reader);
dfsClient.updateFileSystemReadStats(
reader.getNetworkDistance(), nread);
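The hunk above replaces BlockReader.readAll(byte[], int, int) with a loop that drains the reader into a bounded slice of the destination ByteBuffer and then advances the buffer's position by the bytes actually read. A minimal sketch of the same duplicate/limit/slice pattern, using a plain java.nio ReadableByteChannel as a stand-in for the HDFS BlockReader (the channel and helper name are assumptions of this sketch, not code from the patch):

  import java.io.IOException;
  import java.nio.ByteBuffer;
  import java.nio.channels.ReadableByteChannel;

  final class DrainIntoBuffer {
    /**
     * Read up to len bytes from the channel into buf, starting at buf's
     * current position, and advance the position by the bytes read.
     */
    static int readFully(ReadableByteChannel ch, ByteBuffer buf, int len)
        throws IOException {
      ByteBuffer tmp = buf.duplicate();      // independent position/limit, shared contents
      tmp.limit(tmp.position() + len);       // cap how much may be read
      tmp = tmp.slice();                     // window over exactly len bytes
      int nread = 0;
      while (tmp.hasRemaining()) {
        int ret = ch.read(tmp);
        if (ret <= 0) {
          break;                             // EOF or no progress
        }
        nread += ret;
      }
      buf.position(buf.position() + nread);  // reflect the bytes consumed in the caller's buffer
      return nread;
    }
  }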
@@ -1098,7 +1111,7 @@ public class DFSInputStream extends FSInputStream
* time. We then wait on which ever read returns first.
*/
private void hedgedFetchBlockByteRange(LocatedBlock block, long start,
long end, byte[] buf, int offset, CorruptedBlocks corruptedBlocks)
long end, ByteBuffer buf, CorruptedBlocks corruptedBlocks)
throws IOException {
final DfsClientConf conf = dfsClient.getConf();
ArrayList<Future<ByteBuffer>> futures = new ArrayList<>();

@@ -1130,8 +1143,8 @@ public class DFSInputStream extends FSInputStream
conf.getHedgedReadThresholdMillis(), TimeUnit.MILLISECONDS);
if (future != null) {
ByteBuffer result = future.get();
System.arraycopy(result.array(), result.position(), buf, offset,
len);
result.flip();
buf.put(result);
return;
}
DFSClient.LOG.debug("Waited {}ms to read from {}; spawning hedged "

@@ -1173,8 +1186,8 @@ public class DFSInputStream extends FSInputStream
// cancel the rest.
cancelAll(futures);
dfsClient.getHedgedReadMetrics().incHedgedReadWins();
System.arraycopy(result.array(), result.position(), buf, offset,
len);
result.flip();
buf.put(result);
return;
} catch (InterruptedException ie) {
// Ignore and retry

@@ -1244,7 +1257,8 @@ public class DFSInputStream extends FSInputStream
* access key from its memory since it's considered expired based on
* the estimated expiration date.
*/
if (ex instanceof InvalidBlockTokenException || ex instanceof InvalidToken) {
if (ex instanceof InvalidBlockTokenException ||
ex instanceof InvalidToken) {
DFSClient.LOG.info("Access token was invalid when connecting to "
+ targetAddr + " : " + ex);
return true;

@@ -1272,7 +1286,8 @@ public class DFSInputStream extends FSInputStream
try (TraceScope scope = dfsClient.
newReaderTraceScope("DFSInputStream#byteArrayPread",
src, position, length)) {
int retLen = pread(position, buffer, offset, length);
ByteBuffer bb = ByteBuffer.wrap(buffer, offset, length);
int retLen = pread(position, bb);
if (retLen < length) {
dfsClient.addRetLenToReaderScope(scope, retLen);
}

@@ -1280,7 +1295,7 @@ public class DFSInputStream extends FSInputStream
}
}

private int pread(long position, byte[] buffer, int offset, int length)
private int pread(long position, ByteBuffer buffer)
throws IOException {
// sanity checks
dfsClient.checkOpen();

@@ -1292,6 +1307,7 @@ public class DFSInputStream extends FSInputStream
if ((position < 0) || (position >= filelen)) {
return -1;
}
int length = buffer.remaining();
int realLen = length;
if ((position + length) > filelen) {
realLen = (int)(filelen - position);

@@ -1304,14 +1320,16 @@ public class DFSInputStream extends FSInputStream
CorruptedBlocks corruptedBlocks = new CorruptedBlocks();
for (LocatedBlock blk : blockRange) {
long targetStart = position - blk.getStartOffset();
long bytesToRead = Math.min(remaining, blk.getBlockSize() - targetStart);
int bytesToRead = (int) Math.min(remaining,
blk.getBlockSize() - targetStart);
long targetEnd = targetStart + bytesToRead - 1;
try {
if (dfsClient.isHedgedReadsEnabled() && !blk.isStriped()) {
hedgedFetchBlockByteRange(blk, targetStart,
targetStart + bytesToRead - 1, buffer, offset, corruptedBlocks);
targetEnd, buffer, corruptedBlocks);
} else {
fetchBlockByteRange(blk, targetStart, targetStart + bytesToRead - 1,
buffer, offset, corruptedBlocks);
fetchBlockByteRange(blk, targetStart, targetEnd,
buffer, corruptedBlocks);
}
} finally {
// Check and report if any block replicas are corrupted.

@@ -1323,7 +1341,6 @@ public class DFSInputStream extends FSInputStream

remaining -= bytesToRead;
position += bytesToRead;
offset += bytesToRead;
}
assert remaining == 0 : "Wrong number of bytes read.";
return realLen;
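The reworked pread above derives the request length from ByteBuffer.remaining() and lets each per-block fetch advance the buffer's position, so the explicit offset bookkeeping of the byte[] variant disappears. A small, self-contained sketch of the same positional-read contract over a java.nio FileChannel, which stands in for the DFS read path and is not the actual client code:

  import java.io.IOException;
  import java.nio.ByteBuffer;
  import java.nio.channels.FileChannel;

  final class PositionalRead {
    /**
     * Fill as much of buf as the file allows, starting at position.
     * Returns the number of bytes copied; buf's position advances with
     * every chunk read, so no separate offset/length arguments are needed.
     */
    static int pread(FileChannel ch, long position, ByteBuffer buf)
        throws IOException {
      int done = 0;
      while (buf.hasRemaining()) {
        int n = ch.read(buf, position + done);  // positional read; buf.position() moves forward
        if (n <= 0) {
          break;                                // EOF (or no progress) before the buffer filled
        }
        done += n;
      }
      return done;
    }
  }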
@@ -1457,7 +1474,8 @@ public class DFSInputStream extends FSInputStream
* If another node could not be found, then returns false.
*/
@Override
public synchronized boolean seekToNewSource(long targetPos) throws IOException {
public synchronized boolean seekToNewSource(long targetPos)
throws IOException {
if (currentNode == null) {
return seekToBlockSource(targetPos);
}
@@ -307,8 +307,8 @@ public class DFSStripedInputStream extends DFSInputStream {
stripeLimit - stripeBufOffset);

LocatedStripedBlock blockGroup = (LocatedStripedBlock) currentLocatedBlock;
AlignedStripe[] stripes = StripedBlockUtil.divideOneStripe(ecPolicy, cellSize,
blockGroup, offsetInBlockGroup,
AlignedStripe[] stripes = StripedBlockUtil.divideOneStripe(ecPolicy,
cellSize, blockGroup, offsetInBlockGroup,
offsetInBlockGroup + stripeRange.length - 1, curStripeBuf);
final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(
blockGroup, cellSize, dataBlkNum, parityBlkNum);

@@ -523,13 +523,13 @@ public class DFSStripedInputStream extends DFSInputStream {
*/
@Override
protected void fetchBlockByteRange(LocatedBlock block, long start,
long end, byte[] buf, int offset, CorruptedBlocks corruptedBlocks)
long end, ByteBuffer buf, CorruptedBlocks corruptedBlocks)
throws IOException {
// Refresh the striped block group
LocatedStripedBlock blockGroup = getBlockGroupAt(block.getStartOffset());

AlignedStripe[] stripes = StripedBlockUtil.divideByteRangeIntoStripes(
ecPolicy, cellSize, blockGroup, start, end, buf, offset);
ecPolicy, cellSize, blockGroup, start, end, buf);
CompletionService<Void> readService = new ExecutorCompletionService<>(
dfsClient.getStripedReadsThreadPool());
final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(

@@ -542,6 +542,7 @@ public class DFSStripedInputStream extends DFSInputStream {
blks, preaderInfos, corruptedBlocks);
preader.readStripe();
}
buf.position(buf.position() + (int)(end - start + 1));
} finally {
for (BlockReaderInfo preaderInfo : preaderInfos) {
closeReader(preaderInfo);

@@ -698,16 +699,15 @@ public class DFSStripedInputStream extends DFSInputStream {
}

private ByteBufferStrategy[] getReadStrategies(StripingChunk chunk) {
if (chunk.byteBuffer != null) {
ByteBufferStrategy strategy =
new ByteBufferStrategy(chunk.byteBuffer, readStatistics, dfsClient);
if (chunk.useByteBuffer()) {
ByteBufferStrategy strategy = new ByteBufferStrategy(
chunk.getByteBuffer(), readStatistics, dfsClient);
return new ByteBufferStrategy[]{strategy};
} else {
ByteBufferStrategy[] strategies =
new ByteBufferStrategy[chunk.byteArray.getOffsets().length];
new ByteBufferStrategy[chunk.getChunkBuffer().getSlices().size()];
for (int i = 0; i < strategies.length; i++) {
ByteBuffer buffer = ByteBuffer.wrap(chunk.byteArray.buf(),
chunk.byteArray.getOffsets()[i], chunk.byteArray.getLengths()[i]);
ByteBuffer buffer = chunk.getChunkBuffer().getSlice(i);
strategies[i] =
new ByteBufferStrategy(buffer, readStatistics, dfsClient);
}

@@ -814,7 +814,7 @@ public class DFSStripedInputStream extends DFSInputStream {
}

class PositionStripeReader extends StripeReader {
private byte[][] decodeInputs = null;
private ByteBuffer[] decodeInputs = null;

PositionStripeReader(CompletionService<Void> service,
AlignedStripe alignedStripe, LocatedBlock[] targetBlocks,

@@ -836,8 +836,6 @@ public class DFSStripedInputStream extends DFSInputStream {
Preconditions.checkState(index >= dataBlkNum &&
alignedStripe.chunks[index] == null);
alignedStripe.chunks[index] = new StripingChunk(decodeInputs[index]);
alignedStripe.chunks[index].addByteArraySlice(0,
(int) alignedStripe.getSpanInBlock());
return true;
}
@@ -73,7 +73,8 @@ import java.util.concurrent.TimeUnit;
@InterfaceAudience.Private
public class StripedBlockUtil {

public static final Logger LOG = LoggerFactory.getLogger(StripedBlockUtil.class);
public static final Logger LOG =
LoggerFactory.getLogger(StripedBlockUtil.class);

/**
* Parses a striped block group into individual blocks.

@@ -312,16 +313,17 @@ public class StripedBlockUtil {
* schedule a new fetch request with the decoding input buffer as transfer
* destination.
*/
public static byte[][] initDecodeInputs(AlignedStripe alignedStripe,
public static ByteBuffer[] initDecodeInputs(AlignedStripe alignedStripe,
int dataBlkNum, int parityBlkNum) {
byte[][] decodeInputs =
new byte[dataBlkNum + parityBlkNum][(int) alignedStripe.getSpanInBlock()];
ByteBuffer[] decodeInputs = new ByteBuffer[dataBlkNum + parityBlkNum];
for (int i = 0; i < decodeInputs.length; i++) {
decodeInputs[i] = ByteBuffer.allocate(
(int) alignedStripe.getSpanInBlock());
}
// read the full data aligned stripe
for (int i = 0; i < dataBlkNum; i++) {
if (alignedStripe.chunks[i] == null) {
alignedStripe.chunks[i] = new StripingChunk(decodeInputs[i]);
alignedStripe.chunks[i].addByteArraySlice(0,
(int) alignedStripe.getSpanInBlock());
}
}
return decodeInputs;

@@ -334,14 +336,21 @@ public class StripedBlockUtil {
* When all pending requests have returned, this method should be called to
* finalize decode input buffers.
*/
public static void finalizeDecodeInputs(final byte[][] decodeInputs,
public static void finalizeDecodeInputs(final ByteBuffer[] decodeInputs,
AlignedStripe alignedStripe) {
for (int i = 0; i < alignedStripe.chunks.length; i++) {
final StripingChunk chunk = alignedStripe.chunks[i];
if (chunk != null && chunk.state == StripingChunk.FETCHED) {
chunk.copyTo(decodeInputs[i]);
if (chunk.useChunkBuffer()) {
chunk.getChunkBuffer().copyTo(decodeInputs[i]);
} else {
chunk.getByteBuffer().flip();
}
} else if (chunk != null && chunk.state == StripingChunk.ALLZERO) {
Arrays.fill(decodeInputs[i], (byte) 0);
//ZERO it. Will be better handled in other following issue.
byte[] emptyBytes = new byte[decodeInputs[i].limit()];
decodeInputs[i].put(emptyBytes);
decodeInputs[i].flip();
} else {
decodeInputs[i] = null;
}
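In the ByteBuffer variant of finalizeDecodeInputs above, an ALLZERO chunk is materialized by writing a zero-filled array into the buffer and flipping it, since there is no direct ByteBuffer counterpart to Arrays.fill on a byte[]. A tiny, hypothetical helper showing the same zero-fill-then-flip idiom in isolation (not part of the patch):

  import java.nio.ByteBuffer;

  final class ZeroFill {
    /** Fill the buffer's remaining space with zeros and make it readable. */
    static ByteBuffer zeroAndFlip(ByteBuffer buf) {
      byte[] zeros = new byte[buf.remaining()];  // freshly allocated arrays are all-zero in Java
      buf.put(zeros);                            // advances position to the limit
      buf.flip();                                // limit = old position, position = 0: ready to read
      return buf;
    }
  }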
|
||||
|
@ -351,7 +360,7 @@ public class StripedBlockUtil {
|
|||
/**
|
||||
* Decode based on the given input buffers and erasure coding policy.
|
||||
*/
|
||||
public static void decodeAndFillBuffer(final byte[][] decodeInputs,
|
||||
public static void decodeAndFillBuffer(final ByteBuffer[] decodeInputs,
|
||||
AlignedStripe alignedStripe, int dataBlkNum, int parityBlkNum,
|
||||
RawErasureDecoder decoder) {
|
||||
// Step 1: prepare indices and output buffers for missing data units
|
||||
|
@ -364,8 +373,11 @@ public class StripedBlockUtil {
|
|||
}
|
||||
}
|
||||
decodeIndices = Arrays.copyOf(decodeIndices, pos);
|
||||
byte[][] decodeOutputs =
|
||||
new byte[decodeIndices.length][(int) alignedStripe.getSpanInBlock()];
|
||||
ByteBuffer[] decodeOutputs = new ByteBuffer[decodeIndices.length];
|
||||
for (int i = 0; i < decodeOutputs.length; i++) {
|
||||
decodeOutputs[i] = ByteBuffer.allocate(
|
||||
(int) alignedStripe.getSpanInBlock());
|
||||
}
|
||||
|
||||
// Step 2: decode into prepared output buffers
|
||||
decoder.decode(decodeInputs, decodeIndices, decodeOutputs);
|
||||
|
@ -374,8 +386,8 @@ public class StripedBlockUtil {
|
|||
for (int i = 0; i < decodeIndices.length; i++) {
|
||||
int missingBlkIdx = decodeIndices[i];
|
||||
StripingChunk chunk = alignedStripe.chunks[missingBlkIdx];
|
||||
if (chunk.state == StripingChunk.MISSING) {
|
||||
chunk.copyFrom(decodeOutputs[i]);
|
||||
if (chunk.state == StripingChunk.MISSING && chunk.useChunkBuffer()) {
|
||||
chunk.getChunkBuffer().copyFrom(decodeOutputs[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -402,7 +414,8 @@ public class StripedBlockUtil {
|
|||
|
||||
// Step 4: calculate each chunk's position in destination buffer. Since the
|
||||
// whole read range is within a single stripe, the logic is simpler here.
|
||||
int bufOffset = (int) (rangeStartInBlockGroup % ((long) cellSize * dataBlkNum));
|
||||
int bufOffset =
|
||||
(int) (rangeStartInBlockGroup % ((long) cellSize * dataBlkNum));
|
||||
for (StripingCell cell : cells) {
|
||||
long cellStart = cell.idxInInternalBlk * cellSize + cell.offset;
|
||||
long cellEnd = cellStart + cell.size - 1;
|
||||
|
@ -437,15 +450,14 @@ public class StripedBlockUtil {
|
|||
* @param rangeStartInBlockGroup The byte range's start offset in block group
|
||||
* @param rangeEndInBlockGroup The byte range's end offset in block group
|
||||
* @param buf Destination buffer of the read operation for the byte range
|
||||
* @param offsetInBuf Start offset into the destination buffer
|
||||
*
|
||||
* At most 5 stripes will be generated from each logical range, as
|
||||
* demonstrated in the header of {@link AlignedStripe}.
|
||||
*/
|
||||
public static AlignedStripe[] divideByteRangeIntoStripes(ErasureCodingPolicy ecPolicy,
|
||||
public static AlignedStripe[] divideByteRangeIntoStripes(
|
||||
ErasureCodingPolicy ecPolicy,
|
||||
int cellSize, LocatedStripedBlock blockGroup,
|
||||
long rangeStartInBlockGroup, long rangeEndInBlockGroup, byte[] buf,
|
||||
int offsetInBuf) {
|
||||
long rangeStartInBlockGroup, long rangeEndInBlockGroup, ByteBuffer buf) {
|
||||
|
||||
// Step 0: analyze range and calculate basic parameters
|
||||
final int dataBlkNum = ecPolicy.getNumDataUnits();
|
||||
|
@ -462,7 +474,7 @@ public class StripedBlockUtil {
|
|||
AlignedStripe[] stripes = mergeRangesForInternalBlocks(ecPolicy, ranges);
|
||||
|
||||
// Step 4: calculate each chunk's position in destination buffer
|
||||
calcualteChunkPositionsInBuf(cellSize, stripes, cells, buf, offsetInBuf);
|
||||
calcualteChunkPositionsInBuf(cellSize, stripes, cells, buf);
|
||||
|
||||
// Step 5: prepare ALLZERO blocks
|
||||
prepareAllZeroChunks(blockGroup, stripes, cellSize, dataBlkNum);
|
||||
|
@ -476,7 +488,8 @@ public class StripedBlockUtil {
|
|||
* used by {@link DFSStripedOutputStream} in encoding
|
||||
*/
|
||||
@VisibleForTesting
|
||||
private static StripingCell[] getStripingCellsOfByteRange(ErasureCodingPolicy ecPolicy,
|
||||
private static StripingCell[] getStripingCellsOfByteRange(
|
||||
ErasureCodingPolicy ecPolicy,
|
||||
int cellSize, LocatedStripedBlock blockGroup,
|
||||
long rangeStartInBlockGroup, long rangeEndInBlockGroup) {
|
||||
Preconditions.checkArgument(
|
||||
|
@ -511,7 +524,8 @@ public class StripedBlockUtil {
|
|||
* the physical byte range (inclusive) on each stored internal block.
|
||||
*/
|
||||
@VisibleForTesting
|
||||
private static VerticalRange[] getRangesForInternalBlocks(ErasureCodingPolicy ecPolicy,
|
||||
private static VerticalRange[] getRangesForInternalBlocks(
|
||||
ErasureCodingPolicy ecPolicy,
|
||||
int cellSize, StripingCell[] cells) {
|
||||
int dataBlkNum = ecPolicy.getNumDataUnits();
|
||||
int parityBlkNum = ecPolicy.getNumParityUnits();
|
||||
|
@ -575,8 +589,7 @@ public class StripedBlockUtil {
|
|||
}
|
||||
|
||||
private static void calcualteChunkPositionsInBuf(int cellSize,
|
||||
AlignedStripe[] stripes, StripingCell[] cells, byte[] buf,
|
||||
int offsetInBuf) {
|
||||
AlignedStripe[] stripes, StripingCell[] cells, ByteBuffer buf) {
|
||||
/**
|
||||
* | <--------------- AlignedStripe --------------->|
|
||||
*
|
||||
|
@ -598,6 +611,7 @@ public class StripedBlockUtil {
|
|||
for (StripingCell cell : cells) {
|
||||
long cellStart = cell.idxInInternalBlk * cellSize + cell.offset;
|
||||
long cellEnd = cellStart + cell.size - 1;
|
||||
StripingChunk chunk;
|
||||
for (AlignedStripe s : stripes) {
|
||||
long stripeEnd = s.getOffsetInBlock() + s.getSpanInBlock() - 1;
|
||||
long overlapStart = Math.max(cellStart, s.getOffsetInBlock());
|
||||
|
@ -606,11 +620,13 @@ public class StripedBlockUtil {
|
|||
if (overLapLen <= 0) {
|
||||
continue;
|
||||
}
|
||||
if (s.chunks[cell.idxInStripe] == null) {
|
||||
s.chunks[cell.idxInStripe] = new StripingChunk(buf);
|
||||
chunk = s.chunks[cell.idxInStripe];
|
||||
if (chunk == null) {
|
||||
chunk = new StripingChunk();
|
||||
s.chunks[cell.idxInStripe] = chunk;
|
||||
}
|
||||
s.chunks[cell.idxInStripe].addByteArraySlice(
|
||||
(int)(offsetInBuf + done + overlapStart - cellStart), overLapLen);
|
||||
chunk.getChunkBuffer().addSlice(buf,
|
||||
(int) (done + overlapStart - cellStart), overLapLen);
|
||||
}
|
||||
done += cell.size;
|
||||
}
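Editor's note: the loop above clips each StripingCell against each AlignedStripe before registering a slice. A minimal standalone illustration of that overlap arithmetic follows; all concrete numbers are invented, and overlapEnd is computed in the obvious way that the hunk does not show:

    public final class OverlapDemo {
      public static void main(String[] args) {
        long cellStart = 65536;             // idxInInternalBlk * cellSize + offset
        long cellSize = 65536;
        long cellEnd = cellStart + cellSize - 1;

        long stripeOffsetInBlock = 98304;   // s.getOffsetInBlock()
        long stripeSpanInBlock = 65536;     // s.getSpanInBlock()
        long stripeEnd = stripeOffsetInBlock + stripeSpanInBlock - 1;

        long overlapStart = Math.max(cellStart, stripeOffsetInBlock);
        long overlapEnd = Math.min(cellEnd, stripeEnd);
        int overLapLen = (int) (overlapEnd - overlapStart + 1);

        // A positive length means this cell contributes the bytes starting at
        // (overlapStart - cellStart) within the cell to that stripe's chunk.
        System.out.println("overlap length = " + overLapLen);   // prints 32768
      }
    }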
|
||||
|
@ -833,88 +849,89 @@ public class StripedBlockUtil {
|
|||
*/
|
||||
public int state = REQUESTED;
|
||||
|
||||
public final ChunkByteArray byteArray;
|
||||
public final ByteBuffer byteBuffer;
|
||||
private final ChunkByteBuffer chunkBuffer;
|
||||
private final ByteBuffer byteBuffer;
|
||||
|
||||
public StripingChunk(byte[] buf) {
|
||||
this.byteArray = new ChunkByteArray(buf);
|
||||
public StripingChunk() {
|
||||
this.chunkBuffer = new ChunkByteBuffer();
|
||||
byteBuffer = null;
|
||||
}
|
||||
|
||||
public StripingChunk(ByteBuffer buf) {
|
||||
this.byteArray = null;
|
||||
this.chunkBuffer = null;
|
||||
this.byteBuffer = buf;
|
||||
}
|
||||
|
||||
public StripingChunk(int state) {
|
||||
this.byteArray = null;
|
||||
this.chunkBuffer = null;
|
||||
this.byteBuffer = null;
|
||||
this.state = state;
|
||||
}
|
||||
|
||||
public void addByteArraySlice(int offset, int length) {
|
||||
assert byteArray != null;
|
||||
byteArray.offsetsInBuf.add(offset);
|
||||
byteArray.lengthsInBuf.add(length);
|
||||
public boolean useByteBuffer(){
|
||||
return byteBuffer != null;
|
||||
}
|
||||
|
||||
void copyTo(byte[] target) {
|
||||
assert byteArray != null;
|
||||
byteArray.copyTo(target);
|
||||
public boolean useChunkBuffer() {
|
||||
return chunkBuffer != null;
|
||||
}
|
||||
|
||||
void copyFrom(byte[] src) {
|
||||
assert byteArray != null;
|
||||
byteArray.copyFrom(src);
|
||||
public ByteBuffer getByteBuffer() {
|
||||
assert byteBuffer != null;
|
||||
return byteBuffer;
|
||||
}
|
||||
|
||||
public ChunkByteBuffer getChunkBuffer() {
|
||||
assert chunkBuffer != null;
|
||||
return chunkBuffer;
|
||||
}
|
||||
}
|
||||
|
||||
public static class ChunkByteArray {
|
||||
private final byte[] buf;
|
||||
private final List<Integer> offsetsInBuf;
|
||||
private final List<Integer> lengthsInBuf;
|
||||
/**
|
||||
* A utility to manage ByteBuffer slices for a reader.
|
||||
*/
|
||||
public static class ChunkByteBuffer {
|
||||
private final List<ByteBuffer> slices;
|
||||
|
||||
ChunkByteArray(byte[] buf) {
|
||||
this.buf = buf;
|
||||
this.offsetsInBuf = new ArrayList<>();
|
||||
this.lengthsInBuf = new ArrayList<>();
|
||||
ChunkByteBuffer() {
|
||||
this.slices = new ArrayList<>();
|
||||
}
|
||||
|
||||
public int[] getOffsets() {
|
||||
int[] offsets = new int[offsetsInBuf.size()];
|
||||
for (int i = 0; i < offsets.length; i++) {
|
||||
offsets[i] = offsetsInBuf.get(i);
|
||||
public void addSlice(ByteBuffer buffer, int offset, int len) {
|
||||
ByteBuffer tmp = buffer.duplicate();
|
||||
tmp.position(buffer.position() + offset);
|
||||
tmp.limit(buffer.position() + offset + len);
|
||||
slices.add(tmp.slice());
|
||||
}
|
||||
|
||||
public ByteBuffer getSlice(int i) {
|
||||
return slices.get(i);
|
||||
}
|
||||
|
||||
public List<ByteBuffer> getSlices() {
|
||||
return slices;
|
||||
}
|
||||
|
||||
/**
|
||||
* Note: target will be in a ready-to-read state after the call.
|
||||
*/
|
||||
void copyTo(ByteBuffer target) {
|
||||
for (ByteBuffer slice : slices) {
|
||||
slice.flip();
|
||||
target.put(slice);
|
||||
}
|
||||
return offsets;
|
||||
target.flip();
|
||||
}
|
||||
|
||||
public int[] getLengths() {
|
||||
int[] lens = new int[this.lengthsInBuf.size()];
|
||||
for (int i = 0; i < lens.length; i++) {
|
||||
lens[i] = this.lengthsInBuf.get(i);
|
||||
}
|
||||
return lens;
|
||||
}
|
||||
|
||||
public byte[] buf() {
|
||||
return buf;
|
||||
}
|
||||
|
||||
void copyTo(byte[] target) {
|
||||
int posInBuf = 0;
|
||||
for (int i = 0; i < offsetsInBuf.size(); i++) {
|
||||
System.arraycopy(buf, offsetsInBuf.get(i),
|
||||
target, posInBuf, lengthsInBuf.get(i));
|
||||
posInBuf += lengthsInBuf.get(i);
|
||||
}
|
||||
}
|
||||
|
||||
void copyFrom(byte[] src) {
|
||||
int srcPos = 0;
|
||||
for (int j = 0; j < offsetsInBuf.size(); j++) {
|
||||
System.arraycopy(src, srcPos, buf, offsetsInBuf.get(j),
|
||||
lengthsInBuf.get(j));
|
||||
srcPos += lengthsInBuf.get(j);
|
||||
void copyFrom(ByteBuffer src) {
|
||||
ByteBuffer tmp;
|
||||
int len;
|
||||
for (ByteBuffer slice : slices) {
|
||||
len = slice.remaining();
|
||||
tmp = src.duplicate();
|
||||
tmp.limit(tmp.position() + len);
|
||||
slice.put(tmp);
|
||||
src.position(src.position() + len);
|
||||
}
|
||||
}
|
||||
}
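Editor's note: ChunkByteBuffer relies on duplicate() and slice() producing views over the same backing storage, so filling a slice in copyFrom() also fills the reader's destination buffer. A small standalone sketch of that idiom (class name, offsets and sizes are illustrative, not Hadoop API):

    import java.nio.ByteBuffer;

    public final class SliceDemo {
      // Carve a zero-copy window [offset, offset + len) out of buf, relative to
      // buf's current position, without disturbing buf's own position/limit.
      static ByteBuffer sliceOf(ByteBuffer buf, int offset, int len) {
        ByteBuffer tmp = buf.duplicate();          // shares content, independent marks
        tmp.position(buf.position() + offset);
        tmp.limit(buf.position() + offset + len);
        return tmp.slice();                        // position 0, capacity == len
      }

      public static void main(String[] args) {
        ByteBuffer dest = ByteBuffer.allocate(16);
        ByteBuffer window = sliceOf(dest, 4, 8);
        window.put(new byte[]{1, 2, 3, 4, 5, 6, 7, 8});
        // Writes through the slice are visible in the backing buffer.
        System.out.println(dest.get(4));           // prints 1
      }
    }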
|
||||
|
|
|
@ -183,6 +183,7 @@ public class URLConnectionFactory {
|
|||
return openConnection(url, false);
|
||||
} catch (AuthenticationException e) {
|
||||
// Unreachable
|
||||
LOG.error("Open connection {} failed", url, e);
|
||||
return null;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -38,6 +38,7 @@ import org.apache.hadoop.fs.permission.AclEntry;
|
|||
import org.apache.hadoop.fs.permission.AclStatus;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
|
||||
import org.apache.hadoop.lib.wsrs.EnumSetParam;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
|
@ -181,6 +182,8 @@ public class HttpFSFileSystem extends FileSystem
|
|||
public static final String ACL_ENTRIES_JSON = "entries";
|
||||
public static final String ACL_BIT_JSON = "aclBit";
|
||||
|
||||
public static final String ENC_BIT_JSON = "encBit";
|
||||
|
||||
public static final int HTTP_TEMPORARY_REDIRECT = 307;
|
||||
|
||||
private static final String HTTP_GET = "GET";
|
||||
|
@ -955,6 +958,21 @@ public class HttpFSFileSystem extends FileSystem
|
|||
return createAclStatus(json);
|
||||
}
|
||||
|
||||
/** Convert the permission fields of a file status JSON object to an FsPermission. */
|
||||
static FsPermission toFsPermission(JSONObject json) {
|
||||
final String s = (String) json.get(PERMISSION_JSON);
|
||||
final Boolean aclBit = (Boolean) json.get(ACL_BIT_JSON);
|
||||
final Boolean encBit = (Boolean) json.get(ENC_BIT_JSON);
|
||||
FsPermission perm = new FsPermission(Short.parseShort(s, 8));
|
||||
final boolean aBit = (aclBit != null) ? aclBit : false;
|
||||
final boolean eBit = (encBit != null) ? encBit : false;
|
||||
if (aBit || eBit) {
|
||||
return new FsPermissionExtension(perm, aBit, eBit);
|
||||
} else {
|
||||
return perm;
|
||||
}
|
||||
}
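Editor's note: toFsPermission() above parses the octal permission string and only wraps it in FsPermissionExtension when one of the optional bits is present. A standalone sketch of just that parsing logic, with hard-coded values standing in for the JSON fields:

    public final class PermissionJsonDemo {
      public static void main(String[] args) {
        String s = "644";                     // value of the "permission" field
        Boolean aclBit = null;                // absent in the JSON -> null
        Boolean encBit = Boolean.TRUE;        // present and true

        short mode = Short.parseShort(s, 8);  // parsed as octal: 0644 == 420
        boolean aBit = (aclBit != null) ? aclBit : false;
        boolean eBit = (encBit != null) ? encBit : false;

        // toFsPermission() builds FsPermission / FsPermissionExtension from these;
        // here we only show the decision the flags drive.
        System.out.printf("mode=%o aclBit=%b encBit=%b extended=%b%n",
            mode, aBit, eBit, aBit || eBit);
      }
    }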
|
||||
|
||||
private FileStatus createFileStatus(Path parent, JSONObject json) {
|
||||
String pathSuffix = (String) json.get(PATH_SUFFIX_JSON);
|
||||
Path path = (pathSuffix.equals("")) ? parent : new Path(parent, pathSuffix);
|
||||
|
@ -962,8 +980,7 @@ public class HttpFSFileSystem extends FileSystem
|
|||
long len = (Long) json.get(LENGTH_JSON);
|
||||
String owner = (String) json.get(OWNER_JSON);
|
||||
String group = (String) json.get(GROUP_JSON);
|
||||
FsPermission permission =
|
||||
new FsPermission(Short.parseShort((String) json.get(PERMISSION_JSON), 8));
|
||||
final FsPermission permission = toFsPermission(json);
|
||||
long aTime = (Long) json.get(ACCESS_TIME_JSON);
|
||||
long mTime = (Long) json.get(MODIFICATION_TIME_JSON);
|
||||
long blockSize = (Long) json.get(BLOCK_SIZE_JSON);
|
||||
|
|
|
@ -31,7 +31,6 @@ import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
|
|||
import org.apache.hadoop.fs.permission.AclEntry;
|
||||
import org.apache.hadoop.fs.permission.AclStatus;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hdfs.protocol.AclException;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.lib.service.FileSystemAccess;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
|
@ -54,148 +53,59 @@ import java.util.Map.Entry;
|
|||
public class FSOperations {
|
||||
|
||||
/**
|
||||
* This class is used to group a FileStatus and an AclStatus together.
|
||||
* It's needed for the GETFILESTATUS and LISTSTATUS calls, which take
|
||||
* most info from the FileStatus and a wee bit from the AclStatus.
|
||||
* @param fileStatus a FileStatus object
|
||||
* @return JSON map suitable for wire transport
|
||||
*/
|
||||
private static class StatusPair {
|
||||
private FileStatus fileStatus;
|
||||
private AclStatus aclStatus;
|
||||
|
||||
/**
|
||||
* Simple constructor
|
||||
* @param fileStatus Existing FileStatus object
|
||||
* @param aclStatus Existing AclStatus object
|
||||
*/
|
||||
public StatusPair(FileStatus fileStatus, AclStatus aclStatus) {
|
||||
this.fileStatus = fileStatus;
|
||||
this.aclStatus = aclStatus;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create one StatusPair by performing the underlying calls to
|
||||
* fs.getFileStatus and fs.getAclStatus
|
||||
* @param fs The FileSystem where 'path' lives
|
||||
* @param path The file/directory to query
|
||||
* @throws IOException
|
||||
*/
|
||||
public StatusPair(FileSystem fs, Path path) throws IOException {
|
||||
fileStatus = fs.getFileStatus(path);
|
||||
aclStatus = null;
|
||||
try {
|
||||
aclStatus = fs.getAclStatus(path);
|
||||
} catch (AclException e) {
|
||||
/*
|
||||
* The cause is almost certainly an "ACLS aren't enabled"
|
||||
* exception, so leave aclStatus at null and carry on.
|
||||
*/
|
||||
} catch (UnsupportedOperationException e) {
|
||||
/* Ditto above - this is the case for a local file system */
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a Map suitable for conversion into JSON format
|
||||
* @return The JSONish Map
|
||||
*/
|
||||
public Map<String,Object> toJson() {
|
||||
Map<String,Object> json = new LinkedHashMap<String,Object>();
|
||||
json.put(HttpFSFileSystem.FILE_STATUS_JSON, toJsonInner(true));
|
||||
return json;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return in inner part of the JSON for the status - used by both the
|
||||
* GETFILESTATUS and LISTSTATUS calls.
|
||||
* @param emptyPathSuffix Whether or not to include PATH_SUFFIX_JSON
|
||||
* @return The JSONish Map
|
||||
*/
|
||||
public Map<String,Object> toJsonInner(boolean emptyPathSuffix) {
|
||||
Map<String,Object> json = new LinkedHashMap<String,Object>();
|
||||
json.put(HttpFSFileSystem.PATH_SUFFIX_JSON,
|
||||
(emptyPathSuffix) ? "" : fileStatus.getPath().getName());
|
||||
json.put(HttpFSFileSystem.TYPE_JSON,
|
||||
HttpFSFileSystem.FILE_TYPE.getType(fileStatus).toString());
|
||||
json.put(HttpFSFileSystem.LENGTH_JSON, fileStatus.getLen());
|
||||
json.put(HttpFSFileSystem.OWNER_JSON, fileStatus.getOwner());
|
||||
json.put(HttpFSFileSystem.GROUP_JSON, fileStatus.getGroup());
|
||||
json.put(HttpFSFileSystem.PERMISSION_JSON,
|
||||
HttpFSFileSystem.permissionToString(fileStatus.getPermission()));
|
||||
json.put(HttpFSFileSystem.ACCESS_TIME_JSON, fileStatus.getAccessTime());
|
||||
json.put(HttpFSFileSystem.MODIFICATION_TIME_JSON,
|
||||
fileStatus.getModificationTime());
|
||||
json.put(HttpFSFileSystem.BLOCK_SIZE_JSON, fileStatus.getBlockSize());
|
||||
json.put(HttpFSFileSystem.REPLICATION_JSON, fileStatus.getReplication());
|
||||
if ( (aclStatus != null) && !(aclStatus.getEntries().isEmpty()) ) {
|
||||
json.put(HttpFSFileSystem.ACL_BIT_JSON,true);
|
||||
}
|
||||
return json;
|
||||
}
|
||||
private static Map<String, Object> toJson(FileStatus fileStatus) {
|
||||
Map<String, Object> json = new LinkedHashMap<>();
|
||||
json.put(HttpFSFileSystem.FILE_STATUS_JSON, toJsonInner(fileStatus, true));
|
||||
return json;
|
||||
}
|
||||
|
||||
/**
|
||||
* Simple class used to contain and operate upon a list of StatusPair
|
||||
* objects. Used by LISTSTATUS.
|
||||
* @param fileStatuses list of FileStatus objects
|
||||
* @return JSON map suitable for wire transport
|
||||
*/
|
||||
private static class StatusPairs {
|
||||
private StatusPair[] statusPairs;
|
||||
|
||||
/**
|
||||
* Construct a list of StatusPair objects
|
||||
* @param fs The FileSystem where 'path' lives
|
||||
* @param path The directory to query
|
||||
* @param filter A possible filter for entries in the directory
|
||||
* @throws IOException
|
||||
*/
|
||||
public StatusPairs(FileSystem fs, Path path, PathFilter filter)
|
||||
throws IOException {
|
||||
/* Grab all the file statuses at once in an array */
|
||||
FileStatus[] fileStatuses = fs.listStatus(path, filter);
|
||||
|
||||
/* We'll have an array of StatusPairs of the same length */
|
||||
AclStatus aclStatus = null;
|
||||
statusPairs = new StatusPair[fileStatuses.length];
|
||||
|
||||
/*
|
||||
* For each FileStatus, attempt to acquire an AclStatus. If the
|
||||
* getAclStatus throws an exception, we assume that ACLs are turned
|
||||
* off entirely and abandon the attempt.
|
||||
*/
|
||||
boolean useAcls = true; // Assume ACLs work until proven otherwise
|
||||
for (int i = 0; i < fileStatuses.length; i++) {
|
||||
if (useAcls) {
|
||||
try {
|
||||
aclStatus = fs.getAclStatus(fileStatuses[i].getPath());
|
||||
} catch (AclException e) {
|
||||
/* Almost certainly due to an "ACLs not enabled" exception */
|
||||
aclStatus = null;
|
||||
useAcls = false;
|
||||
} catch (UnsupportedOperationException e) {
|
||||
/* Ditto above - this is the case for a local file system */
|
||||
aclStatus = null;
|
||||
useAcls = false;
|
||||
}
|
||||
}
|
||||
statusPairs[i] = new StatusPair(fileStatuses[i], aclStatus);
|
||||
}
|
||||
@SuppressWarnings({"unchecked"})
|
||||
private static Map<String, Object> toJson(FileStatus[] fileStatuses) {
|
||||
Map<String, Object> json = new LinkedHashMap<>();
|
||||
Map<String, Object> inner = new LinkedHashMap<>();
|
||||
JSONArray statuses = new JSONArray();
|
||||
for (FileStatus f : fileStatuses) {
|
||||
statuses.add(toJsonInner(f, false));
|
||||
}
|
||||
inner.put(HttpFSFileSystem.FILE_STATUS_JSON, statuses);
|
||||
json.put(HttpFSFileSystem.FILE_STATUSES_JSON, inner);
|
||||
return json;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a Map suitable for conversion into JSON.
|
||||
* @return A JSONish Map
|
||||
*/
|
||||
@SuppressWarnings({"unchecked"})
|
||||
public Map<String,Object> toJson() {
|
||||
Map<String,Object> json = new LinkedHashMap<String,Object>();
|
||||
Map<String,Object> inner = new LinkedHashMap<String,Object>();
|
||||
JSONArray statuses = new JSONArray();
|
||||
for (StatusPair s : statusPairs) {
|
||||
statuses.add(s.toJsonInner(false));
|
||||
}
|
||||
inner.put(HttpFSFileSystem.FILE_STATUS_JSON, statuses);
|
||||
json.put(HttpFSFileSystem.FILE_STATUSES_JSON, inner);
|
||||
return json;
|
||||
/**
|
||||
* Not meant to be called directly except by the other toJson functions.
|
||||
*/
|
||||
private static Map<String, Object> toJsonInner(FileStatus fileStatus,
|
||||
boolean emptyPathSuffix) {
|
||||
Map<String, Object> json = new LinkedHashMap<String, Object>();
|
||||
json.put(HttpFSFileSystem.PATH_SUFFIX_JSON,
|
||||
(emptyPathSuffix) ? "" : fileStatus.getPath().getName());
|
||||
json.put(HttpFSFileSystem.TYPE_JSON,
|
||||
HttpFSFileSystem.FILE_TYPE.getType(fileStatus).toString());
|
||||
json.put(HttpFSFileSystem.LENGTH_JSON, fileStatus.getLen());
|
||||
json.put(HttpFSFileSystem.OWNER_JSON, fileStatus.getOwner());
|
||||
json.put(HttpFSFileSystem.GROUP_JSON, fileStatus.getGroup());
|
||||
json.put(HttpFSFileSystem.PERMISSION_JSON,
|
||||
HttpFSFileSystem.permissionToString(fileStatus.getPermission()));
|
||||
json.put(HttpFSFileSystem.ACCESS_TIME_JSON, fileStatus.getAccessTime());
|
||||
json.put(HttpFSFileSystem.MODIFICATION_TIME_JSON,
|
||||
fileStatus.getModificationTime());
|
||||
json.put(HttpFSFileSystem.BLOCK_SIZE_JSON, fileStatus.getBlockSize());
|
||||
json.put(HttpFSFileSystem.REPLICATION_JSON, fileStatus.getReplication());
|
||||
if (fileStatus.getPermission().getAclBit()) {
|
||||
json.put(HttpFSFileSystem.ACL_BIT_JSON, true);
|
||||
}
|
||||
if (fileStatus.getPermission().getEncryptedBit()) {
|
||||
json.put(HttpFSFileSystem.ENC_BIT_JSON, true);
|
||||
}
|
||||
return json;
|
||||
}
|
||||
|
||||
/** Converts an <code>AclStatus</code> object into a JSON object.
|
||||
|
@ -637,8 +547,8 @@ public class FSOperations {
|
|||
*/
|
||||
@Override
|
||||
public Map execute(FileSystem fs) throws IOException {
|
||||
StatusPair sp = new StatusPair(fs, path);
|
||||
return sp.toJson();
|
||||
FileStatus status = fs.getFileStatus(path);
|
||||
return toJson(status);
|
||||
}
|
||||
|
||||
}
|
||||
|
@ -703,8 +613,8 @@ public class FSOperations {
|
|||
*/
|
||||
@Override
|
||||
public Map execute(FileSystem fs) throws IOException {
|
||||
StatusPairs sp = new StatusPairs(fs, path, filter);
|
||||
return sp.toJson();
|
||||
FileStatus[] fileStatuses = fs.listStatus(path, filter);
|
||||
return toJson(fileStatuses);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -40,6 +40,7 @@ import org.apache.hadoop.test.HadoopUsersConfTestHelper;
|
|||
import org.apache.hadoop.test.TestDir;
|
||||
import org.apache.hadoop.test.TestDirHelper;
|
||||
import org.apache.hadoop.test.TestHdfs;
|
||||
import org.apache.hadoop.test.TestHdfsHelper;
|
||||
import org.apache.hadoop.test.TestJetty;
|
||||
import org.apache.hadoop.test.TestJettyHelper;
|
||||
import org.junit.Assert;
|
||||
|
@ -66,6 +67,11 @@ import java.util.Collection;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.junit.Assert.assertArrayEquals;
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertFalse;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
@RunWith(value = Parameterized.class)
|
||||
public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
||||
|
||||
|
@ -81,9 +87,9 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
|
||||
private void createHttpFSServer() throws Exception {
|
||||
File homeDir = TestDirHelper.getTestDir();
|
||||
Assert.assertTrue(new File(homeDir, "conf").mkdir());
|
||||
Assert.assertTrue(new File(homeDir, "log").mkdir());
|
||||
Assert.assertTrue(new File(homeDir, "temp").mkdir());
|
||||
assertTrue(new File(homeDir, "conf").mkdir());
|
||||
assertTrue(new File(homeDir, "log").mkdir());
|
||||
assertTrue(new File(homeDir, "temp").mkdir());
|
||||
HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
|
||||
|
||||
File secretFile = new File(new File(homeDir, "conf"), "secret");
|
||||
|
@ -143,7 +149,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
Assert.assertNotNull(fs);
|
||||
URI uri = new URI(getScheme() + "://" +
|
||||
TestJettyHelper.getJettyURL().toURI().getAuthority());
|
||||
Assert.assertEquals(fs.getUri(), uri);
|
||||
assertEquals(fs.getUri(), uri);
|
||||
fs.close();
|
||||
}
|
||||
|
||||
|
@ -156,7 +162,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
fs.close();
|
||||
fs = getHttpFSFileSystem();
|
||||
InputStream is = fs.open(new Path(path.toUri().getPath()));
|
||||
Assert.assertEquals(is.read(), 1);
|
||||
assertEquals(is.read(), 1);
|
||||
is.close();
|
||||
fs.close();
|
||||
}
|
||||
|
@ -173,12 +179,12 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
fs = FileSystem.get(getProxiedFSConf());
|
||||
FileStatus status = fs.getFileStatus(path);
|
||||
if (!isLocalFS()) {
|
||||
Assert.assertEquals(status.getReplication(), 2);
|
||||
Assert.assertEquals(status.getBlockSize(), 100 * 1024 * 1024);
|
||||
assertEquals(status.getReplication(), 2);
|
||||
assertEquals(status.getBlockSize(), 100 * 1024 * 1024);
|
||||
}
|
||||
Assert.assertEquals(status.getPermission(), permission);
|
||||
assertEquals(status.getPermission(), permission);
|
||||
InputStream is = fs.open(path);
|
||||
Assert.assertEquals(is.read(), 1);
|
||||
assertEquals(is.read(), 1);
|
||||
is.close();
|
||||
fs.close();
|
||||
}
|
||||
|
@ -216,9 +222,9 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
fs.close();
|
||||
fs = FileSystem.get(getProxiedFSConf());
|
||||
InputStream is = fs.open(path);
|
||||
Assert.assertEquals(is.read(), 1);
|
||||
Assert.assertEquals(is.read(), 2);
|
||||
Assert.assertEquals(is.read(), -1);
|
||||
assertEquals(is.read(), 1);
|
||||
assertEquals(is.read(), 2);
|
||||
assertEquals(is.read(), -1);
|
||||
is.close();
|
||||
fs.close();
|
||||
}
|
||||
|
@ -239,10 +245,10 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
final int newLength = blockSize;
|
||||
|
||||
boolean isReady = fs.truncate(file, newLength);
|
||||
Assert.assertTrue("Recovery is not expected.", isReady);
|
||||
assertTrue("Recovery is not expected.", isReady);
|
||||
|
||||
FileStatus fileStatus = fs.getFileStatus(file);
|
||||
Assert.assertEquals(fileStatus.getLen(), newLength);
|
||||
assertEquals(fileStatus.getLen(), newLength);
|
||||
AppendTestUtil.checkFullFile(fs, file, newLength, data, file.toString());
|
||||
|
||||
fs.close();
|
||||
|
@ -266,9 +272,9 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
fs.concat(path1, new Path[]{path2, path3});
|
||||
fs.close();
|
||||
fs = FileSystem.get(config);
|
||||
Assert.assertTrue(fs.exists(path1));
|
||||
Assert.assertFalse(fs.exists(path2));
|
||||
Assert.assertFalse(fs.exists(path3));
|
||||
assertTrue(fs.exists(path1));
|
||||
assertFalse(fs.exists(path2));
|
||||
assertFalse(fs.exists(path3));
|
||||
fs.close();
|
||||
}
|
||||
}
|
||||
|
@ -284,8 +290,8 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
fs.rename(oldPath, newPath);
|
||||
fs.close();
|
||||
fs = FileSystem.get(getProxiedFSConf());
|
||||
Assert.assertFalse(fs.exists(oldPath));
|
||||
Assert.assertTrue(fs.exists(newPath));
|
||||
assertFalse(fs.exists(oldPath));
|
||||
assertTrue(fs.exists(newPath));
|
||||
fs.close();
|
||||
}
|
||||
|
||||
|
@ -299,8 +305,8 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
fs.mkdirs(foe);
|
||||
|
||||
FileSystem hoopFs = getHttpFSFileSystem();
|
||||
Assert.assertTrue(hoopFs.delete(new Path(foo.toUri().getPath()), false));
|
||||
Assert.assertFalse(fs.exists(foo));
|
||||
assertTrue(hoopFs.delete(new Path(foo.toUri().getPath()), false));
|
||||
assertFalse(fs.exists(foo));
|
||||
try {
|
||||
hoopFs.delete(new Path(bar.toUri().getPath()), false);
|
||||
Assert.fail();
|
||||
|
@ -308,13 +314,13 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
} catch (Exception ex) {
|
||||
Assert.fail();
|
||||
}
|
||||
Assert.assertTrue(fs.exists(bar));
|
||||
Assert.assertTrue(hoopFs.delete(new Path(bar.toUri().getPath()), true));
|
||||
Assert.assertFalse(fs.exists(bar));
|
||||
assertTrue(fs.exists(bar));
|
||||
assertTrue(hoopFs.delete(new Path(bar.toUri().getPath()), true));
|
||||
assertFalse(fs.exists(bar));
|
||||
|
||||
Assert.assertTrue(fs.exists(foe));
|
||||
Assert.assertTrue(hoopFs.delete(foe, true));
|
||||
Assert.assertFalse(fs.exists(foe));
|
||||
assertTrue(fs.exists(foe));
|
||||
assertTrue(hoopFs.delete(foe, true));
|
||||
assertFalse(fs.exists(foe));
|
||||
|
||||
hoopFs.close();
|
||||
fs.close();
|
||||
|
@ -333,19 +339,20 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
FileStatus status2 = fs.getFileStatus(new Path(path.toUri().getPath()));
|
||||
fs.close();
|
||||
|
||||
Assert.assertEquals(status2.getPermission(), status1.getPermission());
|
||||
Assert.assertEquals(status2.getPath().toUri().getPath(), status1.getPath().toUri().getPath());
|
||||
Assert.assertEquals(status2.getReplication(), status1.getReplication());
|
||||
Assert.assertEquals(status2.getBlockSize(), status1.getBlockSize());
|
||||
Assert.assertEquals(status2.getAccessTime(), status1.getAccessTime());
|
||||
Assert.assertEquals(status2.getModificationTime(), status1.getModificationTime());
|
||||
Assert.assertEquals(status2.getOwner(), status1.getOwner());
|
||||
Assert.assertEquals(status2.getGroup(), status1.getGroup());
|
||||
Assert.assertEquals(status2.getLen(), status1.getLen());
|
||||
assertEquals(status2.getPermission(), status1.getPermission());
|
||||
assertEquals(status2.getPath().toUri().getPath(),
|
||||
status1.getPath().toUri().getPath());
|
||||
assertEquals(status2.getReplication(), status1.getReplication());
|
||||
assertEquals(status2.getBlockSize(), status1.getBlockSize());
|
||||
assertEquals(status2.getAccessTime(), status1.getAccessTime());
|
||||
assertEquals(status2.getModificationTime(), status1.getModificationTime());
|
||||
assertEquals(status2.getOwner(), status1.getOwner());
|
||||
assertEquals(status2.getGroup(), status1.getGroup());
|
||||
assertEquals(status2.getLen(), status1.getLen());
|
||||
|
||||
FileStatus[] stati = fs.listStatus(path.getParent());
|
||||
Assert.assertEquals(stati.length, 1);
|
||||
Assert.assertEquals(stati[0].getPath().getName(), path.getName());
|
||||
assertEquals(stati.length, 1);
|
||||
assertEquals(stati[0].getPath().getName(), path.getName());
|
||||
}
|
||||
|
||||
private void testWorkingdirectory() throws Exception {
|
||||
|
@ -359,14 +366,15 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
}
|
||||
Path httpFSWorkingDir = fs.getWorkingDirectory();
|
||||
fs.close();
|
||||
Assert.assertEquals(httpFSWorkingDir.toUri().getPath(),
|
||||
assertEquals(httpFSWorkingDir.toUri().getPath(),
|
||||
workingDir.toUri().getPath());
|
||||
|
||||
fs = getHttpFSFileSystem();
|
||||
fs.setWorkingDirectory(new Path("/tmp"));
|
||||
workingDir = fs.getWorkingDirectory();
|
||||
fs.close();
|
||||
Assert.assertEquals(workingDir.toUri().getPath(), new Path("/tmp").toUri().getPath());
|
||||
assertEquals(workingDir.toUri().getPath(),
|
||||
new Path("/tmp").toUri().getPath());
|
||||
}
|
||||
|
||||
private void testMkdirs() throws Exception {
|
||||
|
@ -375,7 +383,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
fs.mkdirs(path);
|
||||
fs.close();
|
||||
fs = FileSystem.get(getProxiedFSConf());
|
||||
Assert.assertTrue(fs.exists(path));
|
||||
assertTrue(fs.exists(path));
|
||||
fs.close();
|
||||
}
|
||||
|
||||
|
@ -400,8 +408,8 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
fs.close();
|
||||
long atNew = status1.getAccessTime();
|
||||
long mtNew = status1.getModificationTime();
|
||||
Assert.assertEquals(mtNew, mt - 10);
|
||||
Assert.assertEquals(atNew, at - 20);
|
||||
assertEquals(mtNew, mt - 10);
|
||||
assertEquals(atNew, at - 20);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -419,7 +427,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
FileStatus status1 = fs.getFileStatus(path);
|
||||
fs.close();
|
||||
FsPermission permission2 = status1.getPermission();
|
||||
Assert.assertEquals(permission2, permission1);
|
||||
assertEquals(permission2, permission1);
|
||||
|
||||
//sticky bit
|
||||
fs = getHttpFSFileSystem();
|
||||
|
@ -431,8 +439,8 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
status1 = fs.getFileStatus(path);
|
||||
fs.close();
|
||||
permission2 = status1.getPermission();
|
||||
Assert.assertTrue(permission2.getStickyBit());
|
||||
Assert.assertEquals(permission2, permission1);
|
||||
assertTrue(permission2.getStickyBit());
|
||||
assertEquals(permission2, permission1);
|
||||
}
|
||||
|
||||
private void testSetOwner() throws Exception {
|
||||
|
@ -454,8 +462,8 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
fs = FileSystem.get(getProxiedFSConf());
|
||||
FileStatus status1 = fs.getFileStatus(path);
|
||||
fs.close();
|
||||
Assert.assertEquals(status1.getOwner(), user);
|
||||
Assert.assertEquals(status1.getGroup(), group);
|
||||
assertEquals(status1.getOwner(), user);
|
||||
assertEquals(status1.getGroup(), group);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -475,7 +483,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
fs = FileSystem.get(getProxiedFSConf());
|
||||
FileStatus status1 = fs.getFileStatus(path);
|
||||
fs.close();
|
||||
Assert.assertEquals(status1.getReplication(), (short) 1);
|
||||
assertEquals(status1.getReplication(), (short) 1);
|
||||
}
|
||||
|
||||
private void testChecksum() throws Exception {
|
||||
|
@ -491,9 +499,10 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
fs = getHttpFSFileSystem();
|
||||
FileChecksum httpChecksum = fs.getFileChecksum(path);
|
||||
fs.close();
|
||||
Assert.assertEquals(httpChecksum.getAlgorithmName(), hdfsChecksum.getAlgorithmName());
|
||||
Assert.assertEquals(httpChecksum.getLength(), hdfsChecksum.getLength());
|
||||
Assert.assertArrayEquals(httpChecksum.getBytes(), hdfsChecksum.getBytes());
|
||||
assertEquals(httpChecksum.getAlgorithmName(),
|
||||
hdfsChecksum.getAlgorithmName());
|
||||
assertEquals(httpChecksum.getLength(), hdfsChecksum.getLength());
|
||||
assertArrayEquals(httpChecksum.getBytes(), hdfsChecksum.getBytes());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -508,12 +517,17 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
fs = getHttpFSFileSystem();
|
||||
ContentSummary httpContentSummary = fs.getContentSummary(path);
|
||||
fs.close();
|
||||
Assert.assertEquals(httpContentSummary.getDirectoryCount(), hdfsContentSummary.getDirectoryCount());
|
||||
Assert.assertEquals(httpContentSummary.getFileCount(), hdfsContentSummary.getFileCount());
|
||||
Assert.assertEquals(httpContentSummary.getLength(), hdfsContentSummary.getLength());
|
||||
Assert.assertEquals(httpContentSummary.getQuota(), hdfsContentSummary.getQuota());
|
||||
Assert.assertEquals(httpContentSummary.getSpaceConsumed(), hdfsContentSummary.getSpaceConsumed());
|
||||
Assert.assertEquals(httpContentSummary.getSpaceQuota(), hdfsContentSummary.getSpaceQuota());
|
||||
assertEquals(httpContentSummary.getDirectoryCount(),
|
||||
hdfsContentSummary.getDirectoryCount());
|
||||
assertEquals(httpContentSummary.getFileCount(),
|
||||
hdfsContentSummary.getFileCount());
|
||||
assertEquals(httpContentSummary.getLength(),
|
||||
hdfsContentSummary.getLength());
|
||||
assertEquals(httpContentSummary.getQuota(), hdfsContentSummary.getQuota());
|
||||
assertEquals(httpContentSummary.getSpaceConsumed(),
|
||||
hdfsContentSummary.getSpaceConsumed());
|
||||
assertEquals(httpContentSummary.getSpaceQuota(),
|
||||
hdfsContentSummary.getSpaceQuota());
|
||||
}
|
||||
|
||||
/** Set xattr */
|
||||
|
@ -552,11 +566,11 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
fs = FileSystem.get(getProxiedFSConf());
|
||||
Map<String, byte[]> xAttrs = fs.getXAttrs(path);
|
||||
fs.close();
|
||||
Assert.assertEquals(4, xAttrs.size());
|
||||
Assert.assertArrayEquals(value1, xAttrs.get(name1));
|
||||
Assert.assertArrayEquals(value2, xAttrs.get(name2));
|
||||
Assert.assertArrayEquals(new byte[0], xAttrs.get(name3));
|
||||
Assert.assertArrayEquals(value4, xAttrs.get(name4));
|
||||
assertEquals(4, xAttrs.size());
|
||||
assertArrayEquals(value1, xAttrs.get(name1));
|
||||
assertArrayEquals(value2, xAttrs.get(name2));
|
||||
assertArrayEquals(new byte[0], xAttrs.get(name3));
|
||||
assertArrayEquals(value4, xAttrs.get(name4));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -595,16 +609,16 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
names.add(name4);
|
||||
Map<String, byte[]> xAttrs = fs.getXAttrs(path, names);
|
||||
fs.close();
|
||||
Assert.assertEquals(4, xAttrs.size());
|
||||
Assert.assertArrayEquals(value1, xAttrs.get(name1));
|
||||
Assert.assertArrayEquals(value2, xAttrs.get(name2));
|
||||
Assert.assertArrayEquals(new byte[0], xAttrs.get(name3));
|
||||
Assert.assertArrayEquals(value4, xAttrs.get(name4));
|
||||
assertEquals(4, xAttrs.size());
|
||||
assertArrayEquals(value1, xAttrs.get(name1));
|
||||
assertArrayEquals(value2, xAttrs.get(name2));
|
||||
assertArrayEquals(new byte[0], xAttrs.get(name3));
|
||||
assertArrayEquals(value4, xAttrs.get(name4));
|
||||
|
||||
// Get specific xattr
|
||||
fs = getHttpFSFileSystem();
|
||||
byte[] value = fs.getXAttr(path, name1);
|
||||
Assert.assertArrayEquals(value1, value);
|
||||
assertArrayEquals(value1, value);
|
||||
final String name5 = "a1";
|
||||
try {
|
||||
value = fs.getXAttr(path, name5);
|
||||
|
@ -618,11 +632,11 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
fs = getHttpFSFileSystem();
|
||||
xAttrs = fs.getXAttrs(path);
|
||||
fs.close();
|
||||
Assert.assertEquals(4, xAttrs.size());
|
||||
Assert.assertArrayEquals(value1, xAttrs.get(name1));
|
||||
Assert.assertArrayEquals(value2, xAttrs.get(name2));
|
||||
Assert.assertArrayEquals(new byte[0], xAttrs.get(name3));
|
||||
Assert.assertArrayEquals(value4, xAttrs.get(name4));
|
||||
assertEquals(4, xAttrs.size());
|
||||
assertArrayEquals(value1, xAttrs.get(name1));
|
||||
assertArrayEquals(value2, xAttrs.get(name2));
|
||||
assertArrayEquals(new byte[0], xAttrs.get(name3));
|
||||
assertArrayEquals(value4, xAttrs.get(name4));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -667,8 +681,8 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
fs = FileSystem.get(getProxiedFSConf());
|
||||
Map<String, byte[]> xAttrs = fs.getXAttrs(path);
|
||||
fs.close();
|
||||
Assert.assertEquals(1, xAttrs.size());
|
||||
Assert.assertArrayEquals(value2, xAttrs.get(name2));
|
||||
assertEquals(1, xAttrs.size());
|
||||
assertArrayEquals(value2, xAttrs.get(name2));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -700,11 +714,11 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
|
||||
fs = getHttpFSFileSystem();
|
||||
List<String> names = fs.listXAttrs(path);
|
||||
Assert.assertEquals(4, names.size());
|
||||
Assert.assertTrue(names.contains(name1));
|
||||
Assert.assertTrue(names.contains(name2));
|
||||
Assert.assertTrue(names.contains(name3));
|
||||
Assert.assertTrue(names.contains(name4));
|
||||
assertEquals(4, names.size());
|
||||
assertTrue(names.contains(name1));
|
||||
assertTrue(names.contains(name2));
|
||||
assertTrue(names.contains(name3));
|
||||
assertTrue(names.contains(name4));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -715,18 +729,26 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
* @throws Exception
|
||||
*/
|
||||
private void assertSameAcls(AclStatus a, AclStatus b) throws Exception {
|
||||
Assert.assertTrue(a.getOwner().equals(b.getOwner()));
|
||||
Assert.assertTrue(a.getGroup().equals(b.getGroup()));
|
||||
Assert.assertTrue(a.isStickyBit() == b.isStickyBit());
|
||||
Assert.assertTrue(a.getEntries().size() == b.getEntries().size());
|
||||
assertTrue(a.getOwner().equals(b.getOwner()));
|
||||
assertTrue(a.getGroup().equals(b.getGroup()));
|
||||
assertTrue(a.isStickyBit() == b.isStickyBit());
|
||||
assertTrue(a.getEntries().size() == b.getEntries().size());
|
||||
for (AclEntry e : a.getEntries()) {
|
||||
Assert.assertTrue(b.getEntries().contains(e));
|
||||
assertTrue(b.getEntries().contains(e));
|
||||
}
|
||||
for (AclEntry e : b.getEntries()) {
|
||||
Assert.assertTrue(a.getEntries().contains(e));
|
||||
assertTrue(a.getEntries().contains(e));
|
||||
}
|
||||
}
|
||||
|
||||
private static void assertSameAclBit(FileSystem expected, FileSystem actual,
|
||||
Path path) throws IOException {
|
||||
FileStatus expectedFileStatus = expected.getFileStatus(path);
|
||||
FileStatus actualFileStatus = actual.getFileStatus(path);
|
||||
assertEquals(actualFileStatus.getPermission().getAclBit(),
|
||||
expectedFileStatus.getPermission().getAclBit());
|
||||
}
|
||||
|
||||
/**
|
||||
* Simple ACL tests on a file: Set an acl, add an acl, remove one acl,
|
||||
* and remove all acls.
|
||||
|
@ -755,26 +777,31 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
AclStatus proxyAclStat = proxyFs.getAclStatus(path);
|
||||
AclStatus httpfsAclStat = httpfs.getAclStatus(path);
|
||||
assertSameAcls(httpfsAclStat, proxyAclStat);
|
||||
assertSameAclBit(httpfs, proxyFs, path);
|
||||
|
||||
httpfs.setAcl(path, AclEntry.parseAclSpec(aclSet,true));
|
||||
proxyAclStat = proxyFs.getAclStatus(path);
|
||||
httpfsAclStat = httpfs.getAclStatus(path);
|
||||
assertSameAcls(httpfsAclStat, proxyAclStat);
|
||||
assertSameAclBit(httpfs, proxyFs, path);
|
||||
|
||||
httpfs.modifyAclEntries(path, AclEntry.parseAclSpec(aclUser2, true));
|
||||
proxyAclStat = proxyFs.getAclStatus(path);
|
||||
httpfsAclStat = httpfs.getAclStatus(path);
|
||||
assertSameAcls(httpfsAclStat, proxyAclStat);
|
||||
assertSameAclBit(httpfs, proxyFs, path);
|
||||
|
||||
httpfs.removeAclEntries(path, AclEntry.parseAclSpec(rmAclUser1, false));
|
||||
proxyAclStat = proxyFs.getAclStatus(path);
|
||||
httpfsAclStat = httpfs.getAclStatus(path);
|
||||
assertSameAcls(httpfsAclStat, proxyAclStat);
|
||||
assertSameAclBit(httpfs, proxyFs, path);
|
||||
|
||||
httpfs.removeAcl(path);
|
||||
proxyAclStat = proxyFs.getAclStatus(path);
|
||||
httpfsAclStat = httpfs.getAclStatus(path);
|
||||
assertSameAcls(httpfsAclStat, proxyAclStat);
|
||||
assertSameAclBit(httpfs, proxyFs, path);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -797,25 +824,46 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
AclStatus proxyAclStat = proxyFs.getAclStatus(dir);
|
||||
AclStatus httpfsAclStat = httpfs.getAclStatus(dir);
|
||||
assertSameAcls(httpfsAclStat, proxyAclStat);
|
||||
assertSameAclBit(httpfs, proxyFs, dir);
|
||||
|
||||
/* Set a default ACL on the directory */
|
||||
httpfs.setAcl(dir, (AclEntry.parseAclSpec(defUser1,true)));
|
||||
proxyAclStat = proxyFs.getAclStatus(dir);
|
||||
httpfsAclStat = httpfs.getAclStatus(dir);
|
||||
assertSameAcls(httpfsAclStat, proxyAclStat);
|
||||
assertSameAclBit(httpfs, proxyFs, dir);
|
||||
|
||||
/* Remove the default ACL */
|
||||
httpfs.removeDefaultAcl(dir);
|
||||
proxyAclStat = proxyFs.getAclStatus(dir);
|
||||
httpfsAclStat = httpfs.getAclStatus(dir);
|
||||
assertSameAcls(httpfsAclStat, proxyAclStat);
|
||||
assertSameAclBit(httpfs, proxyFs, dir);
|
||||
}
|
||||
|
||||
private void testEncryption() throws Exception {
|
||||
if (isLocalFS()) {
|
||||
return;
|
||||
}
|
||||
FileSystem proxyFs = FileSystem.get(getProxiedFSConf());
|
||||
FileSystem httpFs = getHttpFSFileSystem();
|
||||
FileStatus proxyStatus = proxyFs.getFileStatus(TestHdfsHelper
|
||||
.ENCRYPTED_FILE);
|
||||
assertTrue(proxyStatus.isEncrypted());
|
||||
FileStatus httpStatus = httpFs.getFileStatus(TestHdfsHelper
|
||||
.ENCRYPTED_FILE);
|
||||
assertTrue(httpStatus.isEncrypted());
|
||||
proxyStatus = proxyFs.getFileStatus(new Path("/"));
|
||||
httpStatus = httpFs.getFileStatus(new Path("/"));
|
||||
assertFalse(proxyStatus.isEncrypted());
|
||||
assertFalse(httpStatus.isEncrypted());
|
||||
}
|
||||
|
||||
protected enum Operation {
|
||||
GET, OPEN, CREATE, APPEND, TRUNCATE, CONCAT, RENAME, DELETE, LIST_STATUS,
|
||||
WORKING_DIRECTORY, MKDIRS, SET_TIMES, SET_PERMISSION, SET_OWNER,
|
||||
SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY, FILEACLS, DIRACLS, SET_XATTR,
|
||||
GET_XATTRS, REMOVE_XATTR, LIST_XATTRS
|
||||
GET_XATTRS, REMOVE_XATTR, LIST_XATTRS, ENCRYPTION
|
||||
}
|
||||
|
||||
private void operation(Operation op) throws Exception {
|
||||
|
@ -889,6 +937,9 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
|
|||
case LIST_XATTRS:
|
||||
testListXAttrs();
|
||||
break;
|
||||
case ENCRYPTION:
|
||||
testEncryption();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -21,10 +21,14 @@ import java.io.File;
|
|||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.FileSystemTestHelper;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.junit.Test;
|
||||
import org.junit.runners.model.FrameworkMethod;
|
||||
|
@ -129,6 +133,9 @@ public class TestHdfsHelper extends TestDirHelper {
|
|||
return new Configuration(conf);
|
||||
}
|
||||
|
||||
public static final Path ENCRYPTION_ZONE = new Path("/ez");
|
||||
public static final Path ENCRYPTED_FILE = new Path("/ez/encfile");
|
||||
|
||||
private static MiniDFSCluster MINI_DFS = null;
|
||||
|
||||
private static synchronized MiniDFSCluster startMiniHdfs(Configuration conf) throws Exception {
|
||||
|
@ -148,14 +155,28 @@ public class TestHdfsHelper extends TestDirHelper {
|
|||
conf.set("hadoop.security.authentication", "simple");
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
|
||||
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
|
||||
FileSystemTestHelper helper = new FileSystemTestHelper();
|
||||
final String jceksPath = JavaKeyStoreProvider.SCHEME_NAME + "://file" +
|
||||
new Path(helper.getTestRootDir(), "test.jks").toUri();
|
||||
conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, jceksPath);
|
||||
MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
|
||||
builder.numDataNodes(2);
|
||||
MiniDFSCluster miniHdfs = builder.build();
|
||||
FileSystem fileSystem = miniHdfs.getFileSystem();
|
||||
final String testkey = "testkey";
|
||||
DFSTestUtil.createKey(testkey, miniHdfs, conf);
|
||||
|
||||
DistributedFileSystem fileSystem = miniHdfs.getFileSystem();
|
||||
fileSystem.getClient().setKeyProvider(miniHdfs.getNameNode()
|
||||
.getNamesystem().getProvider());
|
||||
|
||||
fileSystem.mkdirs(new Path("/tmp"));
|
||||
fileSystem.mkdirs(new Path("/user"));
|
||||
fileSystem.setPermission(new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
|
||||
fileSystem.setPermission(new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
|
||||
fileSystem.mkdirs(ENCRYPTION_ZONE);
|
||||
fileSystem.createEncryptionZone(ENCRYPTION_ZONE, testkey);
|
||||
fileSystem.create(ENCRYPTED_FILE).close();
|
||||
|
||||
MINI_DFS = miniHdfs;
|
||||
}
|
||||
return MINI_DFS;
|
||||
|
|
|
@ -114,7 +114,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
|
|||
jthr = invokeMethod(env, &jVal, INSTANCE, file->file,
|
||||
"org/apache/hadoop/hdfs/client/HdfsDataInputStream",
|
||||
"getReadStatistics",
|
||||
"()Lorg/apache/hadoop/hdfs/DFSInputStream$ReadStatistics;");
|
||||
"()Lorg/apache/hadoop/hdfs/ReadStatistics;");
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
"hdfsFileGetReadStatistics: getReadStatistics failed");
|
||||
|
@ -127,7 +127,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
|
|||
goto done;
|
||||
}
|
||||
jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
|
||||
"org/apache/hadoop/hdfs/DFSInputStream$ReadStatistics",
|
||||
"org/apache/hadoop/hdfs/ReadStatistics",
|
||||
"getTotalBytesRead", "()J");
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
|
@ -137,7 +137,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
|
|||
s->totalBytesRead = jVal.j;
|
||||
|
||||
jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
|
||||
"org/apache/hadoop/hdfs/DFSInputStream$ReadStatistics",
|
||||
"org/apache/hadoop/hdfs/ReadStatistics",
|
||||
"getTotalLocalBytesRead", "()J");
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
|
@ -147,7 +147,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
|
|||
s->totalLocalBytesRead = jVal.j;
|
||||
|
||||
jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
|
||||
"org/apache/hadoop/hdfs/DFSInputStream$ReadStatistics",
|
||||
"org/apache/hadoop/hdfs/ReadStatistics",
|
||||
"getTotalShortCircuitBytesRead", "()J");
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
|
@ -156,7 +156,7 @@ int hdfsFileGetReadStatistics(hdfsFile file,
|
|||
}
|
||||
s->totalShortCircuitBytesRead = jVal.j;
|
||||
jthr = invokeMethod(env, &jVal, INSTANCE, readStats,
|
||||
"org/apache/hadoop/hdfs/DFSInputStream$ReadStatistics",
|
||||
"org/apache/hadoop/hdfs/ReadStatistics",
|
||||
"getTotalZeroCopyBytesRead", "()J");
|
||||
if (jthr) {
|
||||
ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
|
||||
|
|
|
@ -79,8 +79,6 @@ function hdfscmd_case
|
|||
balancer)
|
||||
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
|
||||
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.balancer.Balancer
|
||||
hadoop_debug "Appending HADOOP_BALANCER_OPTS onto HADOOP_OPTS"
|
||||
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_BALANCER_OPTS}"
|
||||
;;
|
||||
cacheadmin)
|
||||
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.CacheAdmin
|
||||
|
@ -103,13 +101,8 @@ function hdfscmd_case
|
|||
HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_DN_PID_DIR}"
|
||||
HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_DN_LOG_DIR}"
|
||||
|
||||
hadoop_debug "Appending HADOOP_DATANODE_OPTS onto HADOOP_OPTS"
|
||||
hadoop_debug "Appending HADOOP_DN_SECURE_EXTRA_OPTS onto HADOOP_OPTS"
|
||||
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DATANODE_OPTS} ${HADOOP_DN_SECURE_EXTRA_OPTS}"
|
||||
HADOOP_CLASSNAME="org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter"
|
||||
else
|
||||
hadoop_debug "Appending HADOOP_DATANODE_OPTS onto HADOOP_OPTS"
|
||||
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_DATANODE_OPTS}"
|
||||
HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.datanode.DataNode'
|
||||
fi
|
||||
;;
|
||||
|
@ -118,18 +111,12 @@ function hdfscmd_case
|
|||
;;
|
||||
dfs)
|
||||
HADOOP_CLASSNAME=org.apache.hadoop.fs.FsShell
|
||||
hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
|
||||
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
|
||||
;;
|
||||
dfsadmin)
|
||||
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSAdmin
|
||||
hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
|
||||
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
|
||||
;;
|
||||
diskbalancer)
|
||||
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DiskBalancer
|
||||
hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
|
||||
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
|
||||
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DiskBalancerCLI
|
||||
;;
|
||||
envvars)
|
||||
echo "JAVA_HOME='${JAVA_HOME}'"
|
||||
|
@ -144,16 +131,12 @@ function hdfscmd_case
|
|||
;;
|
||||
erasurecode)
|
||||
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.erasurecode.ECCli
|
||||
hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
|
||||
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
|
||||
;;
|
||||
fetchdt)
|
||||
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
|
||||
;;
|
||||
fsck)
|
||||
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSck
|
||||
hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
|
||||
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
|
||||
;;
|
||||
getconf)
|
||||
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.GetConf
|
||||
|
@ -163,14 +146,10 @@ function hdfscmd_case
|
|||
;;
|
||||
haadmin)
|
||||
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DFSHAAdmin
|
||||
hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
|
||||
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
|
||||
;;
|
||||
journalnode)
|
||||
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
|
||||
HADOOP_CLASSNAME='org.apache.hadoop.hdfs.qjournal.server.JournalNode'
|
||||
hadoop_debug "Appending HADOOP_JOURNALNODE_OPTS onto HADOOP_OPTS"
|
||||
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_JOURNALNODE_OPTS}"
|
||||
;;
|
||||
jmxget)
|
||||
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.JMXGet
|
||||
|
@ -181,14 +160,10 @@ function hdfscmd_case
|
|||
mover)
|
||||
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
|
||||
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.mover.Mover
|
||||
hadoop_debug "Appending HADOOP_MOVER_OPTS onto HADOOP_OPTS"
|
||||
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_MOVER_OPTS}"
|
||||
;;
|
||||
namenode)
|
||||
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
|
||||
HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.NameNode'
|
||||
hadoop_debug "Appending HADOOP_NAMENODE_OPTS onto HADOOP_OPTS"
|
||||
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NAMENODE_OPTS}"
|
||||
hadoop_add_param HADOOP_OPTS hdfs.audit.logger "-Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER}"
|
||||
;;
|
||||
nfs3)
|
||||
|
@ -201,13 +176,8 @@ function hdfscmd_case
|
|||
HADOOP_SECURE_PID_DIR="${HADOOP_SECURE_PID_DIR:-$HADOOP_SECURE_NFS3_PID_DIR}"
|
||||
HADOOP_SECURE_LOG_DIR="${HADOOP_SECURE_LOG_DIR:-$HADOOP_SECURE_NFS3_LOG_DIR}"
|
||||
|
||||
hadoop_debug "Appending HADOOP_NFS3_OPTS onto HADOOP_OPTS"
|
||||
hadoop_debug "Appending HADOOP_NFS3_SECURE_EXTRA_OPTS onto HADOOP_OPTS"
|
||||
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_OPTS} ${HADOOP_NFS3_SECURE_EXTRA_OPTS}"
|
||||
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.nfs.nfs3.PrivilegedNfsGatewayStarter
|
||||
else
|
||||
hadoop_debug "Appending HADOOP_NFS3_OPTS onto HADOOP_OPTS"
|
||||
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_NFS3_OPTS}"
|
||||
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
|
||||
fi
|
||||
;;
|
||||
|
@ -223,14 +193,10 @@ function hdfscmd_case
|
|||
portmap)
|
||||
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
|
||||
HADOOP_CLASSNAME=org.apache.hadoop.portmap.Portmap
|
||||
hadoop_debug "Appending HADOOP_PORTMAP_OPTS onto HADOOP_OPTS"
|
||||
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_PORTMAP_OPTS}"
|
||||
;;
|
||||
secondarynamenode)
|
||||
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
|
||||
HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
|
||||
hadoop_debug "Appending HADOOP_SECONDARYNAMENODE_OPTS onto HADOOP_OPTS"
|
||||
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_SECONDARYNAMENODE_OPTS}"
|
||||
hadoop_add_param HADOOP_OPTS hdfs.audit.logger "-Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER}"
|
||||
;;
|
||||
snapshotDiff)
|
||||
|
@ -245,8 +211,6 @@ function hdfscmd_case
|
|||
zkfc)
|
||||
HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
|
||||
HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.DFSZKFailoverController'
|
||||
hadoop_debug "Appending HADOOP_ZKFC_OPTS onto HADOOP_OPTS"
|
||||
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_ZKFC_OPTS}"
|
||||
;;
|
||||
*)
|
||||
HADOOP_CLASSNAME="${subcmd}"
|
||||
|
@ -282,6 +246,8 @@ fi
|
|||
HADOOP_SUBCMD=$1
|
||||
shift
|
||||
|
||||
hadoop_verify_user "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
|
||||
|
||||
HADOOP_SUBCMD_ARGS=("$@")
|
||||
|
||||
if declare -f hdfs_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then
|
||||
|
@ -291,15 +257,20 @@ else
|
|||
hdfscmd_case "${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
|
||||
fi
|
||||
|
||||
hadoop_verify_user "${HADOOP_SUBCMD}"
|
||||
hadoop_add_client_opts
|
||||
|
||||
if [[ ${HADOOP_WORKER_MODE} = true ]]; then
|
||||
hadoop_common_worker_mode_execute "${HADOOP_HDFS_HOME}/bin/hdfs" "${HADOOP_USER_PARAMS[@]}"
|
||||
exit $?
|
||||
fi
|
||||
|
||||
hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
|
||||
|
||||
if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
|
||||
HADOOP_SECURE_USER="${HADOOP_SUBCMD_SECUREUSER}"
|
||||
|
||||
hadoop_subcommand_secure_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
|
||||
|
||||
hadoop_verify_secure_prereq
|
||||
hadoop_setup_secure_service
|
||||
priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
|
||||
|
|
|
@ -26,7 +26,7 @@ function hadoop_subproject_init
|
|||
export HADOOP_HDFS_ENV_PROCESSED=true
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
# at some point in time, someone thought it would be a good idea to
|
||||
# create separate vars for every subproject. *sigh*
|
||||
# let's perform some overrides and setup some defaults for bw compat
|
||||
|
@ -42,23 +42,31 @@ function hadoop_subproject_init
|
|||
hadoop_deprecate_envvar HADOOP_HDFS_NICENESS HADOOP_NICENESS
|
||||
|
||||
hadoop_deprecate_envvar HADOOP_HDFS_STOP_TIMEOUT HADOOP_STOP_TIMEOUT
|
||||
|
||||
|
||||
hadoop_deprecate_envvar HADOOP_HDFS_PID_DIR HADOOP_PID_DIR
|
||||
|
||||
hadoop_deprecate_envvar HADOOP_HDFS_ROOT_LOGGER HADOOP_ROOT_LOGGER
|
||||
|
||||
hadoop_deprecate_envvar HADOOP_HDFS_IDENT_STRING HADOOP_IDENT_STRING
|
||||
|
||||
|
||||
hadoop_deprecate_envvar HADOOP_DN_SECURE_EXTRA_OPTS HDFS_DATANODE_SECURE_EXTRA_OPTS
|
||||
|
||||
hadoop_deprecate_envvar HADOOP_NFS3_SECURE_EXTRA_OPTS HDFS_NFS3_SECURE_EXTRA_OPTS
|
||||
|
||||
|
||||
HADOOP_HDFS_HOME="${HADOOP_HDFS_HOME:-$HADOOP_HOME}"
|
||||
|
||||
|
||||
# turn on the defaults
|
||||
export HDFS_AUDIT_LOGGER=${HDFS_AUDIT_LOGGER:-INFO,NullAppender}
|
||||
export HADOOP_NAMENODE_OPTS=${HADOOP_NAMENODE_OPTS:-"-Dhadoop.security.logger=INFO,RFAS"}
|
||||
export HADOOP_SECONDARYNAMENODE_OPTS=${HADOOP_SECONDARYNAMENODE_OPTS:-"-Dhadoop.security.logger=INFO,RFAS"}
|
||||
export HADOOP_DATANODE_OPTS=${HADOOP_DATANODE_OPTS:-"-Dhadoop.security.logger=ERROR,RFAS"}
|
||||
export HADOOP_DN_SECURE_EXTRA_OPTS=${HADOOP_DN_SECURE_EXTRA_OPTS:-"-jvm server"}
|
||||
export HADOOP_NFS3_SECURE_EXTRA_OPTS=${HADOOP_NFS3_SECURE_EXTRA_OPTS:-"-jvm server"}
|
||||
export HADOOP_PORTMAP_OPTS=${HADOOP_PORTMAP_OPTS:-"-Xmx512m"}
|
||||
export HDFS_NAMENODE_OPTS=${HDFS_NAMENODE_OPTS:-"-Dhadoop.security.logger=INFO,RFAS"}
|
||||
export HDFS_SECONDARYNAMENODE_OPTS=${HDFS_SECONDARYNAMENODE_OPTS:-"-Dhadoop.security.logger=INFO,RFAS"}
|
||||
export HDFS_DATANODE_OPTS=${HDFS_DATANODE_OPTS:-"-Dhadoop.security.logger=ERROR,RFAS"}
|
||||
export HDFS_PORTMAP_OPTS=${HDFS_PORTMAP_OPTS:-"-Xmx512m"}
|
||||
|
||||
# depending upon what is being used to start Java, these may need to be
|
||||
# set empty. (thus no colon)
|
||||
export HDFS_DATANODE_SECURE_EXTRA_OPTS=${HDFS_DATANODE_SECURE_EXTRA_OPTS-"-jvm server"}
|
||||
export HDFS_NFS3_SECURE_EXTRA_OPTS=${HDFS_NFS3_SECURE_EXTRA_OPTS-"-jvm server"}
|
||||
}
|
||||
|
||||
if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
|
||||
|
|
|
@ -419,6 +419,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
|
|||
public static final String DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_KEY =
|
||||
"dfs.namenode.read-lock-reporting-threshold-ms";
|
||||
public static final long DFS_NAMENODE_READ_LOCK_REPORTING_THRESHOLD_MS_DEFAULT = 5000L;
|
||||
// Interval during which consecutive lock warnings are suppressed
|
||||
public static final String DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY =
|
||||
"dfs.lock.suppress.warning.interval";
|
||||
public static final long DFS_LOCK_SUPPRESS_WARNING_INTERVAL_DEFAULT =
|
||||
10000; //ms
|
||||
|
||||
public static final String DFS_UPGRADE_DOMAIN_FACTOR = "dfs.namenode.upgrade.domain.factor";
|
||||
public static final int DFS_UPGRADE_DOMAIN_FACTOR_DEFAULT = DFS_REPLICATION_DEFAULT;
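Editor's note: the new interval key pairs with the InstrumentedLock class introduced in the new file below. A hedged sketch of how the two could be wired together; the class name, lock name and the 300 ms held-time threshold are illustrative, not taken from the patch:

    import java.util.concurrent.locks.Lock;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.InstrumentedLock;

    public class LockWiringSketch {
      private static final Log LOG = LogFactory.getLog(LockWiringSketch.class);

      static Lock newInstrumentedLock(Configuration conf) {
        // How often a repeated "lock held too long" warning may be logged.
        long suppressIntervalMs = conf.getLong(
            DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY,
            DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_DEFAULT);
        // Arguments: name, logger, minLoggingGapMs, lockWarningThresholdMs.
        return new InstrumentedLock("DatasetLock", LOG, suppressIntervalMs, 300L);
      }
    }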
|
||||
|
|
|
@ -0,0 +1,185 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.commons.logging.Log;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Timer;

import com.google.common.annotations.VisibleForTesting;

/**
 * This is a debugging class that can be used by callers to track
 * whether a specific lock is being held for too long and periodically
 * log a warning and stack trace, if so.
 *
 * The logged warnings are throttled so that logs are not spammed.
 *
 * A new instance of InstrumentedLock can be created for each object
 * that needs to be instrumented.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class InstrumentedLock implements Lock {

  private final Lock lock;
  private final Log logger;
  private final String name;
  private final Timer clock;

  /** Minimum gap between two lock warnings. */
  private final long minLoggingGap;
  /** Threshold for detecting long lock held time. */
  private final long lockWarningThreshold;

  // Tracking counters for lock statistics.
  private volatile long lockAcquireTimestamp;
  private final AtomicLong lastLogTimestamp;
  private final AtomicLong warningsSuppressed = new AtomicLong(0);

  /**
   * Create an instrumented lock instance which logs a warning message
   * when lock held time is above given threshold.
   *
   * @param name the identifier of the lock object
   * @param logger this class does not have its own logger, will log to the
   *               given logger instead
   * @param minLoggingGapMs the minimum time gap between two log messages,
   *                        this is to avoid spamming too many logs
   * @param lockWarningThresholdMs the time threshold to view lock held
   *                               time as being "too long"
   */
  public InstrumentedLock(String name, Log logger, long minLoggingGapMs,
      long lockWarningThresholdMs) {
    this(name, logger, new ReentrantLock(),
        minLoggingGapMs, lockWarningThresholdMs);
  }

  public InstrumentedLock(String name, Log logger, Lock lock,
      long minLoggingGapMs, long lockWarningThresholdMs) {
    this(name, logger, lock,
        minLoggingGapMs, lockWarningThresholdMs, new Timer());
  }

  @VisibleForTesting
  InstrumentedLock(String name, Log logger, Lock lock,
      long minLoggingGapMs, long lockWarningThresholdMs, Timer clock) {
    this.name = name;
    this.lock = lock;
    this.clock = clock;
    this.logger = logger;
    minLoggingGap = minLoggingGapMs;
    lockWarningThreshold = lockWarningThresholdMs;
    lastLogTimestamp = new AtomicLong(
        clock.monotonicNow() - Math.max(minLoggingGap, lockWarningThreshold));
  }

  @Override
  public void lock() {
    lock.lock();
    lockAcquireTimestamp = clock.monotonicNow();
  }

  @Override
  public void lockInterruptibly() throws InterruptedException {
    lock.lockInterruptibly();
    lockAcquireTimestamp = clock.monotonicNow();
  }

  @Override
  public boolean tryLock() {
    if (lock.tryLock()) {
      lockAcquireTimestamp = clock.monotonicNow();
      return true;
    }
    return false;
  }

  @Override
  public boolean tryLock(long time, TimeUnit unit) throws InterruptedException {
    if (lock.tryLock(time, unit)) {
      lockAcquireTimestamp = clock.monotonicNow();
      return true;
    }
    return false;
  }

  @Override
  public void unlock() {
    long localLockReleaseTime = clock.monotonicNow();
    long localLockAcquireTime = lockAcquireTimestamp;
    lock.unlock();
    check(localLockAcquireTime, localLockReleaseTime);
  }

  @Override
  public Condition newCondition() {
    return lock.newCondition();
  }

  @VisibleForTesting
  void logWarning(long lockHeldTime, long suppressed) {
    logger.warn(String.format("Lock held time above threshold: " +
        "lock identifier: %s " +
        "lockHeldTimeMs=%d ms. Suppressed %d lock warnings. " +
        "The stack trace is: %s" ,
        name, lockHeldTime, suppressed,
        StringUtils.getStackTrace(Thread.currentThread())));
  }

  /**
   * Log a warning if the lock was held for too long.
   *
   * Should be invoked by the caller immediately AFTER releasing the lock.
   *
   * @param acquireTime - timestamp just after acquiring the lock.
   * @param releaseTime - timestamp just before releasing the lock.
   */
  private void check(long acquireTime, long releaseTime) {
    if (!logger.isWarnEnabled()) {
      return;
    }

    final long lockHeldTime = releaseTime - acquireTime;
    if (lockWarningThreshold - lockHeldTime < 0) {
      long now;
      long localLastLogTs;
      do {
        now = clock.monotonicNow();
        localLastLogTs = lastLogTimestamp.get();
        long deltaSinceLastLog = now - localLastLogTs;
        // check should print log or not
        if (deltaSinceLastLog - minLoggingGap < 0) {
          warningsSuppressed.incrementAndGet();
          return;
        }
      } while (!lastLogTimestamp.compareAndSet(localLastLogTs, now));
      long suppressed = warningsSuppressed.getAndSet(0);
      logWarning(lockHeldTime, suppressed);
    }
  }

}

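The class above instruments whatever Lock an object already uses. A minimal usage sketch follows; the class name, logger, and threshold values here are illustrative assumptions, not values taken from the patch itself.

import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.InstrumentedLock;

public class InstrumentedLockExample {
  private static final Log LOG = LogFactory.getLog(InstrumentedLockExample.class);

  // Wrap a plain ReentrantLock: warn when a hold exceeds 300 ms, but emit at
  // most one warning every 10000 ms (extra warnings are counted as suppressed).
  private final Lock lock =
      new InstrumentedLock("exampleLock", LOG, new ReentrantLock(), 10000, 300);

  void doWork() {
    lock.lock();
    try {
      // critical section
    } finally {
      lock.unlock(); // hold time is measured here and may trigger a warning
    }
  }
}
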
@ -2624,7 +2624,7 @@ public class BlockManager implements BlockStatsMXBean {
} while (storageBlock != null);
}

// Iterate any remaing blocks that have not been reported and remove them
// Iterate any remaining blocks that have not been reported and remove them
while (storageBlocksIterator.hasNext()) {
toRemove.add(storageBlocksIterator.next());
}

@ -2677,7 +2677,7 @@ public class BlockManager implements BlockStatsMXBean {
corruptReplicas.isReplicaCorrupt(storedBlock, dn))) {
// Add replica if appropriate. If the replica was previously corrupt
// but now okay, it might need to be updated.
toAdd.add(new BlockInfoToAdd(storedBlock, replica));
toAdd.add(new BlockInfoToAdd(storedBlock, new Block(replica)));
}
}

@ -1179,10 +1179,11 @@ class BPServiceActor implements Runnable {
resetBlockReportTime = false;
} else {
/* say the last block report was at 8:20:14. The current report
* should have started around 9:20:14 (default 1 hour interval).
* should have started around 14:20:14 (default 6 hour interval).
* If current time is :
* 1) normal like 9:20:18, next report should be at 10:20:14
* 2) unexpected like 11:35:43, next report should be at 12:20:14
* 1) normal like 14:20:18, next report should be at 20:20:14.
* 2) unexpected like 21:35:43, next report should be at 2:20:14
* on the next day.
*/
nextBlockReportTime +=
(((monotonicNow() - nextBlockReportTime + blockReportIntervalMs) /

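The hunk above is cut off mid-expression; the rule the comment describes (advance to the next interval boundary after the previously scheduled time) can be sketched as follows. This is an illustration of the arithmetic only, with hypothetical names, not the patch's exact code.

/** Illustration only: move a scheduled time to the next interval boundary
 *  strictly after 'now', anchored at the previously scheduled time. */
static long nextBoundary(long scheduled, long now, long intervalMs) {
  long intervalsElapsed = (now - scheduled + intervalMs) / intervalMs;
  return scheduled + intervalsElapsed * intervalMs;
}
// With a 6 hour interval: scheduled 14:20:14, now 14:20:18 -> 20:20:14;
// scheduled 14:20:14, now 21:35:43 -> 2:20:14 the next day, as the comment says.
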
@ -121,17 +121,23 @@ public class DiskBalancer {
*/
public void shutdown() {
lock.lock();
boolean needShutdown = false;
try {
this.isDiskBalancerEnabled = false;
this.currentResult = Result.NO_PLAN;
if ((this.future != null) && (!this.future.isDone())) {
this.currentResult = Result.PLAN_CANCELLED;
this.blockMover.setExitFlag();
shutdownExecutor();
scheduler.shutdown();
needShutdown = true;
}
} finally {
lock.unlock();
}
// no need to hold lock while shutting down executor.
if (needShutdown) {
shutdownExecutor();
}
}

/**

@ -139,7 +145,6 @@ public class DiskBalancer {
*/
private void shutdownExecutor() {
final int secondsTowait = 10;
scheduler.shutdown();
try {
if (!scheduler.awaitTermination(secondsTowait, TimeUnit.SECONDS)) {
scheduler.shutdownNow();

@ -228,6 +233,7 @@ public class DiskBalancer {
*/
public void cancelPlan(String planID) throws DiskBalancerException {
lock.lock();
boolean needShutdown = false;
try {
checkDiskBalancerEnabled();
if (this.planID == null ||

@ -239,13 +245,18 @@ public class DiskBalancer {
DiskBalancerException.Result.NO_SUCH_PLAN);
}
if (!this.future.isDone()) {
this.blockMover.setExitFlag();
shutdownExecutor();
this.currentResult = Result.PLAN_CANCELLED;
this.blockMover.setExitFlag();
scheduler.shutdown();
needShutdown = true;
}
} finally {
lock.unlock();
}
// no need to hold lock while shutting down executor.
if (needShutdown) {
shutdownExecutor();
}
}

/**

@ -490,14 +501,11 @@ public class DiskBalancer {
public void run() {
Thread.currentThread().setName("DiskBalancerThread");
LOG.info("Executing Disk balancer plan. Plan File: {}, Plan ID: {}",
planFile, planID);
try {
for (Map.Entry<VolumePair, DiskBalancerWorkItem> entry :
workMap.entrySet()) {
blockMover.copyBlocks(entry.getKey(), entry.getValue());
}
} finally {
blockMover.setExitFlag();
planFile, planID);
for (Map.Entry<VolumePair, DiskBalancerWorkItem> entry :
workMap.entrySet()) {
blockMover.setRunnable();
blockMover.copyBlocks(entry.getKey(), entry.getValue());
}
}
});

@ -846,8 +854,8 @@ public class DiskBalancer {

if (item.getErrorCount() >= getMaxError(item)) {
item.setErrMsg("Error count exceeded.");
LOG.info("Maximum error count exceeded. Error count: {} Max error:{} "
, item.getErrorCount(), item.getMaxDiskErrors());
LOG.info("Maximum error count exceeded. Error count: {} Max error:{} ",
item.getErrorCount(), item.getMaxDiskErrors());
}

return null;

@ -951,7 +959,8 @@ public class DiskBalancer {
LOG.error("Exceeded the max error count. source {}, dest: {} " +
"error count: {}", source.getBasePath(),
dest.getBasePath(), item.getErrorCount());
break;
this.setExitFlag();
continue;
}

// Check for the block tolerance constraint.

@ -960,7 +969,8 @@ public class DiskBalancer {
"blocks.",
source.getBasePath(), dest.getBasePath(),
item.getBytesCopied(), item.getBlocksCopied());
break;
this.setExitFlag();
continue;
}

ExtendedBlock block = getNextBlock(poolIters, item);

@ -968,7 +978,8 @@ public class DiskBalancer {
if (block == null) {
LOG.error("No source blocks, exiting the copy. Source: {}, " +
"dest:{}", source.getBasePath(), dest.getBasePath());
break;
this.setExitFlag();
continue;
}

// check if someone told us exit, treat this as an interruption

@ -976,7 +987,7 @@ public class DiskBalancer {
// for the thread, since both getNextBlock and moveBlocAcrossVolume
// can take some time.
if (!shouldRun()) {
break;
continue;
}

long timeUsed;

@ -995,7 +1006,8 @@ public class DiskBalancer {
LOG.error("Destination volume: {} does not have enough space to" +
" accommodate a block. Block Size: {} Exiting from" +
" copyBlocks.", dest.getBasePath(), block.getNumBytes());
break;
this.setExitFlag();
continue;
}

LOG.debug("Moved block with size {} from {} to {}",

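The shutdown() and cancelPlan() changes above share one pattern: flip state and record a flag while the lock is held, then do the slow executor shutdown only after the lock is released. A minimal sketch of that pattern, with generic names rather than the patch's own, is shown here.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.locks.ReentrantLock;

/** Sketch of the pattern used above: decide under the lock, act outside it. */
class StoppableWorker {
  private final ReentrantLock lock = new ReentrantLock();
  private final ExecutorService executor;
  private boolean running = true;

  StoppableWorker(ExecutorService executor) {
    this.executor = executor;
  }

  void stopIfRunning() {
    boolean needShutdown = false;
    lock.lock();
    try {
      if (running) {          // cheap state checks and flag flips only
        running = false;
        needShutdown = true;
      }
    } finally {
      lock.unlock();
    }
    if (needShutdown) {
      executor.shutdownNow(); // potentially slow; done with no lock held
    }
  }
}
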
@ -40,6 +40,8 @@ import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executor;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.TimeUnit;

import javax.management.NotCompliantMBeanException;
import javax.management.ObjectName;

@ -60,6 +62,7 @@ import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.InstrumentedLock;
import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;

@ -267,6 +270,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
private final int maxDataLength;

private final AutoCloseableLock datasetLock;
private final Condition datasetLockCondition;

/**
* An FSDataset has a directory where it loads its data files.

@ -278,7 +282,15 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
this.dataStorage = storage;
this.conf = conf;
this.smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
this.datasetLock = new AutoCloseableLock();
this.datasetLock = new AutoCloseableLock(
new InstrumentedLock(getClass().getName(), LOG,
conf.getTimeDuration(
DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY,
DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS),
300));
this.datasetLockCondition = datasetLock.newCondition();

// The number of volumes required for operation is the total number
// of volumes minus the number of failed volumes we can tolerate.
volFailuresTolerated = datanode.getDnConf().getVolFailuresTolerated();

@ -515,7 +527,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
// Disable the volume from the service.
asyncDiskService.removeVolume(sd.getCurrentDir());
volumes.removeVolume(absRoot, clearFailure);
volumes.waitVolumeRemoved(5000, this);
volumes.waitVolumeRemoved(5000, datasetLockCondition);

// Removed all replica information for the blocks on the volume.
// Unlike updating the volumeMap in addVolume(), this operation does

@ -31,6 +31,8 @@ import java.util.Map;
import java.util.TreeMap;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;

@ -41,6 +43,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.Time;

@ -52,7 +55,8 @@ class FsVolumeList {
Collections.synchronizedMap(new TreeMap<String, VolumeFailureInfo>());
private final ConcurrentLinkedQueue<FsVolumeImpl> volumesBeingRemoved =
new ConcurrentLinkedQueue<>();
private Object checkDirsMutex = new Object();
private final AutoCloseableLock checkDirsLock;
private final Condition checkDirsLockCondition;

private final VolumeChoosingPolicy<FsVolumeImpl> blockChooser;
private final BlockScanner blockScanner;

@ -62,6 +66,8 @@ class FsVolumeList {
VolumeChoosingPolicy<FsVolumeImpl> blockChooser) {
this.blockChooser = blockChooser;
this.blockScanner = blockScanner;
this.checkDirsLock = new AutoCloseableLock();
this.checkDirsLockCondition = checkDirsLock.newCondition();
for (VolumeFailureInfo volumeFailureInfo: initialVolumeFailureInfos) {
volumeFailureInfos.put(volumeFailureInfo.getFailedStorageLocation(),
volumeFailureInfo);

@ -224,12 +230,12 @@ class FsVolumeList {
/**
* Calls {@link FsVolumeImpl#checkDirs()} on each volume.
*
* Use checkDirsMutext to allow only one instance of checkDirs() call
* Use {@link checkDirsLock} to allow only one instance of checkDirs() call.
*
* @return list of all the failed volumes.
*/
Set<File> checkDirs() {
synchronized(checkDirsMutex) {
try (AutoCloseableLock lock = checkDirsLock.acquire()) {
Set<File> failedVols = null;

// Make a copy of volumes for performing modification

@ -260,7 +266,7 @@ class FsVolumeList {
+ " failure volumes.");
}

waitVolumeRemoved(5000, checkDirsMutex);
waitVolumeRemoved(5000, checkDirsLockCondition);
return failedVols;
}
}

@ -271,13 +277,13 @@ class FsVolumeList {
*
* @param sleepMillis interval to recheck.
*/
void waitVolumeRemoved(int sleepMillis, Object monitor) {
void waitVolumeRemoved(int sleepMillis, Condition condition) {
while (!checkVolumesRemoved()) {
if (FsDatasetImpl.LOG.isDebugEnabled()) {
FsDatasetImpl.LOG.debug("Waiting for volume reference to be released.");
}
try {
monitor.wait(sleepMillis);
condition.await(sleepMillis, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
FsDatasetImpl.LOG.info("Thread interrupted when waiting for "
+ "volume reference to be released.");

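The FsVolumeList change swaps a bare Object monitor (synchronized/wait) for an explicit lock plus a Condition, which is what the newCondition() calls above hand back; a Condition must be awaited while its lock is held. A hedged sketch of that pattern in plain java.util.concurrent terms (class and field names are illustrative, not the patch's):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

/** Sketch: timed wait on a Condition instead of Object.wait(). */
class RemovalWaiter {
  private final ReentrantLock lock = new ReentrantLock();
  private final Condition removed = lock.newCondition();
  private boolean allRemoved = false;

  void waitRemoved(long sleepMillis) throws InterruptedException {
    lock.lock();
    try {
      while (!allRemoved) {
        // wakes up early when signalled, otherwise after the timeout
        removed.await(sleepMillis, TimeUnit.MILLISECONDS);
      }
    } finally {
      lock.unlock();
    }
  }

  void markRemoved() {
    lock.lock();
    try {
      allRemoved = true;
      removed.signalAll();
    } finally {
      lock.unlock();
    }
  }
}
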
@ -38,7 +38,8 @@ public class DiskBalancerException extends IOException {
INVALID_MOVE,
INTERNAL_ERROR,
NO_SUCH_PLAN,
UNKNOWN_KEY
UNKNOWN_KEY,
INVALID_NODE,
}

private final Result result;

@ -29,7 +29,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
import org.apache.hadoop.hdfs.tools.DiskBalancer;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;

import java.io.IOException;

@ -44,9 +44,10 @@ public class CancelCommand extends Command {
*/
public CancelCommand(Configuration conf) {
super(conf);
addValidCommandParameters(DiskBalancer.CANCEL, "Cancels a running plan.");
addValidCommandParameters(DiskBalancer.NODE, "Node to run the command " +
"against in node:port format.");
addValidCommandParameters(DiskBalancerCLI.CANCEL,
"Cancels a running plan.");
addValidCommandParameters(DiskBalancerCLI.NODE,
"Node to run the command against in node:port format.");
}

/**

@ -57,20 +58,20 @@ public class CancelCommand extends Command {
@Override
public void execute(CommandLine cmd) throws Exception {
LOG.info("Executing \"Cancel plan\" command.");
Preconditions.checkState(cmd.hasOption(DiskBalancer.CANCEL));
verifyCommandOptions(DiskBalancer.CANCEL, cmd);
Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.CANCEL));
verifyCommandOptions(DiskBalancerCLI.CANCEL, cmd);

// We can cancel a plan using datanode address and plan ID
// that you can read from a datanode using queryStatus
if(cmd.hasOption(DiskBalancer.NODE)) {
String nodeAddress = cmd.getOptionValue(DiskBalancer.NODE);
String planHash = cmd.getOptionValue(DiskBalancer.CANCEL);
if(cmd.hasOption(DiskBalancerCLI.NODE)) {
String nodeAddress = cmd.getOptionValue(DiskBalancerCLI.NODE);
String planHash = cmd.getOptionValue(DiskBalancerCLI.CANCEL);
cancelPlanUsingHash(nodeAddress, planHash);
} else {
// Or you can cancel a plan using the plan file. If the user
// points us to the plan file, we can compute the hash as well as read
// the address of the datanode from the plan file.
String planFile = cmd.getOptionValue(DiskBalancer.CANCEL);
String planFile = cmd.getOptionValue(DiskBalancerCLI.CANCEL);
Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
"Invalid plan file specified.");
String planData = null;

@ -142,6 +143,6 @@ public class CancelCommand extends Command {
HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.printHelp("hdfs diskbalancer -cancel <planFile> | -cancel " +
"<planID> -node <hostname>",
header, DiskBalancer.getCancelOptions(), footer);
header, DiskBalancerCLI.getCancelOptions(), footer);
}
}

@ -37,13 +37,14 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerConstants;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
import org.apache.hadoop.hdfs.tools.DiskBalancer;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.codehaus.jackson.map.ObjectMapper;

@ -256,6 +257,7 @@ public abstract class Command extends Configured {
throws IOException {
Set<String> nodeNames = null;
List<DiskBalancerDataNode> nodeList = Lists.newArrayList();
List<String> invalidNodeList = Lists.newArrayList();

if ((listArg == null) || listArg.isEmpty()) {
return nodeList;

@ -269,10 +271,22 @@ public abstract class Command extends Configured {

if (node != null) {
nodeList.add(node);
} else {
invalidNodeList.add(name);
}
}
}

if (!invalidNodeList.isEmpty()) {
String invalidNodes = StringUtils.join(invalidNodeList.toArray(), ",");
String warnMsg = String.format(
"The node(s) '%s' not found. "
+ "Please make sure that '%s' exists in the cluster.",
invalidNodes, invalidNodes);
throw new DiskBalancerException(warnMsg,
DiskBalancerException.Result.INVALID_NODE);
}

return nodeList;
}

@ -418,7 +432,7 @@ public abstract class Command extends Configured {
* @return default top number of nodes.
*/
protected int getDefaultTop() {
return DiskBalancer.DEFAULT_TOP;
return DiskBalancerCLI.DEFAULT_TOP;
}

/**

@ -437,7 +451,7 @@ public abstract class Command extends Configured {
protected int parseTopNodes(final CommandLine cmd, final StrBuilder result) {
String outputLine = "";
int nodes = 0;
final String topVal = cmd.getOptionValue(DiskBalancer.TOP);
final String topVal = cmd.getOptionValue(DiskBalancerCLI.TOP);
if (StringUtils.isBlank(topVal)) {
outputLine = String.format(
"No top limit specified, using default top value %d.",

@ -29,7 +29,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
import org.apache.hadoop.hdfs.tools.DiskBalancer;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;

import java.io.IOException;

@ -46,7 +46,8 @@ public class ExecuteCommand extends Command {
*/
public ExecuteCommand(Configuration conf) {
super(conf);
addValidCommandParameters(DiskBalancer.EXECUTE, "Executes a given plan.");
addValidCommandParameters(DiskBalancerCLI.EXECUTE,
"Executes a given plan.");
}

/**

@ -57,10 +58,10 @@ public class ExecuteCommand extends Command {
@Override
public void execute(CommandLine cmd) throws Exception {
LOG.info("Executing \"execute plan\" command");
Preconditions.checkState(cmd.hasOption(DiskBalancer.EXECUTE));
verifyCommandOptions(DiskBalancer.EXECUTE, cmd);
Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.EXECUTE));
verifyCommandOptions(DiskBalancerCLI.EXECUTE, cmd);

String planFile = cmd.getOptionValue(DiskBalancer.EXECUTE);
String planFile = cmd.getOptionValue(DiskBalancerCLI.EXECUTE);
Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
"Invalid plan file specified.");

@ -88,7 +89,7 @@ public class ExecuteCommand extends Command {
String planHash = DigestUtils.shaHex(planData);
try {
// TODO : Support skipping date check.
dataNode.submitDiskBalancerPlan(planHash, DiskBalancer.PLAN_VERSION,
dataNode.submitDiskBalancerPlan(planHash, DiskBalancerCLI.PLAN_VERSION,
planFile, planData, false);
} catch (DiskBalancerException ex) {
LOG.error("Submitting plan on {} failed. Result: {}, Message: {}",

@ -111,6 +112,6 @@ public class ExecuteCommand extends Command {

HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.printHelp("hdfs diskbalancer -execute <planfile>",
header, DiskBalancer.getExecuteOptions(), footer);
header, DiskBalancerCLI.getExecuteOptions(), footer);
}
}

@ -23,7 +23,7 @@ import com.google.common.base.Preconditions;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.HelpFormatter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DiskBalancer;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;

/**
* Help Command prints out detailed help about each command.

@ -37,7 +37,7 @@ public class HelpCommand extends Command {
*/
public HelpCommand(Configuration conf) {
super(conf);
addValidCommandParameters(DiskBalancer.HELP, "Help Command");
addValidCommandParameters(DiskBalancerCLI.HELP, "Help Command");
}

/**

@ -53,9 +53,9 @@ public class HelpCommand extends Command {
return;
}

Preconditions.checkState(cmd.hasOption(DiskBalancer.HELP));
verifyCommandOptions(DiskBalancer.HELP, cmd);
String helpCommand = cmd.getOptionValue(DiskBalancer.HELP);
Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.HELP));
verifyCommandOptions(DiskBalancerCLI.HELP, cmd);
String helpCommand = cmd.getOptionValue(DiskBalancerCLI.HELP);
if (helpCommand == null || helpCommand.isEmpty()) {
this.printHelp();
return;

@ -65,19 +65,19 @@ public class HelpCommand extends Command {
helpCommand = helpCommand.toLowerCase();
Command command = null;
switch (helpCommand) {
case DiskBalancer.PLAN:
case DiskBalancerCLI.PLAN:
command = new PlanCommand(getConf());
break;
case DiskBalancer.EXECUTE:
case DiskBalancerCLI.EXECUTE:
command = new ExecuteCommand(getConf());
break;
case DiskBalancer.QUERY:
case DiskBalancerCLI.QUERY:
command = new QueryCommand(getConf());
break;
case DiskBalancer.CANCEL:
case DiskBalancerCLI.CANCEL:
command = new CancelCommand(getConf());
break;
case DiskBalancer.REPORT:
case DiskBalancerCLI.REPORT:
command = new ReportCommand(getConf(), null);
break;
default:

@ -102,7 +102,7 @@ public class HelpCommand extends Command {

HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.printHelp("hdfs diskbalancer [command] [options]",
header, DiskBalancer.getHelpOptions(), "");
header, DiskBalancerCLI.getHelpOptions(), "");
}

@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
.DiskBalancerDataNode;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;
import org.apache.hadoop.hdfs.tools.DiskBalancer;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
import java.nio.charset.StandardCharsets;
import java.util.List;

@ -53,18 +53,18 @@ public class PlanCommand extends Command {
this.thresholdPercentage = 1;
this.bandwidth = 0;
this.maxError = 0;
addValidCommandParameters(DiskBalancer.OUTFILE, "Output directory in " +
addValidCommandParameters(DiskBalancerCLI.OUTFILE, "Output directory in " +
"HDFS. The generated plan will be written to a file in this " +
"directory.");
addValidCommandParameters(DiskBalancer.BANDWIDTH, "Maximum Bandwidth to " +
"be used while copying.");
addValidCommandParameters(DiskBalancer.THRESHOLD, "Percentage skew that " +
"we tolerate before diskbalancer starts working.");
addValidCommandParameters(DiskBalancer.MAXERROR, "Max errors to tolerate " +
"between 2 disks");
addValidCommandParameters(DiskBalancer.VERBOSE, "Run plan command in " +
addValidCommandParameters(DiskBalancerCLI.BANDWIDTH,
"Maximum Bandwidth to be used while copying.");
addValidCommandParameters(DiskBalancerCLI.THRESHOLD,
"Percentage skew that we tolerate before diskbalancer starts working.");
addValidCommandParameters(DiskBalancerCLI.MAXERROR,
"Max errors to tolerate between 2 disks");
addValidCommandParameters(DiskBalancerCLI.VERBOSE, "Run plan command in " +
"verbose mode.");
addValidCommandParameters(DiskBalancer.PLAN, "Plan Command");
addValidCommandParameters(DiskBalancerCLI.PLAN, "Plan Command");
}

/**

@ -77,36 +77,37 @@ public class PlanCommand extends Command {
@Override
public void execute(CommandLine cmd) throws Exception {
LOG.debug("Processing Plan Command.");
Preconditions.checkState(cmd.hasOption(DiskBalancer.PLAN));
verifyCommandOptions(DiskBalancer.PLAN, cmd);
Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.PLAN));
verifyCommandOptions(DiskBalancerCLI.PLAN, cmd);

if (cmd.getOptionValue(DiskBalancer.PLAN) == null) {
if (cmd.getOptionValue(DiskBalancerCLI.PLAN) == null) {
throw new IllegalArgumentException("A node name is required to create a" +
" plan.");
}

if (cmd.hasOption(DiskBalancer.BANDWIDTH)) {
this.bandwidth = Integer.parseInt(cmd.getOptionValue(DiskBalancer
if (cmd.hasOption(DiskBalancerCLI.BANDWIDTH)) {
this.bandwidth = Integer.parseInt(cmd.getOptionValue(DiskBalancerCLI
.BANDWIDTH));
}

if (cmd.hasOption(DiskBalancer.MAXERROR)) {
this.maxError = Integer.parseInt(cmd.getOptionValue(DiskBalancer
if (cmd.hasOption(DiskBalancerCLI.MAXERROR)) {
this.maxError = Integer.parseInt(cmd.getOptionValue(DiskBalancerCLI
.MAXERROR));
}

readClusterInfo(cmd);
String output = null;
if (cmd.hasOption(DiskBalancer.OUTFILE)) {
output = cmd.getOptionValue(DiskBalancer.OUTFILE);
if (cmd.hasOption(DiskBalancerCLI.OUTFILE)) {
output = cmd.getOptionValue(DiskBalancerCLI.OUTFILE);
}
setOutputPath(output);

// -plan nodename is the command line argument.
DiskBalancerDataNode node = getNode(cmd.getOptionValue(DiskBalancer.PLAN));
DiskBalancerDataNode node =
getNode(cmd.getOptionValue(DiskBalancerCLI.PLAN));
if (node == null) {
throw new IllegalArgumentException("Unable to find the specified node. " +
cmd.getOptionValue(DiskBalancer.PLAN));
cmd.getOptionValue(DiskBalancerCLI.PLAN));
}
this.thresholdPercentage = getThresholdPercentage(cmd);

@ -124,8 +125,8 @@ public class PlanCommand extends Command {

try (FSDataOutputStream beforeStream = create(String.format(
DiskBalancer.BEFORE_TEMPLATE,
cmd.getOptionValue(DiskBalancer.PLAN)))) {
DiskBalancerCLI.BEFORE_TEMPLATE,
cmd.getOptionValue(DiskBalancerCLI.PLAN)))) {
beforeStream.write(getCluster().toJson()
.getBytes(StandardCharsets.UTF_8));
}

@ -133,17 +134,17 @@ public class PlanCommand extends Command {
if (plan != null && plan.getVolumeSetPlans().size() > 0) {
LOG.info("Writing plan to : {}", getOutputPath());
try (FSDataOutputStream planStream = create(String.format(
DiskBalancer.PLAN_TEMPLATE,
cmd.getOptionValue(DiskBalancer.PLAN)))) {
DiskBalancerCLI.PLAN_TEMPLATE,
cmd.getOptionValue(DiskBalancerCLI.PLAN)))) {
planStream.write(plan.toJson().getBytes(StandardCharsets.UTF_8));
}
} else {
LOG.info("No plan generated. DiskBalancing not needed for node: {} " +
"threshold used: {}", cmd.getOptionValue(DiskBalancer.PLAN),
"threshold used: {}", cmd.getOptionValue(DiskBalancerCLI.PLAN),
this.thresholdPercentage);
}

if (cmd.hasOption(DiskBalancer.VERBOSE) && plans.size() > 0) {
if (cmd.hasOption(DiskBalancerCLI.VERBOSE) && plans.size() > 0) {
printToScreen(plans);
}
}

@ -162,8 +163,8 @@ public class PlanCommand extends Command {
" will balance the data.";

HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.printHelp("hdfs diskbalancer -plan " +
"<hostname> [options]", header, DiskBalancer.getPlanOptions(), footer);
helpFormatter.printHelp("hdfs diskbalancer -plan <hostname> [options]",
header, DiskBalancerCLI.getPlanOptions(), footer);
}

/**

@ -174,8 +175,8 @@ public class PlanCommand extends Command {
*/
private double getThresholdPercentage(CommandLine cmd) {
Double value = 0.0;
if (cmd.hasOption(DiskBalancer.THRESHOLD)) {
value = Double.parseDouble(cmd.getOptionValue(DiskBalancer.THRESHOLD));
if (cmd.hasOption(DiskBalancerCLI.THRESHOLD)) {
value = Double.parseDouble(cmd.getOptionValue(DiskBalancerCLI.THRESHOLD));
}

if ((value <= 0.0) || (value > 100.0)) {

@ -27,7 +27,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
import org.apache.hadoop.hdfs.tools.DiskBalancer;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
import org.apache.hadoop.net.NetUtils;

/**

@ -42,9 +42,10 @@ public class QueryCommand extends Command {
*/
public QueryCommand(Configuration conf) {
super(conf);
addValidCommandParameters(DiskBalancer.QUERY, "Queries the status of disk" +
" plan running on a given datanode.");
addValidCommandParameters(DiskBalancer.VERBOSE, "Prints verbose results.");
addValidCommandParameters(DiskBalancerCLI.QUERY,
"Queries the status of disk plan running on a given datanode.");
addValidCommandParameters(DiskBalancerCLI.VERBOSE,
"Prints verbose results.");
}

/**

@ -55,9 +56,9 @@ public class QueryCommand extends Command {
@Override
public void execute(CommandLine cmd) throws Exception {
LOG.info("Executing \"query plan\" command.");
Preconditions.checkState(cmd.hasOption(DiskBalancer.QUERY));
verifyCommandOptions(DiskBalancer.QUERY, cmd);
String nodeName = cmd.getOptionValue(DiskBalancer.QUERY);
Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.QUERY));
verifyCommandOptions(DiskBalancerCLI.QUERY, cmd);
String nodeName = cmd.getOptionValue(DiskBalancerCLI.QUERY);
Preconditions.checkNotNull(nodeName);
nodeName = nodeName.trim();
String nodeAddress = nodeName;

@ -79,7 +80,7 @@ public class QueryCommand extends Command {
workStatus.getPlanID(),
workStatus.getResult().toString());

if (cmd.hasOption(DiskBalancer.VERBOSE)) {
if (cmd.hasOption(DiskBalancerCLI.VERBOSE)) {
System.out.printf("%s", workStatus.currentStateString());
}
} catch (DiskBalancerException ex) {

@ -101,6 +102,6 @@ public class QueryCommand extends Command {

HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.printHelp("hdfs diskbalancer -query <hostname> [options]",
header, DiskBalancer.getQueryOptions(), footer);
header, DiskBalancerCLI.getQueryOptions(), footer);
}
}

@ -27,10 +27,11 @@ import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.lang.text.StrBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
import org.apache.hadoop.hdfs.tools.DiskBalancer;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;

import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;

@ -52,15 +53,15 @@ public class ReportCommand extends Command {
super(conf);
this.out = out;

addValidCommandParameters(DiskBalancer.REPORT,
addValidCommandParameters(DiskBalancerCLI.REPORT,
"Report volume information of nodes.");

String desc = String.format(
"Top number of nodes to be processed. Default: %d", getDefaultTop());
addValidCommandParameters(DiskBalancer.TOP, desc);
addValidCommandParameters(DiskBalancerCLI.TOP, desc);

desc = String.format("Print out volume information for a DataNode.");
addValidCommandParameters(DiskBalancer.NODE, desc);
desc = String.format("Print out volume information for DataNode(s).");
addValidCommandParameters(DiskBalancerCLI.NODE, desc);
}

@Override

@ -69,8 +70,8 @@ public class ReportCommand extends Command {
String outputLine = "Processing report command";
recordOutput(result, outputLine);

Preconditions.checkState(cmd.hasOption(DiskBalancer.REPORT));
verifyCommandOptions(DiskBalancer.REPORT, cmd);
Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.REPORT));
verifyCommandOptions(DiskBalancerCLI.REPORT, cmd);
readClusterInfo(cmd);

final String nodeFormat =

@ -81,9 +82,9 @@ public class ReportCommand extends Command {
"[%s: volume-%s] - %.2f used: %d/%d, %.2f free: %d/%d, "
+ "isFailed: %s, isReadOnly: %s, isSkip: %s, isTransient: %s.";

if (cmd.hasOption(DiskBalancer.NODE)) {
if (cmd.hasOption(DiskBalancerCLI.NODE)) {
/*
* Reporting volume information for a specific DataNode
* Reporting volume information for specific DataNode(s)
*/
handleNodeReport(cmd, result, nodeFormatWithoutSequence, volumeFormat);

@ -133,84 +134,100 @@ public class ReportCommand extends Command {
final String nodeFormat, final String volumeFormat) throws Exception {
String outputLine = "";
/*
* get value that identifies a DataNode from command line, it could be UUID,
* IP address or host name.
* get value that identifies DataNode(s) from command line, it could be
* UUID, IP address or host name.
*/
final String nodeVal = cmd.getOptionValue(DiskBalancer.NODE);
final String nodeVal = cmd.getOptionValue(DiskBalancerCLI.NODE);

if (StringUtils.isBlank(nodeVal)) {
outputLine = "The value for '-node' is neither specified or empty.";
recordOutput(result, outputLine);
} else {
/*
* Reporting volume information for a specific DataNode
* Reporting volume information for specific DataNode(s)
*/
outputLine = String.format(
"Reporting volume information for DataNode '%s'.", nodeVal);
"Reporting volume information for DataNode(s) '%s'.", nodeVal);
recordOutput(result, outputLine);

final String trueStr = "True";
final String falseStr = "False";
DiskBalancerDataNode dbdn = getNode(nodeVal);
// get storage path of datanode
populatePathNames(dbdn);
List<DiskBalancerDataNode> dbdns = Lists.newArrayList();
try {
dbdns = getNodes(nodeVal);
} catch (DiskBalancerException e) {
// If there are some invalid nodes contained in nodeVal,
// the exception will be thrown.
recordOutput(result, e.getMessage());
return;
}

if (dbdn == null) {
outputLine = String.format(
"Can't find a DataNode that matches '%s'.", nodeVal);
recordOutput(result, outputLine);
} else {
result.appendln(String.format(nodeFormat,
dbdn.getDataNodeName(),
dbdn.getDataNodeIP(),
dbdn.getDataNodePort(),
dbdn.getDataNodeUUID(),
dbdn.getVolumeCount(),
dbdn.getNodeDataDensity()));

List<String> volumeList = Lists.newArrayList();
for (DiskBalancerVolumeSet vset : dbdn.getVolumeSets().values()) {
for (DiskBalancerVolume vol : vset.getVolumes()) {
volumeList.add(String.format(volumeFormat,
vol.getStorageType(),
vol.getPath(),
vol.getUsedRatio(),
vol.getUsed(),
vol.getCapacity(),
vol.getFreeRatio(),
vol.getFreeSpace(),
vol.getCapacity(),
vol.isFailed() ? trueStr : falseStr,
vol.isReadOnly() ? trueStr : falseStr,
vol.isSkip() ? trueStr : falseStr,
vol.isTransient() ? trueStr : falseStr));
}
if (!dbdns.isEmpty()) {
for (DiskBalancerDataNode node : dbdns) {
recordNodeReport(result, node, nodeFormat, volumeFormat);
result.append(System.lineSeparator());
}

Collections.sort(volumeList);
result.appendln(
StringUtils.join(volumeList.toArray(), System.lineSeparator()));
}
}
}

/**
* Put node report lines to string buffer.
*/
private void recordNodeReport(StrBuilder result, DiskBalancerDataNode dbdn,
final String nodeFormat, final String volumeFormat) throws Exception {
final String trueStr = "True";
final String falseStr = "False";

// get storage path of datanode
populatePathNames(dbdn);
result.appendln(String.format(nodeFormat,
dbdn.getDataNodeName(),
dbdn.getDataNodeIP(),
dbdn.getDataNodePort(),
dbdn.getDataNodeUUID(),
dbdn.getVolumeCount(),
dbdn.getNodeDataDensity()));

List<String> volumeList = Lists.newArrayList();
for (DiskBalancerVolumeSet vset : dbdn.getVolumeSets().values()) {
for (DiskBalancerVolume vol : vset.getVolumes()) {
volumeList.add(String.format(volumeFormat,
vol.getStorageType(),
vol.getPath(),
vol.getUsedRatio(),
vol.getUsed(),
vol.getCapacity(),
vol.getFreeRatio(),
vol.getFreeSpace(),
vol.getCapacity(),
vol.isFailed() ? trueStr : falseStr,
vol.isReadOnly() ? trueStr: falseStr,
vol.isSkip() ? trueStr : falseStr,
vol.isTransient() ? trueStr : falseStr));
}
}

Collections.sort(volumeList);
result.appendln(
StringUtils.join(volumeList.toArray(), System.lineSeparator()));
}

/**
* Prints the help message.
*/
@Override
public void printHelp() {
String header = "Report command reports the volume information of a given" +
" datanode, or prints out the list of nodes that will benefit from " +
"running disk balancer. Top defaults to " + getDefaultTop();
String header = "Report command reports the volume information of given" +
" datanode(s), or prints out the list of nodes that will benefit " +
"from running disk balancer. Top defaults to " + getDefaultTop();
String footer = ". E.g.:\n"
+ "hdfs diskbalancer -report\n"
+ "hdfs diskbalancer -report -top 5\n"
+ "hdfs diskbalancer -report "
+ "-node {DataNodeID | IP | Hostname}";
+ "-node [<DataNodeID|IP|Hostname>,...]";

HelpFormatter helpFormatter = new HelpFormatter();
helpFormatter.printHelp("hdfs diskbalancer -fs http://namenode.uri " +
"-report [options]",
header, DiskBalancer.getReportOptions(), footer);
header, DiskBalancerCLI.getReportOptions(), footer);
}
}

@ -168,6 +168,7 @@ import org.apache.hadoop.ipc.RetryCache.CacheEntry;
import org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.ipc.WritableRpcEngine;
import org.apache.hadoop.ipc.RefreshRegistry;
import org.apache.hadoop.ipc.RefreshResponse;
import org.apache.hadoop.net.Node;

@ -316,6 +317,8 @@ public class NameNodeRpcServer implements NamenodeProtocols {
new TraceAdminProtocolServerSideTranslatorPB(this);
BlockingService traceAdminService = TraceAdminService
.newReflectiveBlockingService(traceAdminXlator);

WritableRpcEngine.ensureInitialized();

InetSocketAddress serviceRpcAddr = nn.getServiceRpcServerAddress(conf);
if (serviceRpcAddr != null) {

@ -50,7 +50,7 @@ import java.io.PrintStream;
* At very high level diskbalancer computes a set of moves that will make disk
* utilization equal and then those moves are executed by the datanode.
*/
public class DiskBalancer extends Configured implements Tool {
public class DiskBalancerCLI extends Configured implements Tool {
/**
* Computes a plan for a given set of nodes.
*/

@ -126,7 +126,7 @@ public class DiskBalancer extends Configured implements Tool {
*/
public static final String PLAN_TEMPLATE = "%s.plan.json";
private static final Logger LOG =
LoggerFactory.getLogger(DiskBalancer.class);
LoggerFactory.getLogger(DiskBalancerCLI.class);

private static final Options PLAN_OPTIONS = new Options();
private static final Options EXECUTE_OPTIONS = new Options();

@ -140,7 +140,7 @@ public class DiskBalancer extends Configured implements Tool {
*
* @param conf
*/
public DiskBalancer(Configuration conf) {
public DiskBalancerCLI(Configuration conf) {
super(conf);
}

@ -151,7 +151,7 @@ public class DiskBalancer extends Configured implements Tool {
* @throws Exception
*/
public static void main(String[] argv) throws Exception {
DiskBalancer shell = new DiskBalancer(new HdfsConfiguration());
DiskBalancerCLI shell = new DiskBalancerCLI(new HdfsConfiguration());
int res = 0;
try {
res = ToolRunner.run(shell, argv);

@ -446,27 +446,27 @@ public class DiskBalancer extends Configured implements Tool {
private int dispatch(CommandLine cmd, Options opts, final PrintStream out)
throws Exception {
Command currentCommand = null;
if (cmd.hasOption(DiskBalancer.PLAN)) {
if (cmd.hasOption(DiskBalancerCLI.PLAN)) {
currentCommand = new PlanCommand(getConf());
}

if (cmd.hasOption(DiskBalancer.EXECUTE)) {
if (cmd.hasOption(DiskBalancerCLI.EXECUTE)) {
currentCommand = new ExecuteCommand(getConf());
}

if (cmd.hasOption(DiskBalancer.QUERY)) {
if (cmd.hasOption(DiskBalancerCLI.QUERY)) {
currentCommand = new QueryCommand(getConf());
}

if (cmd.hasOption(DiskBalancer.CANCEL)) {
if (cmd.hasOption(DiskBalancerCLI.CANCEL)) {
currentCommand = new CancelCommand(getConf());
}

if (cmd.hasOption(DiskBalancer.REPORT)) {
if (cmd.hasOption(DiskBalancerCLI.REPORT)) {
currentCommand = new ReportCommand(getConf(), out);
}

if (cmd.hasOption(DiskBalancer.HELP)) {
if (cmd.hasOption(DiskBalancerCLI.HELP)) {
currentCommand = new HelpCommand(getConf());
}

@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSImageUtil;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.FileSummary;
import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection;
import org.apache.hadoop.util.LimitInputStream;
import org.apache.hadoop.util.StringUtils;

import com.google.common.base.Preconditions;

@ -75,11 +76,14 @@ final class FileDistributionCalculator {
private long totalSpace;
private long maxFileSize;

private boolean formatOutput = false;

FileDistributionCalculator(Configuration conf, long maxSize, int steps,
PrintStream out) {
boolean formatOutput, PrintStream out) {
this.conf = conf;
this.maxSize = maxSize == 0 ? MAX_SIZE_DEFAULT : maxSize;
this.steps = steps == 0 ? INTERVAL_DEFAULT : steps;
this.formatOutput = formatOutput;
this.out = out;
long numIntervals = this.maxSize / this.steps;
// avoid OutOfMemoryError when allocating an array

@ -148,10 +152,20 @@ final class FileDistributionCalculator {

private void output() {
// write the distribution into the output file
out.print("Size\tNumFiles\n");
out.print((formatOutput ? "Size Range" : "Size") + "\tNumFiles\n");
for (int i = 0; i < distribution.length; i++) {
if (distribution[i] != 0) {
out.print(((long) i * steps) + "\t" + distribution[i]);
if (formatOutput) {
out.print((i == 0 ? "[" : "(")
+ StringUtils.byteDesc(((long) (i == 0 ? 0 : i - 1) * steps))
+ ", "
+ StringUtils.byteDesc((long)
(i == distribution.length - 1 ? maxFileSize : i * steps))
+ "]\t" + distribution[i]);
} else {
out.print(((long) i * steps) + "\t" + distribution[i]);
}

out.print('\n');
}
}

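As an illustration of the output() change above (values here are made up, not taken from the patch): with the default 2 MB step, a non-empty bucket other than the first or last prints in the formatted branch as a byte range followed by a tab and the count, roughly "(2 MB, 4 MB]" and "12", while the unformatted branch prints the raw upper byte offset for the same bucket, e.g. "4194304" and "12". The header switches from "Size" to "Size Range" accordingly.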
@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.tools.offlineImageViewer;
import java.io.IOException;
import java.util.LinkedList;

import org.apache.hadoop.util.StringUtils;

/**
* File size distribution visitor.
*

@ -67,6 +69,7 @@ class FileDistributionVisitor extends TextWriterImageVisitor {
private FileContext current;

private boolean inInode = false;
private boolean formatOutput = false;

/**
* File or directory information.

@ -78,12 +81,12 @@ class FileDistributionVisitor extends TextWriterImageVisitor {
int replication;
}

public FileDistributionVisitor(String filename,
long maxSize,
int step) throws IOException {
public FileDistributionVisitor(String filename, long maxSize, int step,
boolean formatOutput) throws IOException {
super(filename, false);
this.maxSize = (maxSize == 0 ? MAX_SIZE_DEFAULT : maxSize);
this.step = (step == 0 ? INTERVAL_DEFAULT : step);
this.formatOutput = formatOutput;
long numIntervals = this.maxSize / this.step;
if(numIntervals >= Integer.MAX_VALUE)
throw new IOException("Too many distribution intervals " + numIntervals);

@ -113,9 +116,22 @@ class FileDistributionVisitor extends TextWriterImageVisitor {

private void output() throws IOException {
// write the distribution into the output file
write("Size\tNumFiles\n");
for(int i = 0; i < distribution.length; i++)
write(((long)i * step) + "\t" + distribution[i] + "\n");
write((formatOutput ? "Size Range" : "Size") + "\tNumFiles\n");
for (int i = 0; i < distribution.length; i++) {
if (distribution[i] > 0) {
if (formatOutput) {
write((i == 0 ? "[" : "(")
+ StringUtils.byteDesc(((long) (i == 0 ? 0 : i - 1) * step))
+ ", "
+ StringUtils.byteDesc((long)
(i == distribution.length - 1 ? maxFileSize : i * step))
+ "]\t"
+ distribution[i] + "\n");
} else {
write(((long) i * step) + "\t" + distribution[i] + "\n");
}
}
}
System.out.println("totalFiles = " + totalFiles);
System.out.println("totalDirectories = " + totalDirectories);
System.out.println("totalBlocks = " + totalBlocks);

@ -46,61 +46,63 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.PositionTrackingIn
public class OfflineImageViewer {
public static final Log LOG = LogFactory.getLog(OfflineImageViewer.class);

private final static String usage =
"Usage: bin/hdfs oiv_legacy [OPTIONS] -i INPUTFILE -o OUTPUTFILE\n" +
"Offline Image Viewer\n" +
"View a Hadoop fsimage INPUTFILE using the specified PROCESSOR,\n" +
"saving the results in OUTPUTFILE.\n" +
"\n" +
"The oiv utility will attempt to parse correctly formed image files\n" +
"and will abort fail with mal-formed image files.\n" +
"\n" +
"The tool works offline and does not require a running cluster in\n" +
"order to process an image file.\n" +
"\n" +
"The following image processors are available:\n" +
" * Ls: The default image processor generates an lsr-style listing\n" +
" of the files in the namespace, with the same fields in the same\n" +
" order. Note that in order to correctly determine file sizes,\n" +
" this formatter cannot skip blocks and will override the\n" +
" -skipBlocks option.\n" +
" * Indented: This processor enumerates over all of the elements in\n" +
" the fsimage file, using levels of indentation to delineate\n" +
" sections within the file.\n" +
" * Delimited: Generate a text file with all of the elements common\n" +
" to both inodes and inodes-under-construction, separated by a\n" +
" delimiter. The default delimiter is \u0001, though this may be\n" +
" changed via the -delimiter argument. This processor also overrides\n" +
" the -skipBlocks option for the same reason as the Ls processor\n" +
" * XML: This processor creates an XML document with all elements of\n" +
" the fsimage enumerated, suitable for further analysis by XML\n" +
" tools.\n" +
" * FileDistribution: This processor analyzes the file size\n" +
" distribution in the image.\n" +
" -maxSize specifies the range [0, maxSize] of file sizes to be\n" +
" analyzed (128GB by default).\n" +
" -step defines the granularity of the distribution. (2MB by default)\n" +
" * NameDistribution: This processor analyzes the file names\n" +
" in the image and prints total number of file names and how frequently\n" +
" file names are reused.\n" +
"\n" +
"Required command line arguments:\n" +
"-i,--inputFile <arg> FSImage file to process.\n" +
"-o,--outputFile <arg> Name of output file. If the specified\n" +
" file exists, it will be overwritten.\n" +
"\n" +
"Optional command line arguments:\n" +
"-p,--processor <arg> Select which type of processor to apply\n" +
" against image file." +
" (Ls|XML|Delimited|Indented|FileDistribution).\n" +
"-h,--help Display usage information and exit\n" +
"-printToScreen For processors that write to a file, also\n" +
" output to screen. On large image files this\n" +
" will dramatically increase processing time.\n" +
"-skipBlocks Skip inodes' blocks information. May\n" +
" significantly decrease output.\n" +
" (default = false).\n" +
"-delimiter <arg> Delimiting string to use with Delimited processor\n";
private final static String usage =
"Usage: bin/hdfs oiv_legacy [OPTIONS] -i INPUTFILE -o OUTPUTFILE\n"
+ "Offline Image Viewer\n"
+ "View a Hadoop fsimage INPUTFILE using the specified PROCESSOR,\n"
+ "saving the results in OUTPUTFILE.\n"
+ "\n"
+ "The oiv utility will attempt to parse correctly formed image files\n"
+ "and will abort fail with mal-formed image files.\n"
+ "\n"
+ "The tool works offline and does not require a running cluster in\n"
+ "order to process an image file.\n"
+ "\n"
+ "The following image processors are available:\n"
+ " * Ls: The default image processor generates an lsr-style listing\n"
+ " of the files in the namespace, with the same fields in the same\n"
+ " order. Note that in order to correctly determine file sizes,\n"
+ " this formatter cannot skip blocks and will override the\n"
+ " -skipBlocks option.\n"
+ " * Indented: This processor enumerates over all of the elements in\n"
+ " the fsimage file, using levels of indentation to delineate\n"
+ " sections within the file.\n"
+ " * Delimited: Generate a text file with all of the elements common\n"
+ " to both inodes and inodes-under-construction, separated by a\n"
+ " delimiter. The default delimiter is \u0001, though this may be\n"
+ " changed via the -delimiter argument. This processor also overrides\n"
+ " the -skipBlocks option for the same reason as the Ls processor\n"
+ " * XML: This processor creates an XML document with all elements of\n"
+ " the fsimage enumerated, suitable for further analysis by XML\n"
+ " tools.\n"
+ " * FileDistribution: This processor analyzes the file size\n"
+ " distribution in the image.\n"
+ " -maxSize specifies the range [0, maxSize] of file sizes to be\n"
+ " analyzed (128GB by default).\n"
+ " -step defines the granularity of the distribution. (2MB by default)\n"
+ " -format formats the output result in a human-readable fashion\n"
+ " rather than a number of bytes. (false by default)\n"
+ " * NameDistribution: This processor analyzes the file names\n"
+ " in the image and prints total number of file names and how frequently\n
|
||||
+ " file names are reused.\n"
|
||||
+ "\n"
|
||||
+ "Required command line arguments:\n"
|
||||
+ "-i,--inputFile <arg> FSImage file to process.\n"
|
||||
+ "-o,--outputFile <arg> Name of output file. If the specified\n"
|
||||
+ " file exists, it will be overwritten.\n"
|
||||
+ "\n"
|
||||
+ "Optional command line arguments:\n"
|
||||
+ "-p,--processor <arg> Select which type of processor to apply\n"
|
||||
+ " against image file."
|
||||
+ " (Ls|XML|Delimited|Indented|FileDistribution).\n"
|
||||
+ "-h,--help Display usage information and exit\n"
|
||||
+ "-printToScreen For processors that write to a file, also\n"
|
||||
+ " output to screen. On large image files this\n"
|
||||
+ " will dramatically increase processing time.\n"
|
||||
+ "-skipBlocks Skip inodes' blocks information. May\n"
|
||||
+ " significantly decrease output.\n"
|
||||
+ " (default = false).\n"
|
||||
+ "-delimiter <arg> Delimiting string to use with Delimited processor\n";
|
||||
|
||||
private final boolean skipBlocks;
|
||||
private final String inputFile;
|
||||
|
@@ -188,6 +190,7 @@ public class OfflineImageViewer {
    options.addOption("h", "help", false, "");
    options.addOption("maxSize", true, "");
    options.addOption("step", true, "");
    options.addOption("format", false, "");
    options.addOption("skipBlocks", false, "");
    options.addOption("printToScreen", false, "");
    options.addOption("delimiter", true, "");

@@ -253,7 +256,8 @@ public class OfflineImageViewer {
    } else if (processor.equals("FileDistribution")) {
      long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
      int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
      v = new FileDistributionVisitor(outputFile, maxSize, step);
      boolean formatOutput = cmd.hasOption("format");
      v = new FileDistributionVisitor(outputFile, maxSize, step, formatOutput);
    } else if (processor.equals("NameDistribution")) {
      v = new NameDistributionVisitor(outputFile, printToScreen);
    } else {

@@ -67,6 +67,8 @@ public class OfflineImageViewerPB {
      + " -maxSize specifies the range [0, maxSize] of file sizes to be\n"
      + " analyzed (128GB by default).\n"
      + " -step defines the granularity of the distribution. (2MB by default)\n"
      + " -format formats the output result in a human-readable fashion\n"
      + " rather than a number of bytes. (false by default)\n"
      + " * Web: Run a viewer to expose read-only WebHDFS API.\n"
      + " -addr specifies the address to listen. (localhost:5978 by default)\n"
      + " * Delimited (experimental): Generate a text file with all of the elements common\n"

@@ -111,6 +113,7 @@ public class OfflineImageViewerPB {
    options.addOption("h", "help", false, "");
    options.addOption("maxSize", true, "");
    options.addOption("step", true, "");
    options.addOption("format", false, "");
    options.addOption("addr", true, "");
    options.addOption("delimiter", true, "");
    options.addOption("t", "temp", true, "");

@ -172,43 +175,44 @@ public class OfflineImageViewerPB {
|
|||
try (PrintStream out = outputFile.equals("-") ?
|
||||
System.out : new PrintStream(outputFile, "UTF-8")) {
|
||||
switch (processor) {
|
||||
case "FileDistribution":
|
||||
long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
|
||||
int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
|
||||
new FileDistributionCalculator(conf, maxSize, step, out).visit(
|
||||
new RandomAccessFile(inputFile, "r"));
|
||||
break;
|
||||
case "XML":
|
||||
new PBImageXmlWriter(conf, out).visit(
|
||||
new RandomAccessFile(inputFile, "r"));
|
||||
break;
|
||||
case "ReverseXML":
|
||||
try {
|
||||
OfflineImageReconstructor.run(inputFile, outputFile);
|
||||
} catch (Exception e) {
|
||||
System.err.println("OfflineImageReconstructor failed: " +
|
||||
e.getMessage());
|
||||
e.printStackTrace(System.err);
|
||||
System.exit(1);
|
||||
}
|
||||
break;
|
||||
case "Web":
|
||||
String addr = cmd.getOptionValue("addr", "localhost:5978");
|
||||
try (WebImageViewer viewer = new WebImageViewer(
|
||||
NetUtils.createSocketAddr(addr))) {
|
||||
viewer.start(inputFile);
|
||||
}
|
||||
break;
|
||||
case "Delimited":
|
||||
try (PBImageDelimitedTextWriter writer =
|
||||
new PBImageDelimitedTextWriter(out, delimiter, tempPath)) {
|
||||
writer.visit(new RandomAccessFile(inputFile, "r"));
|
||||
}
|
||||
break;
|
||||
default:
|
||||
System.err.println("Invalid processor specified : " + processor);
|
||||
printUsage();
|
||||
return -1;
|
||||
case "FileDistribution":
|
||||
long maxSize = Long.parseLong(cmd.getOptionValue("maxSize", "0"));
|
||||
int step = Integer.parseInt(cmd.getOptionValue("step", "0"));
|
||||
boolean formatOutput = cmd.hasOption("format");
|
||||
new FileDistributionCalculator(conf, maxSize, step, formatOutput, out)
|
||||
.visit(new RandomAccessFile(inputFile, "r"));
|
||||
break;
|
||||
case "XML":
|
||||
new PBImageXmlWriter(conf, out).visit(new RandomAccessFile(inputFile,
|
||||
"r"));
|
||||
break;
|
||||
case "ReverseXML":
|
||||
try {
|
||||
OfflineImageReconstructor.run(inputFile, outputFile);
|
||||
} catch (Exception e) {
|
||||
System.err.println("OfflineImageReconstructor failed: "
|
||||
+ e.getMessage());
|
||||
e.printStackTrace(System.err);
|
||||
System.exit(1);
|
||||
}
|
||||
break;
|
||||
case "Web":
|
||||
String addr = cmd.getOptionValue("addr", "localhost:5978");
|
||||
try (WebImageViewer viewer =
|
||||
new WebImageViewer(NetUtils.createSocketAddr(addr))) {
|
||||
viewer.start(inputFile);
|
||||
}
|
||||
break;
|
||||
case "Delimited":
|
||||
try (PBImageDelimitedTextWriter writer =
|
||||
new PBImageDelimitedTextWriter(out, delimiter, tempPath)) {
|
||||
writer.visit(new RandomAccessFile(inputFile, "r"));
|
||||
}
|
||||
break;
|
||||
default:
|
||||
System.err.println("Invalid processor specified : " + processor);
|
||||
printUsage();
|
||||
return -1;
|
||||
}
|
||||
return 0;
|
||||
} catch (EOFException e) {
|
||||
|
|
|
@@ -163,7 +163,7 @@
  <name>dfs.namenode.http-bind-host</name>
  <value></value>
  <description>
    The actual adress the HTTP server will bind to. If this optional address
    The actual address the HTTP server will bind to. If this optional address
    is set, it overrides only the hostname portion of dfs.namenode.http-address.
    It can also be specified per name node or name service for HA/Federation.
    This is useful for making the name node HTTP server listen on all

@@ -243,7 +243,7 @@
  <name>dfs.namenode.https-bind-host</name>
  <value></value>
  <description>
    The actual adress the HTTPS server will bind to. If this optional address
    The actual address the HTTPS server will bind to. If this optional address
    is set, it overrides only the hostname portion of dfs.namenode.https-address.
    It can also be specified per name node or name service for HA/Federation.
    This is useful for making the name node HTTPS server listen on all

@@ -650,7 +650,7 @@

<property>
  <name>dfs.blockreport.initialDelay</name>
  <value>0</value>
  <value>0s</value>
  <description>
    Delay for first block report in seconds. Support multiple time unit
    suffix(case insensitive), as described in dfs.heartbeat.interval.

@@ -694,7 +694,7 @@

<property>
  <name>dfs.datanode.directoryscan.interval</name>
  <value>21600</value>
  <value>21600s</value>
  <description>Interval in seconds for Datanode to scan data directories and
    reconcile the difference between blocks in memory and on the disk.
    Support multiple time unit suffix(case insensitive), as described

@@ -732,7 +732,7 @@

<property>
  <name>dfs.heartbeat.interval</name>
  <value>3</value>
  <value>3s</value>
  <description>
    Determines datanode heartbeat interval in seconds.
    Can use the following suffix (case insensitive):

@@ -942,7 +942,7 @@

<property>
  <name>dfs.namenode.decommission.interval</name>
  <value>30</value>
  <value>30s</value>
  <description>Namenode periodicity in seconds to check if decommission is
    complete. Support multiple time unit suffix(case insensitive), as described
    in dfs.heartbeat.interval.

@@ -973,7 +973,7 @@

<property>
  <name>dfs.namenode.replication.interval</name>
  <value>3</value>
  <value>3s</value>
  <description>The periodicity in seconds with which the namenode computes
    replication work for datanodes. Support multiple time unit suffix(case insensitive),
    as described in dfs.heartbeat.interval.

@@ -1071,7 +1071,7 @@

<property>
  <name>dfs.namenode.checkpoint.period</name>
  <value>3600</value>
  <value>3600s</value>
  <description>
    The number of seconds between two periodic checkpoints.
    Support multiple time unit suffix(case insensitive), as described

@@ -1090,7 +1090,7 @@

<property>
  <name>dfs.namenode.checkpoint.check.period</name>
  <value>60</value>
  <value>60s</value>
  <description>The SecondaryNameNode and CheckpointNode will poll the NameNode
    every 'dfs.namenode.checkpoint.check.period' seconds to query the number
    of uncheckpointed transactions. Support multiple time unit suffix(case insensitive),

@@ -1433,7 +1433,7 @@

<property>
  <name>dfs.client.datanode-restart.timeout</name>
  <value>30</value>
  <value>30s</value>
  <description>
    Expert only. The time to wait, in seconds, from reception of an
    datanode shutdown notification for quick restart, until declaring

@@ -1502,7 +1502,7 @@

<property>
  <name>dfs.ha.log-roll.period</name>
  <value>120</value>
  <value>120s</value>
  <description>
    How often, in seconds, the StandbyNode should ask the active to
    roll edit logs. Since the StandbyNode only reads from finalized

@@ -1516,7 +1516,7 @@

<property>
  <name>dfs.ha.tail-edits.period</name>
  <value>60</value>
  <value>60s</value>
  <description>
    How often, in seconds, the StandbyNode should check for new
    finalized log segments in the shared edits log.

@@ -2950,7 +2950,7 @@

<property>
  <name>dfs.datanode.bp-ready.timeout</name>
  <value>20</value>
  <value>20s</value>
  <description>
    The maximum wait time for datanode to be ready before failing the
    received request. Setting this to 0 fails requests right away if the

@@ -4273,4 +4273,12 @@
    a plan.
  </description>
</property>

<property>
  <name>dfs.lock.suppress.warning.interval</name>
  <value>10s</value>
  <description>Instrumentation reporting long critical sections will suppress
    consecutive warnings within this interval.</description>
</property>

</configuration>

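The properties above only change the shipped defaults to carry an explicit seconds suffix; as a quick sanity check, the effective value of any of these keys can be read back with `hdfs getconf`. A minimal sketch, assuming an otherwise unconfigured cluster:

    # Print the effective value of the new lock-warning interval (10s default per this change)
    hdfs getconf -confKey dfs.lock.suppress.warning.interval
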
@@ -89,7 +89,7 @@ Note 2: For the erasure coded files with striping layout, the suitable storage p

When a file or directory is created, its storage policy is *unspecified*. The storage policy can be specified using the "[`storagepolicies -setStoragePolicy`](#Set_Storage_Policy)" command. The effective storage policy of a file or directory is resolved by the following rules.

1. If the file or directory is specificed with a storage policy, return it.
1. If the file or directory is specified with a storage policy, return it.

2. For an unspecified file or directory, if it is the root directory, return the *default storage policy*. Otherwise, return its parent's effective storage policy.

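A hedged example of assigning a policy so that rule 1 applies (the path is illustrative; `COLD` is one of the built-in policies):

    # Explicitly set a storage policy on a directory; path is illustrative
    hdfs storagepolicies -setStoragePolicy -path /user/alice/archive -policy COLD
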
@@ -239,6 +239,7 @@ Usage: `hdfs oiv [OPTIONS] -i INPUT_FILE`
| `-addr` *address* | Specify the address(host:port) to listen. (localhost:5978 by default). This option is used with Web processor. |
| `-maxSize` *size* | Specify the range [0, maxSize] of file sizes to be analyzed in bytes (128GB by default). This option is used with FileDistribution processor. |
| `-step` *size* | Specify the granularity of the distribution in bytes (2MB by default). This option is used with FileDistribution processor. |
| `-format` | Format the output result in a human-readable fashion rather than a number of bytes. (false by default). This option is used with FileDistribution processor. |
| `-delimiter` *arg* | Delimiting string to use with Delimited processor. |
| `-t`,`--temp` *temporary dir* | Use temporary dir to cache intermediate result to generate Delimited outputs. If not set, Delimited processor constructs the namespace in memory before outputting text. |
| `-h`,`--help` | Display the tool usage and help information and exit. |

@@ -260,6 +261,9 @@ Usage: `hdfs oiv_legacy [OPTIONS] -i INPUT_FILE -o OUTPUT_FILE`
| COMMAND\_OPTION | Description |
|:---- |:---- |
| `-p`\|`--processor` *processor* | Specify the image processor to apply against the image file. Valid options are Ls (default), XML, Delimited, Indented, and FileDistribution. |
| `-maxSize` *size* | Specify the range [0, maxSize] of file sizes to be analyzed in bytes (128GB by default). This option is used with FileDistribution processor. |
| `-step` *size* | Specify the granularity of the distribution in bytes (2MB by default). This option is used with FileDistribution processor. |
| `-format` | Format the output result in a human-readable fashion rather than a number of bytes. (false by default). This option is used with FileDistribution processor. |
| `-skipBlocks` | Do not enumerate individual blocks within files. This may save processing time and outfile file space on namespaces with very large files. The Ls processor reads the blocks to correctly determine file sizes and ignores this option. |
| `-printToScreen` | Pipe output of processor to console as well as specified file. On extremely large namespaces, this may increase processing time by an order of magnitude. |
| `-delimiter` *arg* | When used in conjunction with the Delimited processor, replaces the default tab delimiter with the string specified by *arg*. |

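A hedged illustration of the new `-format` flag combined with the existing FileDistribution options; the fsimage and output paths are hypothetical, and the `-maxSize`/`-step` values simply restate the documented defaults:

    # Human-readable file-size distribution from an fsimage; paths are illustrative
    hdfs oiv -p FileDistribution -maxSize 137438953472 -step 2097152 -format \
        -i /tmp/fsimage_0000000000000000024 -o /tmp/fsimage-distribution.txt
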
@@ -102,9 +102,9 @@ or
Plan ID can be read from datanode using query command.

### Report
Report command provides detailed report about a node.
Report command provides detailed report about node(s).

`hdfs diskbalancer -fs http://namenode.uri -report -node {DataNodeID | IP | Hostname}`
`hdfs diskbalancer -fs http://namenode.uri -report -node [<DataNodeID|IP|Hostname>,...]`


Settings

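A minimal sketch of the updated multi-node form; the hostnames are illustrative and the comma-separated list follows the revised usage line above:

    # Report on several datanodes in one call; hostnames are illustrative
    hdfs diskbalancer -fs http://namenode.uri -report -node dn1.example.com,dn2.example.com
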
@@ -87,7 +87,7 @@ In order to deploy an HA cluster, you should prepare the following:

* **NameNode machines** - the machines on which you run the Active and Standby NameNodes should have equivalent hardware to each other, and equivalent hardware to what would be used in a non-HA cluster.

* **Shared storage** - you will need to have a shared directory which the NameNode machines have read/write access to. Typically this is a remote filer which supports NFS and is mounted on each of the NameNode machines. Currently only a single shared edits directory is supported. Thus, the availability of the system is limited by the availability of this shared edits directory, and therefore in order to remove all single points of failure there needs to be redundancy for the shared edits directory. Specifically, multiple network paths to the storage, and redundancy in the storage itself (disk, network, and power). Beacuse of this, it is recommended that the shared storage server be a high-quality dedicated NAS appliance rather than a simple Linux server.
* **Shared storage** - you will need to have a shared directory which the NameNode machines have read/write access to. Typically this is a remote filer which supports NFS and is mounted on each of the NameNode machines. Currently only a single shared edits directory is supported. Thus, the availability of the system is limited by the availability of this shared edits directory, and therefore in order to remove all single points of failure there needs to be redundancy for the shared edits directory. Specifically, multiple network paths to the storage, and redundancy in the storage itself (disk, network, and power). Because of this, it is recommended that the shared storage server be a high-quality dedicated NAS appliance rather than a simple Linux server.

Note that, in an HA cluster, the Standby NameNodes also perform checkpoints of the namespace state, and thus it is not necessary to run a Secondary NameNode, CheckpointNode, or BackupNode in an HA cluster. In fact, to do so would be an error. This also allows one who is reconfiguring a non-HA-enabled HDFS cluster to be HA-enabled to reuse the hardware which they had previously dedicated to the Secondary NameNode.

@@ -137,7 +137,7 @@ The order in which you set these configurations is unimportant, but the values y
* **dfs.namenode.rpc-address.[nameservice ID].[name node ID]** - the fully-qualified RPC address for each NameNode to listen on

    For both of the previously-configured NameNode IDs, set the full address and
    IPC port of the NameNode processs. Note that this results in two separate
    IPC port of the NameNode process. Note that this results in two separate
    configuration options. For example:

        <property>

@@ -50,10 +50,13 @@ The Offline Image Viewer provides several output processors:
   ..., s[n-1], maxSize], and the processor calculates how many files
   in the system fall into each segment [s[i-1], s[i]). Note that
   files larger than maxSize always fall into the very last segment.
   The output file is formatted as a tab separated two column table:
   Size and NumFiles. Where Size represents the start of the segment,
   By default, the output file is formatted as a tab separated two column
   table: Size and NumFiles. Where Size represents the start of the segment,
   and numFiles is the number of files form the image which size falls
   in this segment.
   in this segment. By specifying the option -format, the output file will be
   formatted in a human-readable fashion rather than a number of bytes that
   showed in Size column. In addition, the Size column will be changed to the
   Size Range column.

4. Delimited (experimental): Generate a text file with all of the elements
   common to both inodes and inodes-under-construction, separated by a

@@ -150,6 +153,7 @@ Options
| `-addr` *address* | Specify the address(host:port) to listen. (localhost:5978 by default). This option is used with Web processor. |
| `-maxSize` *size* | Specify the range [0, maxSize] of file sizes to be analyzed in bytes (128GB by default). This option is used with FileDistribution processor. |
| `-step` *size* | Specify the granularity of the distribution in bytes (2MB by default). This option is used with FileDistribution processor. |
| `-format` | Format the output result in a human-readable fashion rather than a number of bytes. (false by default). This option is used with FileDistribution processor. |
| `-delimiter` *arg* | Delimiting string to use with Delimited processor. |
| `-t`\|`--temp` *temporary dir* | Use temporary dir to cache intermediate result to generate Delimited outputs. If not set, Delimited processor constructs the namespace in memory before outputting text. |
| `-h`\|`--help` | Display the tool usage and help information and exit. |

@@ -181,6 +185,9 @@ Due to the internal layout changes introduced by the ProtocolBuffer-based fsimag
| `-i`\|`--inputFile` *input file* | Specify the input fsimage file to process. Required. |
| `-o`\|`--outputFile` *output file* | Specify the output filename, if the specified output processor generates one. If the specified file already exists, it is silently overwritten. Required. |
| `-p`\|`--processor` *processor* | Specify the image processor to apply against the image file. Valid options are Ls (default), XML, Delimited, Indented, and FileDistribution. |
| `-maxSize` *size* | Specify the range [0, maxSize] of file sizes to be analyzed in bytes (128GB by default). This option is used with FileDistribution processor. |
| `-step` *size* | Specify the granularity of the distribution in bytes (2MB by default). This option is used with FileDistribution processor. |
| `-format` | Format the output result in a human-readable fashion rather than a number of bytes. (false by default). This option is used with FileDistribution processor. |
| `-skipBlocks` | Do not enumerate individual blocks within files. This may save processing time and outfile file space on namespaces with very large files. The Ls processor reads the blocks to correctly determine file sizes and ignores this option. |
| `-printToScreen` | Pipe output of processor to console as well as specified file. On extremely large namespaces, this may increase processing time by an order of magnitude. |
| `-delimiter` *arg* | When used in conjunction with the Delimited processor, replaces the default tab delimiter with the string specified by *arg*. |

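For the legacy viewer, a hedged sketch of the same `-format` flag; the input and output paths are hypothetical:

    # Legacy oiv with the new human-readable FileDistribution output; paths are illustrative
    hdfs oiv_legacy -p FileDistribution -format -i /tmp/fsimage -o /tmp/fsimage-distribution.txt
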
@@ -86,7 +86,7 @@ The solution is to have separate setting for server endpoints to force binding t
  <name>dfs.namenode.http-bind-host</name>
  <value>0.0.0.0</value>
  <description>
    The actual adress the HTTP server will bind to. If this optional address
    The actual address the HTTP server will bind to. If this optional address
    is set, it overrides only the hostname portion of dfs.namenode.http-address.
    It can also be specified per name node or name service for HA/Federation.
    This is useful for making the name node HTTP server listen on all

@@ -98,7 +98,7 @@ The solution is to have separate setting for server endpoints to force binding t
  <name>dfs.namenode.https-bind-host</name>
  <value>0.0.0.0</value>
  <description>
    The actual adress the HTTPS server will bind to. If this optional address
    The actual address the HTTPS server will bind to. If this optional address
    is set, it overrides only the hostname portion of dfs.namenode.https-address.
    It can also be specified per name node or name service for HA/Federation.
    This is useful for making the name node HTTPS server listen on all

@@ -148,7 +148,7 @@ It's strongly recommended for the users to update a few configuration properties
  characters. The machine name format can be a single host, a "*", a Java regular expression, or an IPv4 address. The access
  privilege uses rw or ro to specify read/write or read-only access of the machines to exports. If the access privilege is not provided, the default is read-only. Entries are separated by ";".
  For example: "192.168.0.0/22 rw ; \\\\w\*\\\\.example\\\\.com ; host1.test.org ro;". Only the NFS gateway needs to restart after
  this property is updated. Note that, here Java regular expression is differnt with the regrulation expression used in
  this property is updated. Note that, here Java regular expression is different with the regulation expression used in
  Linux NFS export table, such as, using "\\\\w\*\\\\.example\\\\.com" instead of "\*.example.com", "192\\\\.168\\\\.0\\\\.(11|22)"
  instead of "192.168.0.[11|22]" and so on.

@@ -183,7 +183,7 @@ It's strongly recommended for the users to update a few configuration properties
  </property>

* JVM and log settings. You can export JVM settings (e.g., heap size and GC log) in
  HADOOP\_NFS3\_OPTS. More NFS related settings can be found in hadoop-env.sh.
  HDFS\_NFS3\_OPTS. More NFS related settings can be found in hadoop-env.sh.
  To get NFS debug trace, you can edit the log4j.property file
  to add the following. Note, debug trace, especially for ONCRPC, can be very verbose.

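A minimal sketch of exporting JVM settings through the renamed variable; the heap size and GC-log path are illustrative values, not recommendations:

    # In hadoop-env.sh: illustrative JVM options for the NFS3 gateway
    export HDFS_NFS3_OPTS="-Xmx2g -Xloggc:${HADOOP_LOG_DIR}/nfs3-gc.log"
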
@@ -143,7 +143,7 @@ Hence on Cluster X, where the `core-site.xml` is set to make the default fs to u

### Pathname Usage Best Practices

When one is within a cluster, it is recommended to use the pathname of type (1) above instead of a fully qualified URI like (2). Futher, applications should not use the knowledge of the mount points and use a path like `hdfs://namenodeContainingUserDirs:port/joe/foo/bar` to refer to a file in a particular namenode. One should use `/user/joe/foo/bar` instead.
When one is within a cluster, it is recommended to use the pathname of type (1) above instead of a fully qualified URI like (2). Further, applications should not use the knowledge of the mount points and use a path like `hdfs://namenodeContainingUserDirs:port/joe/foo/bar` to refer to a file in a particular namenode. One should use `/user/joe/foo/bar` instead.

### Renaming Pathnames Across Namespaces

@@ -57,7 +57,8 @@ import static org.junit.Assert.assertTrue;

public class TestDFSStripedInputStream {

  public static final Log LOG = LogFactory.getLog(TestDFSStripedInputStream.class);
  public static final Log LOG =
      LogFactory.getLog(TestDFSStripedInputStream.class);

  private MiniDFSCluster cluster;
  private Configuration conf = new Configuration();

@ -272,12 +273,16 @@ public class TestDFSStripedInputStream {
|
|||
// |10 |
|
||||
done += in.read(0, readBuffer, 0, delta);
|
||||
assertEquals(delta, done);
|
||||
assertArrayEquals(Arrays.copyOf(expected, done),
|
||||
Arrays.copyOf(readBuffer, done));
|
||||
// both head and trail cells are partial
|
||||
// |c_0 |c_1 |c_2 |c_3 |c_4 |c_5 |
|
||||
// |256K - 10|missing|256K|256K|256K - 10|not in range|
|
||||
done += in.read(delta, readBuffer, delta,
|
||||
CELLSIZE * (DATA_BLK_NUM - 1) - 2 * delta);
|
||||
assertEquals(CELLSIZE * (DATA_BLK_NUM - 1) - delta, done);
|
||||
assertArrayEquals(Arrays.copyOf(expected, done),
|
||||
Arrays.copyOf(readBuffer, done));
|
||||
// read the rest
|
||||
done += in.read(done, readBuffer, done, readSize - done);
|
||||
assertEquals(readSize, done);
|
||||
|
@ -291,8 +296,8 @@ public class TestDFSStripedInputStream {
|
|||
testStatefulRead(true, true);
|
||||
}
|
||||
|
||||
private void testStatefulRead(boolean useByteBuffer, boolean cellMisalignPacket)
|
||||
throws Exception {
|
||||
private void testStatefulRead(boolean useByteBuffer,
|
||||
boolean cellMisalignPacket) throws Exception {
|
||||
final int numBlocks = 2;
|
||||
final int fileSize = numBlocks * BLOCK_GROUP_SIZE;
|
||||
if (cellMisalignPacket) {
|
||||
|
@ -302,7 +307,8 @@ public class TestDFSStripedInputStream {
|
|||
}
|
||||
DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
|
||||
NUM_STRIPE_PER_BLOCK, false);
|
||||
LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(filePath.toString(), 0, fileSize);
|
||||
LocatedBlocks lbs = fs.getClient().namenode.
|
||||
getBlockLocations(filePath.toString(), 0, fileSize);
|
||||
|
||||
assert lbs.getLocatedBlocks().size() == numBlocks;
|
||||
for (LocatedBlock lb : lbs.getLocatedBlocks()) {
|
||||
|
@ -360,4 +366,111 @@ public class TestDFSStripedInputStream {
|
|||
}
|
||||
fs.delete(filePath, true);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testStatefulReadWithDNFailure() throws Exception {
|
||||
final int numBlocks = 4;
|
||||
final int failedDNIdx = DATA_BLK_NUM - 1;
|
||||
DFSTestUtil.createStripedFile(cluster, filePath, null, numBlocks,
|
||||
NUM_STRIPE_PER_BLOCK, false);
|
||||
LocatedBlocks lbs = fs.getClient().namenode.getBlockLocations(
|
||||
filePath.toString(), 0, BLOCK_GROUP_SIZE);
|
||||
|
||||
assert lbs.get(0) instanceof LocatedStripedBlock;
|
||||
LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
|
||||
for (int i = 0; i < DATA_BLK_NUM + PARITY_BLK_NUM; i++) {
|
||||
Block blk = new Block(bg.getBlock().getBlockId() + i,
|
||||
NUM_STRIPE_PER_BLOCK * CELLSIZE,
|
||||
bg.getBlock().getGenerationStamp());
|
||||
blk.setGenerationStamp(bg.getBlock().getGenerationStamp());
|
||||
cluster.injectBlocks(i, Arrays.asList(blk),
|
||||
bg.getBlock().getBlockPoolId());
|
||||
}
|
||||
DFSStripedInputStream in =
|
||||
new DFSStripedInputStream(fs.getClient(), filePath.toString(), false,
|
||||
ecPolicy, null);
|
||||
int readSize = BLOCK_GROUP_SIZE;
|
||||
byte[] readBuffer = new byte[readSize];
|
||||
byte[] expected = new byte[readSize];
|
||||
/** A variation of {@link DFSTestUtil#fillExpectedBuf} for striped blocks */
|
||||
for (int i = 0; i < NUM_STRIPE_PER_BLOCK; i++) {
|
||||
for (int j = 0; j < DATA_BLK_NUM; j++) {
|
||||
for (int k = 0; k < CELLSIZE; k++) {
|
||||
int posInBlk = i * CELLSIZE + k;
|
||||
int posInFile = i * CELLSIZE * DATA_BLK_NUM + j * CELLSIZE + k;
|
||||
expected[posInFile] = SimulatedFSDataset.simulatedByte(
|
||||
new Block(bg.getBlock().getBlockId() + j), posInBlk);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ErasureCoderOptions coderOptions = new ErasureCoderOptions(
|
||||
DATA_BLK_NUM, PARITY_BLK_NUM);
|
||||
RawErasureDecoder rawDecoder = CodecUtil.createRawDecoder(conf,
|
||||
ecPolicy.getCodecName(), coderOptions);
|
||||
|
||||
// Update the expected content for decoded data
|
||||
int[] missingBlkIdx = new int[PARITY_BLK_NUM];
|
||||
for (int i = 0; i < missingBlkIdx.length; i++) {
|
||||
if (i == 0) {
|
||||
missingBlkIdx[i] = failedDNIdx;
|
||||
} else {
|
||||
missingBlkIdx[i] = DATA_BLK_NUM + i;
|
||||
}
|
||||
}
|
||||
cluster.stopDataNode(failedDNIdx);
|
||||
for (int i = 0; i < NUM_STRIPE_PER_BLOCK; i++) {
|
||||
byte[][] decodeInputs = new byte[DATA_BLK_NUM + PARITY_BLK_NUM][CELLSIZE];
|
||||
byte[][] decodeOutputs = new byte[missingBlkIdx.length][CELLSIZE];
|
||||
for (int j = 0; j < DATA_BLK_NUM; j++) {
|
||||
int posInBuf = i * CELLSIZE * DATA_BLK_NUM + j * CELLSIZE;
|
||||
if (j != failedDNIdx) {
|
||||
System.arraycopy(expected, posInBuf, decodeInputs[j], 0, CELLSIZE);
|
||||
}
|
||||
}
|
||||
for (int j = DATA_BLK_NUM; j < DATA_BLK_NUM + PARITY_BLK_NUM; j++) {
|
||||
for (int k = 0; k < CELLSIZE; k++) {
|
||||
int posInBlk = i * CELLSIZE + k;
|
||||
decodeInputs[j][k] = SimulatedFSDataset.simulatedByte(
|
||||
new Block(bg.getBlock().getBlockId() + j), posInBlk);
|
||||
}
|
||||
}
|
||||
for (int m : missingBlkIdx) {
|
||||
decodeInputs[m] = null;
|
||||
}
|
||||
rawDecoder.decode(decodeInputs, missingBlkIdx, decodeOutputs);
|
||||
int posInBuf = i * CELLSIZE * DATA_BLK_NUM + failedDNIdx * CELLSIZE;
|
||||
System.arraycopy(decodeOutputs[0], 0, expected, posInBuf, CELLSIZE);
|
||||
}
|
||||
|
||||
int delta = 10;
|
||||
int done = 0;
|
||||
// read a small delta, shouldn't trigger decode
|
||||
// |cell_0 |
|
||||
// |10 |
|
||||
done += in.read(readBuffer, 0, delta);
|
||||
assertEquals(delta, done);
|
||||
// both head and trail cells are partial
|
||||
// |c_0 |c_1 |c_2 |c_3 |c_4 |c_5 |
|
||||
// |256K - 10|missing|256K|256K|256K - 10|not in range|
|
||||
while (done < (CELLSIZE * (DATA_BLK_NUM - 1) - 2 * delta)) {
|
||||
int ret = in.read(readBuffer, delta,
|
||||
CELLSIZE * (DATA_BLK_NUM - 1) - 2 * delta);
|
||||
assertTrue(ret > 0);
|
||||
done += ret;
|
||||
}
|
||||
assertEquals(CELLSIZE * (DATA_BLK_NUM - 1) - delta, done);
|
||||
// read the rest
|
||||
|
||||
int restSize;
|
||||
restSize = readSize - done;
|
||||
while (done < restSize) {
|
||||
int ret = in.read(readBuffer, done, restSize);
|
||||
assertTrue(ret > 0);
|
||||
done += ret;
|
||||
}
|
||||
|
||||
assertEquals(readSize, done);
|
||||
assertArrayEquals(expected, readBuffer);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,166 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
* <p>
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* <p>
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hdfs;
|
||||
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import java.util.concurrent.locks.Lock;
|
||||
|
||||
import org.apache.hadoop.util.AutoCloseableLock;
|
||||
import org.apache.hadoop.util.Timer;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
|
||||
import org.junit.Rule;
|
||||
import org.junit.Test;
|
||||
import org.junit.rules.TestName;
|
||||
import static org.mockito.Mockito.*;
|
||||
import static org.junit.Assert.*;
|
||||
|
||||
/**
|
||||
* A test class for InstrumentedLock.
|
||||
*/
|
||||
public class TestInstrumentedLock {
|
||||
|
||||
static final Log LOG = LogFactory.getLog(TestInstrumentedLock.class);
|
||||
|
||||
@Rule public TestName name = new TestName();
|
||||
|
||||
/**
|
||||
* Test exclusive access of the lock.
|
||||
* @throws Exception
|
||||
*/
|
||||
@Test(timeout=10000)
|
||||
public void testMultipleThread() throws Exception {
|
||||
String testname = name.getMethodName();
|
||||
InstrumentedLock lock = new InstrumentedLock(testname, LOG, 0, 300);
|
||||
lock.lock();
|
||||
try {
|
||||
Thread competingThread = new Thread() {
|
||||
@Override
|
||||
public void run() {
|
||||
assertFalse(lock.tryLock());
|
||||
}
|
||||
};
|
||||
competingThread.start();
|
||||
competingThread.join();
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Test the correctness with try-with-resource syntax.
|
||||
* @throws Exception
|
||||
*/
|
||||
@Test(timeout=10000)
|
||||
public void testTryWithResourceSyntax() throws Exception {
|
||||
String testname = name.getMethodName();
|
||||
final AtomicReference<Thread> lockThread = new AtomicReference<>(null);
|
||||
Lock lock = new InstrumentedLock(testname, LOG, 0, 300) {
|
||||
@Override
|
||||
public void lock() {
|
||||
super.lock();
|
||||
lockThread.set(Thread.currentThread());
|
||||
}
|
||||
@Override
|
||||
public void unlock() {
|
||||
super.unlock();
|
||||
lockThread.set(null);
|
||||
}
|
||||
};
|
||||
AutoCloseableLock acl = new AutoCloseableLock(lock);
|
||||
try (AutoCloseable localLock = acl.acquire()) {
|
||||
assertEquals(acl, localLock);
|
||||
Thread competingThread = new Thread() {
|
||||
@Override
|
||||
public void run() {
|
||||
assertNotEquals(Thread.currentThread(), lockThread.get());
|
||||
assertFalse(lock.tryLock());
|
||||
}
|
||||
};
|
||||
competingThread.start();
|
||||
competingThread.join();
|
||||
assertEquals(Thread.currentThread(), lockThread.get());
|
||||
}
|
||||
assertNull(lockThread.get());
|
||||
}
|
||||
|
||||
/**
|
||||
* Test the lock logs warning when lock held time is greater than threshold
|
||||
* and not log warning otherwise.
|
||||
* @throws Exception
|
||||
*/
|
||||
@Test(timeout=10000)
|
||||
public void testLockLongHoldingReport() throws Exception {
|
||||
String testname = name.getMethodName();
|
||||
final AtomicLong time = new AtomicLong(0);
|
||||
Timer mclock = new Timer() {
|
||||
@Override
|
||||
public long monotonicNow() {
|
||||
return time.get();
|
||||
}
|
||||
};
|
||||
Lock mlock = mock(Lock.class);
|
||||
|
||||
final AtomicLong wlogged = new AtomicLong(0);
|
||||
final AtomicLong wsuppresed = new AtomicLong(0);
|
||||
InstrumentedLock lock = new InstrumentedLock(
|
||||
testname, LOG, mlock, 2000, 300, mclock) {
|
||||
@Override
|
||||
void logWarning(long lockHeldTime, long suppressed) {
|
||||
wlogged.incrementAndGet();
|
||||
wsuppresed.set(suppressed);
|
||||
}
|
||||
};
|
||||
|
||||
// do not log warning when the lock held time is short
|
||||
lock.lock(); // t = 0
|
||||
time.set(200);
|
||||
lock.unlock(); // t = 200
|
||||
assertEquals(0, wlogged.get());
|
||||
assertEquals(0, wsuppresed.get());
|
||||
|
||||
lock.lock(); // t = 200
|
||||
time.set(700);
|
||||
lock.unlock(); // t = 700
|
||||
assertEquals(1, wlogged.get());
|
||||
assertEquals(0, wsuppresed.get());
|
||||
|
||||
// despite the lock held time is greater than threshold
|
||||
// suppress the log warning due to the logging gap
|
||||
// (not recorded in wsuppressed until next log message)
|
||||
lock.lock(); // t = 700
|
||||
time.set(1100);
|
||||
lock.unlock(); // t = 1100
|
||||
assertEquals(1, wlogged.get());
|
||||
assertEquals(0, wsuppresed.get());
|
||||
|
||||
// log a warning message when the lock held time is greater the threshold
|
||||
// and the logging time gap is satisfied. Also should display suppressed
|
||||
// previous warnings.
|
||||
time.set(2400);
|
||||
lock.lock(); // t = 2400
|
||||
time.set(2800);
|
||||
lock.unlock(); // t = 2800
|
||||
assertEquals(2, wlogged.get());
|
||||
assertEquals(1, wsuppresed.get());
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,119 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hdfs.security;
|
||||
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
|
||||
import static org.mockito.Mockito.mock;
|
||||
|
||||
import java.net.InetSocketAddress;
|
||||
import java.security.PrivilegedExceptionAction;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
|
||||
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
|
||||
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
|
||||
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
|
||||
import org.apache.hadoop.io.Text;
|
||||
import org.apache.hadoop.ipc.Client;
|
||||
import org.apache.hadoop.ipc.RPC;
|
||||
import org.apache.hadoop.ipc.Server;
|
||||
import org.apache.hadoop.net.NetUtils;
|
||||
import org.apache.hadoop.security.SaslInputStream;
|
||||
import org.apache.hadoop.security.SaslRpcClient;
|
||||
import org.apache.hadoop.security.SaslRpcServer;
|
||||
import org.apache.hadoop.security.SecurityUtil;
|
||||
import org.apache.hadoop.security.UserGroupInformation;
|
||||
import org.apache.hadoop.security.token.Token;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.apache.log4j.Level;
|
||||
import org.junit.Test;
|
||||
|
||||
/** Unit tests for using Delegation Token over RPC. */
|
||||
public class TestClientProtocolWithDelegationToken {
|
||||
private static final String ADDRESS = "0.0.0.0";
|
||||
|
||||
public static final Log LOG = LogFactory
|
||||
.getLog(TestClientProtocolWithDelegationToken.class);
|
||||
|
||||
private static final Configuration conf;
|
||||
static {
|
||||
conf = new Configuration();
|
||||
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
|
||||
UserGroupInformation.setConfiguration(conf);
|
||||
}
|
||||
|
||||
static {
|
||||
GenericTestUtils.setLogLevel(Client.LOG, Level.ALL);
|
||||
GenericTestUtils.setLogLevel(Server.LOG, Level.ALL);
|
||||
GenericTestUtils.setLogLevel(SaslRpcClient.LOG, Level.ALL);
|
||||
GenericTestUtils.setLogLevel(SaslRpcServer.LOG, Level.ALL);
|
||||
GenericTestUtils.setLogLevel(SaslInputStream.LOG, Level.ALL);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDelegationTokenRpc() throws Exception {
|
||||
ClientProtocol mockNN = mock(ClientProtocol.class);
|
||||
FSNamesystem mockNameSys = mock(FSNamesystem.class);
|
||||
|
||||
DelegationTokenSecretManager sm = new DelegationTokenSecretManager(
|
||||
DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT,
|
||||
DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT,
|
||||
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT,
|
||||
3600000, mockNameSys);
|
||||
sm.startThreads();
|
||||
final Server server = new RPC.Builder(conf)
|
||||
.setProtocol(ClientProtocol.class).setInstance(mockNN)
|
||||
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
|
||||
.setSecretManager(sm).build();
|
||||
|
||||
server.start();
|
||||
|
||||
final UserGroupInformation current = UserGroupInformation.getCurrentUser();
|
||||
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
|
||||
String user = current.getUserName();
|
||||
Text owner = new Text(user);
|
||||
DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(owner, owner, null);
|
||||
Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
|
||||
dtId, sm);
|
||||
SecurityUtil.setTokenService(token, addr);
|
||||
LOG.info("Service for token is " + token.getService());
|
||||
current.addToken(token);
|
||||
current.doAs(new PrivilegedExceptionAction<Object>() {
|
||||
@Override
|
||||
public Object run() throws Exception {
|
||||
ClientProtocol proxy = null;
|
||||
try {
|
||||
proxy = RPC.getProxy(ClientProtocol.class,
|
||||
ClientProtocol.versionID, addr, conf);
|
||||
proxy.getServerDefaults();
|
||||
} finally {
|
||||
server.stop();
|
||||
if (proxy != null) {
|
||||
RPC.stopProxy(proxy);
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
}
|
|
@ -589,20 +589,21 @@ public class TestFsDatasetImpl {
|
|||
// Will write and remove on dn0.
|
||||
final ExtendedBlock eb = new ExtendedBlock(BLOCK_POOL_IDS[0], 0);
|
||||
final CountDownLatch startFinalizeLatch = new CountDownLatch(1);
|
||||
final CountDownLatch brReceivedLatch = new CountDownLatch(1);
|
||||
final CountDownLatch volRemovedLatch = new CountDownLatch(1);
|
||||
final CountDownLatch blockReportReceivedLatch = new CountDownLatch(1);
|
||||
final CountDownLatch volRemoveStartedLatch = new CountDownLatch(1);
|
||||
final CountDownLatch volRemoveCompletedLatch = new CountDownLatch(1);
|
||||
class BlockReportThread extends Thread {
|
||||
public void run() {
|
||||
// Lets wait for the volume remove process to start
|
||||
try {
|
||||
volRemovedLatch.await();
|
||||
volRemoveStartedLatch.await();
|
||||
} catch (Exception e) {
|
||||
LOG.info("Unexpected exception when waiting for vol removal:", e);
|
||||
}
|
||||
LOG.info("Getting block report");
|
||||
dataset.getBlockReports(eb.getBlockPoolId());
|
||||
LOG.info("Successfully received block report");
|
||||
brReceivedLatch.countDown();
|
||||
blockReportReceivedLatch.countDown();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -623,7 +624,7 @@ public class TestFsDatasetImpl {
|
|||
}
|
||||
|
||||
// Lets wait for the other thread finish getting block report
|
||||
brReceivedLatch.await();
|
||||
blockReportReceivedLatch.await();
|
||||
|
||||
dataset.finalizeBlock(eb);
|
||||
LOG.info("FinalizeBlock finished");
|
||||
|
@ -633,34 +634,53 @@ public class TestFsDatasetImpl {
|
|||
}
|
||||
}
|
||||
|
||||
ResponderThread res = new ResponderThread();
|
||||
res.start();
|
||||
startFinalizeLatch.await();
|
||||
|
||||
// Verify if block report can be received
|
||||
// when volume is being removed
|
||||
final BlockReportThread brt = new BlockReportThread();
|
||||
brt.start();
|
||||
|
||||
Set<File> volumesToRemove = new HashSet<>();
|
||||
volumesToRemove.add(
|
||||
StorageLocation.parse(dataset.getVolume(eb).getBasePath()).getFile());
|
||||
/**
|
||||
* TODO: {@link FsDatasetImpl#removeVolumes(Set, boolean)} is throwing
|
||||
* IllegalMonitorStateException when there is a parallel reader/writer
|
||||
* to the volume. Remove below try/catch block after fixing HDFS-10830.
|
||||
*/
|
||||
try {
|
||||
LOG.info("Removing volume " + volumesToRemove);
|
||||
dataset.removeVolumes(volumesToRemove, true);
|
||||
} catch (Exception e) {
|
||||
LOG.info("Unexpected issue while removing volume: ", e);
|
||||
} finally {
|
||||
volRemovedLatch.countDown();
|
||||
class VolRemoveThread extends Thread {
|
||||
public void run() {
|
||||
Set<File> volumesToRemove = new HashSet<>();
|
||||
try {
|
||||
volumesToRemove.add(StorageLocation.parse(
|
||||
dataset.getVolume(eb).getBasePath()).getFile());
|
||||
} catch (Exception e) {
|
||||
LOG.info("Problem preparing volumes to remove: ", e);
|
||||
Assert.fail("Exception in remove volume thread, check log for " +
|
||||
"details.");
|
||||
}
|
||||
LOG.info("Removing volume " + volumesToRemove);
|
||||
dataset.removeVolumes(volumesToRemove, true);
|
||||
volRemoveCompletedLatch.countDown();
|
||||
LOG.info("Removed volume " + volumesToRemove);
|
||||
}
|
||||
}
|
||||
|
||||
LOG.info("Volumes removed");
|
||||
brReceivedLatch.await();
|
||||
// Start the volume write operation
|
||||
ResponderThread responderThread = new ResponderThread();
|
||||
responderThread.start();
|
||||
startFinalizeLatch.await();
|
||||
|
||||
// Start the block report get operation
|
||||
final BlockReportThread blockReportThread = new BlockReportThread();
|
||||
blockReportThread.start();
|
||||
|
||||
// Start the volume remove operation
|
||||
VolRemoveThread volRemoveThread = new VolRemoveThread();
|
||||
volRemoveThread.start();
|
||||
|
||||
// Let volume write and remove operation be
|
||||
// blocked for few seconds
|
||||
Thread.sleep(2000);
|
||||
|
||||
// Signal block report receiver and volume writer
|
||||
// thread to complete their operations so that vol
|
||||
// remove can proceed
|
||||
volRemoveStartedLatch.countDown();
|
||||
|
||||
// Verify if block report can be received
|
||||
// when volume is in use and also being removed
|
||||
blockReportReceivedLatch.await();
|
||||
|
||||
// Verify if volume can be removed safely when there
|
||||
// are read/write operation in-progress
|
||||
volRemoveCompletedLatch.await();
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -25,7 +25,6 @@ import org.apache.hadoop.fs.Path;
|
|||
import org.apache.hadoop.fs.StorageType;
|
||||
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||
import org.apache.hadoop.hdfs.DFSTestUtil;
|
||||
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
|
||||
|
@ -37,19 +36,18 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
|
|||
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
|
||||
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
|
||||
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
|
||||
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
|
||||
.DiskBalancerDataNode;
|
||||
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
|
||||
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
|
||||
import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.apache.hadoop.util.Time;
|
||||
import org.junit.Test;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.URISyntaxException;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
@ -62,6 +60,7 @@ import static org.junit.Assert.assertTrue;
|
|||
public class TestDiskBalancer {
|
||||
|
||||
private static final String PLAN_FILE = "/system/current.plan.json";
|
||||
static final Logger LOG = LoggerFactory.getLogger(TestDiskBalancer.class);
|
||||
|
||||
@Test
|
||||
public void testDiskBalancerNameNodeConnectivity() throws Exception {
|
||||
|
@ -110,227 +109,77 @@ public class TestDiskBalancer {
|
|||
*/
|
||||
@Test
|
||||
public void testDiskBalancerEndToEnd() throws Exception {
|
||||
|
||||
    Configuration conf = new HdfsConfiguration();
    final int defaultBlockSize = 100;
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    final int numDatanodes = 1;
    final String fileName = "/tmp.txt";
    final Path filePath = new Path(fileName);
    final int blocks = 100;
    final int blocksSize = 1024;
    final int fileLen = blocks * blocksSize;
    final int blockCount = 100;
    final int blockSize = 1024;
    final int diskCount = 2;
    final int dataNodeCount = 1;
    final int dataNodeIndex = 0;
    final int sourceDiskIndex = 0;

    // Write a file and restart the cluster
    long[] capacities = new long[]{defaultBlockSize * 2 * fileLen,
        defaultBlockSize * 2 * fileLen};
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDatanodes)
        .storageCapacities(capacities)
        .storageTypes(new StorageType[]{StorageType.DISK, StorageType.DISK})
        .storagesPerDatanode(2)
    MiniDFSCluster cluster = new ClusterBuilder()
        .setBlockCount(blockCount)
        .setBlockSize(blockSize)
        .setDiskCount(diskCount)
        .setNumDatanodes(dataNodeCount)
        .setConf(conf)
        .build();
    FsVolumeImpl source = null;
    FsVolumeImpl dest = null;
    try {
      cluster.waitActive();
      Random r = new Random();
      FileSystem fs = cluster.getFileSystem(0);
      TestBalancer.createFile(cluster, filePath, fileLen, (short) 1,
          numDatanodes - 1);

      DFSTestUtil.waitReplication(fs, filePath, (short) 1);
      cluster.restartDataNodes();
      cluster.waitActive();

      // Get the data node and move all data to one disk.
      DataNode dnNode = cluster.getDataNodes().get(numDatanodes - 1);
      try (FsDatasetSpi.FsVolumeReferences refs =
               dnNode.getFSDataset().getFsVolumeReferences()) {
        source = (FsVolumeImpl) refs.get(0);
        dest = (FsVolumeImpl) refs.get(1);
        assertTrue(DiskBalancerTestUtil.getBlockCount(source) > 0);
        DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(),
            source, dest);
        assertTrue(DiskBalancerTestUtil.getBlockCount(source) == 0);
      }

      cluster.restartDataNodes();
      cluster.waitActive();

      // Start up a disk balancer and read the cluster info.
      final DataNode newDN = cluster.getDataNodes().get(numDatanodes - 1);
      ClusterConnector nameNodeConnector =
          ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);

      DiskBalancerCluster diskBalancerCluster =
          new DiskBalancerCluster(nameNodeConnector);
      diskBalancerCluster.readClusterInfo();
      List<DiskBalancerDataNode> nodesToProcess = new LinkedList<>();

      // Rewrite the capacity in the model to show that disks need
      // re-balancing.
      setVolumeCapacity(diskBalancerCluster, defaultBlockSize * 2 * fileLen,
          "DISK");
      // Pick a node to process.
      nodesToProcess.add(diskBalancerCluster.getNodeByUUID(dnNode
          .getDatanodeUuid()));
      diskBalancerCluster.setNodesToProcess(nodesToProcess);

      // Compute a plan.
      List<NodePlan> clusterplan = diskBalancerCluster.computePlan(0.0f);

      // Now we must have a plan,since the node is imbalanced and we
      // asked the disk balancer to create a plan.
      assertTrue(clusterplan.size() == 1);

      NodePlan plan = clusterplan.get(0);
      plan.setNodeUUID(dnNode.getDatanodeUuid());
      plan.setTimeStamp(Time.now());
      String planJson = plan.toJson();
      String planID = DigestUtils.shaHex(planJson);
      assertNotNull(plan.getVolumeSetPlans());
      assertTrue(plan.getVolumeSetPlans().size() > 0);
      plan.getVolumeSetPlans().get(0).setTolerancePercent(10);

      // Submit the plan and wait till the execution is done.
      newDN.submitDiskBalancerPlan(planID, 1, PLAN_FILE, planJson, false);
      String jmxString = newDN.getDiskBalancerStatus();
      assertNotNull(jmxString);
      DiskBalancerWorkStatus status =
          DiskBalancerWorkStatus.parseJson(jmxString);
      DiskBalancerWorkStatus realStatus = newDN.queryDiskBalancerPlan();
      assertEquals(realStatus.getPlanID(), status.getPlanID());

      GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
          try {
            return newDN.queryDiskBalancerPlan().getResult() ==
                DiskBalancerWorkStatus.Result.PLAN_DONE;
          } catch (IOException ex) {
            return false;
          }
        }
      }, 1000, 100000);

      //verify that it worked.
      dnNode = cluster.getDataNodes().get(numDatanodes - 1);
      assertEquals(dnNode.queryDiskBalancerPlan().getResult(),
          DiskBalancerWorkStatus.Result.PLAN_DONE);
      try (FsDatasetSpi.FsVolumeReferences refs =
               dnNode.getFSDataset().getFsVolumeReferences()) {
        source = (FsVolumeImpl) refs.get(0);
        assertTrue(DiskBalancerTestUtil.getBlockCount(source) > 0);
      }

      // Tolerance
      long delta = (plan.getVolumeSetPlans().get(0).getBytesToMove()
          * 10) / 100;
      assertTrue(
          (DiskBalancerTestUtil.getBlockCount(source) *
              defaultBlockSize + delta) >=
              plan.getVolumeSetPlans().get(0).getBytesToMove());

      DataMover dataMover = new DataMover(cluster, dataNodeIndex,
          sourceDiskIndex, conf, blockSize, blockCount);
      dataMover.moveDataToSourceDisk();
      NodePlan plan = dataMover.generatePlan();
      dataMover.executePlan(plan);
      dataMover.verifyPlanExectionDone();
      dataMover.verifyAllVolumesHaveData();
      dataMover.verifyTolerance(plan, 0, sourceDiskIndex, 10);
    } finally {
      cluster.shutdown();
    }
  }

  @Test(timeout=60000)
  @Test
  public void testBalanceDataBetweenMultiplePairsOfVolumes()
      throws Exception {

    Configuration conf = new HdfsConfiguration();
    final int DEFAULT_BLOCK_SIZE = 2048;
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    final int NUM_DATANODES = 1;
    final long CAP = 512 * 1024;
    final Path testFile = new Path("/testfile");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(NUM_DATANODES)
        .storageCapacities(new long[]{CAP, CAP, CAP, CAP})
        .storagesPerDatanode(4)
    final int blockCount = 1000;
    final int blockSize = 1024;

    // create 3 disks, that means we will have 2 plans
    // Move Data from disk0->disk1 and disk0->disk2.
    final int diskCount = 3;
    final int dataNodeCount = 1;
    final int dataNodeIndex = 0;
    final int sourceDiskIndex = 0;

    MiniDFSCluster cluster = new ClusterBuilder()
        .setBlockCount(blockCount)
        .setBlockSize(blockSize)
        .setDiskCount(diskCount)
        .setNumDatanodes(dataNodeCount)
        .setConf(conf)
        .build();

    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      TestBalancer.createFile(cluster, testFile, CAP, (short) 1, 0);
      DataMover dataMover = new DataMover(cluster, dataNodeIndex,
          sourceDiskIndex, conf, blockSize, blockCount);
      dataMover.moveDataToSourceDisk();
      NodePlan plan = dataMover.generatePlan();

      DFSTestUtil.waitReplication(fs, testFile, (short) 1);
      DataNode dnNode = cluster.getDataNodes().get(0);
      // Move data out of two volumes to make them empty.
      try (FsDatasetSpi.FsVolumeReferences refs =
               dnNode.getFSDataset().getFsVolumeReferences()) {
        assertEquals(4, refs.size());
        for (int i = 0; i < refs.size(); i += 2) {
          FsVolumeImpl source = (FsVolumeImpl) refs.get(i);
          FsVolumeImpl dest = (FsVolumeImpl) refs.get(i + 1);
          assertTrue(DiskBalancerTestUtil.getBlockCount(source) > 0);
          DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(),
              source, dest);
          assertTrue(DiskBalancerTestUtil.getBlockCount(source) == 0);
        }
      }
      // 3 disks , The plan should move data both disks,
      // so we must have 2 plan steps.
      assertEquals(plan.getVolumeSetPlans().size(), 2);

      cluster.restartDataNodes();
      cluster.waitActive();

      // Start up a disk balancer and read the cluster info.
      final DataNode dataNode = cluster.getDataNodes().get(0);
      ClusterConnector nameNodeConnector =
          ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);

      DiskBalancerCluster diskBalancerCluster =
          new DiskBalancerCluster(nameNodeConnector);
      diskBalancerCluster.readClusterInfo();
      List<DiskBalancerDataNode> nodesToProcess = new LinkedList<>();
      // Rewrite the capacity in the model to show that disks need
      // re-balancing.
      setVolumeCapacity(diskBalancerCluster, CAP, "DISK");
      nodesToProcess.add(diskBalancerCluster.getNodeByUUID(
          dataNode.getDatanodeUuid()));
      diskBalancerCluster.setNodesToProcess(nodesToProcess);

      // Compute a plan.
      List<NodePlan> clusterPlan = diskBalancerCluster.computePlan(10.0f);

      NodePlan plan = clusterPlan.get(0);
      assertEquals(2, plan.getVolumeSetPlans().size());
      plan.setNodeUUID(dnNode.getDatanodeUuid());
      plan.setTimeStamp(Time.now());
      String planJson = plan.toJson();
      String planID = DigestUtils.shaHex(planJson);

      dataNode.submitDiskBalancerPlan(planID, 1, PLAN_FILE, planJson, false);

      GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
          try {
            return dataNode.queryDiskBalancerPlan().getResult() ==
                DiskBalancerWorkStatus.Result.PLAN_DONE;
          } catch (IOException ex) {
            return false;
          }
        }
      }, 1000, 100000);
      assertEquals(dataNode.queryDiskBalancerPlan().getResult(),
          DiskBalancerWorkStatus.Result.PLAN_DONE);

      try (FsDatasetSpi.FsVolumeReferences refs =
               dataNode.getFSDataset().getFsVolumeReferences()) {
        for (FsVolumeSpi vol : refs) {
          assertTrue(DiskBalancerTestUtil.getBlockCount(vol) > 0);
        }
      }
      dataMover.executePlan(plan);
      dataMover.verifyPlanExectionDone();
      dataMover.verifyAllVolumesHaveData();
      dataMover.verifyTolerance(plan, 0, sourceDiskIndex, 10);
    } finally {
      cluster.shutdown();
    }

@ -353,4 +202,293 @@ public class TestDiskBalancer {
    node.getVolumeSets().get(diskType).computeVolumeDataDensity();
  }
}

/**
 * Helper class that allows us to create different kinds of MiniDFSClusters
 * and populate data.
 */
static class ClusterBuilder {
  private Configuration conf;
  private int blockSize;
  private int numDatanodes;
  private int fileLen;
  private int blockCount;
  private int diskCount;

  public ClusterBuilder setConf(Configuration conf) {
    this.conf = conf;
    return this;
  }

  public ClusterBuilder setBlockSize(int blockSize) {
    this.blockSize = blockSize;
    return this;
  }

  public ClusterBuilder setNumDatanodes(int datanodeCount) {
    this.numDatanodes = datanodeCount;
    return this;
  }

  public ClusterBuilder setBlockCount(int blockCount) {
    this.blockCount = blockCount;
    return this;
  }

  public ClusterBuilder setDiskCount(int diskCount) {
    this.diskCount = diskCount;
    return this;
  }

  private long[] getCapacities(int diskCount, int bSize, int fSize) {
    Preconditions.checkState(diskCount > 0);
    long[] capacities = new long[diskCount];
    for (int x = 0; x < diskCount; x++) {
      capacities[x] = diskCount * bSize * fSize * 2L;
    }
    return capacities;
  }

  private StorageType[] getStorageTypes(int diskCount) {
    Preconditions.checkState(diskCount > 0);
    StorageType[] array = new StorageType[diskCount];
    for (int x = 0; x < diskCount; x++) {
      array[x] = StorageType.DISK;
    }
    return array;
  }

  public MiniDFSCluster build() throws IOException, TimeoutException,
      InterruptedException {
    Preconditions.checkNotNull(this.conf);
    Preconditions.checkState(blockSize > 0);
    Preconditions.checkState(numDatanodes > 0);
    fileLen = blockCount * blockSize;
    Preconditions.checkState(fileLen > 0);
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, blockSize);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);

    final String fileName = "/tmp.txt";
    Path filePath = new Path(fileName);
    fileLen = blockCount * blockSize;

    // Write a file and restart the cluster
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDatanodes)
        .storageCapacities(getCapacities(diskCount, blockSize, fileLen))
        .storageTypes(getStorageTypes(diskCount))
        .storagesPerDatanode(diskCount)
        .build();
    generateData(filePath, cluster);
    cluster.restartDataNodes();
    cluster.waitActive();
    return cluster;
  }

  private void generateData(Path filePath, MiniDFSCluster cluster)
      throws IOException, InterruptedException, TimeoutException {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem(0);
    TestBalancer.createFile(cluster, filePath, fileLen, (short) 1,
        numDatanodes - 1);
    DFSTestUtil.waitReplication(fs, filePath, (short) 1);
    cluster.restartDataNodes();
    cluster.waitActive();
  }
}

class DataMover {
  private final MiniDFSCluster cluster;
  private final int sourceDiskIndex;
  private final int dataNodeIndex;
  private final Configuration conf;
  private final int blockCount;
  private final int blockSize;
  private DataNode node;

  /**
   * Constructs a DataMover class.
   *
   * @param cluster - MiniDFSCluster.
   * @param dataNodeIndex - Datanode to operate against.
   * @param sourceDiskIndex - source Disk Index.
   */
  public DataMover(MiniDFSCluster cluster, int dataNodeIndex, int
      sourceDiskIndex, Configuration conf, int blockSize, int
      blockCount) {
    this.cluster = cluster;
    this.dataNodeIndex = dataNodeIndex;
    this.node = cluster.getDataNodes().get(dataNodeIndex);
    this.sourceDiskIndex = sourceDiskIndex;
    this.conf = conf;
    this.blockCount = blockCount;
    this.blockSize = blockSize;
  }

  /**
   * Moves all data to a source disk to create disk imbalance so we can run a
   * planner.
   *
   * @throws IOException
   */
  public void moveDataToSourceDisk() throws IOException {
    moveAllDataToDestDisk(this.node, sourceDiskIndex);
    cluster.restartDataNodes();
    cluster.waitActive();
  }

  /**
   * Moves all data in the data node to one disk.
   *
   * @param dataNode - Datanode
   * @param destDiskindex - Index of the destination disk.
   */
  private void moveAllDataToDestDisk(DataNode dataNode, int destDiskindex)
      throws IOException {
    Preconditions.checkNotNull(dataNode);
    Preconditions.checkState(destDiskindex >= 0);
    try (FsDatasetSpi.FsVolumeReferences refs =
             dataNode.getFSDataset().getFsVolumeReferences()) {
      if (refs.size() <= destDiskindex) {
        throw new IllegalArgumentException("Invalid Disk index.");
      }
      FsVolumeImpl dest = (FsVolumeImpl) refs.get(destDiskindex);
      for (int x = 0; x < refs.size(); x++) {
        if (x == destDiskindex) {
          continue;
        }
        FsVolumeImpl source = (FsVolumeImpl) refs.get(x);
        DiskBalancerTestUtil.moveAllDataToDestVolume(dataNode.getFSDataset(),
            source, dest);
      }
    }
  }

  /**
   * Generates a NodePlan for the datanode specified.
   *
   * @return NodePlan.
   */
  public NodePlan generatePlan() throws Exception {

    // Start up a disk balancer and read the cluster info.
    node = cluster.getDataNodes().get(dataNodeIndex);
    ClusterConnector nameNodeConnector =
        ConnectorFactory.getCluster(cluster.getFileSystem(dataNodeIndex)
            .getUri(), conf);

    DiskBalancerCluster diskBalancerCluster =
        new DiskBalancerCluster(nameNodeConnector);
    diskBalancerCluster.readClusterInfo();
    List<DiskBalancerDataNode> nodesToProcess = new LinkedList<>();

    // Rewrite the capacity in the model to show that disks need
    // re-balancing.
    setVolumeCapacity(diskBalancerCluster, blockSize * 2L * blockCount,
        "DISK");
    // Pick a node to process.
    nodesToProcess.add(diskBalancerCluster.getNodeByUUID(
        node.getDatanodeUuid()));
    diskBalancerCluster.setNodesToProcess(nodesToProcess);

    // Compute a plan.
    List<NodePlan> clusterplan = diskBalancerCluster.computePlan(0.0f);

    // Now we must have a plan,since the node is imbalanced and we
    // asked the disk balancer to create a plan.
    assertTrue(clusterplan.size() == 1);

    NodePlan plan = clusterplan.get(0);
    plan.setNodeUUID(node.getDatanodeUuid());
    plan.setTimeStamp(Time.now());

    assertNotNull(plan.getVolumeSetPlans());
    assertTrue(plan.getVolumeSetPlans().size() > 0);
    plan.getVolumeSetPlans().get(0).setTolerancePercent(10);
    return plan;
  }

  /**
   * Waits for a plan executing to finish.
   */
  public void executePlan(NodePlan plan) throws
      IOException, TimeoutException, InterruptedException {

    node = cluster.getDataNodes().get(dataNodeIndex);
    String planJson = plan.toJson();
    String planID = DigestUtils.shaHex(planJson);

    // Submit the plan and wait till the execution is done.
    node.submitDiskBalancerPlan(planID, 1, PLAN_FILE, planJson,
        false);
    String jmxString = node.getDiskBalancerStatus();
    assertNotNull(jmxString);
    DiskBalancerWorkStatus status =
        DiskBalancerWorkStatus.parseJson(jmxString);
    DiskBalancerWorkStatus realStatus = node.queryDiskBalancerPlan();
    assertEquals(realStatus.getPlanID(), status.getPlanID());

    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        try {
          return node.queryDiskBalancerPlan().getResult() ==
              DiskBalancerWorkStatus.Result.PLAN_DONE;
        } catch (IOException ex) {
          return false;
        }
      }
    }, 1000, 100000);
  }

  /**
   * Verifies the Plan Execution has been done.
   */
  public void verifyPlanExectionDone() throws IOException {
    node = cluster.getDataNodes().get(dataNodeIndex);
    assertEquals(node.queryDiskBalancerPlan().getResult(),
        DiskBalancerWorkStatus.Result.PLAN_DONE);
  }

  /**
   * Once diskBalancer is run, all volumes mush has some data.
   */
  public void verifyAllVolumesHaveData() throws IOException {
    node = cluster.getDataNodes().get(dataNodeIndex);
    try (FsDatasetSpi.FsVolumeReferences refs =
             node.getFSDataset().getFsVolumeReferences()) {
      for (FsVolumeSpi volume : refs) {
        assertTrue(DiskBalancerTestUtil.getBlockCount(volume) > 0);
        LOG.info(refs.toString() + " : Block Count : {}",
            DiskBalancerTestUtil.getBlockCount(volume));
      }
    }
  }

  /**
   * Verifies that tolerance values are honored correctly.
   */
  public void verifyTolerance(NodePlan plan, int planIndex, int
      sourceDiskIndex, int tolerance) throws IOException {
    // Tolerance
    long delta = (plan.getVolumeSetPlans().get(planIndex).getBytesToMove()
        * tolerance) / 100;
    FsVolumeImpl volume = null;
    try (FsDatasetSpi.FsVolumeReferences refs =
             node.getFSDataset().getFsVolumeReferences()) {
      volume = (FsVolumeImpl) refs.get(sourceDiskIndex);
      assertTrue(DiskBalancerTestUtil.getBlockCount(volume) > 0);

      assertTrue(
          (DiskBalancerTestUtil.getBlockCount(volume) *
              (blockSize + delta)) >=
              plan.getVolumeSetPlans().get(0).getBytesToMove());
    }
  }
}
}
@ -358,14 +358,13 @@ public class TestDiskBalancerWithMockMover {

    private AtomicBoolean shouldRun;
    private FsDatasetSpi dataset;
    private Integer runCount;
    private int runCount;
    private volatile boolean sleepInCopyBlocks;
    private long delay;

    public TestMover(FsDatasetSpi dataset) {
      this.dataset = dataset;
      this.shouldRun = new AtomicBoolean(false);
      this.runCount = new Integer(0);
    }

    public void setSleep() {

@ -401,7 +400,7 @@ public class TestDiskBalancerWithMockMover {
      if (delay > 0) {
        Thread.sleep(delay);
      }
      synchronized (runCount) {
      synchronized (this) {
        if (shouldRun()) {
          runCount++;
        }

@ -461,9 +460,9 @@ public class TestDiskBalancerWithMockMover {
    }

    public int getRunCount() {
      synchronized (runCount) {
        LOG.info("Run count : " + runCount.intValue());
        return runCount.intValue();
      synchronized (this) {
        LOG.info("Run count : " + runCount);
        return runCount;
      }
    }
  }

@ -510,7 +509,7 @@ public class TestDiskBalancerWithMockMover {
    }
  }

  private class DiskBalancerBuilder {
  private static class DiskBalancerBuilder {
    private TestMover blockMover;
    private Configuration conf;
    private String nodeID;

@ -546,7 +545,7 @@ public class TestDiskBalancerWithMockMover {
    }
  }

  private class DiskBalancerClusterBuilder {
  private static class DiskBalancerClusterBuilder {
    private String jsonFilePath;
    private Configuration conf;

@ -573,7 +572,7 @@ public class TestDiskBalancerWithMockMover {
    }
  }

  private class PlanBuilder {
  private static class PlanBuilder {
    private String sourcePath;
    private String destPath;
    private String sourceUUID;
@ -23,6 +23,7 @@ import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

@ -41,18 +42,19 @@ import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
import org.apache.hadoop.hdfs.tools.DiskBalancerCLI;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import com.google.common.collect.Lists;

import static org.apache.hadoop.hdfs.tools.DiskBalancer.CANCEL;
import static org.apache.hadoop.hdfs.tools.DiskBalancer.HELP;
import static org.apache.hadoop.hdfs.tools.DiskBalancer.NODE;
import static org.apache.hadoop.hdfs.tools.DiskBalancer.PLAN;
import static org.apache.hadoop.hdfs.tools.DiskBalancer.QUERY;
import static org.apache.hadoop.hdfs.tools.DiskBalancer.REPORT;
import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.CANCEL;
import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.HELP;
import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.NODE;
import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.PLAN;
import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.QUERY;
import static org.apache.hadoop.hdfs.tools.DiskBalancerCLI.REPORT;

import org.junit.Rule;
import org.junit.rules.ExpectedException;

@ -387,8 +389,7 @@ public class TestDiskBalancerCommand {
  private List<String> runCommandInternal(final String cmdLine) throws
      Exception {
    String[] cmds = StringUtils.split(cmdLine, ' ');
    org.apache.hadoop.hdfs.tools.DiskBalancer db =
        new org.apache.hadoop.hdfs.tools.DiskBalancer(conf);
    DiskBalancerCLI db = new DiskBalancerCLI(conf);

    ByteArrayOutputStream bufOut = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bufOut);

@ -457,4 +458,52 @@ public class TestDiskBalancerCommand {
    List<DiskBalancerDataNode> nodeList = command.getNodes(listArg.toString());
    assertEquals(nodeNum, nodeList.size());
  }

  @Test(timeout = 60000)
  public void testReportCommandWithMultipleNodes() throws Exception {
    String dataNodeUuid1 = cluster.getDataNodes().get(0).getDatanodeUuid();
    String dataNodeUuid2 = cluster.getDataNodes().get(1).getDatanodeUuid();
    final String planArg = String.format("-%s -%s %s,%s",
        REPORT, NODE, dataNodeUuid1, dataNodeUuid2);
    final String cmdLine = String.format("hdfs diskbalancer %s", planArg);
    List<String> outputs = runCommand(cmdLine, cluster);

    assertThat(
        outputs.get(0),
        containsString("Processing report command"));
    assertThat(
        outputs.get(1),
        is(allOf(containsString("Reporting volume information for DataNode"),
            containsString(dataNodeUuid1), containsString(dataNodeUuid2))));
    // Since the order of input nodes will be disrupted when parse
    // the node string, we should compare UUID with both output lines.
    assertTrue(outputs.get(2).contains(dataNodeUuid1)
        || outputs.get(6).contains(dataNodeUuid1));
    assertTrue(outputs.get(2).contains(dataNodeUuid2)
        || outputs.get(6).contains(dataNodeUuid2));
  }

  @Test(timeout = 60000)
  public void testReportCommandWithInvalidNode() throws Exception {
    String dataNodeUuid1 = cluster.getDataNodes().get(0).getDatanodeUuid();
    String invalidNode = "invalidNode";
    final String planArg = String.format("-%s -%s %s,%s",
        REPORT, NODE, dataNodeUuid1, invalidNode);
    final String cmdLine = String.format("hdfs diskbalancer %s", planArg);
    List<String> outputs = runCommand(cmdLine, cluster);

    assertThat(
        outputs.get(0),
        containsString("Processing report command"));
    assertThat(
        outputs.get(1),
        is(allOf(containsString("Reporting volume information for DataNode"),
            containsString(dataNodeUuid1), containsString(invalidNode))));

    String invalidNodeInfo =
        String.format("The node(s) '%s' not found. "
            + "Please make sure that '%s' exists in the cluster."
            , invalidNode, invalidNode);
    assertTrue(outputs.get(2).contains(invalidNodeInfo));
  }
}
@ -0,0 +1,109 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.namenode;


import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;

import java.io.IOException;

import static org.apache.hadoop.hdfs.StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_DATA_BLOCKS;
import static org.apache.hadoop.hdfs.StripedFileTestUtil.NUM_PARITY_BLOCKS;

public class TestAddStripedBlockInFBR {
  private final short GROUP_SIZE = (short) (NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS);

  private MiniDFSCluster cluster;
  private DistributedFileSystem dfs;

  @Rule
  public Timeout globalTimeout = new Timeout(300000);

  @Before
  public void setup() throws IOException {
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE).build();
    cluster.waitActive();
    dfs = cluster.getFileSystem();
  }

  @After
  public void tearDown() {
    if (cluster != null) {
      cluster.shutdown();
      cluster = null;
    }
  }

  @Test
  public void testAddBlockInFullBlockReport() throws Exception {
    BlockManager spy = Mockito.spy(cluster.getNamesystem().getBlockManager());
    // let NN ignore one DataNode's IBR
    final DataNode dn = cluster.getDataNodes().get(0);
    final DatanodeID datanodeID = dn.getDatanodeId();
    Mockito.doNothing().when(spy)
        .processIncrementalBlockReport(Mockito.eq(datanodeID), Mockito.any());
    Whitebox.setInternalState(cluster.getNamesystem(), "blockManager", spy);

    final Path ecDir = new Path("/ec");
    final Path repDir = new Path("/rep");
    dfs.mkdirs(ecDir);
    dfs.mkdirs(repDir);
    dfs.getClient().setErasureCodingPolicy(ecDir.toString(), null);

    // create several non-EC files and one EC file
    final Path[] repFiles = new Path[GROUP_SIZE];
    for (int i = 0; i < GROUP_SIZE; i++) {
      repFiles[i] = new Path(repDir, "f" + i);
      DFSTestUtil.createFile(dfs, repFiles[i], 1L, (short) 3, 0L);
    }
    final Path ecFile = new Path(ecDir, "f");
    DFSTestUtil.createFile(dfs, ecFile,
        BLOCK_STRIPED_CELL_SIZE * NUM_DATA_BLOCKS, (short) 1, 0L);

    // trigger dn's FBR. The FBR will add block-dn mapping.
    DataNodeTestUtils.triggerBlockReport(dn);

    // make sure NN has correct block-dn mapping
    BlockInfoStriped blockInfo = (BlockInfoStriped) cluster.getNamesystem()
        .getFSDirectory().getINode(ecFile.toString()).asFile().getLastBlock();
    NumberReplicas nr = spy.countNodes(blockInfo);
    Assert.assertEquals(GROUP_SIZE, nr.liveReplicas());
    Assert.assertEquals(0, nr.excessReplicas());
  }
}
@ -237,7 +237,7 @@ public class TestOfflineImageViewer {
    File truncatedFile = new File(tempDir, "truncatedFsImage");
    PrintStream output = new PrintStream(NullOutputStream.NULL_OUTPUT_STREAM);
    copyPartOfFile(originalFsimage, truncatedFile);
    new FileDistributionCalculator(new Configuration(), 0, 0, output)
    new FileDistributionCalculator(new Configuration(), 0, 0, false, output)
        .visit(new RandomAccessFile(truncatedFile, "r"));
  }

@ -259,7 +259,7 @@ public class TestOfflineImageViewer {
  public void testFileDistributionCalculator() throws IOException {
    ByteArrayOutputStream output = new ByteArrayOutputStream();
    PrintStream o = new PrintStream(output);
    new FileDistributionCalculator(new Configuration(), 0, 0, o)
    new FileDistributionCalculator(new Configuration(), 0, 0, false, o)
        .visit(new RandomAccessFile(originalFsimage, "r"));
    o.close();

@ -620,4 +620,24 @@ public class TestOfflineImageViewer {
      IOUtils.closeStream(out);
    }
  }

  @Test
  public void testOfflineImageViewerWithFormatOption() throws Exception {
    final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    final PrintStream out = new PrintStream(bytes);
    final PrintStream oldOut = System.out;
    try {
      System.setOut(out);
      int status =
          OfflineImageViewerPB.run(new String[] {"-i",
              originalFsimage.getAbsolutePath(), "-o", "-", "-p",
              "FileDistribution", "-maxSize", "512", "-step", "8",
              "-format"});
      assertEquals(0, status);
      Assert.assertTrue(bytes.toString().contains("(0 B, 8 B]"));
    } finally {
      System.setOut(oldOut);
      IOUtils.closeStream(out);
    }
  }
}
@ -36,6 +36,7 @@ import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;

import java.nio.ByteBuffer;
import java.util.Random;

import static org.junit.Assert.assertEquals;

@ -242,7 +243,8 @@ public class TestStripedBlockUtil {
   */
  @Test
  public void testDivideByteRangeIntoStripes() {
    byte[] assembled = new byte[BLK_GROUP_STRIPE_NUM * FULL_STRIPE_SIZE];
    ByteBuffer assembled =
        ByteBuffer.allocate(BLK_GROUP_STRIPE_NUM * FULL_STRIPE_SIZE);
    for (int bgSize : blockGroupSizes) {
      LocatedStripedBlock blockGroup = createDummyLocatedBlock(bgSize);
      byte[][] internalBlkBufs = createInternalBlkBuffers(bgSize);

@ -252,7 +254,7 @@ public class TestStripedBlockUtil {
          continue;
        }
        AlignedStripe[] stripes = divideByteRangeIntoStripes(EC_POLICY,
            CELLSIZE, blockGroup, brStart, brStart + brSize - 1, assembled, 0);
            CELLSIZE, blockGroup, brStart, brStart + brSize - 1, assembled);

        for (AlignedStripe stripe : stripes) {
          for (int i = 0; i < DATA_BLK_NUM; i++) {

@ -261,21 +263,21 @@ public class TestStripedBlockUtil {
              continue;
            }
            int done = 0;
            for (int j = 0; j < chunk.byteArray.getLengths().length; j++) {
              System.arraycopy(internalBlkBufs[i],
                  (int) stripe.getOffsetInBlock() + done, assembled,
                  chunk.byteArray.getOffsets()[j],
                  chunk.byteArray.getLengths()[j]);
              done += chunk.byteArray.getLengths()[j];
            int len;
            for (ByteBuffer slice : chunk.getChunkBuffer().getSlices()) {
              len = slice.remaining();
              slice.put(internalBlkBufs[i],
                  (int) stripe.getOffsetInBlock() + done, len);
              done += len;
            }
          }
        }
        for (int i = 0; i < brSize; i++) {
          if (hashIntToByte(brStart + i) != assembled[i]) {
          if (hashIntToByte(brStart + i) != assembled.get(i)) {
            System.out.println("Oops");
          }
          assertEquals("Byte at " + (brStart + i) + " should be the same",
              hashIntToByte(brStart + i), assembled[i]);
              hashIntToByte(brStart + i), assembled.get(i));
        }
      }
    }
@ -679,63 +679,63 @@
      </cleanup-commands>
      <comparators>
        <comparator>
          <type>SubstringComparator</type>
          <type>ExactLineComparator</type>
          <expected-output># file: /dir1/dir2</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <type>ExactLineComparator</type>
          <expected-output># owner: USERNAME</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <type>ExactLineComparator</type>
          <expected-output># group: supergroup</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <type>ExactLineComparator</type>
          <expected-output>user::rwx</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <type>ExactLineComparator</type>
          <expected-output>user:charlie:r-x</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <type>ExactLineComparator</type>
          <expected-output>group::r-x</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output>group:admin:rwx</expected-output>
          <type>RegexpComparator</type>
          <expected-output>^group:admin:rwx\b.*</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <expected-output>mask::rwx</expected-output>
          <type>ExactLineComparator</type>
          <expected-output>mask::r-x</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <type>ExactLineComparator</type>
          <expected-output>default:user::rwx</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <type>ExactLineComparator</type>
          <expected-output>default:user:charlie:r-x</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <type>ExactLineComparator</type>
          <expected-output>default:group::r-x</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <type>ExactLineComparator</type>
          <expected-output>default:group:admin:rwx</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <type>ExactLineComparator</type>
          <expected-output>default:mask::rwx</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <type>ExactLineComparator</type>
          <expected-output>default:other::r-x</expected-output>
        </comparator>
        <comparator>
          <type>SubstringComparator</type>
          <type>ExactLineComparator</type>
          <expected-output>other::r-x</expected-output>
        </comparator>
      </comparators>
@ -69,8 +69,6 @@ function mapredcmd_case
    historyserver)
      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
      HADOOP_CLASSNAME=org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer
      hadoop_debug "Appending HADOOP_JOB_HISTORYSERVER_OPTS onto HADOOP_OPTS"
      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_JOB_HISTORYSERVER_OPTS}"
      if [ -n "${HADOOP_JOB_HISTORYSERVER_HEAPSIZE}" ]; then
        # shellcheck disable=SC2034
        HADOOP_HEAPSIZE_MAX="${HADOOP_JOB_HISTORYSERVER_HEAPSIZE}"

@ -79,31 +77,21 @@ function mapredcmd_case
    ;;
    hsadmin)
      HADOOP_CLASSNAME=org.apache.hadoop.mapreduce.v2.hs.client.HSAdmin
      hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
    ;;
    job)
      HADOOP_CLASSNAME=org.apache.hadoop.mapred.JobClient
      hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
    ;;
    pipes)
      HADOOP_CLASSNAME=org.apache.hadoop.mapred.pipes.Submitter
      hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
    ;;
    queue)
      HADOOP_CLASSNAME=org.apache.hadoop.mapred.JobQueueClient
    ;;
    sampler)
      HADOOP_CLASSNAME=org.apache.hadoop.mapred.lib.InputSampler
      hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
    ;;
    version)
      HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
      hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
      HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
    ;;
    *)
      HADOOP_CLASSNAME="${subcmd}"

@ -141,6 +129,8 @@ fi
HADOOP_SUBCMD=$1
shift

hadoop_verify_user "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"

HADOOP_SUBCMD_ARGS=("$@")

if declare -f mapred_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then

@ -150,15 +140,20 @@ else
  mapredcmd_case "${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
fi

hadoop_verify_user "${HADOOP_SUBCMD}"
hadoop_add_client_opts

if [[ ${HADOOP_SLAVE_MODE} = true ]]; then
  hadoop_common_slave_mode_execute "${HADOOP_MAPRED_HOME}/bin/mapred" "${HADOOP_USER_PARAMS[@]}"
if [[ ${HADOOP_WORKER_MODE} = true ]]; then
  hadoop_common_worker_mode_execute "${HADOOP_MAPRED_HOME}/bin/mapred" "${HADOOP_USER_PARAMS[@]}"
  exit $?
fi

hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"

if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
  HADOOP_SECURE_USER="${HADOOP_SUBCMD_SECUREUSER}"

  hadoop_subcommand_secure_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"

  hadoop_verify_secure_prereq
  hadoop_setup_secure_service
  priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"

@ -26,7 +26,7 @@ function hadoop_subproject_init
    export HADOOP_MAPRED_ENV_PROCESSED=true
  fi
fi

# at some point in time, someone thought it would be a good idea to
# create separate vars for every subproject. *sigh*
# let's perform some overrides and setup some defaults for bw compat

@ -38,15 +38,17 @@ function hadoop_subproject_init
hadoop_deprecate_envvar HADOOP_MAPRED_LOG_DIR HADOOP_LOG_DIR

hadoop_deprecate_envvar HADOOP_MAPRED_LOGFILE HADOOP_LOGFILE

hadoop_deprecate_envvar HADOOP_MAPRED_NICENESS HADOOP_NICENESS

hadoop_deprecate_envvar HADOOP_MAPRED_STOP_TIMEOUT HADOOP_STOP_TIMEOUT

hadoop_deprecate_envvar HADOOP_MAPRED_PID_DIR HADOOP_PID_DIR

hadoop_deprecate_envvar HADOOP_MAPRED_ROOT_LOGGER HADOOP_ROOT_LOGGER

hadoop_deprecate_envvar HADOOP_JOB_HISTORY_OPTS MAPRED_HISTORYSERVER_OPTS

HADOOP_MAPRED_HOME="${HADOOP_MAPRED_HOME:-$HADOOP_HOME}"

hadoop_deprecate_envvar HADOOP_MAPRED_IDENT_STRING HADOOP_IDENT_STRING

@ -31,14 +31,14 @@
# Specify the max heapsize for the JobHistoryServer. If no units are
# given, it will be assumed to be in MB.
# This value will be overridden by an Xmx setting specified in HADOOP_OPTS,
# and/or HADOOP_JOB_HISTORYSERVER_OPTS.
# and/or MAPRED_HISTORYSERVER_OPTS.
# Default is the same as HADOOP_HEAPSIZE_MAX.
#export HADOOP_JOB_HISTORYSERVER_HEAPSIZE=

# Specify the JVM options to be used when starting the HistoryServer.
# These options will be appended to the options specified as HADOOP_OPTS
# and therefore may override any similar flags set in HADOOP_OPTS
#export HADOOP_JOB_HISTORYSERVER_OPTS=
#export MAPRED_HISTORYSERVER_OPTS=

# Specify the log4j settings for the JobHistoryServer
# Java property: hadoop.root.logger

@ -1588,6 +1588,7 @@ public class MapTask extends Task {
      final long size = distanceTo(bufstart, bufend, bufvoid) +
          partitions * APPROX_HEADER_LENGTH;
      FSDataOutputStream out = null;
      FSDataOutputStream partitionOut = null;
      try {
        // create spill file
        final SpillRecord spillRec = new SpillRecord(partitions);

@ -1608,7 +1609,7 @@ public class MapTask extends Task {
          IFile.Writer<K, V> writer = null;
          try {
            long segmentStart = out.getPos();
            FSDataOutputStream partitionOut = CryptoUtils.wrapIfNecessary(job, out);
            partitionOut = CryptoUtils.wrapIfNecessary(job, out, false);
            writer = new Writer<K, V>(job, partitionOut, keyClass, valClass, codec,
                spilledRecordsCounter);
            if (combinerRunner == null) {

@ -1643,6 +1644,10 @@ public class MapTask extends Task {

            // close the writer
            writer.close();
            if (partitionOut != out) {
              partitionOut.close();
              partitionOut = null;
            }

            // record offsets
            rec.startOffset = segmentStart;

@ -1671,6 +1676,9 @@ public class MapTask extends Task {
        ++numSpills;
      } finally {
        if (out != null) out.close();
        if (partitionOut != null) {
          partitionOut.close();
        }
      }
    }

@ -1683,6 +1691,7 @@ public class MapTask extends Task {
        int partition) throws IOException {
      long size = kvbuffer.length + partitions * APPROX_HEADER_LENGTH;
      FSDataOutputStream out = null;
      FSDataOutputStream partitionOut = null;
      try {
        // create spill file
        final SpillRecord spillRec = new SpillRecord(partitions);

@ -1697,7 +1706,7 @@ public class MapTask extends Task {
          try {
            long segmentStart = out.getPos();
            // Create a new codec, don't care!
            FSDataOutputStream partitionOut = CryptoUtils.wrapIfNecessary(job, out);
            partitionOut = CryptoUtils.wrapIfNecessary(job, out, false);
            writer = new IFile.Writer<K,V>(job, partitionOut, keyClass, valClass, codec,
                spilledRecordsCounter);

@ -1709,6 +1718,10 @@ public class MapTask extends Task {
              mapOutputByteCounter.increment(out.getPos() - recordStart);
            }
            writer.close();
            if (partitionOut != out) {
              partitionOut.close();
              partitionOut = null;
            }

            // record offsets
            rec.startOffset = segmentStart;

@ -1736,6 +1749,9 @@ public class MapTask extends Task {
        ++numSpills;
      } finally {
        if (out != null) out.close();
        if (partitionOut != null) {
          partitionOut.close();
        }
      }
    }

@ -1847,6 +1863,7 @@ public class MapTask extends Task {

      //The output stream for the final single output file
      FSDataOutputStream finalOut = rfs.create(finalOutputFile, true, 4096);
      FSDataOutputStream finalPartitionOut = null;

      if (numSpills == 0) {
        //create dummy files

@ -1855,10 +1872,15 @@ public class MapTask extends Task {
        try {
          for (int i = 0; i < partitions; i++) {
            long segmentStart = finalOut.getPos();
            FSDataOutputStream finalPartitionOut = CryptoUtils.wrapIfNecessary(job, finalOut);
            finalPartitionOut = CryptoUtils.wrapIfNecessary(job, finalOut,
                false);
            Writer<K, V> writer =
                new Writer<K, V>(job, finalPartitionOut, keyClass, valClass, codec, null);
            writer.close();
            if (finalPartitionOut != finalOut) {
              finalPartitionOut.close();
              finalPartitionOut = null;
            }
            rec.startOffset = segmentStart;
            rec.rawLength = writer.getRawLength() + CryptoUtils.cryptoPadding(job);
            rec.partLength = writer.getCompressedLength() + CryptoUtils.cryptoPadding(job);

@ -1867,6 +1889,9 @@ public class MapTask extends Task {
          sr.writeToFile(finalIndexFile, job);
        } finally {
          finalOut.close();
          if (finalPartitionOut != null) {
            finalPartitionOut.close();
          }
        }
        sortPhase.complete();
        return;

@ -1910,7 +1935,7 @@ public class MapTask extends Task {

        //write merged output to disk
        long segmentStart = finalOut.getPos();
        FSDataOutputStream finalPartitionOut = CryptoUtils.wrapIfNecessary(job, finalOut);
        finalPartitionOut = CryptoUtils.wrapIfNecessary(job, finalOut, false);
        Writer<K, V> writer =
            new Writer<K, V>(job, finalPartitionOut, keyClass, valClass, codec,
                spilledRecordsCounter);

@ -1923,6 +1948,10 @@ public class MapTask extends Task {

        //close
        writer.close();
        if (finalPartitionOut != finalOut) {
          finalPartitionOut.close();
          finalPartitionOut = null;
        }

        sortPhase.startNextPhase();

@ -1934,6 +1963,9 @@ public class MapTask extends Task {
      }
      spillRec.writeToFile(finalIndexFile, job);
      finalOut.close();
      if (finalPartitionOut != null) {
        finalPartitionOut.close();
      }
      for(int i = 0; i < numSpills; i++) {
        rfs.delete(filename[i],true);
      }

@ -57,9 +57,9 @@ public class CryptoUtils {
  /**
   * This method creates and initializes an IV (Initialization Vector)
   *
   * @param conf
   * @return byte[]
   * @throws IOException
   * @param conf configuration
   * @return byte[] initialization vector
   * @throws IOException exception in case of error
   */
  public static byte[] createIV(Configuration conf) throws IOException {
    CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf);

@ -94,13 +94,33 @@ public class CryptoUtils {
   * "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration
   * variable.
   *
   * @param conf
   * @param out
   * @return FSDataOutputStream
   * @throws IOException
   * @param conf configuration
   * @param out given output stream
   * @return FSDataOutputStream encrypted output stream if encryption is
   *         enabled; otherwise the given output stream itself
   * @throws IOException exception in case of error
   */
  public static FSDataOutputStream wrapIfNecessary(Configuration conf,
      FSDataOutputStream out) throws IOException {
    return wrapIfNecessary(conf, out, true);
  }

  /**
   * Wraps a given FSDataOutputStream with a CryptoOutputStream. The size of the
   * data buffer required for the stream is specified by the
   * "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration
   * variable.
   *
   * @param conf configuration
   * @param out given output stream
   * @param closeOutputStream flag to indicate whether closing the wrapped
   *        stream will close the given output stream
   * @return FSDataOutputStream encrypted output stream if encryption is
   *         enabled; otherwise the given output stream itself
   * @throws IOException exception in case of error
   */
  public static FSDataOutputStream wrapIfNecessary(Configuration conf,
      FSDataOutputStream out, boolean closeOutputStream) throws IOException {
    if (isEncryptedSpillEnabled(conf)) {
      out.write(ByteBuffer.allocate(8).putLong(out.getPos()).array());
      byte[] iv = createIV(conf);

@ -110,7 +130,7 @@ public class CryptoUtils {
            + Base64.encodeBase64URLSafeString(iv) + "]");
      }
      return new CryptoFSDataOutputStream(out, CryptoCodec.getInstance(conf),
          getBufferSize(conf), getEncryptionKey(), iv);
          getBufferSize(conf), getEncryptionKey(), iv, closeOutputStream);
    } else {
      return out;
    }

@ -128,11 +148,12 @@ public class CryptoUtils {
   * LimitInputStream will ensure that the CryptoStream does not read past the
   * provided length from the given Input Stream.
   *
   * @param conf
   * @param in
   * @param length
   * @return InputStream
   * @throws IOException
   * @param conf configuration
   * @param in given input stream
   * @param length maximum number of bytes to read from the input stream
   * @return InputStream encrypted input stream if encryption is
   *         enabled; otherwise the given input stream itself
   * @throws IOException exception in case of error
   */
  public static InputStream wrapIfNecessary(Configuration conf, InputStream in,
      long length) throws IOException {

@ -166,10 +187,11 @@ public class CryptoUtils {
   * "mapreduce.job.encrypted-intermediate-data.buffer.kb" Job configuration
   * variable.
   *
   * @param conf
   * @param in
   * @return FSDataInputStream
   * @throws IOException
   * @param conf configuration
   * @param in given input stream
   * @return FSDataInputStream encrypted input stream if encryption is
   *         enabled; otherwise the given input stream itself
   * @throws IOException exception in case of error
   */
  public static FSDataInputStream wrapIfNecessary(Configuration conf,
      FSDataInputStream in) throws IOException {