Merge branch 'trunk' into HDFS-7240

Anu Engineer 2016-03-28 10:34:52 -07:00
commit 30473ecfb4
168 changed files with 5847 additions and 1820 deletions

View File

@ -252,7 +252,7 @@ Building distributions:
Create binary distribution without native code and without documentation:
$ mvn package -Pdist -DskipTests -Dtar
$ mvn package -Pdist -DskipTests -Dtar -Dmaven.javadoc.skip=true
Create binary distribution with native code and with documentation:

View File

@ -0,0 +1,182 @@
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
VERSION=${1:-3.0.0-SNAPSHOT}
TARGETDIR=${2:-/tmp/target}
TOOLSDIR=${3:-/tmp/tools}
function getfilename
{
declare module=$1
declare modtype=$2
if [[ ${modtype} = builtin ]]; then
echo "${TARGETDIR}/hadoop-${VERSION}/libexec/tools/${module}.sh"
else
echo "${TARGETDIR}/hadoop-${VERSION}/libexec/shellprofile.d/${module}.sh"
fi
}
function header
{
declare fn=$1
cat >>"${fn}" <<-'TOKEN'
#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# IMPORTANT: This file is automatically generated by hadoop-dist at
# -Pdist time.
#
#
TOKEN
}
function optional_prologue
{
declare fn=$1
declare module=$2
if [[ -z "${OPTMODS}" ]]; then
OPTMODS=${module}
else
OPTMODS="${OPTMODS},${module}"
fi
{
echo "if hadoop_verify_entry HADOOP_TOOLS_OPTIONS \"${module}\"; then"
echo " hadoop_add_profile \"${module}\""
echo "fi"
echo ""
echo "function _${module}_hadoop_classpath"
echo "{"
} >> "${fn}"
}
function builtin_prologue
{
declare fn=$1
declare module=$2
{
echo ""
echo "function hadoop_classpath_tools_${module}"
echo "{"
} >> "${fn}"
}
function dependencywork
{
declare fn=$1
declare module=$2
declare depfn=$3
declare depline
declare jarname
while read -r depline; do
jarname=$(echo "${depline}" | awk -F: '{print $2"-"$4".jar"}')
if [[ -f "${TARGETDIR}/hadoop-${VERSION}/share/hadoop/tools/lib/${jarname}" ]]; then
{
echo " if [[ -f \"\${HADOOP_TOOLS_HOME}/\${HADOOP_TOOLS_LIB_JARS_DIR}/${jarname}\" ]]; then"
echo " hadoop_add_classpath \"\${HADOOP_TOOLS_HOME}/\${HADOOP_TOOLS_LIB_JARS_DIR}/${jarname}\""
echo " fi"
} >> "${fn}"
elif [[ -f "${TARGETDIR}/hadoop-${VERSION}/share/hadoop/common/${jarname}"
|| -f "${TARGETDIR}/hadoop-${VERSION}/share/hadoop/common/lib/${jarname}" ]]; then
true
else
echo "ERROR: ${module} has missing dependencies: ${jarname}"
fi
done < <(grep compile "${depfn}")
{
echo " hadoop_add_classpath \"\${HADOOP_TOOLS_HOME}/\${HADOOP_TOOLS_LIB_JARS_DIR}/${module}-${VERSION}.jar\""
echo "}"
echo ""
} >> "${fn}"
}
function document_optionals
{
echo "Rewriting ${TARGETDIR}/hadoop-${VERSION}/etc/hadoop/hadoop-env.sh"
sed -e "s^@@@HADOOP_OPTIONAL_TOOLS@@@^${OPTMODS}^" \
"${TARGETDIR}/hadoop-${VERSION}/etc/hadoop/hadoop-env.sh" \
> "${TARGETDIR}/hadoop-${VERSION}/etc/hadoop/hadoop-env.sh.new"
mv "${TARGETDIR}/hadoop-${VERSION}/etc/hadoop/hadoop-env.sh.new" \
"${TARGETDIR}/hadoop-${VERSION}/etc/hadoop/hadoop-env.sh"
}
function process
{
declare fn
declare basefn
declare modtype
declare module
declare newfile
declare newdir
while read -r fn; do
basefn=${fn##*/}
module=$(echo "${basefn}" | cut -f1 -d.)
modtype=$(echo "${basefn}" | cut -f2 -d.)
modtype=${modtype##tools-}
newfile=$(getfilename "${module}" "${modtype}")
newdir=$(dirname "${newfile}")
mkdir -p "${newdir}"
if [[ -f "${newfile}" ]]; then
rm "${newfile}"
fi
touch "${newfile}"
header "${newfile}" "${module}"
"${modtype}_prologue" "${newfile}" "${module}"
dependencywork "${newfile}" "${module}" "${fn}"
chmod a+rx "${newfile}"
done < <(find "${TOOLSDIR}" -name '*.tools-builtin.txt' -o -name '*.tools-optional.txt')
document_optionals
}
process
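For orientation, here is roughly what this script would generate for a hypothetical optional module named hadoop-foo with a single illustrative dependency jar (license header omitted). The file would land under libexec/shellprofile.d/, with the version literal baked in at -Pdist time:

if hadoop_verify_entry HADOOP_TOOLS_OPTIONS "hadoop-foo"; then
  hadoop_add_profile "hadoop-foo"
fi

function _hadoop-foo_hadoop_classpath
{
  if [[ -f "${HADOOP_TOOLS_HOME}/${HADOOP_TOOLS_LIB_JARS_DIR}/foo-client-1.0.jar" ]]; then
    hadoop_add_classpath "${HADOOP_TOOLS_HOME}/${HADOOP_TOOLS_LIB_JARS_DIR}/foo-client-1.0.jar"
  fi
  hadoop_add_classpath "${HADOOP_TOOLS_HOME}/${HADOOP_TOOLS_LIB_JARS_DIR}/hadoop-foo-3.0.0-SNAPSHOT.jar"
}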

View File

@ -45,6 +45,14 @@ RUN apt-get update && apt-get install --no-install-recommends -y \
# See http://wiki.apache.org/commons/VfsProblems
RUN cd /usr/share/maven/lib && ln -s ../../java/commons-lang.jar .
######
# Install ISA-L library
######
RUN curl -L http://http.us.debian.org/debian/pool/main/libi/libisal/libisal2_2.15.0-2_amd64.deb \
-o /opt/libisal2_2.15.0-2_amd64.deb && \
dpkg -i /opt/libisal2_2.15.0-2_amd64.deb
#######
# Oracle Java
#######

View File

@ -26,20 +26,10 @@ import java.util.List;
import java.util.Properties;
import java.text.ParseException;
import java.io.ByteArrayInputStream;
import java.io.UnsupportedEncodingException;
import java.security.PublicKey;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;
import java.security.cert.CertificateException;
import java.security.interfaces.RSAPublicKey;
import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authentication.server.AltKerberosAuthenticationHandler;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.util.CertificateUtil;
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -83,7 +73,8 @@ public class JWTRedirectAuthenticationHandler extends
private static Logger LOG = LoggerFactory
.getLogger(JWTRedirectAuthenticationHandler.class);
public static final String AUTHENTICATION_PROVIDER_URL = "authentication.provider.url";
public static final String AUTHENTICATION_PROVIDER_URL =
"authentication.provider.url";
public static final String PUBLIC_KEY_PEM = "public.key.pem";
public static final String EXPECTED_JWT_AUDIENCES = "expected.jwt.audiences";
public static final String JWT_COOKIE_NAME = "jwt.cookie.name";
@ -205,7 +196,6 @@ public class JWTRedirectAuthenticationHandler extends
protected String getJWTFromCookie(HttpServletRequest req) {
String serializedJWT = null;
Cookie[] cookies = req.getCookies();
String userName = null;
if (cookies != null) {
for (Cookie cookie : cookies) {
if (cookieName.equals(cookie.getName())) {
@ -350,7 +340,7 @@ public class JWTRedirectAuthenticationHandler extends
boolean valid = false;
try {
Date expires = jwtToken.getJWTClaimsSet().getExpirationTime();
if (expires != null && new Date().before(expires)) {
if (expires == null || new Date().before(expires)) {
LOG.debug("JWT token expiration date has been "
+ "successfully validated");
valid = true;

View File

@ -13,19 +13,15 @@
*/
package org.apache.hadoop.security.authentication.server;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.net.MalformedURLException;
import java.net.URL;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.NoSuchAlgorithmException;
import java.security.interfaces.RSAPrivateKey;
import java.security.interfaces.RSAPublicKey;
import java.util.Arrays;
import java.util.List;
import java.util.ArrayList;
import java.util.Properties;
@ -50,8 +46,6 @@ import com.nimbusds.jose.*;
import com.nimbusds.jwt.JWTClaimsSet;
import com.nimbusds.jwt.SignedJWT;
import com.nimbusds.jose.crypto.RSASSASigner;
import com.nimbusds.jose.crypto.RSASSAVerifier;
import com.nimbusds.jose.util.Base64URL;
public class TestJWTRedirectAuthentictionHandler extends
KerberosSecurityTestcase {
@ -261,6 +255,36 @@ public class TestJWTRedirectAuthentictionHandler extends
}
}
@Test
public void testNoExpirationJWT() throws Exception {
try {
handler.setPublicKey(publicKey);
Properties props = getProperties();
handler.init(props);
SignedJWT jwt = getJWT("bob", null, privateKey);
Cookie cookie = new Cookie("hadoop-jwt", jwt.serialize());
HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
Mockito.when(request.getCookies()).thenReturn(new Cookie[] { cookie });
Mockito.when(request.getRequestURL()).thenReturn(
new StringBuffer(SERVICE_URL));
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
Mockito.when(response.encodeRedirectURL(SERVICE_URL)).thenReturn(
SERVICE_URL);
AuthenticationToken token = handler.alternateAuthenticate(request,
response);
Assert.assertNotNull("Token should not be null.", token);
Assert.assertEquals("bob", token.getUserName());
} catch (ServletException se) {
fail("alternateAuthentication should NOT have thrown a ServletException");
} catch (AuthenticationException ae) {
fail("alternateAuthentication should NOT have thrown a AuthenticationException");
}
}
@Test
public void testInvalidAudienceJWT() throws Exception {
try {
@ -442,7 +466,6 @@ public class TestJWTRedirectAuthentictionHandler extends
JWSHeader header = new JWSHeader.Builder(JWSAlgorithm.RS256).build();
SignedJWT signedJWT = new SignedJWT(header, claimsSet);
Base64URL sigInput = Base64URL.encode(signedJWT.getSigningInput());
JWSSigner signer = new RSASSASigner(privateKey);
signedJWT.sign(signer);

View File

@ -114,7 +114,7 @@ case ${COMMAND} in
;;
archive)
CLASS=org.apache.hadoop.tools.HadoopArchives
hadoop_add_to_classpath_toolspath
hadoop_add_to_classpath_tools hadoop-archives
;;
checknative)
CLASS=org.apache.hadoop.util.NativeLibraryChecker
@ -133,11 +133,11 @@ case ${COMMAND} in
;;
distch)
CLASS=org.apache.hadoop.tools.DistCh
hadoop_add_to_classpath_toolspath
hadoop_add_to_classpath_tools hadoop-extras
;;
distcp)
CLASS=org.apache.hadoop.tools.DistCp
hadoop_add_to_classpath_toolspath
hadoop_add_to_classpath_tools hadoop-distcp
;;
envvars)
echo "JAVA_HOME='${JAVA_HOME}'"
@ -146,7 +146,9 @@ case ${COMMAND} in
echo "HADOOP_COMMON_LIB_JARS_DIR='${HADOOP_COMMON_LIB_JARS_DIR}'"
echo "HADOOP_COMMON_LIB_NATIVE_DIR='${HADOOP_COMMON_LIB_NATIVE_DIR}'"
echo "HADOOP_CONF_DIR='${HADOOP_CONF_DIR}'"
echo "HADOOP_TOOLS_PATH='${HADOOP_TOOLS_PATH}'"
echo "HADOOP_TOOLS_HOME='${HADOOP_TOOLS_HOME}'"
echo "HADOOP_TOOLS_DIR='${HADOOP_TOOLS_DIR}'"
echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'"
exit 0
;;
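# Illustrative `hadoop envvars` output once the tools variables above are
# printed (paths come from a hypothetical install under /opt, not from the
# patch; unrelated lines omitted):
#
#   JAVA_HOME='/usr/lib/jvm/java-8-openjdk-amd64'
#   HADOOP_CONF_DIR='/opt/hadoop-3.0.0-SNAPSHOT/etc/hadoop'
#   HADOOP_TOOLS_HOME='/opt/hadoop-3.0.0-SNAPSHOT'
#   HADOOP_TOOLS_DIR='share/hadoop/tools'
#   HADOOP_TOOLS_LIB_JARS_DIR='share/hadoop/tools/lib'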
fs)

View File

@ -41,6 +41,44 @@ function hadoop_debug
fi
}
## @description Given variable $1 delete $2 from it
## @audience public
## @stability stable
## @replaceable no
function hadoop_delete_entry
{
if [[ ${!1} =~ \ ${2}\ ]] ; then
hadoop_debug "Removing ${2} from ${1}"
eval "${1}"=\""${!1// ${2} }"\"
fi
}
## @description Given variable $1 add $2 to it
## @audience public
## @stability stable
## @replaceable no
function hadoop_add_entry
{
if [[ ! ${!1} =~ \ ${2}\ ]] ; then
hadoop_debug "Adding ${2} to ${1}"
#shellcheck disable=SC2140
eval "${1}"=\""${!1} ${2} "\"
fi
}
## @description Given variable $1 determine if $2 is in it
## @audience public
## @stability stable
## @replaceable no
## @return 0 = yes, 1 = no
function hadoop_verify_entry
{
# this unfortunately can't really be tested by bats. :(
# so if this changes, be aware that unit tests effectively
# do this function in them
[[ ${!1} =~ \ ${2}\ ]]
}
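# A minimal usage sketch for the three helpers above (the variable name and
# value are illustrative, not from the patch). Entries are stored
# space-delimited with surrounding spaces, which the regex checks rely on:
#
#   MYLIST=""
#   hadoop_add_entry MYLIST "hadoop-foo"     # MYLIST is now " hadoop-foo "
#   hadoop_add_entry MYLIST "hadoop-foo"     # no-op: entry already present
#   hadoop_verify_entry MYLIST "hadoop-foo"  # returns 0 (yes)
#   hadoop_delete_entry MYLIST "hadoop-foo"  # entry removed again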
## @description Add a subcommand to the usage output
## @audience private
## @stability evolving
@ -264,10 +302,9 @@ function hadoop_bootstrap
YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"}
MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"}
MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"}
# setup a default HADOOP_TOOLS_PATH
hadoop_deprecate_envvar TOOL_PATH HADOOP_TOOLS_PATH
HADOOP_TOOLS_PATH=${HADOOP_TOOLS_PATH:-${HADOOP_PREFIX}/share/hadoop/tools/lib/*}
HADOOP_TOOLS_HOME=${HADOOP_TOOLS_HOME:-${HADOOP_PREFIX}}
HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"}
HADOOP_TOOLS_LIB_JARS_DIR=${HADOOP_TOOLS_LIB_JARS_DIR:-"${HADOOP_TOOLS_DIR}/lib"}
# usage output set to zero
hadoop_reset_usage
@ -322,6 +359,7 @@ function hadoop_exec_hadoopenv
if [[ -z "${HADOOP_ENV_PROCESSED}" ]]; then
if [[ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]]; then
export HADOOP_ENV_PROCESSED=true
# shellcheck disable=SC1090
. "${HADOOP_CONF_DIR}/hadoop-env.sh"
fi
fi
@ -334,6 +372,7 @@ function hadoop_exec_hadoopenv
function hadoop_exec_userfuncs
{
if [[ -e "${HADOOP_CONF_DIR}/hadoop-user-functions.sh" ]]; then
# shellcheck disable=SC1090
. "${HADOOP_CONF_DIR}/hadoop-user-functions.sh"
fi
}
@ -348,6 +387,7 @@ function hadoop_exec_hadooprc
{
if [[ -f "${HOME}/.hadooprc" ]]; then
hadoop_debug "Applying the user's .hadooprc"
# shellcheck disable=SC1090
. "${HOME}/.hadooprc"
fi
}
@ -373,11 +413,22 @@ function hadoop_import_shellprofiles
files2=(${HADOOP_CONF_DIR}/shellprofile.d/*.sh)
fi
# enable bundled shellprofiles that come
# from hadoop-tools. This converts the user-facing HADOOP_OPTIONAL_TOOLS
# to the HADOOP_TOOLS_OPTIONS that the shell profiles expect.
# See dist-tools-hooks-maker for how the example HADOOP_OPTIONAL_TOOLS
# gets populated into hadoop-env.sh
for i in ${HADOOP_OPTIONAL_TOOLS//,/ }; do
hadoop_add_entry HADOOP_TOOLS_OPTIONS "${i}"
done
for i in "${files1[@]}" "${files2[@]}"
do
if [[ -n "${i}"
&& -f "${i}" ]]; then
hadoop_debug "Profiles: importing ${i}"
# shellcheck disable=SC1090
. "${i}"
fi
done
@ -945,34 +996,25 @@ function hadoop_add_common_to_classpath
hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}"'/*'
}
## @description Add the HADOOP_TOOLS_PATH to the classpath
## @description Run libexec/tools/module.sh to add to the classpath
## @description environment
## @audience private
## @stability evolving
## @replaceable yes
function hadoop_add_to_classpath_toolspath
## @param module
function hadoop_add_to_classpath_tools
{
declare -a array
declare -i c=0
declare -i j
declare -i i
declare idx
declare module=$1
if [[ -n "${HADOOP_TOOLS_PATH}" ]]; then
hadoop_debug "Adding HADOOP_TOOLS_PATH to CLASSPATH"
oldifs=${IFS}
IFS=:
for idx in ${HADOOP_TOOLS_PATH}; do
array[${c}]=${idx}
((c=c+1))
done
IFS=${oldifs}
((j=c-1)) || ${QATESTMODE}
for ((i=0; i<=j; i++)); do
hadoop_add_classpath "${array[$i]}" after
done
if [[ -f "${HADOOP_LIBEXEC_DIR}/tools/${module}.sh" ]]; then
# shellcheck disable=SC1090
. "${HADOOP_LIBEXEC_DIR}/tools/${module}.sh"
else
hadoop_error "ERROR: Tools helper ${HADOOP_LIBEXEC_DIR}/tools/${module}.sh was not found."
fi
if declare -f hadoop_classpath_tools_${module} >/dev/null 2>&1; then
"hadoop_classpath_tools_${module}"
fi
}
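The contract consumed by hadoop_add_to_classpath_tools is small: the sourced libexec/tools/<module>.sh only has to define hadoop_classpath_tools_<module>. A hand-written helper for a hypothetical module would look like this minimal sketch (module and jar names are illustrative); the files generated by dist-tools-hooks-maker follow the same shape:

# ${HADOOP_LIBEXEC_DIR}/tools/hadoop-foo.sh (illustrative)
function hadoop_classpath_tools_hadoop-foo
{
  hadoop_add_classpath "${HADOOP_TOOLS_HOME}/${HADOOP_TOOLS_LIB_JARS_DIR}/hadoop-foo-3.0.0-SNAPSHOT.jar"
}

# a tools-backed command then opts in with:
#   hadoop_add_to_classpath_tools hadoop-foo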

View File

@ -1,3 +1,4 @@
#!/usr/bin/env bash
# Copyright 2014 The Apache Software Foundation
#
# Licensed to the Apache Software Foundation (ASF) under one
@ -87,7 +88,10 @@
# Misc paths
####
# setup a default HADOOP_TOOLS_PATH, where things like distcp lives
# This is where things like distcp, S3, and other things live
# note that this path only gets added for certain commands and not
# part of the general classpath
# export HADOOP_TOOLS_PATH="${HADOOP_PREFIX}/share/hadoop/tools/lib/*"
# part of the general classpath unless HADOOP_OPTIONAL_TOOLS is used
# to configure them in
# export HADOOP_TOOLS_HOME=${HADOOP_PREFIX}
# export HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"}
# export HADOOP_TOOLS_LIB_JARS_DIR=${HADOOP_TOOLS_LIB_JARS_DIR:-"${HADOOP_TOOLS_DIR}/lib"}

View File

@ -49,7 +49,7 @@
# preferred. Many sites configure these options outside of Hadoop,
# such as in /etc/profile.d
# The java implementation to use. By default, this environment
# The java implementation to use. By default, this environment
# variable is REQUIRED on ALL platforms except OS X!
# export JAVA_HOME=
@ -64,15 +64,15 @@
# path.
# export HADOOP_CONF_DIR=$HADOOP_PREFIX/etc/hadoop
# The maximum amount of heap to use (Java -Xmx). If no unit
# is provided, it will be converted to MB. Daemons will
# The maximum amount of heap to use (Java -Xmx). If no unit
# is provided, it will be converted to MB. Daemons will
# prefer any Xmx setting in their respective _OPT variable.
# There is no default; the JVM will autoscale based upon machine
# memory size.
# export HADOOP_HEAPSIZE_MAX=
# The minimum amount of heap to use (Java -Xms). If no unit
# is provided, it will be converted to MB. Daemons will
# The minimum amount of heap to use (Java -Xms). If no unit
# is provided, it will be converted to MB. Daemons will
# prefer any Xms setting in their respective _OPT variable.
# There is no default; the JVM will autoscale based upon machine
# memory size.
@ -107,8 +107,8 @@ case ${HADOOP_OS_TYPE} in
esac
# Extra Java runtime options for some Hadoop commands
# and clients (i.e., hdfs dfs -blah). These get appended to HADOOP_OPTS for
# such commands. In most cases, # this should be left empty and
# and clients (i.e., hdfs dfs -blah). These get appended to HADOOP_OPTS for
# such commands. In most cases, # this should be left empty and
# let users supply it on the command line.
# export HADOOP_CLIENT_OPTS=""
@ -146,6 +146,11 @@ esac
# names starting with a '-' are treated as negative matches. For example,
# export HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES="-org.apache.hadoop.UserClass,java.,javax.,org.apache.hadoop."
# Enable optional, bundled Hadoop features
# This is a comma delimited list. It may NOT be overridden via .hadooprc
# Entries may be added/removed as needed.
# export HADOOP_OPTIONAL_TOOLS="@@@HADOOP_OPTIONAL_TOOLS@@@"
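# For example, to put the S3A and Azure connectors on the tools classpath
# (module names here are illustrative; the build fills in the real list above):
# export HADOOP_OPTIONAL_TOOLS="hadoop-aws,hadoop-azure"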
###
# Options for remote shell connectivity
###
@ -181,7 +186,7 @@ esac
# non-secure)
#
# Where (primarily) daemon log files are stored. # $HADOOP_PREFIX/logs
# Where (primarily) daemon log files are stored. # $HADOOP_PREFIX/logs
# by default.
# Java property: hadoop.log.dir
# export HADOOP_LOG_DIR=${HADOOP_PREFIX}/logs
@ -201,7 +206,7 @@ esac
# Java property: hadoop.root.logger
# export HADOOP_ROOT_LOGGER=INFO,console
# Default log4j setting for daemons spawned explicitly by
# Default log4j setting for daemons spawned explicitly by
# --daemon option of hadoop, hdfs, mapred and yarn command.
# Java property: hadoop.root.logger
# export HADOOP_DAEMON_ROOT_LOGGER=INFO,RFA

View File

@ -849,12 +849,8 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
}
@VisibleForTesting
public int getEncKeyQueueSize(String keyName) throws IOException {
try {
return encKeyVersionQueue.getSize(keyName);
} catch (ExecutionException e) {
throw new IOException(e);
}
public int getEncKeyQueueSize(String keyName) {
return encKeyVersionQueue.getSize(keyName);
}
@Override

View File

@ -18,9 +18,11 @@
package org.apache.hadoop.crypto.key.kms;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.LinkedBlockingQueue;
@ -240,13 +242,19 @@ public class ValueQueue <E> {
}
/**
* Get size of the Queue for keyName
* Get size of the Queue for keyName. This is only used in unit tests.
* @param keyName the key name
* @return int queue size
* @throws ExecutionException
*/
public int getSize(String keyName) throws ExecutionException {
return keyQueues.get(keyName).size();
public int getSize(String keyName) {
// We can't do keyQueues.get(keyName).size() here,
// since that will have the side effect of populating the cache.
Map<String, LinkedBlockingQueue<E>> map =
keyQueues.getAllPresent(Arrays.asList(keyName));
if (map.get(keyName) == null) {
return 0;
}
return map.get(keyName).size();
}
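The distinction relied on above is a Guava cache behavior: LoadingCache.get() invokes the loader for absent keys, while getAllPresent() only peeks at what is already cached. A standalone sketch of that difference (not part of the patch):

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import java.util.Arrays;

public class CachePeekDemo {
  public static void main(String[] args) throws Exception {
    LoadingCache<String, Integer> cache = CacheBuilder.newBuilder()
        .build(new CacheLoader<String, Integer>() {
          @Override
          public Integer load(String key) {
            System.out.println("loader ran for " + key);  // the side effect getSize() must avoid
            return 0;
          }
        });
    System.out.println(cache.getAllPresent(Arrays.asList("k1")));  // {}   -- loader not invoked
    cache.get("k1");                                               // prints "loader ran for k1"
    System.out.println(cache.getAllPresent(Arrays.asList("k1")));  // {k1=0}
  }
}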
/**

View File

@ -20,7 +20,7 @@ package org.apache.hadoop.fs;
import java.io.Closeable;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.ref.PhantomReference;
import java.lang.ref.WeakReference;
import java.lang.ref.ReferenceQueue;
import java.net.URI;
import java.net.URISyntaxException;
@ -3023,11 +3023,15 @@ public abstract class FileSystem extends Configured implements Closeable {
* need.
*/
public static class StatisticsData {
volatile long bytesRead;
volatile long bytesWritten;
volatile int readOps;
volatile int largeReadOps;
volatile int writeOps;
private volatile long bytesRead;
private volatile long bytesWritten;
private volatile int readOps;
private volatile int largeReadOps;
private volatile int writeOps;
private volatile long bytesReadLocalHost;
private volatile long bytesReadDistanceOfOneOrTwo;
private volatile long bytesReadDistanceOfThreeOrFour;
private volatile long bytesReadDistanceOfFiveOrLarger;
/**
* Add another StatisticsData object to this one.
@ -3038,6 +3042,12 @@ public abstract class FileSystem extends Configured implements Closeable {
this.readOps += other.readOps;
this.largeReadOps += other.largeReadOps;
this.writeOps += other.writeOps;
this.bytesReadLocalHost += other.bytesReadLocalHost;
this.bytesReadDistanceOfOneOrTwo += other.bytesReadDistanceOfOneOrTwo;
this.bytesReadDistanceOfThreeOrFour +=
other.bytesReadDistanceOfThreeOrFour;
this.bytesReadDistanceOfFiveOrLarger +=
other.bytesReadDistanceOfFiveOrLarger;
}
/**
@ -3049,6 +3059,12 @@ public abstract class FileSystem extends Configured implements Closeable {
this.readOps = -this.readOps;
this.largeReadOps = -this.largeReadOps;
this.writeOps = -this.writeOps;
this.bytesReadLocalHost = -this.bytesReadLocalHost;
this.bytesReadDistanceOfOneOrTwo = -this.bytesReadDistanceOfOneOrTwo;
this.bytesReadDistanceOfThreeOrFour =
-this.bytesReadDistanceOfThreeOrFour;
this.bytesReadDistanceOfFiveOrLarger =
-this.bytesReadDistanceOfFiveOrLarger;
}
@Override
@ -3077,6 +3093,22 @@ public abstract class FileSystem extends Configured implements Closeable {
public int getWriteOps() {
return writeOps;
}
public long getBytesReadLocalHost() {
return bytesReadLocalHost;
}
public long getBytesReadDistanceOfOneOrTwo() {
return bytesReadDistanceOfOneOrTwo;
}
public long getBytesReadDistanceOfThreeOrFour() {
return bytesReadDistanceOfThreeOrFour;
}
public long getBytesReadDistanceOfFiveOrLarger() {
return bytesReadDistanceOfFiveOrLarger;
}
}
private interface StatisticsAggregator<T> {
@ -3101,7 +3133,7 @@ public abstract class FileSystem extends Configured implements Closeable {
/**
* Set of all thread-local data areas. Protected by the Statistics lock.
* The references to the statistics data are kept using phantom references
* The references to the statistics data are kept using weak references
* to the associated threads. Proper clean-up is performed by the cleaner
* thread when the threads are garbage collected.
*/
@ -3154,11 +3186,11 @@ public abstract class FileSystem extends Configured implements Closeable {
}
/**
* A phantom reference to a thread that also includes the data associated
* A weak reference to a thread that also includes the data associated
* with that thread. On the thread being garbage collected, it is enqueued
* to the reference queue for clean-up.
*/
private class StatisticsDataReference extends PhantomReference<Thread> {
private class StatisticsDataReference extends WeakReference<Thread> {
private final StatisticsData data;
public StatisticsDataReference(StatisticsData data, Thread thread) {
@ -3267,6 +3299,33 @@ public abstract class FileSystem extends Configured implements Closeable {
getThreadStatistics().writeOps += count;
}
/**
* Increment the bytes read by the network distance in the statistics
* In the common network topology setup, distance value should be an even
* number such as 0, 2, 4, 6. To make it more general, we group distance
* by {1, 2}, {3, 4} and {5 and beyond} for accounting.
* @param distance the network distance
* @param newBytes the additional bytes read
*/
public void incrementBytesReadByDistance(int distance, long newBytes) {
switch (distance) {
case 0:
getThreadStatistics().bytesReadLocalHost += newBytes;
break;
case 1:
case 2:
getThreadStatistics().bytesReadDistanceOfOneOrTwo += newBytes;
break;
case 3:
case 4:
getThreadStatistics().bytesReadDistanceOfThreeOrFour += newBytes;
break;
default:
getThreadStatistics().bytesReadDistanceOfFiveOrLarger += newBytes;
break;
}
}
/**
* Apply the given aggregator to all StatisticsData objects associated with
* this Statistics object.
@ -3384,6 +3443,55 @@ public abstract class FileSystem extends Configured implements Closeable {
});
}
/**
* In the common network topology setup, distance value should be an even
* number such as 0, 2, 4, 6. To make it more general, we group distance
* by {1, 2}, {3, 4} and {5 and beyond} for accounting. So if the caller
* asks for bytes read for distance 2, the function will return the value
* for group {1, 2}.
* @param distance the network distance
* @return the total number of bytes read by the network distance
*/
public long getBytesReadByDistance(int distance) {
long bytesRead;
switch (distance) {
case 0:
bytesRead = getData().getBytesReadLocalHost();
break;
case 1:
case 2:
bytesRead = getData().getBytesReadDistanceOfOneOrTwo();
break;
case 3:
case 4:
bytesRead = getData().getBytesReadDistanceOfThreeOrFour();
break;
default:
bytesRead = getData().getBytesReadDistanceOfFiveOrLarger();
break;
}
return bytesRead;
}
/**
* Get all statistics data
* MR or other frameworks can use the method to get all statistics at once.
* @return the StatisticsData
*/
public StatisticsData getData() {
return visitAll(new StatisticsAggregator<StatisticsData>() {
private StatisticsData all = new StatisticsData();
@Override
public void accept(StatisticsData data) {
all.add(data);
}
public StatisticsData aggregate() {
return all;
}
});
}
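As a usage sketch (not part of the patch), a client could read the new per-distance counters like this; the scheme-wide Statistics lookup and the printed labels are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class ReadDistanceStats {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // ... issue some reads through fs here ...
    FileSystem.Statistics stats =
        FileSystem.getStatistics(fs.getUri().getScheme(), fs.getClass());
    System.out.println("local host bytes:   " + stats.getBytesReadByDistance(0));
    System.out.println("distance 1-2 bytes: " + stats.getBytesReadByDistance(2));
    System.out.println("distance 3-4 bytes: " + stats.getBytesReadByDistance(4));
    System.out.println("distance 5+ bytes:  " + stats.getBytesReadByDistance(6));
    // or grab everything in one aggregated snapshot:
    System.out.println(stats.getData().getBytesReadLocalHost());
  }
}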
@Override
public String toString() {

View File

@ -638,13 +638,27 @@ public class NetUtils {
/**
* Return hostname without throwing exception.
* The returned hostname String format is "hostname".
* @return hostname
*/
public static String getLocalHostname() {
try {
return InetAddress.getLocalHost().getHostName();
} catch(UnknownHostException uhe) {
return "" + uhe;
}
}
/**
* Return hostname without throwing exception.
* The returned hostname String format is "hostname/ip address".
* @return hostname
*/
public static String getHostname() {
try {return "" + InetAddress.getLocalHost();}
catch(UnknownHostException uhe) {return "" + uhe;}
}
/**
* Compose a "host:port" string from the address.
*/

View File

@ -369,6 +369,16 @@ public class NetworkTopology {
int getNumOfLeaves() {
return numOfLeaves;
}
@Override
public int hashCode() {
return super.hashCode();
}
@Override
public boolean equals(Object to) {
return super.equals(to);
}
} // end of InnerNode
/**
@ -607,9 +617,14 @@ public class NetworkTopology {
* or {@link Integer#MAX_VALUE} if node1 or node2 do not belong to the cluster
*/
public int getDistance(Node node1, Node node2) {
if (node1 == node2) {
if ((node1 != null && node1.equals(node2)) ||
(node1 == null && node2 == null)) {
return 0;
}
if (node1 == null || node2 == null) {
LOG.warn("One of the nodes is a null pointer");
return Integer.MAX_VALUE;
}
Node n1=node1, n2=node2;
int dis = 0;
netlock.readLock().lock();

View File

@ -112,7 +112,23 @@ public class NodeBase implements Node {
public static String getPath(Node node) {
return node.getNetworkLocation() + PATH_SEPARATOR_STR + node.getName();
}
@Override
public boolean equals(Object to) {
if (this == to) {
return true;
}
if (!(to instanceof NodeBase)) {
return false;
}
return getPath(this).equals(getPath((NodeBase)to));
}
@Override
public int hashCode() {
return getPath(this).hashCode();
}
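With the path-based equals()/hashCode() above, two independently constructed nodes describing the same location compare equal, which is what the NetworkTopology.getDistance() change relies on. A small sketch (host and rack names are illustrative):

import org.apache.hadoop.net.NodeBase;

public class NodeEqualityDemo {
  public static void main(String[] args) {
    NodeBase a = new NodeBase("host1", "/dc1/rack1");
    NodeBase b = new NodeBase("host1", "/dc1/rack1");
    System.out.println(NodeBase.getPath(a));           // /dc1/rack1/host1
    System.out.println(a.equals(b));                   // true: compared by path, not identity
    System.out.println(a.hashCode() == b.hashCode());  // true
  }
}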
/** @return this node's path as its string representation */
@Override
public String toString() {

View File

@ -52,7 +52,7 @@
|| defined(__ppc64__) || defined(__ppc64le__) \
|| defined(__PPC64__) || defined(__PPC64LE__) \
|| defined(__ia64) || defined(__itanium__) || defined(_M_IA64) \
|| defined(__s390x__) ) /* Detects 64 bits mode */
|| defined(__s390x__) || defined(_LP64)) /* Detects 64 bits mode */
# define LZ4_ARCH64 1
#else
# define LZ4_ARCH64 0

View File

@ -1577,7 +1577,7 @@
<value>DEFAULT</value>
<description>
The hostname verifier to provide for HttpsURLConnections.
Valid values are: DEFAULT, STRICT, STRICT_I6, DEFAULT_AND_LOCALHOST and
Valid values are: DEFAULT, STRICT, STRICT_IE6, DEFAULT_AND_LOCALHOST and
ALLOW_ALL
</description>
</property>

View File

@ -15,14 +15,7 @@
Hadoop Groups Mapping
===================
* [Hadoop Groups Mapping](#Hadoop_Groups_Mapping)
* [Overview](#Overview)
* [LDAP Groups Mapping](#LDAP_Groups_Mapping)
* [Active Directory](#Active_Directory)
* [POSIX Groups](#POSIX_Groups)
* [SSL](#SSL)
* [Composite Groups Mapping](#Composite_Groups_Mapping)
* [Multiple group mapping providers configuration sample](#Multiple_group_mapping_providers_configuration_sample)
<!-- MACRO{toc|fromDepth=0|toDepth=3} -->
Overview
--------
@ -61,6 +54,17 @@ For HDFS, the mapping of users to groups is performed on the NameNode. Thus, the
Note that HDFS stores the user and group of a file or directory as strings; there is no conversion from user and group identity numbers as is conventional in Unix.
Static Mapping
--------
It is possible to statically map users to groups by defining the mapping in `hadoop.user.group.static.mapping.overrides` in the format `user1=group1,group2;user2=;user3=group2`.
This property overrides any group mapping service provider. If a user's groups are defined in it, the groups are returned without more lookups; otherwise, the service provider defined in `hadoop.security.group.mapping` is used to look up the groups. By default, `dr.who=;` is defined, so the fake user dr.who will not have any groups.
Caching/Negative caching
--------
Since the group mapping resolution relies on external mechanisms, the NameNode performance may be impacted. To reduce the impact of repeated lookups, Hadoop caches the groups returned by the service provider. The cache invalidation is configurable via `hadoop.security.groups.cache.secs`, and the default is 300 seconds.
To avoid spamming the NameNode with lookups for unknown users, Hadoop employs negative caching: if the result of a lookup is empty, an empty group list is returned immediately instead of performing more group mapping queries.
The negative cache invalidation is configurable via `hadoop.security.groups.negative-cache.secs`. The default is 30 seconds, so if the group mapping service provider returns no group for a user, no lookup will be performed for the same user within 30 seconds.
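For example, the static mapping and both cache timeouts can be set in `core-site.xml` (the timeout values below are the defaults mentioned above; the `alice` entry is purely illustrative):

```xml
<property>
  <name>hadoop.user.group.static.mapping.overrides</name>
  <value>dr.who=;alice=staff,hadoop</value>
</property>
<property>
  <name>hadoop.security.groups.cache.secs</name>
  <value>300</value>
</property>
<property>
  <name>hadoop.security.groups.negative-cache.secs</name>
  <value>30</value>
</property>
```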
LDAP Groups Mapping
--------
@ -85,9 +89,9 @@ in order to be considered a member.
The default configuration supports LDAP group name resolution with an Active Directory server.
### POSIX Groups ###
If the LDAP server supports POSIX group semantics, Hadoop can perform LDAP group resolution queries to the server by setting both
`hadoop.security.group.mapping.ldap.search.filter.user` to `posixAccount` and
`hadoop.security.group.mapping.ldap.search.filter.group` to `posixGroup`.
If the LDAP server supports POSIX group semantics (RFC-2307), Hadoop can perform LDAP group resolution queries to the server by setting both
`hadoop.security.group.mapping.ldap.search.filter.user` to `(&amp;(objectClass=posixAccount)(uid={0}))` and
`hadoop.security.group.mapping.ldap.search.filter.group` to `(objectClass=posixGroup)`.
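For instance, in `core-site.xml`:

```xml
<property>
  <name>hadoop.security.group.mapping.ldap.search.filter.user</name>
  <value>(&amp;(objectClass=posixAccount)(uid={0}))</value>
</property>
<property>
  <name>hadoop.security.group.mapping.ldap.search.filter.group</name>
  <value>(objectClass=posixGroup)</value>
</property>
```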
### SSL ###
To secure the connection, the implementation supports LDAP over SSL (LDAPS). SSL is enabled by setting `hadoop.security.group.mapping.ldap.ssl` to `true`.

View File

@ -22,11 +22,11 @@ Introduction
This document describes how to configure Hadoop HTTP web-consoles to require user authentication.
By default Hadoop HTTP web-consoles (JobTracker, NameNode, TaskTrackers and DataNodes) allow access without any form of authentication.
By default Hadoop HTTP web-consoles (ResourceManager, NameNode, NodeManagers and DataNodes) allow access without any form of authentication.
Hadoop HTTP web-consoles can be configured to require Kerberos authentication using HTTP SPNEGO protocol (supported by browsers like Firefox and Internet Explorer).
In addition, Hadoop HTTP web-consoles support the equivalent of Hadoop's Pseudo/Simple authentication. If this option is enabled, the user name must be specified in the first browser interaction using the user.name query string parameter. e.g. `http://localhost:50030/jobtracker.jsp?user.name=babu`.
In addition, Hadoop HTTP web-consoles support the equivalent of Hadoop's Pseudo/Simple authentication. If this option is enabled, the user name must be specified in the first browser interaction using the user.name query string parameter. e.g. `http://localhost:8088/cluster?user.name=babu`.
If a custom authentication mechanism is required for the HTTP web-consoles, it is possible to implement a plugin to support the alternate authentication mechanism (refer to Hadoop hadoop-auth for details on writing an `AuthenticatorHandler`).
@ -43,7 +43,7 @@ The following properties should be in the `core-site.xml` of all the nodes in th
| `hadoop.http.authentication.type` | `simple` | Defines authentication used for the HTTP web-consoles. The supported values are: `simple` \| `kerberos` \| `#AUTHENTICATION_HANDLER_CLASSNAME#`. |
| `hadoop.http.authentication.token.validity` | `36000` | Indicates how long (in seconds) an authentication token is valid before it has to be renewed. |
| `hadoop.http.authentication.token.max-inactive-interval` | `-1` (disabled) | Specifies the time, in seconds, between client requests the server will invalidate the token. |
| `hadoop.http.authentication.signature.secret.file` | `$user.home/hadoop-http-auth-signature-secret` | The signature secret file for signing the authentication tokens. The same secret should be used for all nodes in the cluster, JobTracker, NameNode, DataNode and TastTracker. This file should be readable only by the Unix user running the daemons. |
| `hadoop.http.authentication.signature.secret.file` | `$user.home/hadoop-http-auth-signature-secret` | The signature secret file for signing the authentication tokens. The same secret should be used for all nodes in the cluster, ResourceManager, NameNode, DataNode and NodeManager. This file should be readable only by the Unix user running the daemons. |
| `hadoop.http.authentication.cookie.domain` | | The domain to use for the HTTP cookie that stores the authentication token. For authentication to work correctly across all nodes in the cluster the domain must be correctly set. There is no default value, the HTTP cookie will not have a domain working only with the hostname issuing the HTTP cookie. |
| `hadoop.http.authentication.cookie.persistent` | `false` (session cookie) | Specifies the persistence of the HTTP cookie. If the value is true, the cookie is a persistent one. Otherwise, it is a session cookie. *IMPORTANT*: when using IP addresses, browsers ignore cookies with domain settings. For this setting to work properly all nodes in the cluster must be configured to generate URLs with `hostname.domain` names on it. |
| `hadoop.http.authentication.simple.anonymous.allowed` | `true` | Indicates whether anonymous requests are allowed when using 'simple' authentication. |

View File

@ -19,7 +19,6 @@ package org.apache.hadoop.crypto.key;
import java.io.IOException;
import java.util.Queue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
@ -158,17 +157,12 @@ public class TestValueQueue {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
try {
int size = vq.getSize("k1");
if (size != 10) {
LOG.info("Current ValueQueue size is " + size);
return false;
}
return true;
} catch (ExecutionException e) {
LOG.error("Exception when getSize.", e);
return false;
}
}
}, 100, 3000);
Assert.assertEquals("Failed in async call.", 10, filler.getTop().num);

View File

@ -29,6 +29,22 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager;
import org.junit.Assert;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.protobuf.TestProtos;
import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.security.token.TokenSelector;
import org.junit.Assert;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
@ -37,6 +53,8 @@ import java.net.InetSocketAddress;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CountDownLatch;
@ -149,6 +167,89 @@ public class TestRpcBase {
return count;
}
public static class TestTokenIdentifier extends TokenIdentifier {
private Text tokenid;
private Text realUser;
final static Text KIND_NAME = new Text("test.token");
public TestTokenIdentifier() {
this(new Text(), new Text());
}
public TestTokenIdentifier(Text tokenid) {
this(tokenid, new Text());
}
public TestTokenIdentifier(Text tokenid, Text realUser) {
this.tokenid = tokenid == null ? new Text() : tokenid;
this.realUser = realUser == null ? new Text() : realUser;
}
@Override
public Text getKind() {
return KIND_NAME;
}
@Override
public UserGroupInformation getUser() {
if (realUser.toString().isEmpty()) {
return UserGroupInformation.createRemoteUser(tokenid.toString());
} else {
UserGroupInformation realUgi = UserGroupInformation
.createRemoteUser(realUser.toString());
return UserGroupInformation
.createProxyUser(tokenid.toString(), realUgi);
}
}
@Override
public void readFields(DataInput in) throws IOException {
tokenid.readFields(in);
realUser.readFields(in);
}
@Override
public void write(DataOutput out) throws IOException {
tokenid.write(out);
realUser.write(out);
}
}
public static class TestTokenSecretManager extends
SecretManager<TestTokenIdentifier> {
@Override
public byte[] createPassword(TestTokenIdentifier id) {
return id.getBytes();
}
@Override
public byte[] retrievePassword(TestTokenIdentifier id)
throws InvalidToken {
return id.getBytes();
}
@Override
public TestTokenIdentifier createIdentifier() {
return new TestTokenIdentifier();
}
}
public static class TestTokenSelector implements
TokenSelector<TestTokenIdentifier> {
@SuppressWarnings("unchecked")
@Override
public Token<TestTokenIdentifier> selectToken(Text service,
Collection<Token<? extends TokenIdentifier>> tokens) {
if (service == null) {
return null;
}
for (Token<? extends TokenIdentifier> token : tokens) {
if (TestTokenIdentifier.KIND_NAME.equals(token.getKind())
&& service.equals(token.getService())) {
return (Token<TestTokenIdentifier>) token;
}
}
return null;
}
}
@KerberosInfo(serverPrincipal = SERVER_PRINCIPAL_KEY)
@TokenInfo(TestTokenSelector.class)
@ProtocolInfo(protocolName = "org.apache.hadoop.ipc.TestRpcBase$TestRpcService",
protocolVersion = 1)
public interface TestRpcService
@ -267,12 +368,80 @@ public class TestRpcBase {
} catch (InterruptedException ignore) {}
return TestProtos.EmptyResponseProto.newBuilder().build();
}
@Override
public TestProtos.AuthMethodResponseProto getAuthMethod(
RpcController controller, TestProtos.EmptyRequestProto request)
throws ServiceException {
AuthMethod authMethod = null;
try {
authMethod = UserGroupInformation.getCurrentUser()
.getAuthenticationMethod().getAuthMethod();
} catch (IOException e) {
throw new ServiceException(e);
}
return TestProtos.AuthMethodResponseProto.newBuilder()
.setCode(authMethod.code)
.setMechanismName(authMethod.getMechanismName())
.build();
}
@Override
public TestProtos.AuthUserResponseProto getAuthUser(
RpcController controller, TestProtos.EmptyRequestProto request)
throws ServiceException {
UserGroupInformation authUser = null;
try {
authUser = UserGroupInformation.getCurrentUser();
} catch (IOException e) {
throw new ServiceException(e);
}
return TestProtos.AuthUserResponseProto.newBuilder()
.setAuthUser(authUser.getUserName())
.build();
}
@Override
public TestProtos.EchoResponseProto echoPostponed(
RpcController controller, TestProtos.EchoRequestProto request)
throws ServiceException {
Server.Call call = Server.getCurCall().get();
call.postponeResponse();
postponedCalls.add(call);
return TestProtos.EchoResponseProto.newBuilder().setMessage(
request.getMessage())
.build();
}
@Override
public TestProtos.EmptyResponseProto sendPostponed(
RpcController controller, TestProtos.EmptyRequestProto request)
throws ServiceException {
Collections.shuffle(postponedCalls);
try {
for (Server.Call call : postponedCalls) {
call.sendResponse();
}
} catch (IOException e) {
throw new ServiceException(e);
}
postponedCalls.clear();
return TestProtos.EmptyResponseProto.newBuilder().build();
}
}
protected static TestProtos.EmptyRequestProto newEmptyRequest() {
return TestProtos.EmptyRequestProto.newBuilder().build();
}
protected static TestProtos.EmptyResponseProto newEmptyResponse() {
return TestProtos.EmptyResponseProto.newBuilder().build();
}
protected static TestProtos.EchoRequestProto newEchoRequest(String msg) {
return TestProtos.EchoRequestProto.newBuilder().setMessage(msg).build();
}
@ -290,6 +459,27 @@ public class TestRpcBase {
protected static TestProtos.SleepRequestProto newSleepRequest(
int milliSeconds) {
return TestProtos.SleepRequestProto.newBuilder()
.setMilliSeconds(milliSeconds).build();
.setMilliSeconds(milliSeconds).build();
}
protected static TestProtos.EchoResponseProto newEchoResponse(String msg) {
return TestProtos.EchoResponseProto.newBuilder().setMessage(msg).build();
}
protected static AuthMethod convert(
TestProtos.AuthMethodResponseProto authMethodResponse) {
String mechanism = authMethodResponse.getMechanismName();
if (mechanism.equals(AuthMethod.SIMPLE.getMechanismName())) {
return AuthMethod.SIMPLE;
} else if (mechanism.equals(AuthMethod.KERBEROS.getMechanismName())) {
return AuthMethod.KERBEROS;
} else if (mechanism.equals(AuthMethod.TOKEN.getMechanismName())) {
return AuthMethod.TOKEN;
}
return null;
}
protected static String convert(TestProtos.AuthUserResponseProto response) {
return response.getAuthUser();
}
}

View File

@ -18,53 +18,7 @@
package org.apache.hadoop.ipc;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.KERBEROS;
import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.SIMPLE;
import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.TOKEN;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNotSame;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
import java.security.Security;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Pattern;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.sasl.AuthorizeCallback;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslClient;
import javax.security.sasl.SaslException;
import javax.security.sasl.SaslServer;
import com.google.protobuf.ServiceException;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -74,27 +28,13 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.Client.ConnectionId;
import org.apache.hadoop.ipc.Server.Call;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.SaslInputStream;
import org.apache.hadoop.security.SaslPlainServer;
import org.apache.hadoop.security.SaslPropertiesResolver;
import org.apache.hadoop.security.SaslRpcClient;
import org.apache.hadoop.security.SaslRpcServer;
import org.apache.hadoop.security.*;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
import org.apache.hadoop.security.SecurityInfo;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.TestUserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.*;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.security.token.TokenSelector;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Before;
@ -104,9 +44,27 @@ import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
import javax.security.auth.callback.*;
import javax.security.sasl.*;
import java.io.IOException;
import java.lang.annotation.Annotation;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
import java.security.Security;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.regex.Pattern;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.*;
import static org.junit.Assert.*;
/** Unit tests for using Sasl over RPC. */
@RunWith(Parameterized.class)
public class TestSaslRPC {
public class TestSaslRPC extends TestRpcBase {
@Parameters
public static Collection<Object[]> data() {
Collection<Object[]> params = new ArrayList<Object[]>();
@ -135,18 +93,14 @@ public class TestSaslRPC {
this.expectedQop = expectedQop;
this.saslPropertiesResolver = saslPropertiesResolver;
}
private static final String ADDRESS = "0.0.0.0";
public static final Log LOG =
LogFactory.getLog(TestSaslRPC.class);
static final String ERROR_MESSAGE = "Token is invalid";
static final String SERVER_PRINCIPAL_KEY = "test.ipc.server.principal";
static final String SERVER_KEYTAB_KEY = "test.ipc.server.keytab";
static final String SERVER_PRINCIPAL_1 = "p1/foo@BAR";
static final String SERVER_PRINCIPAL_2 = "p2/foo@BAR";
private static Configuration conf;
// If this is set to true AND the auth-method is not simple, secretManager
// will be enabled.
static Boolean enableSecretManager = null;
@ -155,7 +109,7 @@ public class TestSaslRPC {
static Boolean forceSecretManager = null;
static Boolean clientFallBackToSimpleAllowed = true;
static enum UseToken {
enum UseToken {
NONE(),
VALID(),
INVALID(),
@ -174,6 +128,7 @@ public class TestSaslRPC {
LOG.info("---------------------------------");
LOG.info("Testing QOP:"+ getQOPNames(qop));
LOG.info("---------------------------------");
conf = new Configuration();
// the specific tests for kerberos will enable kerberos. forcing it
// for all tests will cause tests to fail if the user has a TGT
@ -187,6 +142,9 @@ public class TestSaslRPC {
enableSecretManager = null;
forceSecretManager = null;
clientFallBackToSimpleAllowed = true;
// Set RPC engine to protobuf RPC engine
RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
}
static String getQOPNames (QualityOfProtection[] qops){
@ -210,68 +168,6 @@ public class TestSaslRPC {
((Log4JLogger) SecurityUtil.LOG).getLogger().setLevel(Level.ALL);
}
public static class TestTokenIdentifier extends TokenIdentifier {
private Text tokenid;
private Text realUser;
final static Text KIND_NAME = new Text("test.token");
public TestTokenIdentifier() {
this(new Text(), new Text());
}
public TestTokenIdentifier(Text tokenid) {
this(tokenid, new Text());
}
public TestTokenIdentifier(Text tokenid, Text realUser) {
this.tokenid = tokenid == null ? new Text() : tokenid;
this.realUser = realUser == null ? new Text() : realUser;
}
@Override
public Text getKind() {
return KIND_NAME;
}
@Override
public UserGroupInformation getUser() {
if (realUser.toString().isEmpty()) {
return UserGroupInformation.createRemoteUser(tokenid.toString());
} else {
UserGroupInformation realUgi = UserGroupInformation
.createRemoteUser(realUser.toString());
return UserGroupInformation
.createProxyUser(tokenid.toString(), realUgi);
}
}
@Override
public void readFields(DataInput in) throws IOException {
tokenid.readFields(in);
realUser.readFields(in);
}
@Override
public void write(DataOutput out) throws IOException {
tokenid.write(out);
realUser.write(out);
}
}
public static class TestTokenSecretManager extends
SecretManager<TestTokenIdentifier> {
@Override
public byte[] createPassword(TestTokenIdentifier id) {
return id.getBytes();
}
@Override
public byte[] retrievePassword(TestTokenIdentifier id)
throws InvalidToken {
return id.getBytes();
}
@Override
public TestTokenIdentifier createIdentifier() {
return new TestTokenIdentifier();
}
}
public static class BadTokenSecretManager extends TestTokenSecretManager {
@Override
@ -281,64 +177,6 @@ public class TestSaslRPC {
}
}
public static class TestTokenSelector implements
TokenSelector<TestTokenIdentifier> {
@SuppressWarnings("unchecked")
@Override
public Token<TestTokenIdentifier> selectToken(Text service,
Collection<Token<? extends TokenIdentifier>> tokens) {
if (service == null) {
return null;
}
for (Token<? extends TokenIdentifier> token : tokens) {
if (TestTokenIdentifier.KIND_NAME.equals(token.getKind())
&& service.equals(token.getService())) {
return (Token<TestTokenIdentifier>) token;
}
}
return null;
}
}
@KerberosInfo(
serverPrincipal = SERVER_PRINCIPAL_KEY)
@TokenInfo(TestTokenSelector.class)
public interface TestSaslProtocol extends TestRPC.TestProtocol {
public AuthMethod getAuthMethod() throws IOException;
public String getAuthUser() throws IOException;
public String echoPostponed(String value) throws IOException;
public void sendPostponed() throws IOException;
}
public static class TestSaslImpl extends TestRPC.TestImpl implements
TestSaslProtocol {
private List<Call> postponedCalls = new ArrayList<Call>();
@Override
public AuthMethod getAuthMethod() throws IOException {
return UserGroupInformation.getCurrentUser()
.getAuthenticationMethod().getAuthMethod();
}
@Override
public String getAuthUser() throws IOException {
return UserGroupInformation.getCurrentUser().getUserName();
}
@Override
public String echoPostponed(String value) {
Call call = Server.getCurCall().get();
call.postponeResponse();
postponedCalls.add(call);
return value;
}
@Override
public void sendPostponed() throws IOException {
Collections.shuffle(postponedCalls);
for (Call call : postponedCalls) {
call.sendResponse();
}
postponedCalls.clear();
}
}
public static class CustomSecurityInfo extends SecurityInfo {
@Override
@ -363,8 +201,8 @@ public class TestSaslRPC {
public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
return new TokenInfo() {
@Override
public Class<? extends TokenSelector<? extends
TokenIdentifier>> value() {
public Class<? extends TokenSelector<? extends
TokenIdentifier>> value() {
return TestTokenSelector.class;
}
@Override
@ -378,10 +216,7 @@ public class TestSaslRPC {
@Test
public void testDigestRpc() throws Exception {
TestTokenSecretManager sm = new TestTokenSecretManager();
final Server server = new RPC.Builder(conf)
.setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl())
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
.setSecretManager(sm).build();
final Server server = setupTestServer(conf, 5, sm);
doDigestRpc(server, sm);
}
@ -391,10 +226,7 @@ public class TestSaslRPC {
TestTokenSecretManager sm = new TestTokenSecretManager();
try {
SecurityUtil.setSecurityInfoProviders(new CustomSecurityInfo());
final Server server = new RPC.Builder(conf)
.setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl())
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5)
.setVerbose(true).setSecretManager(sm).build();
final Server server = setupTestServer(conf, 5, sm);
doDigestRpc(server, sm);
} finally {
SecurityUtil.setSecurityInfoProviders(new SecurityInfo[0]);
@ -404,59 +236,47 @@ public class TestSaslRPC {
@Test
public void testErrorMessage() throws Exception {
BadTokenSecretManager sm = new BadTokenSecretManager();
final Server server = new RPC.Builder(conf)
.setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl())
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
.setSecretManager(sm).build();
final Server server = setupTestServer(conf, 5, sm);
boolean succeeded = false;
try {
doDigestRpc(server, sm);
} catch (RemoteException e) {
LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
assertEquals(ERROR_MESSAGE, e.getLocalizedMessage());
assertTrue(e.unwrapRemoteException() instanceof InvalidToken);
} catch (ServiceException e) {
assertTrue(e.getCause() instanceof RemoteException);
RemoteException re = (RemoteException) e.getCause();
LOG.info("LOGGING MESSAGE: " + re.getLocalizedMessage());
assertEquals(ERROR_MESSAGE, re.getLocalizedMessage());
assertTrue(re.unwrapRemoteException() instanceof InvalidToken);
succeeded = true;
}
assertTrue(succeeded);
}
private void doDigestRpc(Server server, TestTokenSecretManager sm
) throws Exception {
server.start();
private void doDigestRpc(Server server, TestTokenSecretManager sm)
throws Exception {
final UserGroupInformation current = UserGroupInformation.getCurrentUser();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
addr = NetUtils.getConnectAddress(server);
TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
.getUserName()));
Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
sm);
Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId, sm);
SecurityUtil.setTokenService(token, addr);
current.addToken(token);
TestSaslProtocol proxy = null;
TestRpcService proxy = null;
try {
proxy = RPC.getProxy(TestSaslProtocol.class,
TestSaslProtocol.versionID, addr, conf);
AuthMethod authMethod = proxy.getAuthMethod();
proxy = getClient(addr, conf);
AuthMethod authMethod = convert(
proxy.getAuthMethod(null, newEmptyRequest()));
assertEquals(TOKEN, authMethod);
//QOP must be auth
assertEquals(expectedQop.saslQop,
RPC.getConnectionIdForProxy(proxy).getSaslQop());
proxy.ping();
proxy.ping(null, newEmptyRequest());
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
stop(server, proxy);
}
}
static ConnectionId getConnectionId(Configuration conf) throws IOException {
return ConnectionId.getConnectionId(new InetSocketAddress(0),
TestSaslProtocol.class, null, 0, null, conf);
}
@Test
public void testPingInterval() throws Exception {
Configuration newConf = new Configuration(conf);
@ -466,29 +286,26 @@ public class TestSaslRPC {
// set doPing to true
newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
ConnectionId remoteId = getConnectionId(newConf);
ConnectionId remoteId = ConnectionId.getConnectionId(new InetSocketAddress(0),
TestRpcService.class, null, 0, null, newConf);
assertEquals(CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT,
remoteId.getPingInterval());
// set doPing to false
newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, false);
remoteId = getConnectionId(newConf);
remoteId = ConnectionId.getConnectionId(new InetSocketAddress(0),
TestRpcService.class, null, 0, null, newConf);
assertEquals(0, remoteId.getPingInterval());
}
@Test
public void testPerConnectionConf() throws Exception {
TestTokenSecretManager sm = new TestTokenSecretManager();
final Server server = new RPC.Builder(conf)
.setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl())
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
.setSecretManager(sm).build();
server.start();
final Server server = setupTestServer(conf, 5, sm);
final UserGroupInformation current = UserGroupInformation.getCurrentUser();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
.getUserName()));
Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
sm);
Token<TestTokenIdentifier> token = new Token<>(tokenId, sm);
SecurityUtil.setTokenService(token, addr);
current.addToken(token);
@ -497,28 +314,25 @@ public class TestSaslRPC {
HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, "");
Client client = null;
TestSaslProtocol proxy1 = null;
TestSaslProtocol proxy2 = null;
TestSaslProtocol proxy3 = null;
TestRpcService proxy1 = null;
TestRpcService proxy2 = null;
TestRpcService proxy3 = null;
int timeouts[] = {111222, 3333333};
try {
newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[0]);
proxy1 = RPC.getProxy(TestSaslProtocol.class,
TestSaslProtocol.versionID, addr, newConf);
proxy1.getAuthMethod();
client = WritableRpcEngine.getClient(newConf);
proxy1 = getClient(addr, newConf);
proxy1.getAuthMethod(null, newEmptyRequest());
client = ProtobufRpcEngine.getClient(newConf);
Set<ConnectionId> conns = client.getConnectionIds();
assertEquals("number of connections in cache is wrong", 1, conns.size());
// same conf, connection should be re-used
proxy2 = RPC.getProxy(TestSaslProtocol.class,
TestSaslProtocol.versionID, addr, newConf);
proxy2.getAuthMethod();
proxy2 = getClient(addr, newConf);
proxy2.getAuthMethod(null, newEmptyRequest());
assertEquals("number of connections in cache is wrong", 1, conns.size());
// different conf, new connection should be set up
newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[1]);
proxy3 = RPC.getProxy(TestSaslProtocol.class,
TestSaslProtocol.versionID, addr, newConf);
proxy3.getAuthMethod();
proxy3 = getClient(addr, newConf);
proxy3.getAuthMethod(null, newEmptyRequest());
assertEquals("number of connections in cache is wrong", 2, conns.size());
// now verify the proxies have the correct connection ids and timeouts
ConnectionId[] connsArray = {
@ -551,24 +365,14 @@ public class TestSaslRPC {
UserGroupInformation current = UserGroupInformation.getCurrentUser();
System.out.println("UGI: " + current);
Server server = new RPC.Builder(newConf)
.setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl())
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
.build();
TestSaslProtocol proxy = null;
Server server = setupTestServer(newConf, 5);
TestRpcService proxy = null;
server.start();
InetSocketAddress addr = NetUtils.getConnectAddress(server);
try {
proxy = RPC.getProxy(TestSaslProtocol.class,
TestSaslProtocol.versionID, addr, newConf);
proxy.ping();
proxy = getClient(addr, newConf);
proxy.ping(null, newEmptyRequest());
} finally {
server.stop();
if (proxy != null) {
RPC.stopProxy(proxy);
}
stop(server, proxy);
}
System.out.println("Test is successful.");
}
@ -887,14 +691,7 @@ public class TestSaslRPC {
UserGroupInformation.setConfiguration(conf);
TestTokenSecretManager sm = new TestTokenSecretManager();
Server server = new RPC.Builder(conf)
.setProtocol(TestSaslProtocol.class)
.setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(1) // prevents ordering issues when unblocking calls.
.setVerbose(true)
.setSecretManager(sm)
.build();
server.start();
Server server = setupTestServer(conf, 1, sm);
try {
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
final UserGroupInformation clientUgi =
@ -903,14 +700,13 @@ public class TestSaslRPC {
TestTokenIdentifier tokenId = new TestTokenIdentifier(
new Text(clientUgi.getUserName()));
Token<?> token = new Token<TestTokenIdentifier>(tokenId, sm);
Token<?> token = new Token<>(tokenId, sm);
SecurityUtil.setTokenService(token, addr);
clientUgi.addToken(token);
clientUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
final TestSaslProtocol proxy = RPC.getProxy(TestSaslProtocol.class,
TestSaslProtocol.versionID, addr, conf);
final TestRpcService proxy = getClient(addr, conf);
final ExecutorService executor = Executors.newCachedThreadPool();
final AtomicInteger count = new AtomicInteger();
try {
@ -922,7 +718,8 @@ public class TestSaslRPC {
@Override
public Void call() throws Exception {
String expect = "future"+count.getAndIncrement();
String answer = proxy.echoPostponed(expect);
String answer = convert(proxy.echoPostponed(null,
newEchoRequest(expect)));
assertEquals(expect, answer);
return null;
}
@ -939,7 +736,7 @@ public class TestSaslRPC {
// only 1 handler ensures that the prior calls are already
// postponed. 1 handler also ensures that this call will
// timeout if the postponing doesn't work (ie. free up handler)
proxy.sendPostponed();
proxy.sendPostponed(null, newEmptyRequest());
for (int i=0; i < futures.length; i++) {
LOG.info("waiting for future"+i);
futures[i].get();
@ -1009,14 +806,7 @@ public class TestSaslRPC {
Server server = serverUgi.doAs(new PrivilegedExceptionAction<Server>() {
@Override
public Server run() throws IOException {
Server server = new RPC.Builder(serverConf)
.setProtocol(TestSaslProtocol.class)
.setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0)
.setNumHandlers(5).setVerbose(true)
.setSecretManager(serverSm)
.build();
server.start();
return server;
return setupTestServer(serverConf, 5, serverSm);
}
});
@ -1038,17 +828,17 @@ public class TestSaslRPC {
Token<TestTokenIdentifier> token = null;
switch (tokenType) {
case VALID:
token = new Token<TestTokenIdentifier>(tokenId, sm);
token = new Token<>(tokenId, sm);
SecurityUtil.setTokenService(token, addr);
break;
case INVALID:
token = new Token<TestTokenIdentifier>(
token = new Token<>(
tokenId.getBytes(), "bad-password!".getBytes(),
tokenId.getKind(), null);
SecurityUtil.setTokenService(token, addr);
break;
case OTHER:
token = new Token<TestTokenIdentifier>();
token = new Token<>();
break;
case NONE: // won't get here
}
@ -1060,19 +850,28 @@ public class TestSaslRPC {
return clientUgi.doAs(new PrivilegedExceptionAction<String>() {
@Override
public String run() throws IOException {
TestSaslProtocol proxy = null;
TestRpcService proxy = null;
try {
proxy = RPC.getProxy(TestSaslProtocol.class,
TestSaslProtocol.versionID, addr, clientConf);
proxy.ping();
proxy = getClient(addr, clientConf);
proxy.ping(null, newEmptyRequest());
// make sure the other side thinks we are who we said we are!!!
assertEquals(clientUgi.getUserName(), proxy.getAuthUser());
AuthMethod authMethod = proxy.getAuthMethod();
assertEquals(clientUgi.getUserName(),
convert(proxy.getAuthUser(null, newEmptyRequest())));
AuthMethod authMethod =
convert(proxy.getAuthMethod(null, newEmptyRequest()));
// verify sasl completed with correct QOP
assertEquals((authMethod != SIMPLE) ? expectedQop.saslQop : null,
RPC.getConnectionIdForProxy(proxy).getSaslQop());
RPC.getConnectionIdForProxy(proxy).getSaslQop());
return authMethod.toString();
} catch (ServiceException se) {
if (se.getCause() instanceof RemoteException) {
throw (RemoteException) se.getCause();
} else if (se.getCause() instanceof IOException) {
throw (IOException) se.getCause();
} else {
throw new RuntimeException(se.getCause());
}
} finally {
if (proxy != null) {
RPC.stopProxy(proxy);

View File

@ -41,9 +41,9 @@ import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenInfo;
import org.junit.Before;
import org.junit.Test;
import org.apache.hadoop.ipc.TestSaslRPC.TestTokenSecretManager;
import org.apache.hadoop.ipc.TestSaslRPC.TestTokenIdentifier;
import org.apache.hadoop.ipc.TestSaslRPC.TestTokenSelector;
import org.apache.hadoop.ipc.TestRpcBase.TestTokenSecretManager;
import org.apache.hadoop.ipc.TestRpcBase.TestTokenIdentifier;
import org.apache.hadoop.ipc.TestRpcBase.TestTokenSelector;
import org.apache.commons.logging.*;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

View File

@ -81,4 +81,13 @@ message ExchangeRequestProto {
message ExchangeResponseProto {
repeated int32 values = 1;
}
message AuthMethodResponseProto {
required int32 code = 1;
required string mechanismName = 2;
}
message AuthUserResponseProto {
required string authUser = 1;
}
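The two response messages above carry what the reworked SASL tests need from the server: getAuthUser returns the authenticated user name, and getAuthMethod returns the negotiated method as a code plus SASL mechanism name, which the test side unwraps via calls like convert(proxy.getAuthMethod(null, newEmptyRequest())) in the TestSaslRPC hunks. A minimal sketch of such a convert helper follows; the generated class location (TestProtos) and the mechanism-name mapping are assumptions for illustration, not the committed TestRpcBase code.

    import org.apache.hadoop.ipc.protobuf.TestProtos.AuthMethodResponseProto;
    import org.apache.hadoop.security.SaslRpcServer.AuthMethod;

    // Hypothetical helper: map the wire-level response back to the enum by
    // its SASL mechanism name ("" = SIMPLE, "GSSAPI" = KERBEROS, "DIGEST-MD5" = TOKEN).
    static AuthMethod convert(AuthMethodResponseProto resp) {
      String mechanism = resp.getMechanismName();
      if (mechanism.equals(AuthMethod.SIMPLE.getMechanismName())) {
        return AuthMethod.SIMPLE;
      } else if (mechanism.equals(AuthMethod.KERBEROS.getMechanismName())) {
        return AuthMethod.KERBEROS;
      } else if (mechanism.equals(AuthMethod.TOKEN.getMechanismName())) {
        return AuthMethod.TOKEN;
      }
      return null;
    }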

View File

@ -39,6 +39,10 @@ service TestProtobufRpcProto {
rpc testServerGet(EmptyRequestProto) returns (EmptyResponseProto);
rpc exchange(ExchangeRequestProto) returns (ExchangeResponseProto);
rpc sleep(SleepRequestProto) returns (EmptyResponseProto);
rpc getAuthMethod(EmptyRequestProto) returns (AuthMethodResponseProto);
rpc getAuthUser(EmptyRequestProto) returns (AuthUserResponseProto);
rpc echoPostponed(EchoRequestProto) returns (EchoResponseProto);
rpc sendPostponed(EmptyRequestProto) returns (EmptyResponseProto);
}
service TestProtobufRpc2Proto {

View File

@ -11,5 +11,5 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
org.apache.hadoop.ipc.TestSaslRPC$TestTokenIdentifier
org.apache.hadoop.ipc.TestRpcBase$TestTokenIdentifier
org.apache.hadoop.security.token.delegation.TestDelegationToken$TestDelegationTokenIdentifier
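This services file is how Hadoop's token machinery discovers TokenIdentifier implementations at runtime (java.util.ServiceLoader over META-INF/services); the change simply re-points the test identifier at its new home in TestRpcBase. Conceptually, discovery looks like the sketch below (illustrative only):

    import java.util.ServiceLoader;
    import org.apache.hadoop.security.token.TokenIdentifier;

    // Every class listed in the services file shows up here.
    for (TokenIdentifier id : ServiceLoader.load(TokenIdentifier.class)) {
      System.out.println("registered token kind: " + id.getKind());
    }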

View File

@ -0,0 +1,47 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load hadoop-functions_test_helper
toolsetup () {
HADOOP_LIBEXEC_DIR="${TMP}/libexec"
mkdir -p "${HADOOP_LIBEXEC_DIR}/tools"
}
@test "hadoop_classpath_tools (load)" {
toolsetup
echo "unittest=libexec" > "${HADOOP_LIBEXEC_DIR}/tools/test.sh"
hadoop_add_to_classpath_tools test
[ -n "${unittest}" ]
}
@test "hadoop_classpath_tools (not exist)" {
toolsetup
hadoop_add_to_classpath_tools test
[ -z "${unittest}" ]
}
@test "hadoop_classpath_tools (function)" {
toolsetup
{
echo "function hadoop_classpath_tools_test {"
echo " unittest=libexec"
echo " }"
} > "${HADOOP_LIBEXEC_DIR}/tools/test.sh"
hadoop_add_to_classpath_tools test
declare -f
[ -n "${unittest}" ]
}

View File

@ -1,74 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load hadoop-functions_test_helper
freetheclasses () {
local j
for j in HADOOP_TOOLS_PATH \
CLASSPATH; do
unset ${j}
done
}
createdirs () {
local j
for j in new old foo bar baz; do
mkdir -p "${TMP}/${j}"
done
}
@test "hadoop_add_to_classpath_toolspath (nothing)" {
freetheclasses
hadoop_add_to_classpath_toolspath
[ -z "${CLASSPATH}" ]
}
@test "hadoop_add_to_classpath_toolspath (none)" {
freetheclasses
CLASSPATH=test
hadoop_add_to_classpath_toolspath
[ "${CLASSPATH}" = "test" ]
}
@test "hadoop_add_to_classpath_toolspath (only)" {
freetheclasses
createdirs
HADOOP_TOOLS_PATH="${TMP}/new"
hadoop_add_to_classpath_toolspath
[ "${CLASSPATH}" = "${TMP}/new" ]
}
@test "hadoop_add_to_classpath_toolspath (1+1)" {
freetheclasses
createdirs
CLASSPATH=${TMP}/foo
HADOOP_TOOLS_PATH=${TMP}/foo
hadoop_add_to_classpath_toolspath
echo ">${CLASSPATH}<"
[ ${CLASSPATH} = "${TMP}/foo" ]
}
@test "hadoop_add_to_classpath_toolspath (3+2)" {
freetheclasses
createdirs
CLASSPATH=${TMP}/foo:${TMP}/bar:${TMP}/baz
HADOOP_TOOLS_PATH=${TMP}/new:${TMP}/old
hadoop_add_to_classpath_toolspath
echo ">${CLASSPATH}<"
[ ${CLASSPATH} = "${TMP}/foo:${TMP}/bar:${TMP}/baz:${TMP}/new:${TMP}/old" ]
}

View File

@ -38,7 +38,8 @@ basicinitsetup () {
dirvars="HADOOP_COMMON_HOME \
HADOOP_MAPRED_HOME \
HADOOP_HDFS_HOME \
HADOOP_YARN_HOME"
HADOOP_YARN_HOME \
HADOOP_TOOLS_HOME"
for j in ${testvars}; do
unset ${j}

View File

@ -15,13 +15,13 @@
load hadoop-functions_test_helper
@test "hadoop_deprecate_envvar (no libexec)" {
@test "hadoop_bootstrap (no libexec)" {
unset HADOOP_LIBEXEC_DIR
run hadoop_bootstrap
[ "${status}" -eq 1 ]
}
@test "hadoop_deprecate_envvar (libexec)" {
@test "hadoop_bootstrap (libexec)" {
unset HADOOP_PREFIX
unset HADOOP_COMMON_DIR
unset HADOOP_COMMON_LIB_JARS_DIR
@ -31,7 +31,9 @@ load hadoop-functions_test_helper
unset YARN_LIB_JARS_DIR
unset MAPRED_DIR
unset MAPRED_LIB_JARS_DIR
unset TOOL_PATH
unset HADOOP_TOOLS_HOME
unset HADOOP_TOOLS_DIR
unset HADOOP_TOOLS_LIB_JARS_DIR
unset HADOOP_OS_TYPE
hadoop_bootstrap
@ -46,6 +48,9 @@ load hadoop-functions_test_helper
[ -n ${YARN_LIB_JARS_DIR} ]
[ -n ${MAPRED_DIR} ]
[ -n ${MAPRED_LIB_JARS_DIR} ]
[ -n ${TOOL_PATH} ]
[ -n ${HADOOP_OS_TYPE} ]
}
[ -n ${HADOOP_TOOLS_PATH} ]
[ -n ${HADOOP_TOOLS_HOME} ]
[ -n ${HADOOP_TOOLS_DIR} ]
[ -n ${HADOOP_TOOLS_LIB_JARS_DIR} ]
}

View File

@ -0,0 +1,49 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load hadoop-functions_test_helper
@test "hadoop_add_entry (positive 1)" {
hadoop_add_entry testvar foo
echo ">${testvar}<"
[ "${testvar}" = " foo " ]
}
@test "hadoop_add_entry (negative)" {
hadoop_add_entry testvar foo
hadoop_add_entry testvar foo
echo ">${testvar}<"
[ "${testvar}" = " foo " ]
}
@test "hadoop_add_entry (positive 2)" {
hadoop_add_entry testvar foo
hadoop_add_entry testvar foo
hadoop_add_entry testvar bar
echo ">${testvar}<"
[ "${testvar}" = " foo bar " ]
}
@test "hadoop_add_entry (positive 3)" {
hadoop_add_entry testvar foo
hadoop_add_entry testvar foo
hadoop_add_entry testvar bar
hadoop_add_entry testvar bar
hadoop_add_entry testvar baz
hadoop_add_entry testvar baz
echo ">${testvar}<"
[ "${testvar}" = " foo bar baz " ]
}

View File

@ -66,6 +66,13 @@ _test_hadoop_finalize () {
[ -z "${unittest}" ]
}
@test "hadoop_import_shellprofiles (H_O_T)" {
HADOOP_OPTIONAL_TOOLS=1,2
shellprofilesetup
hadoop_import_shellprofiles
[ "${HADOOP_TOOLS_OPTIONS}" == " 1 2 " ]
}
@test "hadoop_add_profile+hadoop_shellprofiles_init" {
hadoop_add_profile test
hadoop_shellprofiles_init

View File

@ -41,10 +41,10 @@ import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriBuilder;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.LinkedList;
@ -89,9 +89,9 @@ public class KMS {
keyVersion.getVersionName(), null);
}
private static URI getKeyURI(String name) throws URISyntaxException {
return new URI(KMSRESTConstants.SERVICE_VERSION + "/" +
KMSRESTConstants.KEY_RESOURCE + "/" + name);
private static URI getKeyURI(String domain, String keyName) {
return UriBuilder.fromPath("{a}/{b}/{c}")
.build(domain, KMSRESTConstants.KEY_RESOURCE, keyName);
}
@POST
@ -151,9 +151,9 @@ public class KMS {
String requestURL = KMSMDCFilter.getURL();
int idx = requestURL.lastIndexOf(KMSRESTConstants.KEYS_RESOURCE);
requestURL = requestURL.substring(0, idx);
String keyURL = requestURL + KMSRESTConstants.KEY_RESOURCE + "/" + name;
return Response.created(getKeyURI(name)).type(MediaType.APPLICATION_JSON).
header("Location", keyURL).entity(json).build();
return Response.created(getKeyURI(KMSRESTConstants.SERVICE_VERSION, name))
.type(MediaType.APPLICATION_JSON)
.header("Location", getKeyURI(requestURL, name)).entity(json).build();
}
@DELETE
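The switch from string concatenation to UriBuilder matters because key names may contain characters that are not legal in a raw URI (the testSpecialKeyNames case added to TestKMS below exercises exactly that). A small, standalone illustration of the template-based encoding using only the standard JAX-RS UriBuilder API; the sample values are placeholders:

    import java.net.URI;
    import javax.ws.rs.core.UriBuilder;

    // Template parameters are percent-encoded when the URI is built, so a
    // key name with spaces or punctuation becomes a valid path segment.
    URI keyUri = UriBuilder.fromPath("{a}/{b}/{c}")
        .build("v1", "key", "key %^<>|");
    System.out.println(keyUri); // e.g. v1/key/key%20%25%5E%3C%3E%7C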

View File

@ -39,11 +39,15 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.login.AppConfigurationEntry;
@ -69,12 +73,14 @@ import java.util.UUID;
import java.util.concurrent.Callable;
public class TestKMS {
private static final Logger LOG = LoggerFactory.getLogger(TestKMS.class);
@Before
public void cleanUp() {
// resetting kerberos security
Configuration conf = new Configuration();
UserGroupInformation.setConfiguration(conf);
GenericTestUtils.setLogLevel(LOG, Level.INFO);
}
public static File getTestDir() throws Exception {
@ -380,6 +386,42 @@ public class TestKMS {
testStartStop(true, true);
}
@Test(timeout = 30000)
public void testSpecialKeyNames() throws Exception {
final String specialKey = "key %^[\n{]}|\"<>\\";
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
UserGroupInformation.setConfiguration(conf);
File confDir = getTestDir();
conf = createBaseKMSConf(confDir);
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + specialKey + ".ALL", "*");
writeConf(confDir, conf);
runServer(null, null, confDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
Configuration conf = new Configuration();
URI uri = createKMSUri(getKMSUrl());
KeyProvider kp = createProvider(uri, conf);
Assert.assertTrue(kp.getKeys().isEmpty());
Assert.assertEquals(0, kp.getKeysMetadata().length);
KeyProvider.Options options = new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
options.setDescription("l1");
LOG.info("Creating key with name '{}'", specialKey);
KeyProvider.KeyVersion kv0 = kp.createKey(specialKey, options);
Assert.assertNotNull(kv0);
Assert.assertEquals(specialKey, kv0.getName());
Assert.assertNotNull(kv0.getVersionName());
Assert.assertNotNull(kv0.getMaterial());
return null;
}
});
}
@Test
public void testKMSProvider() throws Exception {
Configuration conf = new Configuration();

View File

@ -103,6 +103,24 @@
</arguments>
</configuration>
</execution>
<execution>
<id>toolshooks</id>
<phase>prepare-package</phase>
<goals>
<goal>exec</goal>
</goals>
<configuration>
<executable>${shell-executable}</executable>
<workingDirectory>${basedir}</workingDirectory>
<requiresOnline>false</requiresOnline>
<arguments>
<argument>${basedir}/../dev-support/bin/dist-tools-hooks-maker</argument>
<argument>${project.version}</argument>
<argument>${project.build.directory}</argument>
<argument>${basedir}/../hadoop-tools</argument>
</arguments>
</configuration>
</execution>
<execution>
<id>tar</id>
<phase>package</phase>

View File

@ -8,6 +8,7 @@
<Class name="org.apache.hadoop.hdfs.protocol.LocatedBlock"/>
<Class name="org.apache.hadoop.hdfs.protocol.BlockStoragePolicy"/>
<Class name="org.apache.hadoop.hdfs.protocol.CorruptFileBlocks"/>
<Class name="org.apache.hadoop.hdfs.protocol.StripedBlockInfo"/>
<Class name="org.apache.hadoop.hdfs.protocol.DirectoryListing"/>
<Class name="org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier"/>
<Class name="org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey"/>

View File

@ -83,11 +83,6 @@ public interface BlockReader extends ByteBufferReadable, Closeable {
*/
int readAll(byte[] buf, int offset, int len) throws IOException;
/**
* @return true only if this is a local read.
*/
boolean isLocal();
/**
* @return true only if this is a short-circuit read.
* All short-circuit reads are also local.
@ -107,4 +102,9 @@ public interface BlockReader extends ByteBufferReadable, Closeable {
* @return The DataChecksum used by the read block
*/
DataChecksum getDataChecksum();
/**
* Return the network distance between the local machine and the remote machine.
*/
int getNetworkDistance();
}
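With isLocal() removed from the interface, locality is now derived from the distance: 0 means the replica was served by the local node, anything larger is off-host. A small sketch of the classification a caller can do, mirroring the DFSInputStream.updateReadStatistics change further down (the method name here is illustrative):

    import org.apache.hadoop.hdfs.BlockReader;

    // Classify a completed read for statistics purposes.
    static String classify(BlockReader reader) {
      if (reader.isShortCircuit()) {
        return "short-circuit";   // local, via fd passing / mmap
      } else if (reader.getNetworkDistance() == 0) {
        return "local";           // same host, but over TCP
      } else {
        return "remote";          // off-host read
      }
    }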

View File

@ -833,16 +833,19 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
@SuppressWarnings("deprecation")
private BlockReader getRemoteBlockReader(Peer peer) throws IOException {
int networkDistance = clientContext.getNetworkDistance(datanode);
if (conf.getShortCircuitConf().isUseLegacyBlockReader()) {
return RemoteBlockReader.newBlockReader(fileName,
block, token, startOffset, length, conf.getIoBufferSize(),
verifyChecksum, clientName, peer, datanode,
clientContext.getPeerCache(), cachingStrategy, tracer);
clientContext.getPeerCache(), cachingStrategy, tracer,
networkDistance);
} else {
return RemoteBlockReader2.newBlockReader(
fileName, block, token, startOffset, length,
verifyChecksum, clientName, peer, datanode,
clientContext.getPeerCache(), cachingStrategy, tracer);
clientContext.getPeerCache(), cachingStrategy, tracer,
networkDistance);
}
}

View File

@ -640,11 +640,6 @@ class BlockReaderLocal implements BlockReader {
return BlockReaderUtil.readAll(this, buf, off, len);
}
@Override
public boolean isLocal() {
return true;
}
@Override
public boolean isShortCircuit() {
return true;
@ -721,4 +716,9 @@ class BlockReaderLocal implements BlockReader {
public DataChecksum getDataChecksum() {
return checksum;
}
@Override
public int getNetworkDistance() {
return 0;
}
}

View File

@ -722,11 +722,6 @@ class BlockReaderLocalLegacy implements BlockReader {
return Integer.MAX_VALUE;
}
@Override
public boolean isLocal() {
return true;
}
@Override
public boolean isShortCircuit() {
return true;
@ -741,4 +736,9 @@ class BlockReaderLocalLegacy implements BlockReader {
public DataChecksum getDataChecksum() {
return checksum;
}
@Override
public int getNetworkDistance() {
return 0;
}
}

View File

@ -17,16 +17,28 @@
*/
package org.apache.hadoop.hdfs;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.shortcircuit.DomainSocketFactory;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache;
import org.apache.hadoop.hdfs.util.ByteArrayManager;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.net.ScriptBasedMapping;
import org.apache.hadoop.util.ReflectionUtils;
import com.google.common.annotations.VisibleForTesting;
@ -101,7 +113,12 @@ public class ClientContext {
*/
private boolean printedConfWarning = false;
private ClientContext(String name, DfsClientConf conf) {
private final NetworkTopology topology;
private final NodeBase clientNode;
private final Map<NodeBase, Integer> nodeToDistance;
private ClientContext(String name, DfsClientConf conf,
Configuration config) {
final ShortCircuitConf scConf = conf.getShortCircuitConf();
this.name = name;
@ -116,14 +133,28 @@ public class ClientContext {
this.byteArrayManager = ByteArrayManager.newInstance(
conf.getWriteByteArrayManagerConf());
DNSToSwitchMapping dnsToSwitchMapping = ReflectionUtils.newInstance(
config.getClass(
CommonConfigurationKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
ScriptBasedMapping.class, DNSToSwitchMapping.class), config);
List<String> nodes = new ArrayList<>();
String clientHostName = NetUtils.getLocalHostname();
nodes.add(clientHostName);
clientNode = new NodeBase(clientHostName,
dnsToSwitchMapping.resolve(nodes).get(0));
this.topology = NetworkTopology.getInstance(config);
this.topology.add(clientNode);
this.nodeToDistance = new ConcurrentHashMap<>();
}
public static ClientContext get(String name, DfsClientConf conf) {
public static ClientContext get(String name, DfsClientConf conf,
Configuration config) {
ClientContext context;
synchronized(ClientContext.class) {
context = CACHES.get(name);
if (context == null) {
context = new ClientContext(name, conf);
context = new ClientContext(name, conf, config);
CACHES.put(name, context);
} else {
context.printConfWarningIfNeeded(conf);
@ -132,6 +163,10 @@ public class ClientContext {
return context;
}
public static ClientContext get(String name, Configuration config) {
return get(name, new DfsClientConf(config), config);
}
/**
* Get a client context, from a Configuration object.
*
@ -141,8 +176,7 @@ public class ClientContext {
@VisibleForTesting
public static ClientContext getFromConf(Configuration conf) {
return get(conf.get(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT,
HdfsClientConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT),
new DfsClientConf(conf));
HdfsClientConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT), conf);
}
private void printConfWarningIfNeeded(DfsClientConf conf) {
@ -193,4 +227,16 @@ public class ClientContext {
public ByteArrayManager getByteArrayManager() {
return byteArrayManager;
}
public int getNetworkDistance(DatanodeInfo datanodeInfo) {
NodeBase node = new NodeBase(datanodeInfo.getHostName(),
datanodeInfo.getNetworkLocation());
Integer distance = nodeToDistance.get(node);
if (distance == null) {
topology.add(node);
distance = topology.getDistance(clientNode, node);
nodeToDistance.put(node, distance);
}
return distance;
}
}
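The new topology bookkeeping resolves the client's own network location once (through the configured DNSToSwitchMapping) and then caches the distance to each datanode on first use, so repeated reads from the same node do not re-resolve anything. A condensed sketch of the same caching pattern; the committed code uses an explicit get/put rather than computeIfAbsent, which merely tolerates an occasional duplicate computation:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.net.NetworkTopology;
    import org.apache.hadoop.net.NodeBase;

    final Map<NodeBase, Integer> nodeToDistance = new ConcurrentHashMap<>();

    int networkDistance(NetworkTopology topology, NodeBase clientNode,
        DatanodeInfo dn) {
      NodeBase node = new NodeBase(dn.getHostName(), dn.getNetworkLocation());
      return nodeToDistance.computeIfAbsent(node, n -> {
        topology.add(n);                              // register the node once
        return topology.getDistance(clientNode, n);   // e.g. 0 = local, 2 = same rack
      });
    }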

View File

@ -212,7 +212,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
final String clientName;
final SocketFactory socketFactory;
final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
final FileSystem.Statistics stats;
private final FileSystem.Statistics stats;
private final String authority;
private final Random r = new Random();
private SocketAddress[] localInterfaceAddrs;
@ -357,7 +357,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
new CachingStrategy(writeDropBehind, readahead);
this.clientContext = ClientContext.get(
conf.get(DFS_CLIENT_CONTEXT, DFS_CLIENT_CONTEXT_DEFAULT),
dfsClientConf);
dfsClientConf, conf);
if (dfsClientConf.getHedgedReadThreadpoolSize() > 0) {
this.initThreadsNumForHedgedReads(dfsClientConf.
@ -1704,7 +1704,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
/**
* Get the checksum of the whole file or a range of the file. Note that the
* range always starts from the beginning of the file.
* range always starts from the beginning of the file. The file can be
* in replicated form, or striped mode. It can be used to checksum and compare
* two replicated files, or two striped files, but is not applicable to two
* files with different block layouts.
* @param src The file path
* @param length the length of the range, i.e., the range is [0, length]
* @return The checksum
@ -1717,7 +1720,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
LocatedBlocks blockLocations = getBlockLocations(src, length);
FileChecksumHelper.FileChecksumComputer maker =
FileChecksumHelper.FileChecksumComputer maker;
ErasureCodingPolicy ecPolicy = blockLocations.getErasureCodingPolicy();
maker = ecPolicy != null ?
new FileChecksumHelper.StripedFileNonStripedChecksumComputer(src,
length, blockLocations, namenode, this, ecPolicy) :
new FileChecksumHelper.ReplicatedFileChecksumComputer(src, length,
blockLocations, namenode, this);
@ -2740,6 +2747,13 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
}
}
void updateFileSystemReadStats(int distance, int nRead) {
if (stats != null) {
stats.incrementBytesRead(nRead);
stats.incrementBytesReadByDistance(distance, nRead);
}
}
/**
* Create hedged reads thread pool, HEDGED_READ_THREAD_POOL, if
* it does not already exist.
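As the updated getFileChecksum javadoc notes, the returned checksum is comparable across two replicated files or two striped files, but not across files with different block layouts. A hedged, user-level sketch of such a comparison through the public FileSystem API; the paths are placeholders and the calls belong inside a method that may throw IOException:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileChecksum;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Meaningful only if both files use the same block layout
    // (both replicated, or both striped with the same EC policy).
    FileChecksum a = fs.getFileChecksum(new Path("/data/copy1"));
    FileChecksum b = fs.getFileChecksum(new Path("/data/copy2"));
    boolean identical = a != null && a.equals(b);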

View File

@ -775,7 +775,7 @@ public class DFSInputStream extends FSInputStream
synchronized(infoLock) {
if (blockReader.isShortCircuit()) {
readStatistics.addShortCircuitBytes(nRead);
} else if (blockReader.isLocal()) {
} else if (blockReader.getNetworkDistance() == 0) {
readStatistics.addLocalBytes(nRead);
} else {
readStatistics.addRemoteBytes(nRead);
@ -798,6 +798,8 @@ public class DFSInputStream extends FSInputStream
throws IOException {
int nRead = blockReader.read(buf, off, len);
updateReadStatistics(readStatistics, nRead, blockReader);
dfsClient.updateFileSystemReadStats(blockReader.getNetworkDistance(),
nRead);
return nRead;
}
@ -828,6 +830,8 @@ public class DFSInputStream extends FSInputStream
int ret = blockReader.read(buf);
success = true;
updateReadStatistics(readStatistics, ret, blockReader);
dfsClient.updateFileSystemReadStats(blockReader.getNetworkDistance(),
ret);
if (ret == 0) {
DFSClient.LOG.warn("zero");
}
@ -939,9 +943,6 @@ public class DFSInputStream extends FSInputStream
// got a EOS from reader though we expect more data on it.
throw new IOException("Unexpected EOS from the reader");
}
if (dfsClient.stats != null) {
dfsClient.stats.incrementBytesRead(result);
}
return result;
} catch (ChecksumException ce) {
throw ce;
@ -1194,6 +1195,8 @@ public class DFSInputStream extends FSInputStream
datanode.storageType, datanode.info);
int nread = reader.readAll(buf, offset, len);
updateReadStatistics(readStatistics, nread, reader);
dfsClient.updateFileSystemReadStats(
reader.getNetworkDistance(), nread);
if (nread != len) {
throw new IOException("truncated return from reader.read(): " +
"expected " + len + ", got " + nread);
@ -1276,7 +1279,7 @@ public class DFSInputStream extends FSInputStream
// chooseDataNode is a commitment. If no node, we go to
// the NN to reget block locations. Only go here on first read.
chosenNode = chooseDataNode(block, ignored);
bb = ByteBuffer.wrap(buf, offset, len);
bb = ByteBuffer.allocate(len);
Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(
chosenNode, block, start, end, bb,
corruptedBlocks, hedgedReadId++);
@ -1287,7 +1290,9 @@ public class DFSInputStream extends FSInputStream
Future<ByteBuffer> future = hedgedService.poll(
conf.getHedgedReadThresholdMillis(), TimeUnit.MILLISECONDS);
if (future != null) {
future.get();
ByteBuffer result = future.get();
System.arraycopy(result.array(), result.position(), buf, offset,
len);
return;
}
DFSClient.LOG.debug("Waited {}ms to read from {}; spawning hedged "
@ -1325,13 +1330,9 @@ public class DFSInputStream extends FSInputStream
ByteBuffer result = getFirstToComplete(hedgedService, futures);
// cancel the rest.
cancelAll(futures);
if (result.array() != buf) { // compare the array pointers
dfsClient.getHedgedReadMetrics().incHedgedReadWins();
System.arraycopy(result.array(), result.position(), buf, offset,
len);
} else {
dfsClient.getHedgedReadMetrics().incHedgedReadOps();
}
dfsClient.getHedgedReadMetrics().incHedgedReadWins();
System.arraycopy(result.array(), result.position(), buf, offset,
len);
return;
} catch (InterruptedException ie) {
// Ignore and retry
@ -1479,9 +1480,6 @@ public class DFSInputStream extends FSInputStream
offset += bytesToRead;
}
assert remaining == 0 : "Wrong number of bytes read.";
if (dfsClient.stats != null) {
dfsClient.stats.incrementBytesRead(realLen);
}
return realLen;
}
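The hedged-read fix above gives every speculative attempt its own ByteBuffer and copies whichever attempt wins into the caller's array, instead of letting two attempts share a destination buffer. A self-contained sketch of that pattern with an ExecutorCompletionService; this is a generic illustration, not the DFSInputStream code, and the 500 ms threshold is an arbitrary placeholder:

    import java.nio.ByteBuffer;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.CompletionService;
    import java.util.concurrent.ExecutorCompletionService;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;

    static void hedgedRead(ExecutorService pool,
        List<Callable<ByteBuffer>> attempts, byte[] dst, int offset, int len)
        throws Exception {
      CompletionService<ByteBuffer> cs = new ExecutorCompletionService<>(pool);
      List<Future<ByteBuffer>> futures = new ArrayList<>();
      futures.add(cs.submit(attempts.get(0)));        // primary read
      Future<ByteBuffer> done = cs.poll(500, TimeUnit.MILLISECONDS);
      if (done == null) {
        if (attempts.size() > 1) {
          futures.add(cs.submit(attempts.get(1)));    // hedge against a slow node
        }
        done = cs.take();                             // first finisher wins
      }
      // Each attempt owns its buffer; assumes heap buffers from ByteBuffer.allocate(len).
      ByteBuffer winner = done.get();
      System.arraycopy(winner.array(), winner.position(), dst, offset, len);
      for (Future<ByteBuffer> f : futures) {
        f.cancel(true);                               // stop the losing attempt
      }
    }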

View File

@ -447,9 +447,6 @@ public class DFSStripedInputStream extends DFSInputStream {
result += ret;
pos += ret;
}
if (dfsClient.stats != null) {
dfsClient.stats.incrementBytesRead(result);
}
return result;
} finally {
// Check if need to report block replicas corruption either read

View File

@ -109,11 +109,6 @@ public final class ExternalBlockReader implements BlockReader {
return BlockReaderUtil.readAll(this, buf, offset, len);
}
@Override
public boolean isLocal() {
return accessor.isLocal();
}
@Override
public boolean isShortCircuit() {
return accessor.isShortCircuit();
@ -129,4 +124,9 @@ public final class ExternalBlockReader implements BlockReader {
public DataChecksum getDataChecksum() {
return null;
}
@Override
public int getNetworkDistance() {
return accessor.getNetworkDistance();
}
}

View File

@ -22,10 +22,13 @@ import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
import org.apache.hadoop.hdfs.protocol.StripedBlockInfo;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
@ -75,7 +78,7 @@ final class FileChecksumHelper {
private int bytesPerCRC = -1;
private DataChecksum.Type crcType = DataChecksum.Type.DEFAULT;
private long crcPerBlock = 0;
private boolean refetchBlocks = false;
private boolean isRefetchBlocks = false;
private int lastRetriedIndex = -1;
/**
@ -127,8 +130,11 @@ final class FileChecksumHelper {
return blockLocations;
}
void setBlockLocations(LocatedBlocks blockLocations) {
this.blockLocations = blockLocations;
void refetchBlocks() throws IOException {
this.blockLocations = getClient().getBlockLocations(getSrc(),
getLength());
this.locatedBlocks = getBlockLocations().getLocatedBlocks();
this.isRefetchBlocks = false;
}
int getTimeout() {
@ -143,10 +149,6 @@ final class FileChecksumHelper {
return locatedBlocks;
}
void setLocatedBlocks(List<LocatedBlock> locatedBlocks) {
this.locatedBlocks = locatedBlocks;
}
long getRemaining() {
return remaining;
}
@ -180,11 +182,11 @@ final class FileChecksumHelper {
}
boolean isRefetchBlocks() {
return refetchBlocks;
return isRefetchBlocks;
}
void setRefetchBlocks(boolean refetchBlocks) {
this.refetchBlocks = refetchBlocks;
this.isRefetchBlocks = refetchBlocks;
}
int getLastRetriedIndex() {
@ -278,10 +280,7 @@ final class FileChecksumHelper {
blockIdx < getLocatedBlocks().size() && getRemaining() >= 0;
blockIdx++) {
if (isRefetchBlocks()) { // refetch to get fresh tokens
setBlockLocations(getClient().getBlockLocations(getSrc(),
getLength()));
setLocatedBlocks(getBlockLocations().getLocatedBlocks());
setRefetchBlocks(false);
refetchBlocks();
}
LocatedBlock locatedBlock = getLocatedBlocks().get(blockIdx);
@ -380,15 +379,13 @@ final class FileChecksumHelper {
}
//read md5
final MD5Hash md5 = new MD5Hash(
checksumData.getMd5().toByteArray());
final MD5Hash md5 = new MD5Hash(checksumData.getMd5().toByteArray());
md5.write(getMd5out());
// read crc-type
final DataChecksum.Type ct;
if (checksumData.hasCrcType()) {
ct = PBHelperClient.convert(checksumData
.getCrcType());
ct = PBHelperClient.convert(checksumData.getCrcType());
} else {
LOG.debug("Retrieving checksum from an earlier-version DataNode: " +
"inferring checksum by reading first byte");
@ -413,4 +410,160 @@ final class FileChecksumHelper {
}
}
}
/**
* Striped file checksum computing.
*/
static class StripedFileNonStripedChecksumComputer
extends FileChecksumComputer {
private final ErasureCodingPolicy ecPolicy;
private int bgIdx;
StripedFileNonStripedChecksumComputer(String src, long length,
LocatedBlocks blockLocations,
ClientProtocol namenode,
DFSClient client,
ErasureCodingPolicy ecPolicy)
throws IOException {
super(src, length, blockLocations, namenode, client);
this.ecPolicy = ecPolicy;
}
@Override
void checksumBlocks() throws IOException {
int tmpTimeout = 3000 * 1 + getClient().getConf().getSocketTimeout();
setTimeout(tmpTimeout);
for (bgIdx = 0;
bgIdx < getLocatedBlocks().size() && getRemaining() >= 0; bgIdx++) {
if (isRefetchBlocks()) { // refetch to get fresh tokens
refetchBlocks();
}
LocatedBlock locatedBlock = getLocatedBlocks().get(bgIdx);
LocatedStripedBlock blockGroup = (LocatedStripedBlock) locatedBlock;
if (!checksumBlockGroup(blockGroup)) {
throw new IOException("Fail to get block MD5 for " + locatedBlock);
}
}
}
private boolean checksumBlockGroup(
LocatedStripedBlock blockGroup) throws IOException {
ExtendedBlock block = blockGroup.getBlock();
if (getRemaining() < block.getNumBytes()) {
block.setNumBytes(getRemaining());
}
setRemaining(getRemaining() - block.getNumBytes());
StripedBlockInfo stripedBlockInfo = new StripedBlockInfo(block,
blockGroup.getLocations(), blockGroup.getBlockTokens(), ecPolicy);
DatanodeInfo[] datanodes = blockGroup.getLocations();
//try each datanode in the block group.
boolean done = false;
for (int j = 0; !done && j < datanodes.length; j++) {
try {
tryDatanode(blockGroup, stripedBlockInfo, datanodes[j]);
done = true;
} catch (InvalidBlockTokenException ibte) {
if (bgIdx > getLastRetriedIndex()) {
LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
+ "for file {} for block {} from datanode {}. Will retry "
+ "the block once.",
getSrc(), block, datanodes[j]);
setLastRetriedIndex(bgIdx);
done = true; // actually it's not done; but we'll retry
bgIdx--; // repeat at bgIdx-th block
setRefetchBlocks(true);
}
} catch (IOException ie) {
LOG.warn("src={}" + ", datanodes[{}]={}",
getSrc(), j, datanodes[j], ie);
}
}
return done;
}
/**
* Try one datanode for the block group checksum; a failure surfaces as an
* IOException so the caller can decide whether to retry another datanode.
*/
private void tryDatanode(LocatedStripedBlock blockGroup,
StripedBlockInfo stripedBlockInfo,
DatanodeInfo datanode) throws IOException {
try (IOStreamPair pair = getClient().connectToDN(datanode,
getTimeout(), blockGroup.getBlockToken())) {
LOG.debug("write to {}: {}, blockGroup={}",
datanode, Op.BLOCK_GROUP_CHECKSUM, blockGroup);
// get block MD5
createSender(pair).blockGroupChecksum(stripedBlockInfo,
blockGroup.getBlockToken());
BlockOpResponseProto reply = BlockOpResponseProto.parseFrom(
PBHelperClient.vintPrefixed(pair.in));
String logInfo = "for blockGroup " + blockGroup +
" from datanode " + datanode;
DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);
OpBlockChecksumResponseProto checksumData = reply.getChecksumResponse();
//read byte-per-checksum
final int bpc = checksumData.getBytesPerCrc();
if (bgIdx == 0) { //first block
setBytesPerCRC(bpc);
} else {
if (bpc != getBytesPerCRC()) {
throw new IOException("Byte-per-checksum not matched: bpc=" + bpc
+ " but bytesPerCRC=" + getBytesPerCRC());
}
}
//read crc-per-block
final long cpb = checksumData.getCrcPerBlock();
if (getLocatedBlocks().size() > 1 && bgIdx == 0) { // first block
setCrcPerBlock(cpb);
}
//read md5
final MD5Hash md5 = new MD5Hash(
checksumData.getMd5().toByteArray());
md5.write(getMd5out());
// read crc-type
final DataChecksum.Type ct;
if (checksumData.hasCrcType()) {
ct = PBHelperClient.convert(checksumData.getCrcType());
} else {
LOG.debug("Retrieving checksum from an earlier-version DataNode: " +
"inferring checksum by reading first byte");
ct = getClient().inferChecksumTypeByReading(blockGroup, datanode);
}
if (bgIdx == 0) {
setCrcType(ct);
} else if (getCrcType() != DataChecksum.Type.MIXED &&
getCrcType() != ct) {
// if crc types are mixed in a file
setCrcType(DataChecksum.Type.MIXED);
}
if (LOG.isDebugEnabled()) {
if (bgIdx == 0) {
LOG.debug("set bytesPerCRC=" + getBytesPerCRC()
+ ", crcPerBlock=" + getCrcPerBlock());
}
LOG.debug("got reply from " + datanode + ": md5=" + md5);
}
}
}
}
}
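Both computers build the file-level checksum the same way: each block (replicated case) or block group (striped case) contributes one MD5 that is written to the shared md5out stream, and the final result is an MD5 over that concatenation, which is what the MD5-of-MD5 naming refers to. A conceptual sketch of that composition; this is not the helper's actual finalization code:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.io.MD5Hash;

    static MD5Hash composeFileMd5(List<MD5Hash> perBlockMd5s) throws IOException {
      ByteArrayOutputStream buffer = new ByteArrayOutputStream();
      DataOutputStream md5out = new DataOutputStream(buffer);
      for (MD5Hash blockMd5 : perBlockMd5s) {
        blockMd5.write(md5out);          // same call as md5.write(getMd5out()) above
      }
      md5out.flush();
      return MD5Hash.digest(buffer.toByteArray());  // MD5 of the per-block MD5s
    }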

View File

@ -44,7 +44,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import org.apache.htrace.core.TraceScope;
@ -93,11 +92,6 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
*/
private final long bytesNeededToFinish;
/**
* True if we are reading from a local DataNode.
*/
private final boolean isLocal;
private boolean eos = false;
private boolean sentStatusCode = false;
@ -109,6 +103,8 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
private final Tracer tracer;
private final int networkDistance;
/* FSInputChecker interface */
/* same interface as inputStream java.io.InputStream#read()
@ -342,7 +338,8 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
private RemoteBlockReader(String file, String bpid, long blockId,
DataInputStream in, DataChecksum checksum, boolean verifyChecksum,
long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
DatanodeID datanodeID, PeerCache peerCache, Tracer tracer) {
DatanodeID datanodeID, PeerCache peerCache, Tracer tracer,
int networkDistance) {
// Path is used only for printing block and file information in debug
super(new Path("/" + Block.BLOCK_FILE_PREFIX + blockId +
":" + bpid + ":of:"+ file)/*too non path-like?*/,
@ -351,9 +348,6 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
checksum.getBytesPerChecksum(),
checksum.getChecksumSize());
this.isLocal = DFSUtilClient.isLocalAddress(NetUtils.
createSocketAddr(datanodeID.getXferAddr()));
this.peer = peer;
this.datanodeID = datanodeID;
this.in = in;
@ -375,6 +369,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
checksumSize = this.checksum.getChecksumSize();
this.peerCache = peerCache;
this.tracer = tracer;
this.networkDistance = networkDistance;
}
/**
@ -400,7 +395,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
DatanodeID datanodeID,
PeerCache peerCache,
CachingStrategy cachingStrategy,
Tracer tracer)
Tracer tracer, int networkDistance)
throws IOException {
// in and out will be closed when sock is closed (by the caller)
final DataOutputStream out =
@ -436,7 +431,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
return new RemoteBlockReader(file, block.getBlockPoolId(), block.getBlockId(),
in, checksum, verifyChecksum, startOffset, firstChunkOffset, len,
peer, datanodeID, peerCache, tracer);
peer, datanodeID, peerCache, tracer, networkDistance);
}
@Override
@ -493,11 +488,6 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
return RemoteBlockReader2.TCP_WINDOW_SIZE;
}
@Override
public boolean isLocal() {
return isLocal;
}
@Override
public boolean isShortCircuit() {
return false;
@ -512,4 +502,9 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
public DataChecksum getDataChecksum() {
return checksum;
}
@Override
public int getNetworkDistance() {
return networkDistance;
}
}

View File

@ -45,7 +45,6 @@ import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import org.apache.htrace.core.TraceScope;
@ -116,17 +115,14 @@ public class RemoteBlockReader2 implements BlockReader {
*/
private long bytesNeededToFinish;
/**
* True if we are reading from a local DataNode.
*/
private final boolean isLocal;
private final boolean verifyChecksum;
private boolean sentStatusCode = false;
private final Tracer tracer;
private final int networkDistance;
@VisibleForTesting
public Peer getPeer() {
return peer;
@ -280,9 +276,8 @@ public class RemoteBlockReader2 implements BlockReader {
protected RemoteBlockReader2(String file, long blockId,
DataChecksum checksum, boolean verifyChecksum,
long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
DatanodeID datanodeID, PeerCache peerCache, Tracer tracer) {
this.isLocal = DFSUtilClient.isLocalAddress(NetUtils.
createSocketAddr(datanodeID.getXferAddr()));
DatanodeID datanodeID, PeerCache peerCache, Tracer tracer,
int networkDistance) {
// Path is used only for printing block and file information in debug
this.peer = peer;
this.datanodeID = datanodeID;
@ -302,6 +297,7 @@ public class RemoteBlockReader2 implements BlockReader {
bytesPerChecksum = this.checksum.getBytesPerChecksum();
checksumSize = this.checksum.getChecksumSize();
this.tracer = tracer;
this.networkDistance = networkDistance;
}
@ -397,7 +393,8 @@ public class RemoteBlockReader2 implements BlockReader {
Peer peer, DatanodeID datanodeID,
PeerCache peerCache,
CachingStrategy cachingStrategy,
Tracer tracer) throws IOException {
Tracer tracer,
int networkDistance) throws IOException {
// in and out will be closed when sock is closed (by the caller)
final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
peer.getOutputStream()));
@ -430,7 +427,7 @@ public class RemoteBlockReader2 implements BlockReader {
return new RemoteBlockReader2(file, block.getBlockId(), checksum,
verifyChecksum, startOffset, firstChunkOffset, len, peer, datanodeID,
peerCache, tracer);
peerCache, tracer, networkDistance);
}
static void checkSuccess(
@ -453,11 +450,6 @@ public class RemoteBlockReader2 implements BlockReader {
return TCP_WINDOW_SIZE;
}
@Override
public boolean isLocal() {
return isLocal;
}
@Override
public boolean isShortCircuit() {
return false;
@ -472,4 +464,9 @@ public class RemoteBlockReader2 implements BlockReader {
public DataChecksum getDataChecksum() {
return checksum;
}
@Override
public int getNetworkDistance() {
return networkDistance;
}
}

View File

@ -87,4 +87,11 @@ public abstract class ReplicaAccessor {
* short-circuit byte count statistics.
*/
public abstract boolean isShortCircuit();
/**
* Return the network distance between the local machine and the remote machine.
*/
public int getNetworkDistance() {
return isLocal() ? 0 : Integer.MAX_VALUE;
}
}
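The default above keeps external ReplicaAccessor plugins working unchanged: a local accessor reports distance 0 and a remote one falls back to Integer.MAX_VALUE. A plugin with better topology knowledge can override it, for example (fragment only, the accessor's other abstract methods are omitted and the value is hypothetical):

    @Override
    public int getNetworkDistance() {
      // Hypothetical: a rack-local caching service could report the
      // same-rack distance instead of the MAX_VALUE fallback.
      return 2;
    }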

View File

@ -0,0 +1,100 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
/**
* The class describes the configured admin properties for a datanode.
*
* It is the static configuration specified by administrators via the dfsadmin
* command, as distinct from the runtime state. CombinedHostFileManager uses
* the class to deserialize the configurations from a JSON-based file format.
*
* To decommission a node, use AdminStates.DECOMMISSIONED.
*/
public class DatanodeAdminProperties {
private String hostName;
private int port;
private String upgradeDomain;
private AdminStates adminState = AdminStates.NORMAL;
/**
* Return the host name of the datanode.
* @return the host name of the datanode.
*/
public String getHostName() {
return hostName;
}
/**
* Set the host name of the datanode.
* @param hostName the host name of the datanode.
*/
public void setHostName(final String hostName) {
this.hostName = hostName;
}
/**
* Get the port number of the datanode.
* @return the port number of the datanode.
*/
public int getPort() {
return port;
}
/**
* Set the port number of the datanode.
* @param port the port number of the datanode.
*/
public void setPort(final int port) {
this.port = port;
}
/**
* Get the upgrade domain of the datanode.
* @return the upgrade domain of the datanode.
*/
public String getUpgradeDomain() {
return upgradeDomain;
}
/**
* Set the upgrade domain of the datanode.
* @param upgradeDomain the upgrade domain of the datanode.
*/
public void setUpgradeDomain(final String upgradeDomain) {
this.upgradeDomain = upgradeDomain;
}
/**
* Get the admin state of the datanode.
* @return the admin state of the datanode.
*/
public AdminStates getAdminState() {
return adminState;
}
/**
* Set the admin state of the datanode.
* @param adminState the admin state of the datanode.
*/
public void setAdminState(final AdminStates adminState) {
this.adminState = adminState;
}
}
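CombinedHostFileManager (and the CombinedHostsFileReader added further down) deserializes an array of these beans from a JSON file. A hedged sketch of what that looks like with the org.codehaus.jackson ObjectMapper already imported by the reader; the file name, field values, and JSON shape shown here are assumptions for illustration:

    import java.io.File;
    import java.io.IOException;
    import org.codehaus.jackson.map.ObjectMapper;
    import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;

    // Example file contents (one object per datanode):
    // [ {"hostName": "dn1.example.com", "port": 50010, "adminState": "DECOMMISSIONED"} ]
    static DatanodeAdminProperties[] readHosts(File json) throws IOException {
      ObjectMapper mapper = new ObjectMapper();
      return mapper.readValue(json, DatanodeAdminProperties[].class);
    }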

View File

@ -24,6 +24,8 @@ import org.apache.hadoop.classification.InterfaceStability;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import java.net.InetSocketAddress;
/**
* This class represents the primary identifier for a Datanode.
* Datanodes are identified by how they can be contacted (hostname
@ -327,4 +329,8 @@ public class DatanodeID implements Comparable<DatanodeID> {
.setContainerPort(this.getContainerPort())
.build();
}
public InetSocketAddress getResolvedAddress() {
return new InetSocketAddress(this.getIpAddr(), this.getXferPort());
}
}

View File

@ -0,0 +1,61 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;
/**
* Striped block info that can be sent elsewhere to perform block group level
* operations, such as computing the block group checksum.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class StripedBlockInfo {
private final ExtendedBlock block;
private final DatanodeInfo[] datanodes;
private final Token<BlockTokenIdentifier>[] blockTokens;
private final ErasureCodingPolicy ecPolicy;
public StripedBlockInfo(ExtendedBlock block, DatanodeInfo[] datanodes,
Token<BlockTokenIdentifier>[] blockTokens,
ErasureCodingPolicy ecPolicy) {
this.block = block;
this.datanodes = datanodes;
this.blockTokens = blockTokens;
this.ecPolicy = ecPolicy;
}
public ExtendedBlock getBlock() {
return block;
}
public DatanodeInfo[] getDatanodes() {
return datanodes;
}
public Token<BlockTokenIdentifier>[] getBlockTokens() {
return blockTokens;
}
public ErasureCodingPolicy getErasureCodingPolicy() {
return ecPolicy;
}
}

View File

@ -24,6 +24,7 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.StripedBlockInfo;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
@ -197,6 +198,17 @@ public interface DataTransferProtocol {
* @param blockToken security token for accessing the block.
* @throws IOException
*/
void blockChecksum(final ExtendedBlock blk,
final Token<BlockTokenIdentifier> blockToken) throws IOException;
void blockChecksum(ExtendedBlock blk,
Token<BlockTokenIdentifier> blockToken) throws IOException;
/**
* Get striped block group checksum (MD5 of CRC32).
*
* @param stripedBlockInfo a striped block info.
* @param blockToken security token for accessing the block.
* @throws IOException
*/
void blockGroupChecksum(StripedBlockInfo stripedBlockInfo,
Token<BlockTokenIdentifier> blockToken) throws IOException;
}

View File

@ -38,6 +38,7 @@ public enum Op {
REQUEST_SHORT_CIRCUIT_FDS((byte)87),
RELEASE_SHORT_CIRCUIT_FDS((byte)88),
REQUEST_SHORT_CIRCUIT_SHM((byte)89),
BLOCK_GROUP_CHECKSUM((byte)90),
CUSTOM((byte)127);
/** The code for this operation. */

View File

@ -28,11 +28,13 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.StripedBlockInfo;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto;
@ -261,4 +263,21 @@ public class Sender implements DataTransferProtocol {
send(out, Op.BLOCK_CHECKSUM, proto);
}
@Override
public void blockGroupChecksum(StripedBlockInfo stripedBlockInfo,
Token<BlockTokenIdentifier> blockToken) throws IOException {
OpBlockGroupChecksumProto proto = OpBlockGroupChecksumProto.newBuilder()
.setHeader(DataTransferProtoUtil.buildBaseHeader(
stripedBlockInfo.getBlock(), blockToken))
.setDatanodes(PBHelperClient.convertToProto(
stripedBlockInfo.getDatanodes()))
.addAllBlockTokens(PBHelperClient.convert(
stripedBlockInfo.getBlockTokens()))
.setEcPolicy(PBHelperClient.convertErasureCodingPolicy(
stripedBlockInfo.getErasureCodingPolicy()))
.build();
send(out, Op.BLOCK_GROUP_CHECKSUM, proto);
}
}

View File

@ -553,10 +553,8 @@ public class PBHelperClient {
proto.getCorrupt(),
cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
List<TokenProto> tokenProtos = proto.getBlockTokensList();
Token<BlockTokenIdentifier>[] blockTokens = new Token[indices.length];
for (int i = 0; i < indices.length; i++) {
blockTokens[i] = convert(tokenProtos.get(i));
}
Token<BlockTokenIdentifier>[] blockTokens =
convertTokens(tokenProtos);
((LocatedStripedBlock) lb).setBlockTokens(blockTokens);
}
lb.setBlockToken(convert(proto.getBlockToken()));
@ -564,6 +562,18 @@ public class PBHelperClient {
return lb;
}
static public Token<BlockTokenIdentifier>[] convertTokens(
List<TokenProto> tokenProtos) {
@SuppressWarnings("unchecked")
Token<BlockTokenIdentifier>[] blockTokens = new Token[tokenProtos.size()];
for (int i = 0; i < blockTokens.length; i++) {
blockTokens[i] = convert(tokenProtos.get(i));
}
return blockTokens;
}
static public DatanodeInfo convert(DatanodeInfoProto di) {
if (di == null) return null;
return new DatanodeInfo(
@ -815,9 +825,7 @@ public class PBHelperClient {
byte[] indices = sb.getBlockIndices();
builder.setBlockIndices(PBHelperClient.getByteString(indices));
Token<BlockTokenIdentifier>[] blockTokens = sb.getBlockTokens();
for (int i = 0; i < indices.length; i++) {
builder.addBlockTokens(PBHelperClient.convert(blockTokens[i]));
}
builder.addAllBlockTokens(convert(blockTokens));
}
return builder.setB(PBHelperClient.convert(b.getBlock()))
@ -825,6 +833,16 @@ public class PBHelperClient {
.setCorrupt(b.isCorrupt()).setOffset(b.getStartOffset()).build();
}
public static List<TokenProto> convert(
Token<BlockTokenIdentifier>[] blockTokens) {
List<TokenProto> results = new ArrayList<>(blockTokens.length);
for (Token<BlockTokenIdentifier> bt : blockTokens) {
results.add(convert(bt));
}
return results;
}
public static BlockStoragePolicy convert(BlockStoragePolicyProto proto) {
List<StorageTypeProto> cList = proto.getCreationPolicy()
.getStorageTypesList();
@ -2500,4 +2518,14 @@ public class PBHelperClient {
.setId(policy.getId());
return builder.build();
}
public static HdfsProtos.DatanodeInfosProto convertToProto(
DatanodeInfo[] datanodeInfos) {
HdfsProtos.DatanodeInfosProto.Builder builder =
HdfsProtos.DatanodeInfosProto.newBuilder();
for (DatanodeInfo datanodeInfo : datanodeInfos) {
builder.addDatanodes(PBHelperClient.convert(datanodeInfo));
}
return builder.build();
}
}

View File

@ -0,0 +1,76 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.util;
import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.io.IOException;
import java.io.Reader;
import java.util.Iterator;
import java.util.Set;
import java.util.HashSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.map.ObjectMapper;
import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
/**
* Reader support for JSON based datanode configuration, an alternative
* to the exclude/include files configuration.
* The JSON file is a sequence of elements, where each element
* describes the properties of a datanode. The properties of
* a datanode are defined in {@link DatanodeAdminProperties}. For example,
*
* {"hostName": "host1"}
* {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"}
* {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
*/
@InterfaceAudience.LimitedPrivate({"HDFS"})
@InterfaceStability.Unstable
public final class CombinedHostsFileReader {
private CombinedHostsFileReader() {
}
/**
* Deserialize a set of DatanodeAdminProperties from a json file.
* @param hostsFile the input json file to read from.
* @return the set of DatanodeAdminProperties
* @throws IOException
*/
public static Set<DatanodeAdminProperties>
readFile(final String hostsFile) throws IOException {
HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
ObjectMapper mapper = new ObjectMapper();
try (Reader input =
new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
Iterator<DatanodeAdminProperties> iterator =
mapper.readValues(new JsonFactory().createJsonParser(input),
DatanodeAdminProperties.class);
while (iterator.hasNext()) {
DatanodeAdminProperties properties = iterator.next();
allDNs.add(properties);
}
}
return allDNs;
}
}
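For illustration, a hypothetical snippet (the class name DumpCombinedHosts and the file path are invented for the example) that loads such a JSON hosts file and prints each entry using the getters this patch relies on elsewhere:
import java.util.Set;
import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
import org.apache.hadoop.hdfs.util.CombinedHostsFileReader;
public class DumpCombinedHosts {
  public static void main(String[] args) throws Exception {
    // Example path only; any readable JSON hosts file works.
    Set<DatanodeAdminProperties> dns =
        CombinedHostsFileReader.readFile("/tmp/dn-hosts.json");
    for (DatanodeAdminProperties dn : dns) {
      System.out.println(dn.getHostName() + ":" + dn.getPort()
          + " adminState=" + dn.getAdminState()
          + " upgradeDomain=" + dn.getUpgradeDomain());
    }
  }
}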

View File

@ -0,0 +1,69 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.util;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.util.Set;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.codehaus.jackson.map.ObjectMapper;
import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
/**
* Writer support for JSON based datanode configuration, an alternative
* to the exclude/include files configuration.
* The JSON file is a sequence of elements, where each element
* describes the properties of a datanode. The properties of
* a datanode are defined in {@link DatanodeAdminProperties}. For example,
*
* {"hostName": "host1"}
* {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"}
* {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
*/
@InterfaceAudience.LimitedPrivate({"HDFS"})
@InterfaceStability.Unstable
public final class CombinedHostsFileWriter {
private CombinedHostsFileWriter() {
}
/**
* Serialize a set of DatanodeAdminProperties to a json file.
* @param hostsFile the json file name.
* @param allDNs the set of DatanodeAdminProperties
* @throws IOException
*/
public static void writeFile(final String hostsFile,
final Set<DatanodeAdminProperties> allDNs) throws IOException {
StringBuilder configs = new StringBuilder();
try (Writer output =
new OutputStreamWriter(new FileOutputStream(hostsFile), "UTF-8")) {
for (DatanodeAdminProperties datanodeAdminProperties: allDNs) {
ObjectMapper mapper = new ObjectMapper();
configs.append(mapper.writeValueAsString(datanodeAdminProperties));
}
output.write(configs.toString());
}
}
}
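A matching writer sketch; the bean-style setters on DatanodeAdminProperties are assumed here (they are not shown in this diff), and the output path is again illustrative:
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
import org.apache.hadoop.hdfs.util.CombinedHostsFileWriter;
public class WriteCombinedHosts {
  public static void main(String[] args) throws Exception {
    Set<DatanodeAdminProperties> dns = new HashSet<>();
    DatanodeAdminProperties dn = new DatanodeAdminProperties();
    dn.setHostName("host2");       // setters assumed, mirroring the JSON keys
    dn.setPort(50);
    dn.setUpgradeDomain("ud0");
    dns.add(dn);
    // Writes one JSON object per datanode, concatenated, e.g.
    // {"hostName":"host2","port":50,"upgradeDomain":"ud0"}
    CombinedHostsFileWriter.writeFile("/tmp/dn-hosts.json", dns);
  }
}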

View File

@ -75,6 +75,18 @@ public class StripedBlockUtil {
public static final Logger LOG = LoggerFactory.getLogger(StripedBlockUtil.class);
/**
* Parses a striped block group into individual blocks.
* @param bg The striped block group
* @param ecPolicy The erasure coding policy
* @return An array of the blocks in the group
*/
public static LocatedBlock[] parseStripedBlockGroup(LocatedStripedBlock bg,
ErasureCodingPolicy ecPolicy) {
return parseStripedBlockGroup(bg, ecPolicy.getCellSize(),
ecPolicy.getNumDataUnits(), ecPolicy.getNumParityUnits());
}
/**
* This method parses a striped block group into individual blocks.
*

View File

@ -74,7 +74,6 @@ message OpReadBlockProto {
optional CachingStrategyProto cachingStrategy = 5;
}
message ChecksumProto {
required ChecksumTypeProto type = 1;
required uint32 bytesPerChecksum = 2;
@ -149,6 +148,14 @@ message OpBlockChecksumProto {
required BaseHeaderProto header = 1;
}
message OpBlockGroupChecksumProto {
required BaseHeaderProto header = 1;
required DatanodeInfosProto datanodes = 2;
// each internal block has a block token
repeated hadoop.common.TokenProto blockTokens = 3;
required ErasureCodingPolicyProto ecPolicy = 4;
}
/**
* An ID uniquely identifying a shared memory segment.
*/

View File

@ -570,6 +570,11 @@ int fuseConnectAsThreadUid(struct hdfsConn **conn)
ctx = fuse_get_context();
usrname = getUsername(ctx->uid);
if (!usrname) {
ERROR("fuseConnectAsThreadUid(): failed to get username for uid %"PRId64
"\n", (uint64_t)ctx->uid);
return EIO;
}
ret = fuseConnect(usrname, ctx, conn);
free(usrname);
return ret;

View File

@ -142,7 +142,9 @@ case ${COMMAND} in
echo "HDFS_DIR='${HDFS_DIR}'"
echo "HDFS_LIB_JARS_DIR='${HDFS_LIB_JARS_DIR}'"
echo "HADOOP_CONF_DIR='${HADOOP_CONF_DIR}'"
echo "HADOOP_TOOLS_PATH='${HADOOP_TOOLS_PATH}'"
echo "HADOOP_TOOLS_HOME='${HADOOP_TOOLS_HOME}'"
echo "HADOOP_TOOLS_DIR='${HADOOP_TOOLS_DIR}'"
echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'"
exit 0
;;
erasurecode)
@ -166,7 +168,6 @@ case ${COMMAND} in
;;
haadmin)
CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin
hadoop_add_to_classpath_toolspath
hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
;;

View File

@ -415,12 +415,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_METRICS_PERCENTILES_INTERVALS_KEY = "dfs.metrics.percentiles.intervals";
public static final String DFS_DATANODE_HOST_NAME_KEY =
HdfsClientConfigKeys.DeprecatedKeys.DFS_DATANODE_HOST_NAME_KEY;
public static final String DFS_NAMENODE_HOSTS_KEY = "dfs.namenode.hosts";
public static final String DFS_NAMENODE_HOSTS_EXCLUDE_KEY = "dfs.namenode.hosts.exclude";
public static final String DFS_NAMENODE_CHECKPOINT_DIR_KEY =
HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY;
public static final String DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY =
HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY;
public static final String DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY =
"dfs.namenode.hosts.provider.classname";
public static final String DFS_HOSTS = "dfs.hosts";
public static final String DFS_HOSTS_EXCLUDE = "dfs.hosts.exclude";
public static final String DFS_NAMENODE_AUDIT_LOGGERS_KEY = "dfs.namenode.audit.loggers";
@ -750,6 +750,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES = "dfs.namenode.list.encryption.zones.num.responses";
public static final String DFS_ENCRYPTION_KEY_PROVIDER_URI =
HdfsClientConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI;
public static final String DFS_NAMENODE_EDEKCACHELOADER_INTERVAL_MS_KEY = "dfs.namenode.edekcacheloader.interval.ms";
public static final int DFS_NAMENODE_EDEKCACHELOADER_INTERVAL_MS_DEFAULT = 1000;
public static final String DFS_NAMENODE_EDEKCACHELOADER_INITIAL_DELAY_MS_KEY = "dfs.namenode.edekcacheloader.initial.delay.ms";
public static final int DFS_NAMENODE_EDEKCACHELOADER_INITIAL_DELAY_MS_DEFAULT = 3000;
// Journal-node related configs. These are read on the JN side.
public static final String DFS_JOURNALNODE_EDITS_DIR_KEY = "dfs.journalnode.edits.dir";

View File

@ -26,11 +26,13 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.StripedBlockInfo;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto;
@ -111,6 +113,9 @@ public abstract class Receiver implements DataTransferProtocol {
case BLOCK_CHECKSUM:
opBlockChecksum(in);
break;
case BLOCK_GROUP_CHECKSUM:
opStripedBlockChecksum(in);
break;
case TRANSFER_BLOCK:
opTransferBlock(in);
break;
@ -290,4 +295,27 @@ public abstract class Receiver implements DataTransferProtocol {
if (traceScope != null) traceScope.close();
}
}
/** Receive OP_BLOCK_GROUP_CHECKSUM. */
private void opStripedBlockChecksum(DataInputStream dis) throws IOException {
OpBlockGroupChecksumProto proto =
OpBlockGroupChecksumProto.parseFrom(vintPrefixed(dis));
TraceScope traceScope = continueTraceSpan(proto.getHeader(),
proto.getClass().getSimpleName());
StripedBlockInfo stripedBlockInfo = new StripedBlockInfo(
PBHelperClient.convert(proto.getHeader().getBlock()),
PBHelperClient.convert(proto.getDatanodes()),
PBHelperClient.convertTokens(proto.getBlockTokensList()),
PBHelperClient.convertErasureCodingPolicy(proto.getEcPolicy())
);
try {
blockGroupChecksum(stripedBlockInfo,
PBHelperClient.convert(proto.getHeader().getToken()));
} finally {
if (traceScope != null) {
traceScope.close();
}
}
}
}

View File

@ -0,0 +1,250 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import com.google.common.collect.UnmodifiableIterator;
import com.google.common.collect.Iterables;
import com.google.common.collect.Collections2;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import com.google.common.base.Predicate;
import org.apache.hadoop.hdfs.util.CombinedHostsFileReader;
/**
* This class manages datanode configuration using a json file.
* Please refer to {@link CombinedHostsFileReader} for the json format.
* <p/>
* <p/>
* Entries may or may not specify a port. If they don't, we consider
* them to apply to every DataNode on that host. The code canonicalizes the
* entries into IP addresses.
* <p/>
* <p/>
* The code ignores all entries whose IP addresses the DNS fails to
* resolve. This is okay because by default the NN rejects the registrations
* of DNs when it fails to do a forward and reverse lookup. Note that DNS
* resolutions are only done during the loading time to minimize the latency.
*/
public class CombinedHostFileManager extends HostConfigManager {
private static final Logger LOG = LoggerFactory.getLogger(
CombinedHostFileManager.class);
private Configuration conf;
private HostProperties hostProperties = new HostProperties();
static class HostProperties {
private Multimap<InetAddress, DatanodeAdminProperties> allDNs =
HashMultimap.create();
// Optimization: if no node in the file is in service, any node is
// allowed to register with the nn. This is equivalent to having
// an empty "include" file.
private boolean emptyInServiceNodeLists = true;
synchronized void add(InetAddress addr,
DatanodeAdminProperties properties) {
allDNs.put(addr, properties);
if (properties.getAdminState().equals(
AdminStates.NORMAL)) {
emptyInServiceNodeLists = false;
}
}
// If the includes list is empty, act as if everything is in the
// includes list.
synchronized boolean isIncluded(final InetSocketAddress address) {
return emptyInServiceNodeLists || Iterables.any(
allDNs.get(address.getAddress()),
new Predicate<DatanodeAdminProperties>() {
public boolean apply(DatanodeAdminProperties input) {
return input.getPort() == 0 ||
input.getPort() == address.getPort();
}
});
}
synchronized boolean isExcluded(final InetSocketAddress address) {
return Iterables.any(allDNs.get(address.getAddress()),
new Predicate<DatanodeAdminProperties>() {
public boolean apply(DatanodeAdminProperties input) {
return input.getAdminState().equals(
AdminStates.DECOMMISSIONED) &&
(input.getPort() == 0 ||
input.getPort() == address.getPort());
}
});
}
synchronized String getUpgradeDomain(final InetSocketAddress address) {
Iterable<DatanodeAdminProperties> datanode = Iterables.filter(
allDNs.get(address.getAddress()),
new Predicate<DatanodeAdminProperties>() {
public boolean apply(DatanodeAdminProperties input) {
return (input.getPort() == 0 ||
input.getPort() == address.getPort());
}
});
return datanode.iterator().hasNext() ?
datanode.iterator().next().getUpgradeDomain() : null;
}
Iterable<InetSocketAddress> getIncludes() {
return new Iterable<InetSocketAddress>() {
@Override
public Iterator<InetSocketAddress> iterator() {
return new HostIterator(allDNs.entries());
}
};
}
Iterable<InetSocketAddress> getExcludes() {
return new Iterable<InetSocketAddress>() {
@Override
public Iterator<InetSocketAddress> iterator() {
return new HostIterator(
Collections2.filter(allDNs.entries(),
new Predicate<java.util.Map.Entry<InetAddress,
DatanodeAdminProperties>>() {
public boolean apply(java.util.Map.Entry<InetAddress,
DatanodeAdminProperties> entry) {
return entry.getValue().getAdminState().equals(
AdminStates.DECOMMISSIONED);
}
}
));
}
};
}
static class HostIterator extends UnmodifiableIterator<InetSocketAddress> {
private final Iterator<Map.Entry<InetAddress,
DatanodeAdminProperties>> it;
public HostIterator(Collection<java.util.Map.Entry<InetAddress,
DatanodeAdminProperties>> nodes) {
this.it = nodes.iterator();
}
@Override
public boolean hasNext() {
return it.hasNext();
}
@Override
public InetSocketAddress next() {
Map.Entry<InetAddress, DatanodeAdminProperties> e = it.next();
return new InetSocketAddress(e.getKey(), e.getValue().getPort());
}
}
}
@Override
public Iterable<InetSocketAddress> getIncludes() {
return hostProperties.getIncludes();
}
@Override
public Iterable<InetSocketAddress> getExcludes() {
return hostProperties.getExcludes();
}
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
@Override
public Configuration getConf() {
return conf;
}
@Override
public void refresh() throws IOException {
refresh(conf.get(DFSConfigKeys.DFS_HOSTS, ""));
}
private void refresh(final String hostsFile) throws IOException {
HostProperties hostProps = new HostProperties();
Set<DatanodeAdminProperties> all =
CombinedHostsFileReader.readFile(hostsFile);
for(DatanodeAdminProperties properties : all) {
InetSocketAddress addr = parseEntry(hostsFile,
properties.getHostName(), properties.getPort());
if (addr != null) {
hostProps.add(addr.getAddress(), properties);
}
}
refresh(hostProps);
}
@VisibleForTesting
static InetSocketAddress parseEntry(final String fn, final String hostName,
final int port) {
InetSocketAddress addr = new InetSocketAddress(hostName, port);
if (addr.isUnresolved()) {
LOG.warn("Failed to resolve {} in {}. ", hostName, fn);
return null;
}
return addr;
}
@Override
public synchronized boolean isIncluded(final DatanodeID dn) {
return hostProperties.isIncluded(dn.getResolvedAddress());
}
@Override
public synchronized boolean isExcluded(final DatanodeID dn) {
return isExcluded(dn.getResolvedAddress());
}
private boolean isExcluded(final InetSocketAddress address) {
return hostProperties.isExcluded(address);
}
@Override
public synchronized String getUpgradeDomain(final DatanodeID dn) {
return hostProperties.getUpgradeDomain(dn.getResolvedAddress());
}
/**
* Replace the properties list with the new instance. The
* old instance is discarded.
* @param hostProperties the new properties list
*/
@VisibleForTesting
private void refresh(final HostProperties hostProperties) {
synchronized (this) {
this.hostProperties = hostProperties;
}
}
}
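To tie this manager to the NameNode, a hedged configuration sketch using the keys added to DFSConfigKeys in this patch (the JSON path is an example value): DatanodeManager instantiates the class named by dfs.namenode.hosts.provider.classname, and the manager's refresh() then reads the JSON file named by dfs.hosts.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager;
import org.apache.hadoop.hdfs.server.blockmanagement.HostConfigManager;
public class CombinedHostsConfigExample {
  static Configuration newConf() {
    Configuration conf = new Configuration();
    // Select the JSON-backed manager instead of the default HostFileManager.
    conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
        CombinedHostFileManager.class, HostConfigManager.class);
    // Point dfs.hosts at the combined JSON hosts file.
    conf.set(DFSConfigKeys.DFS_HOSTS, "/tmp/dn-hosts.json");
    return conf;
  }
}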

View File

@ -111,7 +111,7 @@ public class DatanodeManager {
private final int defaultIpcPort;
/** Read include/exclude files. */
private final HostFileManager hostFileManager = new HostFileManager();
private HostConfigManager hostConfigManager;
/** The period to wait for datanode heartbeat.*/
private long heartbeatExpireInterval;
@ -204,9 +204,11 @@ public class DatanodeManager {
this.defaultIpcPort = NetUtils.createSocketAddr(
conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
this.hostConfigManager = ReflectionUtils.newInstance(
conf.getClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
HostFileManager.class, HostConfigManager.class), conf);
try {
this.hostFileManager.refresh(conf.get(DFSConfigKeys.DFS_HOSTS, ""),
conf.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, ""));
this.hostConfigManager.refresh();
} catch (IOException e) {
LOG.error("error reading hosts files: ", e);
}
@ -224,7 +226,7 @@ public class DatanodeManager {
// in the cache; so future calls to resolve will be fast.
if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
final ArrayList<String> locations = new ArrayList<>();
for (InetSocketAddress addr : hostFileManager.getIncludes()) {
for (InetSocketAddress addr : hostConfigManager.getIncludes()) {
locations.add(addr.getAddress().getHostAddress());
}
dnsToSwitchMapping.resolve(locations);
@ -337,8 +339,8 @@ public class DatanodeManager {
return decomManager;
}
HostFileManager getHostFileManager() {
return hostFileManager;
public HostConfigManager getHostConfigManager() {
return hostConfigManager;
}
@VisibleForTesting
@ -632,6 +634,7 @@ public class DatanodeManager {
networktopology.add(node); // may throw InvalidTopologyException
host2DatanodeMap.add(node);
checkIfClusterIsNowMultiRack(node);
resolveUpgradeDomain(node);
if (LOG.isDebugEnabled()) {
LOG.debug(getClass().getSimpleName() + ".addDatanode: "
@ -704,7 +707,14 @@ public class DatanodeManager {
return new HashMap<> (this.datanodesSoftwareVersions);
}
}
void resolveUpgradeDomain(DatanodeDescriptor node) {
String upgradeDomain = hostConfigManager.getUpgradeDomain(node);
if (upgradeDomain != null && upgradeDomain.length() > 0) {
node.setUpgradeDomain(upgradeDomain);
}
}
/**
* Resolve a node's network location. If the DNS to switch mapping fails
* then this method guarantees default rack location.
@ -831,7 +841,7 @@ public class DatanodeManager {
*/
void startDecommissioningIfExcluded(DatanodeDescriptor nodeReg) {
// If the registered node is in exclude list, then decommission it
if (getHostFileManager().isExcluded(nodeReg)) {
if (getHostConfigManager().isExcluded(nodeReg)) {
decomManager.startDecommission(nodeReg);
}
}
@ -871,7 +881,7 @@ public class DatanodeManager {
// Checks if the node is not on the hosts list. If it is not, then
// it will be disallowed from registering.
if (!hostFileManager.isIncluded(nodeReg)) {
if (!hostConfigManager.isIncluded(nodeReg)) {
throw new DisallowedDatanodeException(nodeReg);
}
@ -939,7 +949,8 @@ public class DatanodeManager {
getNetworkDependenciesWithDefault(nodeS));
}
getNetworkTopology().add(nodeS);
resolveUpgradeDomain(nodeS);
// also treat the registration message as a heartbeat
heartbeatManager.register(nodeS);
incrementVersionCount(nodeS.getSoftwareVersion());
@ -971,7 +982,8 @@ public class DatanodeManager {
}
networktopology.add(nodeDescr);
nodeDescr.setSoftwareVersion(nodeReg.getSoftwareVersion());
resolveUpgradeDomain(nodeDescr);
// register new datanode
addDatanode(nodeDescr);
blockManager.getBlockReportLeaseManager().register(nodeDescr);
@ -1026,9 +1038,9 @@ public class DatanodeManager {
// Update the file names and refresh internal includes and excludes list.
if (conf == null) {
conf = new HdfsConfiguration();
this.hostConfigManager.setConf(conf);
}
this.hostFileManager.refresh(conf.get(DFSConfigKeys.DFS_HOSTS, ""),
conf.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, ""));
this.hostConfigManager.refresh();
}
/**
@ -1044,15 +1056,16 @@ public class DatanodeManager {
}
for (DatanodeDescriptor node : copy.values()) {
// Check if not include.
if (!hostFileManager.isIncluded(node)) {
if (!hostConfigManager.isIncluded(node)) {
node.setDisallowed(true); // case 2.
} else {
if (hostFileManager.isExcluded(node)) {
if (hostConfigManager.isExcluded(node)) {
decomManager.startDecommission(node); // case 3.
} else {
decomManager.stopDecommission(node); // case 4.
}
}
node.setUpgradeDomain(hostConfigManager.getUpgradeDomain(node));
}
}
@ -1268,9 +1281,9 @@ public class DatanodeManager {
type == DatanodeReportType.DECOMMISSIONING;
ArrayList<DatanodeDescriptor> nodes;
final HostFileManager.HostSet foundNodes = new HostFileManager.HostSet();
final HostFileManager.HostSet includedNodes = hostFileManager.getIncludes();
final HostFileManager.HostSet excludedNodes = hostFileManager.getExcludes();
final HostSet foundNodes = new HostSet();
final Iterable<InetSocketAddress> includedNodes =
hostConfigManager.getIncludes();
synchronized(this) {
nodes = new ArrayList<>(datanodeMap.size());
@ -1281,11 +1294,11 @@ public class DatanodeManager {
if (((listLiveNodes && !isDead) ||
(listDeadNodes && isDead) ||
(listDecommissioningNodes && isDecommissioning)) &&
hostFileManager.isIncluded(dn)) {
hostConfigManager.isIncluded(dn)) {
nodes.add(dn);
}
foundNodes.add(HostFileManager.resolvedAddressFromDatanodeID(dn));
foundNodes.add(dn.getResolvedAddress());
}
}
Collections.sort(nodes);
@ -1309,7 +1322,7 @@ public class DatanodeManager {
addr.getPort() == 0 ? defaultXferPort : addr.getPort(),
defaultInfoPort, defaultInfoSecurePort, defaultIpcPort));
setDatanodeDead(dn);
if (excludedNodes.match(addr)) {
if (hostConfigManager.isExcluded(dn)) {
dn.setDecommissioned();
}
nodes.add(dn);
@ -1318,8 +1331,8 @@ public class DatanodeManager {
if (LOG.isDebugEnabled()) {
LOG.debug("getDatanodeListForReport with " +
"includedNodes = " + hostFileManager.getIncludes() +
", excludedNodes = " + hostFileManager.getExcludes() +
"includedNodes = " + hostConfigManager.getIncludes() +
", excludedNodes = " + hostConfigManager.getExcludes() +
", foundNodes = " + foundNodes +
", nodes = " + nodes);
}

View File

@ -0,0 +1,80 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import java.io.IOException;
import java.net.InetSocketAddress;
/**
* This interface abstracts how datanode configuration is managed.
*
* Each implementation defines its own way to persist the configuration.
* For example, it can use one JSON file to store the configs for all
* datanodes; or it can use one file to store in-service datanodes and another
* file to store decommission-requested datanodes.
*
* These files control which DataNodes the NameNode expects to see in the
* cluster.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public abstract class HostConfigManager implements Configurable {
/**
* Return all the datanodes that are allowed to connect to the namenode.
* @return Iterable of all datanodes
*/
public abstract Iterable<InetSocketAddress> getIncludes();
/**
* Return all datanodes that should be in decommissioned state.
* @return Iterable of those datanodes
*/
public abstract Iterable<InetSocketAddress> getExcludes();
/**
* Check if a datanode is allowed to connect to the namenode.
* @param dn the DatanodeID of the datanode
* @return true if dn is allowed to connect to the namenode.
*/
public abstract boolean isIncluded(DatanodeID dn);
/**
* Check if a datanode needs to be decommissioned.
* @param dn the DatanodeID of the datanode
* @return true if dn needs to be decommissioned.
*/
public abstract boolean isExcluded(DatanodeID dn);
/**
* Reload the configuration.
*/
public abstract void refresh() throws IOException;
/**
* Get the upgrade domain of a datanode.
* @param dn the DatanodeID of the datanode
* @return the upgrade domain of dn.
*/
public abstract String getUpgradeDomain(DatanodeID dn);
}
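As a sketch of the contract, a hypothetical minimal implementation that admits every datanode, excludes none, and carries no upgrade domains (illustration only, not part of the patch):
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.Collections;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
public class AllowAllHostConfigManager extends HostConfigManager {
  private Configuration conf;
  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
  }
  @Override
  public Configuration getConf() {
    return conf;
  }
  @Override
  public Iterable<InetSocketAddress> getIncludes() {
    // No static include list; isIncluded() below admits everyone.
    return Collections.<InetSocketAddress>emptyList();
  }
  @Override
  public Iterable<InetSocketAddress> getExcludes() {
    return Collections.<InetSocketAddress>emptyList();
  }
  @Override
  public boolean isIncluded(DatanodeID dn) {
    return true;
  }
  @Override
  public boolean isExcluded(DatanodeID dn) {
    return false;
  }
  @Override
  public void refresh() throws IOException {
    // Nothing to reload.
  }
  @Override
  public String getUpgradeDomain(DatanodeID dn) {
    return null;
  }
}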

View File

@ -18,28 +18,18 @@
package org.apache.hadoop.hdfs.server.blockmanagement;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Iterators;
import com.google.common.collect.Multimap;
import com.google.common.collect.UnmodifiableIterator;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.util.HostsFileReader;
import javax.annotation.Nullable;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
/**
* This class manages the include and exclude files for HDFS.
@ -59,11 +49,27 @@ import java.util.Map;
* of DNs when it fails to do a forward and reverse lookup. Note that DNS
* resolutions are only done during the loading time to minimize the latency.
*/
class HostFileManager {
public class HostFileManager extends HostConfigManager {
private static final Log LOG = LogFactory.getLog(HostFileManager.class);
private Configuration conf;
private HostSet includes = new HostSet();
private HostSet excludes = new HostSet();
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
@Override
public Configuration getConf() {
return conf;
}
@Override
public void refresh() throws IOException {
refresh(conf.get(DFSConfigKeys.DFS_HOSTS, ""),
conf.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, ""));
}
private static HostSet readFile(String type, String filename)
throws IOException {
HostSet res = new HostSet();
@ -99,31 +105,37 @@ class HostFileManager {
return null;
}
static InetSocketAddress resolvedAddressFromDatanodeID(DatanodeID id) {
return new InetSocketAddress(id.getIpAddr(), id.getXferPort());
}
synchronized HostSet getIncludes() {
@Override
public synchronized HostSet getIncludes() {
return includes;
}
synchronized HostSet getExcludes() {
@Override
public synchronized HostSet getExcludes() {
return excludes;
}
// If the includes list is empty, act as if everything is in the
// includes list.
synchronized boolean isIncluded(DatanodeID dn) {
return includes.isEmpty() || includes.match
(resolvedAddressFromDatanodeID(dn));
@Override
public synchronized boolean isIncluded(DatanodeID dn) {
return includes.isEmpty() || includes.match(dn.getResolvedAddress());
}
synchronized boolean isExcluded(DatanodeID dn) {
return excludes.match(resolvedAddressFromDatanodeID(dn));
@Override
public synchronized boolean isExcluded(DatanodeID dn) {
return isExcluded(dn.getResolvedAddress());
}
synchronized boolean hasIncludes() {
return !includes.isEmpty();
private boolean isExcluded(InetSocketAddress address) {
return excludes.match(address);
}
@Override
public synchronized String getUpgradeDomain(final DatanodeID dn) {
// The include/exclude file based configuration doesn't support
// upgrade domains.
return null;
}
/**
@ -133,7 +145,8 @@ class HostFileManager {
* @param excludeFile the path to the new excludes list
* @throws IOException thrown if there is a problem reading one of the files
*/
void refresh(String includeFile, String excludeFile) throws IOException {
private void refresh(String includeFile, String excludeFile)
throws IOException {
HostSet newIncludes = readFile("included", includeFile);
HostSet newExcludes = readFile("excluded", excludeFile);
@ -153,84 +166,4 @@ class HostFileManager {
excludes = newExcludes;
}
}
/**
* The HostSet allows efficient queries on matching wildcard addresses.
* <p/>
* For InetSocketAddress A and B with the same host address,
* we define a partial order between A and B, A <= B iff A.getPort() == B
* .getPort() || B.getPort() == 0.
*/
static class HostSet implements Iterable<InetSocketAddress> {
// Host -> lists of ports
private final Multimap<InetAddress, Integer> addrs = HashMultimap.create();
/**
* The function that checks whether there exists an entry foo in the set
* so that foo <= addr.
*/
boolean matchedBy(InetSocketAddress addr) {
Collection<Integer> ports = addrs.get(addr.getAddress());
return addr.getPort() == 0 ? !ports.isEmpty() : ports.contains(addr
.getPort());
}
/**
* The function that checks whether there exists an entry foo in the set
* so that addr <= foo.
*/
boolean match(InetSocketAddress addr) {
int port = addr.getPort();
Collection<Integer> ports = addrs.get(addr.getAddress());
boolean exactMatch = ports.contains(port);
boolean genericMatch = ports.contains(0);
return exactMatch || genericMatch;
}
boolean isEmpty() {
return addrs.isEmpty();
}
int size() {
return addrs.size();
}
void add(InetSocketAddress addr) {
Preconditions.checkArgument(!addr.isUnresolved());
addrs.put(addr.getAddress(), addr.getPort());
}
@Override
public Iterator<InetSocketAddress> iterator() {
return new UnmodifiableIterator<InetSocketAddress>() {
private final Iterator<Map.Entry<InetAddress,
Integer>> it = addrs.entries().iterator();
@Override
public boolean hasNext() {
return it.hasNext();
}
@Override
public InetSocketAddress next() {
Map.Entry<InetAddress, Integer> e = it.next();
return new InetSocketAddress(e.getKey(), e.getValue());
}
};
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("HostSet(");
Joiner.on(",").appendTo(sb, Iterators.transform(iterator(),
new Function<InetSocketAddress, String>() {
@Override
public String apply(@Nullable InetSocketAddress addr) {
assert addr != null;
return addr.getAddress().getHostAddress() + ":" + addr.getPort();
}
}));
return sb.append(")").toString();
}
}
}

View File

@ -0,0 +1,114 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import com.google.common.base.Function;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Iterators;
import com.google.common.collect.Multimap;
import com.google.common.collect.UnmodifiableIterator;
import javax.annotation.Nullable;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
/**
* The HostSet allows efficient queries on matching wildcard addresses.
* <p/>
* For InetSocketAddress A and B with the same host address,
* we define a partial order between A and B, A <= B iff A.getPort() == B
* .getPort() || B.getPort() == 0.
*/
public class HostSet implements Iterable<InetSocketAddress> {
// Host -> lists of ports
private final Multimap<InetAddress, Integer> addrs = HashMultimap.create();
/**
* The function that checks whether there exists an entry foo in the set
* so that foo <= addr.
*/
boolean matchedBy(InetSocketAddress addr) {
Collection<Integer> ports = addrs.get(addr.getAddress());
return addr.getPort() == 0 ? !ports.isEmpty() : ports.contains(addr
.getPort());
}
/**
* The function that checks whether there exists an entry foo in the set
* so that addr <= foo.
*/
boolean match(InetSocketAddress addr) {
int port = addr.getPort();
Collection<Integer> ports = addrs.get(addr.getAddress());
boolean exactMatch = ports.contains(port);
boolean genericMatch = ports.contains(0);
return exactMatch || genericMatch;
}
boolean isEmpty() {
return addrs.isEmpty();
}
int size() {
return addrs.size();
}
void add(InetSocketAddress addr) {
Preconditions.checkArgument(!addr.isUnresolved());
addrs.put(addr.getAddress(), addr.getPort());
}
@Override
public Iterator<InetSocketAddress> iterator() {
return new UnmodifiableIterator<InetSocketAddress>() {
private final Iterator<Map.Entry<InetAddress,
Integer>> it = addrs.entries().iterator();
@Override
public boolean hasNext() {
return it.hasNext();
}
@Override
public InetSocketAddress next() {
Map.Entry<InetAddress, Integer> e = it.next();
return new InetSocketAddress(e.getKey(), e.getValue());
}
};
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("HostSet(");
Joiner.on(",").appendTo(sb, Iterators.transform(iterator(),
new Function<InetSocketAddress, String>() {
@Override
public String apply(@Nullable InetSocketAddress addr) {
assert addr != null;
return addr.getAddress().getHostAddress() + ":" + addr.getPort();
}
}));
return sb.append(")").toString();
}
}
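A small, hypothetical usage snippet (placed in the same package, since add/match/matchedBy are package-private) showing the wildcard-port semantics described above:
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.net.InetSocketAddress;
public class HostSetExample {
  public static void main(String[] args) {
    HostSet set = new HostSet();
    // Port 0 acts as a wildcard: this entry covers every port on 127.0.0.1.
    set.add(new InetSocketAddress("127.0.0.1", 0));
    // match(): does some entry foo satisfy addr <= foo?  true here.
    System.out.println(set.match(new InetSocketAddress("127.0.0.1", 50010)));
    // matchedBy(): does some entry foo satisfy foo <= addr?  true for port 0.
    System.out.println(set.matchedBy(new InetSocketAddress("127.0.0.1", 0)));
  }
}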

View File

@ -19,16 +19,30 @@ package org.apache.hadoop.hdfs.server.datanode;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.StripedBlockInfo;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.hdfs.util.StripedBlockUtil;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.security.MessageDigest;
@ -41,13 +55,87 @@ final class BlockChecksumHelper {
static final Logger LOG = LoggerFactory.getLogger(BlockChecksumHelper.class);
private BlockChecksumHelper() {}
private BlockChecksumHelper() {
}
/**
* The abstract base block checksum computer.
*/
static abstract class BlockChecksumComputer {
static abstract class AbstractBlockChecksumComputer {
private final DataNode datanode;
private byte[] outBytes;
private int bytesPerCRC = -1;
private DataChecksum.Type crcType = null;
private long crcPerBlock = -1;
private int checksumSize = -1;
AbstractBlockChecksumComputer(DataNode datanode) throws IOException {
this.datanode = datanode;
}
abstract void compute() throws IOException;
Sender createSender(IOStreamPair pair) {
DataOutputStream out = (DataOutputStream) pair.out;
return new Sender(out);
}
DataNode getDatanode() {
return datanode;
}
InputStream getBlockInputStream(ExtendedBlock block, long seekOffset)
throws IOException {
return datanode.data.getBlockInputStream(block, seekOffset);
}
void setOutBytes(byte[] bytes) {
this.outBytes = bytes;
}
byte[] getOutBytes() {
return outBytes;
}
int getBytesPerCRC() {
return bytesPerCRC;
}
public void setBytesPerCRC(int bytesPerCRC) {
this.bytesPerCRC = bytesPerCRC;
}
public void setCrcType(DataChecksum.Type crcType) {
this.crcType = crcType;
}
public void setCrcPerBlock(long crcPerBlock) {
this.crcPerBlock = crcPerBlock;
}
public void setChecksumSize(int checksumSize) {
this.checksumSize = checksumSize;
}
DataChecksum.Type getCrcType() {
return crcType;
}
long getCrcPerBlock() {
return crcPerBlock;
}
int getChecksumSize() {
return checksumSize;
}
}
/**
* The abstract base block checksum computer.
*/
static abstract class BlockChecksumComputer
extends AbstractBlockChecksumComputer {
private final ExtendedBlock block;
// client side now can specify a range of the block for checksum
private final long requestLength;
@ -56,17 +144,12 @@ final class BlockChecksumHelper {
private final long visibleLength;
private final boolean partialBlk;
private byte[] outBytes;
private int bytesPerCRC = -1;
private DataChecksum.Type crcType = null;
private long crcPerBlock = -1;
private int checksumSize = -1;
private BlockMetadataHeader header;
private DataChecksum checksum;
BlockChecksumComputer(DataNode datanode,
ExtendedBlock block) throws IOException {
this.datanode = datanode;
super(datanode);
this.block = block;
this.requestLength = block.getNumBytes();
Preconditions.checkArgument(requestLength >= 0);
@ -81,98 +164,80 @@ final class BlockChecksumHelper {
new BufferedInputStream(metadataIn, ioFileBufferSize));
}
protected DataNode getDatanode() {
return datanode;
Sender createSender(IOStreamPair pair) {
DataOutputStream out = (DataOutputStream) pair.out;
return new Sender(out);
}
protected ExtendedBlock getBlock() {
ExtendedBlock getBlock() {
return block;
}
protected long getRequestLength() {
long getRequestLength() {
return requestLength;
}
protected LengthInputStream getMetadataIn() {
LengthInputStream getMetadataIn() {
return metadataIn;
}
protected DataInputStream getChecksumIn() {
DataInputStream getChecksumIn() {
return checksumIn;
}
protected long getVisibleLength() {
long getVisibleLength() {
return visibleLength;
}
protected boolean isPartialBlk() {
boolean isPartialBlk() {
return partialBlk;
}
protected void setOutBytes(byte[] bytes) {
this.outBytes = bytes;
}
protected byte[] getOutBytes() {
return outBytes;
}
protected int getBytesPerCRC() {
return bytesPerCRC;
}
protected DataChecksum.Type getCrcType() {
return crcType;
}
protected long getCrcPerBlock() {
return crcPerBlock;
}
protected int getChecksumSize() {
return checksumSize;
}
protected BlockMetadataHeader getHeader() {
BlockMetadataHeader getHeader() {
return header;
}
protected DataChecksum getChecksum() {
DataChecksum getChecksum() {
return checksum;
}
/**
* Perform the block checksum computing.
*
* @throws IOException
*/
abstract void compute() throws IOException;
/**
* Read block metadata header.
*
* @throws IOException
*/
protected void readHeader() throws IOException {
void readHeader() throws IOException {
//read metadata file
header = BlockMetadataHeader.readHeader(checksumIn);
checksum = header.getChecksum();
checksumSize = checksum.getChecksumSize();
bytesPerCRC = checksum.getBytesPerChecksum();
crcPerBlock = checksumSize <= 0 ? 0 :
setChecksumSize(checksum.getChecksumSize());
setBytesPerCRC(checksum.getBytesPerChecksum());
long crcPerBlock = checksum.getChecksumSize() <= 0 ? 0 :
(metadataIn.getLength() -
BlockMetadataHeader.getHeaderSize()) / checksumSize;
crcType = checksum.getChecksumType();
BlockMetadataHeader.getHeaderSize()) / checksum.getChecksumSize();
setCrcPerBlock(crcPerBlock);
setCrcType(checksum.getChecksumType());
}
/**
* Calculate partial block checksum.
*
* @return
* @throws IOException
*/
protected byte[] crcPartialBlock() throws IOException {
int partialLength = (int) (requestLength % bytesPerCRC);
byte[] crcPartialBlock() throws IOException {
int partialLength = (int) (requestLength % getBytesPerCRC());
if (partialLength > 0) {
byte[] buf = new byte[partialLength];
final InputStream blockIn = datanode.data.getBlockInputStream(block,
final InputStream blockIn = getBlockInputStream(block,
requestLength - partialLength);
try {
// Get the CRC of the partialLength.
@ -181,7 +246,7 @@ final class BlockChecksumHelper {
IOUtils.closeStream(blockIn);
}
checksum.update(buf, 0, partialLength);
byte[] partialCrc = new byte[checksumSize];
byte[] partialCrc = new byte[getChecksumSize()];
checksum.writeValue(partialCrc, 0, true);
return partialCrc;
}
@ -229,7 +294,7 @@ final class BlockChecksumHelper {
}
private MD5Hash checksumPartialBlock() throws IOException {
byte[] buffer = new byte[4*1024];
byte[] buffer = new byte[4 * 1024];
MessageDigest digester = MD5Hash.getDigester();
long remaining = (getRequestLength() / getBytesPerCRC())
@ -251,4 +316,115 @@ final class BlockChecksumHelper {
return new MD5Hash(digester.digest());
}
}
}
/**
* Non-striped block group checksum computer for striped blocks.
*/
static class BlockGroupNonStripedChecksumComputer
extends AbstractBlockChecksumComputer {
private final ExtendedBlock blockGroup;
private final ErasureCodingPolicy ecPolicy;
private final DatanodeInfo[] datanodes;
private final Token<BlockTokenIdentifier>[] blockTokens;
private final DataOutputBuffer md5writer = new DataOutputBuffer();
BlockGroupNonStripedChecksumComputer(DataNode datanode,
StripedBlockInfo stripedBlockInfo)
throws IOException {
super(datanode);
this.blockGroup = stripedBlockInfo.getBlock();
this.ecPolicy = stripedBlockInfo.getErasureCodingPolicy();
this.datanodes = stripedBlockInfo.getDatanodes();
this.blockTokens = stripedBlockInfo.getBlockTokens();
}
@Override
void compute() throws IOException {
for (int idx = 0; idx < ecPolicy.getNumDataUnits(); idx++) {
ExtendedBlock block =
StripedBlockUtil.constructInternalBlock(blockGroup,
ecPolicy.getCellSize(), ecPolicy.getNumDataUnits(), idx);
DatanodeInfo targetDatanode = datanodes[idx];
Token<BlockTokenIdentifier> blockToken = blockTokens[idx];
checksumBlock(block, idx, blockToken, targetDatanode);
}
MD5Hash md5out = MD5Hash.digest(md5writer.getData());
setOutBytes(md5out.getDigest());
}
private void checksumBlock(ExtendedBlock block, int blockIdx,
Token<BlockTokenIdentifier> blockToken,
DatanodeInfo targetDatanode) throws IOException {
int timeout = 3000;
try (IOStreamPair pair = getDatanode().connectToDN(targetDatanode,
timeout, block, blockToken)) {
LOG.debug("write to {}: {}, block={}",
getDatanode(), Op.BLOCK_CHECKSUM, block);
// get block MD5
createSender(pair).blockChecksum(block, blockToken);
final DataTransferProtos.BlockOpResponseProto reply =
DataTransferProtos.BlockOpResponseProto.parseFrom(
PBHelperClient.vintPrefixed(pair.in));
String logInfo = "for block " + block
+ " from datanode " + targetDatanode;
DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);
DataTransferProtos.OpBlockChecksumResponseProto checksumData =
reply.getChecksumResponse();
//read byte-per-checksum
final int bpc = checksumData.getBytesPerCrc();
if (blockIdx == 0) { //first block
setBytesPerCRC(bpc);
} else if (bpc != getBytesPerCRC()) {
throw new IOException("Byte-per-checksum not matched: bpc=" + bpc
+ " but bytesPerCRC=" + getBytesPerCRC());
}
//read crc-per-block
final long cpb = checksumData.getCrcPerBlock();
if (blockIdx == 0) {
setCrcPerBlock(cpb);
}
//read md5
final MD5Hash md5 = new MD5Hash(
checksumData.getMd5().toByteArray());
md5.write(md5writer);
// read crc-type
final DataChecksum.Type ct;
if (checksumData.hasCrcType()) {
ct = PBHelperClient.convert(checksumData.getCrcType());
} else {
LOG.debug("Retrieving checksum from an earlier-version DataNode: " +
"inferring checksum by reading first byte");
ct = DataChecksum.Type.DEFAULT;
}
if (blockIdx == 0) { // first block
setCrcType(ct);
} else if (getCrcType() != DataChecksum.Type.MIXED &&
getCrcType() != ct) {
// if crc types are mixed in a file
setCrcType(DataChecksum.Type.MIXED);
}
if (LOG.isDebugEnabled()) {
if (blockIdx == 0) {
LOG.debug("set bytesPerCRC=" + getBytesPerCRC()
+ ", crcPerBlock=" + getCrcPerBlock());
}
LOG.debug("got reply from " + targetDatanode + ": md5=" + md5);
}
}
}
}
}

View File

@ -1157,7 +1157,7 @@ class BlockReceiver implements Closeable {
final StringBuilder b = new StringBuilder(getClass().getSimpleName())
.append(": ").append(block).append(", type=").append(type);
if (type != PacketResponderType.HAS_DOWNSTREAM_IN_PIPELINE) {
if (type == PacketResponderType.HAS_DOWNSTREAM_IN_PIPELINE) {
b.append(", downstreams=").append(downstreams.length)
.append(":").append(Arrays.asList(downstreams));
}

View File

@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.StripedBlockInfo;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
@ -46,7 +47,9 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.BlockChecksumHelper.BlockChecksumComputer;
import org.apache.hadoop.hdfs.server.datanode.BlockChecksumHelper.AbstractBlockChecksumComputer;
import org.apache.hadoop.hdfs.server.datanode.BlockChecksumHelper.ReplicatedBlockChecksumComputer;
import org.apache.hadoop.hdfs.server.datanode.BlockChecksumHelper.BlockGroupNonStripedChecksumComputer;
import org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsUnsupportedException;
import org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsVersionException;
import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.NewShmInfo;
@ -923,6 +926,46 @@ class DataXceiver extends Receiver implements Runnable {
datanode.metrics.addBlockChecksumOp(elapsed());
}
@Override
public void blockGroupChecksum(final StripedBlockInfo stripedBlockInfo,
final Token<BlockTokenIdentifier> blockToken)
throws IOException {
updateCurrentThreadName("Getting checksum for block group " +
stripedBlockInfo.getBlock());
final DataOutputStream out = new DataOutputStream(getOutputStream());
checkAccess(out, true, stripedBlockInfo.getBlock(), blockToken,
Op.BLOCK_GROUP_CHECKSUM, BlockTokenIdentifier.AccessMode.READ);
AbstractBlockChecksumComputer maker =
new BlockGroupNonStripedChecksumComputer(datanode, stripedBlockInfo);
try {
maker.compute();
//write reply
BlockOpResponseProto.newBuilder()
.setStatus(SUCCESS)
.setChecksumResponse(OpBlockChecksumResponseProto.newBuilder()
.setBytesPerCrc(maker.getBytesPerCRC())
.setCrcPerBlock(maker.getCrcPerBlock())
.setMd5(ByteString.copyFrom(maker.getOutBytes()))
.setCrcType(PBHelperClient.convert(maker.getCrcType())))
.build()
.writeDelimitedTo(out);
out.flush();
} catch (IOException ioe) {
LOG.info("blockChecksum " + stripedBlockInfo.getBlock() +
" received exception " + ioe);
incrDatanodeNetworkErrors();
throw ioe;
} finally {
IOUtils.closeStream(out);
}
//update metrics
datanode.metrics.addBlockChecksumOp(elapsed());
}
@Override
public void copyBlock(final ExtendedBlock block,
final Token<BlockTokenIdentifier> blockToken) throws IOException {

View File

@ -849,12 +849,13 @@ public final class ErasureCodingWorker {
* read directly from DN and need to check the replica is FINALIZED
* state, notice we should not use short-circuit local read which
* requires config for domain-socket in UNIX or legacy config in Windows.
* The network distance value isn't used for this scenario.
*/
return RemoteBlockReader2.newBlockReader(
"dummy", block, blockToken, offsetInBlock,
block.getNumBytes() - offsetInBlock, true,
"", newConnectedPeer(block, dnAddr, blockToken, dnInfo), dnInfo,
null, cachingStrategy, datanode.getTracer());
null, cachingStrategy, datanode.getTracer(), -1);
} catch (IOException e) {
LOG.debug("Exception while creating remote block reader, datanode {}",
dnInfo, e);

View File

@ -21,6 +21,7 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
@ -380,4 +381,18 @@ public class EncryptionZoneManager {
public int getNumEncryptionZones() {
return encryptionZones.size();
}
/**
* @return a list of all key names.
*/
String[] getKeyNames() {
assert dir.hasReadLock();
String[] ret = new String[encryptionZones.size()];
int index = 0;
for (Map.Entry<Long, EncryptionZoneInt> entry : encryptionZones
.entrySet()) {
ret[index++] = entry.getValue().getKeyName();
}
return ret;
}
}

View File

@ -23,6 +23,7 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.security.GeneralSecurityException;
import java.util.AbstractMap;
import java.util.concurrent.ExecutorService;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
@ -304,4 +305,86 @@ final class FSDirEncryptionZoneOp {
fsd.readUnlock();
}
}
/**
* Proactively warm up the edek cache. We'll get all the edek key names,
* then launch a separate thread to warm them up.
*/
static void warmUpEdekCache(final ExecutorService executor,
final FSDirectory fsd, final int delay, final int interval) {
fsd.readLock();
try {
String[] edeks = fsd.ezManager.getKeyNames();
executor.execute(
new EDEKCacheLoader(edeks, fsd.getProvider(), delay, interval));
} finally {
fsd.readUnlock();
}
}
/**
* EDEKCacheLoader runs in a separate thread, looping through all the
* EDEK key names and warming them up in the KMS cache.
*/
static class EDEKCacheLoader implements Runnable {
private final String[] keyNames;
private final KeyProviderCryptoExtension kp;
private int initialDelay;
private int retryInterval;
EDEKCacheLoader(final String[] names, final KeyProviderCryptoExtension kp,
final int delay, final int interval) {
this.keyNames = names;
this.kp = kp;
this.initialDelay = delay;
this.retryInterval = interval;
}
@Override
public void run() {
NameNode.LOG.info("Warming up {} EDEKs... (initialDelay={}, "
+ "retryInterval={})", keyNames.length, initialDelay, retryInterval);
try {
Thread.sleep(initialDelay);
} catch (InterruptedException ie) {
NameNode.LOG.info("EDEKCacheLoader interrupted before warming up.");
return;
}
final int logCoolDown = 10000; // periodically print error log (if any)
int sinceLastLog = logCoolDown; // always print the first failure
boolean success = false;
IOException lastSeenIOE = null;
while (true) {
try {
kp.warmUpEncryptedKeys(keyNames);
NameNode.LOG
.info("Successfully warmed up {} EDEKs.", keyNames.length);
success = true;
break;
} catch (IOException ioe) {
lastSeenIOE = ioe;
if (sinceLastLog >= logCoolDown) {
NameNode.LOG.info("Failed to warm up EDEKs.", ioe);
sinceLastLog = 0;
} else {
NameNode.LOG.debug("Failed to warm up EDEKs.", ioe);
}
}
try {
Thread.sleep(retryInterval);
} catch (InterruptedException ie) {
NameNode.LOG.info("EDEKCacheLoader interrupted during retry.");
break;
}
sinceLastLog += retryInterval;
}
if (!success) {
NameNode.LOG.warn("Unable to warm up EDEKs.");
if (lastSeenIOE != null) {
NameNode.LOG.warn("Last seen exception:", lastSeenIOE);
}
}
}
}
}
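
The warm-up itself is a single KeyProviderCryptoExtension call; everything else above is scheduling and retry. A minimal standalone sketch of that call, assuming a reachable KMS — the kms:// URI and key names below are hypothetical placeholders, not values from this patch (inside the NameNode the provider comes from FSDirectory#getProvider() instead):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

public class EdekWarmUpSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical KMS endpoint; replace with the cluster's key provider URI.
    KeyProvider provider = KeyProviderFactory.get(
        new URI("kms://http@kms.example.com:9600/kms"), conf);
    KeyProviderCryptoExtension kp =
        KeyProviderCryptoExtension.createKeyProviderCryptoExtension(provider);
    // One varargs call per batch of zone keys; the KMS pre-fills its EDEK
    // cache for these key names, which is what EDEKCacheLoader retries above.
    kp.warmUpEncryptedKeys("zoneKey1", "zoneKey2");
  }
}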

View File

@ -116,6 +116,8 @@ import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
@ -283,6 +285,7 @@ import com.google.common.base.Charsets;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
/**
* FSNamesystem is a container of both transient
@ -425,6 +428,12 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
// A daemon to periodically clean up corrupt lazyPersist files
// from the name space.
Daemon lazyPersistFileScrubber = null;
// Executor to warm up EDEK cache
private ExecutorService edekCacheLoader = null;
private final int edekCacheLoaderDelay;
private final int edekCacheLoaderInterval;
/**
* When an active namenode will roll its own edit log, in # edits
*/
@ -787,6 +796,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
+ " must be zero (for disable) or greater than zero.");
}
this.edekCacheLoaderDelay = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_EDEKCACHELOADER_INITIAL_DELAY_MS_KEY,
DFSConfigKeys.DFS_NAMENODE_EDEKCACHELOADER_INITIAL_DELAY_MS_DEFAULT);
this.edekCacheLoaderInterval = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_EDEKCACHELOADER_INTERVAL_MS_KEY,
DFSConfigKeys.DFS_NAMENODE_EDEKCACHELOADER_INTERVAL_MS_DEFAULT);
// For testing purposes, allow the DT secret manager to be started regardless
// of whether security is enabled.
alwaysUseDelegationTokensForTests = conf.getBoolean(
@ -1128,6 +1144,14 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
cacheManager.startMonitorThread();
blockManager.getDatanodeManager().setShouldSendCachingCommands(true);
if (provider != null) {
edekCacheLoader = Executors.newSingleThreadExecutor(
new ThreadFactoryBuilder().setDaemon(true)
.setNameFormat("Warm Up EDEK Cache Thread #%d")
.build());
FSDirEncryptionZoneOp.warmUpEdekCache(edekCacheLoader, dir,
edekCacheLoaderDelay, edekCacheLoaderInterval);
}
} finally {
startingActiveService = false;
writeUnlock();
@ -1162,6 +1186,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
((NameNodeResourceMonitor) nnrmthread.getRunnable()).stopMonitor();
nnrmthread.interrupt();
}
if (edekCacheLoader != null) {
edekCacheLoader.shutdownNow();
}
if (nnEditLogRoller != null) {
((NameNodeEditLogRoller)nnEditLogRoller.getRunnable()).stop();
nnEditLogRoller.interrupt();
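
For tests or tuning, the two knobs read above can be set programmatically before starting the NameNode. A hedged sketch using only the DFSConfigKeys constants referenced in this patch; the millisecond values are arbitrary examples:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class EdekWarmUpConfSketch {
  public static Configuration fastWarmUpConf() {
    Configuration conf = new HdfsConfiguration();
    // Start warming the EDEK cache immediately after the NN becomes active...
    conf.setInt(
        DFSConfigKeys.DFS_NAMENODE_EDEKCACHELOADER_INITIAL_DELAY_MS_KEY, 0);
    // ...and retry every 200 ms until the KMS call succeeds.
    conf.setInt(
        DFSConfigKeys.DFS_NAMENODE_EDEKCACHELOADER_INTERVAL_MS_KEY, 200);
    return conf;
  }
}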

View File

@ -22,6 +22,7 @@ import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.XATTR_
import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.XATTR_NAMESPACE_OFFSET;
import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.XATTR_NAMESPACE_EXT_OFFSET;
import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.XATTR_NAMESPACE_EXT_MASK;
import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.*;
import java.io.BufferedOutputStream;
import java.io.File;
@ -434,36 +435,38 @@ class OfflineImageReconstructor {
Node node = new Node();
loadNodeChildren(node, "NameSection fields");
NameSystemSection.Builder b = NameSystemSection.newBuilder();
Integer namespaceId = node.removeChildInt("namespaceId");
Integer namespaceId = node.removeChildInt(NAME_SECTION_NAMESPACE_ID);
if (namespaceId == null) {
throw new IOException("<NameSection> is missing <namespaceId>");
}
b.setNamespaceId(namespaceId);
Long lval = node.removeChildLong("genstampV1");
Long lval = node.removeChildLong(NAME_SECTION_GENSTAMPV1);
if (lval != null) {
b.setGenstampV1(lval);
}
lval = node.removeChildLong("genstampV2");
lval = node.removeChildLong(NAME_SECTION_GENSTAMPV2);
if (lval != null) {
b.setGenstampV2(lval);
}
lval = node.removeChildLong("genstampV1Limit");
lval = node.removeChildLong(NAME_SECTION_GENSTAMPV1_LIMIT);
if (lval != null) {
b.setGenstampV1Limit(lval);
}
lval = node.removeChildLong("lastAllocatedBlockId");
lval = node.removeChildLong(NAME_SECTION_LAST_ALLOCATED_BLOCK_ID);
if (lval != null) {
b.setLastAllocatedBlockId(lval);
}
lval = node.removeChildLong("txid");
lval = node.removeChildLong(NAME_SECTION_TXID);
if (lval != null) {
b.setTransactionId(lval);
}
lval = node.removeChildLong("rollingUpgradeStartTime");
lval = node.removeChildLong(
NAME_SECTION_ROLLING_UPGRADE_START_TIME);
if (lval != null) {
b.setRollingUpgradeStartTime(lval);
}
lval = node.removeChildLong("lastAllocatedStripedBlockId");
lval = node.removeChildLong(
NAME_SECTION_LAST_ALLOCATED_STRIPED_BLOCK_ID);
if (lval != null) {
b.setLastAllocatedStripedBlockId(lval);
}
@ -486,11 +489,12 @@ class OfflineImageReconstructor {
Node headerNode = new Node();
loadNodeChildren(headerNode, "INodeSection fields", "inode");
INodeSection.Builder b = INodeSection.newBuilder();
Long lval = headerNode.removeChildLong("lastInodeId");
Long lval = headerNode.removeChildLong(INODE_SECTION_LAST_INODE_ID);
if (lval != null) {
b.setLastInodeId(lval);
}
Integer expectedNumINodes = headerNode.removeChildInt("numInodes");
Integer expectedNumINodes =
headerNode.removeChildInt(INODE_SECTION_NUM_INODES);
if (expectedNumINodes == null) {
throw new IOException("Failed to find <numInodes> in INodeSection.");
}
@ -501,7 +505,7 @@ class OfflineImageReconstructor {
int actualNumINodes = 0;
while (actualNumINodes < expectedNumINodes) {
try {
expectTag("inode", false);
expectTag(INODE_SECTION_INODE, false);
} catch (IOException e) {
throw new IOException("Only found " + actualNumINodes +
" <inode> entries out of " + expectedNumINodes, e);
@ -512,24 +516,24 @@ class OfflineImageReconstructor {
INodeSection.INode.Builder inodeBld = processINodeXml(inode);
inodeBld.build().writeDelimitedTo(out);
}
expectTagEnd("INodeSection");
expectTagEnd(INODE_SECTION_NAME);
recordSectionLength(SectionName.INODE.name());
}
}
private INodeSection.INode.Builder processINodeXml(Node node)
throws IOException {
String type = node.removeChildStr("type");
String type = node.removeChildStr(INODE_SECTION_TYPE);
if (type == null) {
throw new IOException("INode XML found with no <type> tag.");
}
INodeSection.INode.Builder inodeBld = INodeSection.INode.newBuilder();
Long id = node.removeChildLong("id");
Long id = node.removeChildLong(SECTION_ID);
if (id == null) {
throw new IOException("<inode> found without <id>");
}
inodeBld.setId(id);
String name = node.removeChildStr("name");
String name = node.removeChildStr(SECTION_NAME);
if (name != null) {
inodeBld.setName(ByteString.copyFrom(name, "UTF8"));
}
@ -555,46 +559,46 @@ class OfflineImageReconstructor {
throws IOException {
inodeBld.setType(INodeSection.INode.Type.FILE);
INodeSection.INodeFile.Builder bld = INodeSection.INodeFile.newBuilder();
Integer ival = node.removeChildInt("replication");
Integer ival = node.removeChildInt(SECTION_REPLICATION);
if (ival != null) {
bld.setReplication(ival);
}
Long lval = node.removeChildLong("mtime");
Long lval = node.removeChildLong(INODE_SECTION_MTIME);
if (lval != null) {
bld.setModificationTime(lval);
}
lval = node.removeChildLong("atime");
lval = node.removeChildLong(INODE_SECTION_ATIME);
if (lval != null) {
bld.setAccessTime(lval);
}
lval = node.removeChildLong("preferredBlockSize");
lval = node.removeChildLong(INODE_SECTION_PREFERRED_BLOCK_SIZE);
if (lval != null) {
bld.setPreferredBlockSize(lval);
}
String perm = node.removeChildStr("permission");
String perm = node.removeChildStr(INODE_SECTION_PERMISSION);
if (perm != null) {
bld.setPermission(permissionXmlToU64(perm));
}
Node blocks = node.removeChild("blocks");
Node blocks = node.removeChild(INODE_SECTION_BLOCKS);
if (blocks != null) {
while (true) {
Node block = blocks.removeChild("block");
Node block = blocks.removeChild(INODE_SECTION_BLOCK);
if (block == null) {
break;
}
HdfsProtos.BlockProto.Builder blockBld =
HdfsProtos.BlockProto.newBuilder();
Long id = block.removeChildLong("id");
Long id = block.removeChildLong(SECTION_ID);
if (id == null) {
throw new IOException("<block> found without <id>");
}
blockBld.setBlockId(id);
Long genstamp = block.removeChildLong("genstamp");
Long genstamp = block.removeChildLong(INODE_SECTION_GEMSTAMP);
if (genstamp == null) {
throw new IOException("<block> found without <genstamp>");
}
blockBld.setGenStamp(genstamp);
Long numBytes = block.removeChildLong("numBytes");
Long numBytes = block.removeChildLong(INODE_SECTION_NUM_BYTES);
if (numBytes == null) {
throw new IOException("<block> found without <numBytes>");
}
@ -602,19 +606,21 @@ class OfflineImageReconstructor {
bld.addBlocks(blockBld);
}
}
Node fileUnderConstruction = node.removeChild("file-under-construction");
Node fileUnderConstruction =
node.removeChild(INODE_SECTION_FILE_UNDER_CONSTRUCTION);
if (fileUnderConstruction != null) {
INodeSection.FileUnderConstructionFeature.Builder fb =
INodeSection.FileUnderConstructionFeature.newBuilder();
String clientName =
fileUnderConstruction.removeChildStr("clientName");
fileUnderConstruction.removeChildStr(INODE_SECTION_CLIENT_NAME);
if (clientName == null) {
throw new IOException("<file-under-construction> found without " +
"<clientName>");
}
fb.setClientName(clientName);
String clientMachine =
fileUnderConstruction.removeChildStr("clientMachine");
fileUnderConstruction
.removeChildStr(INODE_SECTION_CLIENT_MACHINE);
if (clientMachine == null) {
throw new IOException("<file-under-construction> found without " +
"<clientMachine>");
@ -622,19 +628,19 @@ class OfflineImageReconstructor {
fb.setClientMachine(clientMachine);
bld.setFileUC(fb);
}
Node acls = node.removeChild("acls");
Node acls = node.removeChild(INODE_SECTION_ACLS);
if (acls != null) {
bld.setAcl(aclXmlToProto(acls));
}
Node xattrs = node.removeChild("xattrs");
Node xattrs = node.removeChild(INODE_SECTION_XATTRS);
if (xattrs != null) {
bld.setXAttrs(xattrsXmlToProto(xattrs));
}
ival = node.removeChildInt("storagePolicyId");
ival = node.removeChildInt(INODE_SECTION_STORAGE_POLICY_ID);
if (ival != null) {
bld.setStoragePolicyID(ival);
}
Boolean bval = node.removeChildBool("isStriped");
Boolean bval = node.removeChildBool(INODE_SECTION_IS_STRIPED);
bld.setIsStriped(bval);
inodeBld.setFile(bld);
// Will check remaining keys and serialize in processINodeXml
@ -645,40 +651,40 @@ class OfflineImageReconstructor {
inodeBld.setType(INodeSection.INode.Type.DIRECTORY);
INodeSection.INodeDirectory.Builder bld =
INodeSection.INodeDirectory.newBuilder();
Long lval = node.removeChildLong("mtime");
Long lval = node.removeChildLong(INODE_SECTION_MTIME);
if (lval != null) {
bld.setModificationTime(lval);
}
lval = node.removeChildLong("nsquota");
lval = node.removeChildLong(INODE_SECTION_NS_QUOTA);
if (lval != null) {
bld.setNsQuota(lval);
}
lval = node.removeChildLong("dsquota");
lval = node.removeChildLong(INODE_SECTION_DS_QUOTA);
if (lval != null) {
bld.setDsQuota(lval);
}
String perm = node.removeChildStr("permission");
String perm = node.removeChildStr(INODE_SECTION_PERMISSION);
if (perm != null) {
bld.setPermission(permissionXmlToU64(perm));
}
Node acls = node.removeChild("acls");
Node acls = node.removeChild(INODE_SECTION_ACLS);
if (acls != null) {
bld.setAcl(aclXmlToProto(acls));
}
Node xattrs = node.removeChild("xattrs");
Node xattrs = node.removeChild(INODE_SECTION_XATTRS);
if (xattrs != null) {
bld.setXAttrs(xattrsXmlToProto(xattrs));
}
INodeSection.QuotaByStorageTypeFeatureProto.Builder qf =
INodeSection.QuotaByStorageTypeFeatureProto.newBuilder();
while (true) {
Node typeQuota = node.removeChild("typeQuota");
Node typeQuota = node.removeChild(INODE_SECTION_TYPE_QUOTA);
if (typeQuota == null) {
break;
}
INodeSection.QuotaByStorageTypeEntryProto.Builder qbld =
INodeSection.QuotaByStorageTypeEntryProto.newBuilder();
String type = typeQuota.removeChildStr("type");
String type = typeQuota.removeChildStr(INODE_SECTION_TYPE);
if (type == null) {
throw new IOException("<typeQuota> was missing <type>");
}
@ -688,7 +694,7 @@ class OfflineImageReconstructor {
throw new IOException("<typeQuota> had unknown <type> " + type);
}
qbld.setStorageType(storageType);
Long quota = typeQuota.removeChildLong("quota");
Long quota = typeQuota.removeChildLong(INODE_SECTION_QUOTA);
if (quota == null) {
throw new IOException("<typeQuota> was missing <quota>");
}
@ -705,19 +711,19 @@ class OfflineImageReconstructor {
inodeBld.setType(INodeSection.INode.Type.SYMLINK);
INodeSection.INodeSymlink.Builder bld =
INodeSection.INodeSymlink.newBuilder();
String perm = node.removeChildStr("permission");
String perm = node.removeChildStr(INODE_SECTION_PERMISSION);
if (perm != null) {
bld.setPermission(permissionXmlToU64(perm));
}
String target = node.removeChildStr("target");
String target = node.removeChildStr(INODE_SECTION_TARGET);
if (target != null) {
bld.setTarget(ByteString.copyFrom(target, "UTF8"));
}
Long lval = node.removeChildLong("mtime");
Long lval = node.removeChildLong(INODE_SECTION_MTIME);
if (lval != null) {
bld.setModificationTime(lval);
}
lval = node.removeChildLong("atime");
lval = node.removeChildLong(INODE_SECTION_ATIME);
if (lval != null) {
bld.setAccessTime(lval);
}
@ -736,23 +742,23 @@ class OfflineImageReconstructor {
INodeSection.XAttrFeatureProto.Builder bld =
INodeSection.XAttrFeatureProto.newBuilder();
while (true) {
Node xattr = xattrs.removeChild("xattr");
Node xattr = xattrs.removeChild(INODE_SECTION_XATTR);
if (xattr == null) {
break;
}
INodeSection.XAttrCompactProto.Builder b =
INodeSection.XAttrCompactProto.newBuilder();
String ns = xattr.removeChildStr("ns");
String ns = xattr.removeChildStr(INODE_SECTION_NS);
if (ns == null) {
throw new IOException("<xattr> had no <ns> entry.");
}
int nsIdx = XAttrProtos.XAttrProto.
XAttrNamespaceProto.valueOf(ns).ordinal();
String name = xattr.removeChildStr("name");
String valStr = xattr.removeChildStr("val");
String name = xattr.removeChildStr(SECTION_NAME);
String valStr = xattr.removeChildStr(INODE_SECTION_VAL);
byte[] val = null;
if (valStr == null) {
String valHex = xattr.removeChildStr("valHex");
String valHex = xattr.removeChildStr(INODE_SECTION_VAL_HEX);
if (valHex == null) {
throw new IOException("<xattr> had no <val> or <valHex> entry.");
}
@ -787,24 +793,28 @@ class OfflineImageReconstructor {
loadNodeChildren(secretHeader, "SecretManager fields",
"delegationKey", "token");
SecretManagerSection.Builder b = SecretManagerSection.newBuilder();
Integer currentId = secretHeader.removeChildInt("currentId");
Integer currentId =
secretHeader.removeChildInt(SECRET_MANAGER_SECTION_CURRENT_ID);
if (currentId == null) {
throw new IOException("SecretManager section had no <currentId>");
}
b.setCurrentId(currentId);
Integer tokenSequenceNumber = secretHeader.removeChildInt("tokenSequenceNumber");
Integer tokenSequenceNumber = secretHeader.removeChildInt(
SECRET_MANAGER_SECTION_TOKEN_SEQUENCE_NUMBER);
if (tokenSequenceNumber == null) {
throw new IOException("SecretManager section had no " +
"<tokenSequenceNumber>");
}
b.setTokenSequenceNumber(tokenSequenceNumber);
Integer expectedNumKeys = secretHeader.removeChildInt("numDelegationKeys");
Integer expectedNumKeys = secretHeader.removeChildInt(
SECRET_MANAGER_SECTION_NUM_DELEGATION_KEYS);
if (expectedNumKeys == null) {
throw new IOException("SecretManager section had no " +
"<numDelegationKeys>");
}
b.setNumKeys(expectedNumKeys);
Integer expectedNumTokens = secretHeader.removeChildInt("numTokens");
Integer expectedNumTokens =
secretHeader.removeChildInt(SECRET_MANAGER_SECTION_NUM_TOKENS);
if (expectedNumTokens == null) {
throw new IOException("SecretManager section had no " +
"<numTokens>");
@ -815,7 +825,7 @@ class OfflineImageReconstructor {
for (int actualNumKeys = 0; actualNumKeys < expectedNumKeys;
actualNumKeys++) {
try {
expectTag("delegationKey", false);
expectTag(SECRET_MANAGER_SECTION_DELEGATION_KEY, false);
} catch (IOException e) {
throw new IOException("Only read " + actualNumKeys +
" delegation keys out of " + expectedNumKeys, e);
@ -824,32 +834,32 @@ class OfflineImageReconstructor {
SecretManagerSection.DelegationKey.newBuilder();
Node dkey = new Node();
loadNodeChildren(dkey, "Delegation key fields");
Integer id = dkey.removeChildInt("id");
Integer id = dkey.removeChildInt(SECTION_ID);
if (id == null) {
throw new IOException("Delegation key stanza <delegationKey> " +
"lacked an <id> field.");
}
dbld.setId(id);
String expiry = dkey.removeChildStr("expiry");
String expiry = dkey.removeChildStr(SECRET_MANAGER_SECTION_EXPIRY);
if (expiry == null) {
throw new IOException("Delegation key stanza <delegationKey> " +
"lacked an <expiry> field.");
}
dbld.setExpiryDate(dateStrToLong(expiry));
String keyHex = dkey.removeChildStr("key");
String keyHex = dkey.removeChildStr(SECRET_MANAGER_SECTION_KEY);
if (keyHex == null) {
throw new IOException("Delegation key stanza <delegationKey> " +
"lacked a <key> field.");
}
byte[] key = new HexBinaryAdapter().unmarshal(keyHex);
dkey.verifyNoRemainingKeys("delegationKey");
dkey.verifyNoRemainingKeys(SECRET_MANAGER_SECTION_DELEGATION_KEY);
dbld.setKey(ByteString.copyFrom(key));
dbld.build().writeDelimitedTo(out);
}
for (int actualNumTokens = 0; actualNumTokens < expectedNumTokens;
actualNumTokens++) {
try {
expectTag("token", false);
expectTag(SECRET_MANAGER_SECTION_TOKEN, false);
} catch (IOException e) {
throw new IOException("Only read " + actualNumTokens +
" tokens out of " + expectedNumTokens, e);
@ -858,46 +868,54 @@ class OfflineImageReconstructor {
SecretManagerSection.PersistToken.newBuilder();
Node token = new Node();
loadNodeChildren(token, "PersistToken key fields");
Integer version = token.removeChildInt("version");
Integer version =
token.removeChildInt(SECRET_MANAGER_SECTION_VERSION);
if (version != null) {
tbld.setVersion(version);
}
String owner = token.removeChildStr("owner");
String owner = token.removeChildStr(SECRET_MANAGER_SECTION_OWNER);
if (owner != null) {
tbld.setOwner(owner);
}
String renewer = token.removeChildStr("renewer");
String renewer =
token.removeChildStr(SECRET_MANAGER_SECTION_RENEWER);
if (renewer != null) {
tbld.setRenewer(renewer);
}
String realUser = token.removeChildStr("realUser");
String realUser =
token.removeChildStr(SECRET_MANAGER_SECTION_REAL_USER);
if (realUser != null) {
tbld.setRealUser(realUser);
}
String issueDateStr = token.removeChildStr("issueDate");
String issueDateStr =
token.removeChildStr(SECRET_MANAGER_SECTION_ISSUE_DATE);
if (issueDateStr != null) {
tbld.setIssueDate(dateStrToLong(issueDateStr));
}
String maxDateStr = token.removeChildStr("maxDate");
String maxDateStr =
token.removeChildStr(SECRET_MANAGER_SECTION_MAX_DATE);
if (maxDateStr != null) {
tbld.setMaxDate(dateStrToLong(maxDateStr));
}
Integer seqNo = token.removeChildInt("sequenceNumber");
Integer seqNo =
token.removeChildInt(SECRET_MANAGER_SECTION_SEQUENCE_NUMBER);
if (seqNo != null) {
tbld.setSequenceNumber(seqNo);
}
Integer masterKeyId = token.removeChildInt("masterKeyId");
Integer masterKeyId =
token.removeChildInt(SECRET_MANAGER_SECTION_MASTER_KEY_ID);
if (masterKeyId != null) {
tbld.setMasterKeyId(masterKeyId);
}
String expiryDateStr = token.removeChildStr("expiryDate");
String expiryDateStr =
token.removeChildStr(SECRET_MANAGER_SECTION_EXPIRY_DATE);
if (expiryDateStr != null) {
tbld.setExpiryDate(dateStrToLong(expiryDateStr));
}
token.verifyNoRemainingKeys("token");
tbld.build().writeDelimitedTo(out);
}
expectTagEnd("SecretManagerSection");
expectTagEnd(SECRET_MANAGER_SECTION_NAME);
recordSectionLength(SectionName.SECRET_MANAGER.name());
}
@ -919,17 +937,20 @@ class OfflineImageReconstructor {
Node node = new Node();
loadNodeChildren(node, "CacheManager fields", "pool", "directive");
CacheManagerSection.Builder b = CacheManagerSection.newBuilder();
Long nextDirectiveId = node.removeChildLong("nextDirectiveId");
Long nextDirectiveId =
node.removeChildLong(CACHE_MANAGER_SECTION_NEXT_DIRECTIVE_ID);
if (nextDirectiveId == null) {
throw new IOException("CacheManager section had no <nextDirectiveId>");
}
b.setNextDirectiveId(nextDirectiveId);
Integer expectedNumPools = node.removeChildInt("numPools");
Integer expectedNumPools =
node.removeChildInt(CACHE_MANAGER_SECTION_NUM_POOLS);
if (expectedNumPools == null) {
throw new IOException("CacheManager section had no <numPools>");
}
b.setNumPools(expectedNumPools);
Integer expectedNumDirectives = node.removeChildInt("numDirectives");
Integer expectedNumDirectives =
node.removeChildInt(CACHE_MANAGER_SECTION_NUM_DIRECTIVES);
if (expectedNumDirectives == null) {
throw new IOException("CacheManager section had no <numDirectives>");
}
@ -938,7 +959,7 @@ class OfflineImageReconstructor {
long actualNumPools = 0;
while (actualNumPools < expectedNumPools) {
try {
expectTag("pool", false);
expectTag(CACHE_MANAGER_SECTION_POOL, false);
} catch (IOException e) {
throw new IOException("Only read " + actualNumPools +
" cache pools out of " + expectedNumPools, e);
@ -951,7 +972,7 @@ class OfflineImageReconstructor {
long actualNumDirectives = 0;
while (actualNumDirectives < expectedNumDirectives) {
try {
expectTag("directive", false);
expectTag(CACHE_MANAGER_SECTION_DIRECTIVE, false);
} catch (IOException e) {
throw new IOException("Only read " + actualNumDirectives +
" cache pools out of " + expectedNumDirectives, e);
@ -961,38 +982,42 @@ class OfflineImageReconstructor {
loadNodeChildren(pool, "directive fields", "");
processDirectiveXml(pool);
}
expectTagEnd("CacheManagerSection");
expectTagEnd(CACHE_MANAGER_SECTION_NAME);
recordSectionLength(SectionName.CACHE_MANAGER.name());
}
private void processPoolXml(Node pool) throws IOException {
CachePoolInfoProto.Builder bld = CachePoolInfoProto.newBuilder();
String poolName = pool.removeChildStr("poolName");
String poolName =
pool.removeChildStr(CACHE_MANAGER_SECTION_POOL_NAME);
if (poolName == null) {
throw new IOException("<pool> found without <poolName>");
}
bld.setPoolName(poolName);
String ownerName = pool.removeChildStr("ownerName");
String ownerName =
pool.removeChildStr(CACHE_MANAGER_SECTION_OWNER_NAME);
if (ownerName == null) {
throw new IOException("<pool> found without <ownerName>");
}
bld.setOwnerName(ownerName);
String groupName = pool.removeChildStr("groupName");
String groupName =
pool.removeChildStr(CACHE_MANAGER_SECTION_GROUP_NAME);
if (groupName == null) {
throw new IOException("<pool> found without <groupName>");
}
bld.setGroupName(groupName);
Integer mode = pool.removeChildInt("mode");
Integer mode = pool.removeChildInt(CACHE_MANAGER_SECTION_MODE);
if (mode == null) {
throw new IOException("<pool> found without <mode>");
}
bld.setMode(mode);
Long limit = pool.removeChildLong("limit");
Long limit = pool.removeChildLong(CACHE_MANAGER_SECTION_LIMIT);
if (limit == null) {
throw new IOException("<pool> found without <limit>");
}
bld.setLimit(limit);
Long maxRelativeExpiry = pool.removeChildLong("maxRelativeExpiry");
Long maxRelativeExpiry =
pool.removeChildLong(CACHE_MANAGER_SECTION_MAX_RELATIVE_EXPIRY);
if (maxRelativeExpiry == null) {
throw new IOException("<pool> found without <maxRelativeExpiry>");
}
@ -1004,37 +1029,39 @@ class OfflineImageReconstructor {
private void processDirectiveXml(Node directive) throws IOException {
CacheDirectiveInfoProto.Builder bld =
CacheDirectiveInfoProto.newBuilder();
Long id = directive.removeChildLong("id");
Long id = directive.removeChildLong(SECTION_ID);
if (id == null) {
throw new IOException("<directive> found without <id>");
}
bld.setId(id);
String path = directive.removeChildStr("path");
String path = directive.removeChildStr(SECTION_PATH);
if (path == null) {
throw new IOException("<directive> found without <path>");
}
bld.setPath(path);
Integer replication = directive.removeChildInt("replication");
Integer replication = directive.removeChildInt(SECTION_REPLICATION);
if (replication == null) {
throw new IOException("<directive> found without <replication>");
}
bld.setReplication(replication);
String pool = directive.removeChildStr("pool");
String pool = directive.removeChildStr(CACHE_MANAGER_SECTION_POOL);
if (pool == null) {
throw new IOException("<directive> found without <pool>");
}
bld.setPool(pool);
Node expiration = directive.removeChild("expiration");
Node expiration =
directive.removeChild(CACHE_MANAGER_SECTION_EXPIRATION);
if (expiration != null) {
CacheDirectiveInfoExpirationProto.Builder ebld =
CacheDirectiveInfoExpirationProto.newBuilder();
Long millis = expiration.removeChildLong("millis");
Long millis =
expiration.removeChildLong(CACHE_MANAGER_SECTION_MILLIS);
if (millis == null) {
throw new IOException("cache directive <expiration> found " +
"without <millis>");
}
ebld.setMillis(millis);
if (expiration.removeChildBool("relative")) {
if (expiration.removeChildBool(CACHE_MANAGER_SECTION_RELATIVE)) {
ebld.setIsRelative(true);
} else {
ebld.setIsRelative(false);
@ -1054,7 +1081,7 @@ class OfflineImageReconstructor {
// There is no header for this section.
// We process the repeated <ref> elements.
while (true) {
XMLEvent ev = expectTag("ref", true);
XMLEvent ev = expectTag(INODE_REFERENCE_SECTION_REF, true);
if (ev.isEndElement()) {
break;
}
@ -1062,7 +1089,8 @@ class OfflineImageReconstructor {
FsImageProto.INodeReferenceSection.INodeReference.Builder bld =
FsImageProto.INodeReferenceSection.INodeReference.newBuilder();
loadNodeChildren(inodeRef, "INodeReference");
Long referredId = inodeRef.removeChildLong("referredId");
Long referredId =
inodeRef.removeChildLong(INODE_REFERENCE_SECTION_REFERRED_ID);
if (referredId != null) {
bld.setReferredId(referredId);
}
@ -1070,11 +1098,13 @@ class OfflineImageReconstructor {
if (name != null) {
bld.setName(ByteString.copyFrom(name, "UTF8"));
}
Integer dstSnapshotId = inodeRef.removeChildInt("dstSnapshotId");
Integer dstSnapshotId = inodeRef.removeChildInt(
INODE_REFERENCE_SECTION_DST_SNAPSHOT_ID);
if (dstSnapshotId != null) {
bld.setDstSnapshotId(dstSnapshotId);
}
Integer lastSnapshotId = inodeRef.removeChildInt("lastSnapshotId");
Integer lastSnapshotId = inodeRef.removeChildInt(
INODE_REFERENCE_SECTION_LAST_SNAPSHOT_ID);
if (lastSnapshotId != null) {
bld.setLastSnapshotId(lastSnapshotId);
}
@ -1093,7 +1123,7 @@ class OfflineImageReconstructor {
// No header for this section
// Process the repeated <directory> elements.
while (true) {
XMLEvent ev = expectTag("directory", true);
XMLEvent ev = expectTag(INODE_DIRECTORY_SECTION_DIRECTORY, true);
if (ev.isEndElement()) {
break;
}
@ -1101,19 +1131,22 @@ class OfflineImageReconstructor {
FsImageProto.INodeDirectorySection.DirEntry.Builder bld =
FsImageProto.INodeDirectorySection.DirEntry.newBuilder();
loadNodeChildren(directory, "directory");
Long parent = directory.removeChildLong("parent");
Long parent = directory.removeChildLong(
INODE_DIRECTORY_SECTION_PARENT);
if (parent != null) {
bld.setParent(parent);
}
while (true) {
Node child = directory.removeChild("child");
Node child = directory.removeChild(
INODE_DIRECTORY_SECTION_CHILD);
if (child == null) {
break;
}
bld.addChildren(Long.parseLong(child.getVal()));
}
while (true) {
Node refChild = directory.removeChild("refChild");
Node refChild = directory.removeChild(
INODE_DIRECTORY_SECTION_REF_CHILD);
if (refChild == null) {
break;
}
@ -1135,7 +1168,7 @@ class OfflineImageReconstructor {
// No header for this section type.
// Process the repeated files under construction elements.
while (true) {
XMLEvent ev = expectTag("inode", true);
XMLEvent ev = expectTag(INODE_SECTION_INODE, true);
if (ev.isEndElement()) {
break;
}
@ -1143,11 +1176,12 @@ class OfflineImageReconstructor {
loadNodeChildren(fileUnderConstruction, "file under construction");
FileUnderConstructionEntry.Builder bld =
FileUnderConstructionEntry.newBuilder();
Long id = fileUnderConstruction.removeChildLong("id");
Long id = fileUnderConstruction.removeChildLong(SECTION_ID);
if (id != null) {
bld.setInodeId(id);
}
String fullpath = fileUnderConstruction.removeChildStr("path");
String fullpath =
fileUnderConstruction.removeChildStr(SECTION_PATH);
if (fullpath != null) {
bld.setFullPath(fullpath);
}
@ -1167,24 +1201,26 @@ class OfflineImageReconstructor {
FsImageProto.SnapshotSection.newBuilder();
Node header = new Node();
loadNodeChildren(header, "SnapshotSection fields", "snapshot");
Integer snapshotCounter = header.removeChildInt("snapshotCounter");
Integer snapshotCounter = header.removeChildInt(
SNAPSHOT_SECTION_SNAPSHOT_COUNTER);
if (snapshotCounter == null) {
throw new IOException("No <snapshotCounter> entry found in " +
"SnapshotSection header");
}
bld.setSnapshotCounter(snapshotCounter);
Integer expectedNumSnapshots = header.removeChildInt("numSnapshots");
Integer expectedNumSnapshots = header.removeChildInt(
SNAPSHOT_SECTION_NUM_SNAPSHOTS);
if (expectedNumSnapshots == null) {
throw new IOException("No <numSnapshots> entry found in " +
"SnapshotSection header");
}
bld.setNumSnapshots(expectedNumSnapshots);
while (true) {
Node sd = header.removeChild("snapshottableDir");
Node sd = header.removeChild(SNAPSHOT_SECTION_SNAPSHOT_TABLE_DIR);
if (sd == null) {
break;
}
Long dir = sd.removeChildLong("dir");
Long dir = sd.removeChildLong(SNAPSHOT_SECTION_DIR);
sd.verifyNoRemainingKeys("<dir>");
bld.addSnapshottableDir(dir);
}
@ -1193,7 +1229,7 @@ class OfflineImageReconstructor {
int actualNumSnapshots = 0;
while (actualNumSnapshots < expectedNumSnapshots) {
try {
expectTag("snapshot", false);
expectTag(SNAPSHOT_SECTION_SNAPSHOT, false);
} catch (IOException e) {
throw new IOException("Only read " + actualNumSnapshots +
" <snapshot> entries out of " + expectedNumSnapshots, e);
@ -1203,17 +1239,17 @@ class OfflineImageReconstructor {
loadNodeChildren(snapshot, "snapshot fields");
FsImageProto.SnapshotSection.Snapshot.Builder s =
FsImageProto.SnapshotSection.Snapshot.newBuilder();
Integer snapshotId = snapshot.removeChildInt("id");
Integer snapshotId = snapshot.removeChildInt(SECTION_ID);
if (snapshotId == null) {
throw new IOException("<snapshot> section was missing <id>");
}
s.setSnapshotId(snapshotId);
Node snapshotRoot = snapshot.removeChild("root");
Node snapshotRoot = snapshot.removeChild(SNAPSHOT_SECTION_ROOT);
INodeSection.INode.Builder inodeBld = processINodeXml(snapshotRoot);
s.setRoot(inodeBld);
s.build().writeDelimitedTo(out);
}
expectTagEnd("SnapshotSection");
expectTagEnd(SNAPSHOT_SECTION_NAME);
recordSectionLength(SectionName.SNAPSHOT.name());
}
}
@ -1229,15 +1265,15 @@ class OfflineImageReconstructor {
XMLEvent ev = expectTag("[diff start tag]", true);
if (ev.isEndElement()) {
String name = ev.asEndElement().getName().getLocalPart();
if (name.equals("SnapshotDiffSection")) {
if (name.equals(SNAPSHOT_DIFF_SECTION_NAME)) {
break;
}
throw new IOException("Got unexpected end tag for " + name);
}
String tagName = ev.asStartElement().getName().getLocalPart();
if (tagName.equals("dirDiffEntry")) {
if (tagName.equals(SNAPSHOT_DIFF_SECTION_DIR_DIFF_ENTRY)) {
processDirDiffEntry();
} else if (tagName.equals("fileDiffEntry")) {
} else if (tagName.equals(SNAPSHOT_DIFF_SECTION_FILE_DIFF_ENTRY)) {
processFileDiffEntry();
} else {
throw new IOException("SnapshotDiffSection contained unexpected " +
@ -1253,12 +1289,14 @@ class OfflineImageReconstructor {
headerBld.setType(DiffEntry.Type.DIRECTORYDIFF);
Node dirDiffHeader = new Node();
loadNodeChildren(dirDiffHeader, "dirDiffEntry fields", "dirDiff");
Long inodeId = dirDiffHeader.removeChildLong("inodeId");
Long inodeId = dirDiffHeader.removeChildLong(
SNAPSHOT_DIFF_SECTION_INODE_ID);
if (inodeId == null) {
throw new IOException("<dirDiffEntry> contained no <inodeId> entry.");
}
headerBld.setInodeId(inodeId);
Integer expectedDiffs = dirDiffHeader.removeChildInt("count");
Integer expectedDiffs = dirDiffHeader.removeChildInt(
SNAPSHOT_DIFF_SECTION_COUNT);
if (expectedDiffs == null) {
throw new IOException("<dirDiffEntry> contained no <count> entry.");
}
@ -1267,7 +1305,7 @@ class OfflineImageReconstructor {
headerBld.build().writeDelimitedTo(out);
for (int actualDiffs = 0; actualDiffs < expectedDiffs; actualDiffs++) {
try {
expectTag("dirDiff", false);
expectTag(SNAPSHOT_DIFF_SECTION_DIR_DIFF, false);
} catch (IOException e) {
throw new IOException("Only read " + (actualDiffs + 1) +
" diffs out of " + expectedDiffs, e);
@ -1276,38 +1314,43 @@ class OfflineImageReconstructor {
loadNodeChildren(dirDiff, "dirDiff fields");
FsImageProto.SnapshotDiffSection.DirectoryDiff.Builder bld =
FsImageProto.SnapshotDiffSection.DirectoryDiff.newBuilder();
Integer snapshotId = dirDiff.removeChildInt("snapshotId");
Integer snapshotId = dirDiff.removeChildInt(
SNAPSHOT_DIFF_SECTION_SNAPSHOT_ID);
if (snapshotId != null) {
bld.setSnapshotId(snapshotId);
}
Integer childrenSize = dirDiff.removeChildInt("childrenSize");
Integer childrenSize = dirDiff.removeChildInt(
SNAPSHOT_DIFF_SECTION_CHILDREN_SIZE);
if (childrenSize == null) {
throw new IOException("Expected to find <childrenSize> in " +
"<dirDiff> section.");
}
bld.setIsSnapshotRoot(dirDiff.removeChildBool("isSnapshotRoot"));
bld.setIsSnapshotRoot(dirDiff.removeChildBool(
SNAPSHOT_DIFF_SECTION_IS_SNAPSHOT_ROOT));
bld.setChildrenSize(childrenSize);
String name = dirDiff.removeChildStr("name");
String name = dirDiff.removeChildStr(SECTION_NAME);
if (name != null) {
bld.setName(ByteString.copyFrom(name, "UTF8"));
}
// TODO: add missing snapshotCopy field to XML
Integer expectedCreatedListSize =
dirDiff.removeChildInt("createdListSize");
Integer expectedCreatedListSize = dirDiff.removeChildInt(
SNAPSHOT_DIFF_SECTION_CREATED_LIST_SIZE);
if (expectedCreatedListSize == null) {
throw new IOException("Expected to find <createdListSize> in " +
"<dirDiff> section.");
}
bld.setCreatedListSize(expectedCreatedListSize);
while (true) {
Node deleted = dirDiff.removeChild("deletedInode");
Node deleted = dirDiff.removeChild(
SNAPSHOT_DIFF_SECTION_DELETED_INODE);
if (deleted == null){
break;
}
bld.addDeletedINode(Long.parseLong(deleted.getVal()));
}
while (true) {
Node deleted = dirDiff.removeChild("deletedInoderef");
Node deleted = dirDiff.removeChild(
SNAPSHOT_DIFF_SECTION_DELETED_INODE_REF);
if (deleted == null){
break;
}
@ -1317,11 +1360,12 @@ class OfflineImageReconstructor {
// After the DirectoryDiff header comes a list of CreatedListEntry PBs.
int actualCreatedListSize = 0;
while (true) {
Node created = dirDiff.removeChild("created");
Node created = dirDiff.removeChild(
SNAPSHOT_DIFF_SECTION_CREATED);
if (created == null){
break;
}
String cleName = created.removeChildStr("name");
String cleName = created.removeChildStr(SECTION_NAME);
if (cleName == null) {
throw new IOException("Expected <created> entry to have " +
"a <name> field");
@ -1339,7 +1383,7 @@ class OfflineImageReconstructor {
}
dirDiff.verifyNoRemainingKeys("dirDiff");
}
expectTagEnd("dirDiffEntry");
expectTagEnd(SNAPSHOT_DIFF_SECTION_DIR_DIFF_ENTRY);
}
private void processFileDiffEntry() throws IOException {
@ -1348,12 +1392,14 @@ class OfflineImageReconstructor {
headerBld.setType(DiffEntry.Type.FILEDIFF);
Node fileDiffHeader = new Node();
loadNodeChildren(fileDiffHeader, "fileDiffEntry fields", "fileDiff");
Long inodeId = fileDiffHeader.removeChildLong("inodeid");
Long inodeId = fileDiffHeader.removeChildLong(
SNAPSHOT_DIFF_SECTION_INODE_ID);
if (inodeId == null) {
throw new IOException("<fileDiffEntry> contained no <inodeid> entry.");
}
headerBld.setInodeId(inodeId);
Integer expectedDiffs = fileDiffHeader.removeChildInt("count");
Integer expectedDiffs = fileDiffHeader.removeChildInt(
SNAPSHOT_DIFF_SECTION_COUNT);
if (expectedDiffs == null) {
throw new IOException("<fileDiffEntry> contained no <count> entry.");
}
@ -1362,7 +1408,7 @@ class OfflineImageReconstructor {
headerBld.build().writeDelimitedTo(out);
for (int actualDiffs = 0; actualDiffs < expectedDiffs; actualDiffs++) {
try {
expectTag("fileDiff", false);
expectTag(SNAPSHOT_DIFF_SECTION_FILE_DIFF, false);
} catch (IOException e) {
throw new IOException("Only read " + (actualDiffs + 1) +
" diffs out of " + expectedDiffs, e);
@ -1371,15 +1417,17 @@ class OfflineImageReconstructor {
loadNodeChildren(fileDiff, "fileDiff fields");
FsImageProto.SnapshotDiffSection.FileDiff.Builder bld =
FsImageProto.SnapshotDiffSection.FileDiff.newBuilder();
Integer snapshotId = fileDiff.removeChildInt("snapshotId");
Integer snapshotId = fileDiff.removeChildInt(
SNAPSHOT_DIFF_SECTION_SNAPSHOT_ID);
if (snapshotId != null) {
bld.setSnapshotId(snapshotId);
}
Long size = fileDiff.removeChildLong("size");
Long size = fileDiff.removeChildLong(
SNAPSHOT_DIFF_SECTION_SIZE);
if (size != null) {
bld.setFileSize(size);
}
String name = fileDiff.removeChildStr("name");
String name = fileDiff.removeChildStr(SECTION_NAME);
if (name != null) {
bld.setName(ByteString.copyFrom(name, "UTF8"));
}
@ -1388,7 +1436,7 @@ class OfflineImageReconstructor {
fileDiff.verifyNoRemainingKeys("fileDiff");
bld.build().writeDelimitedTo(out);
}
expectTagEnd("fileDiffEntry");
expectTagEnd(SNAPSHOT_DIFF_SECTION_FILE_DIFF_ENTRY);
}
}
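
Taken together with PBImageXmlWriter below, this reconstructor relies on a single shared set of tag-name constants, so the XML dump and the ReverseXML rebuild always agree on element names. A hedged sketch of that round trip, placed in the offlineImageViewer package because OfflineImageReconstructor is package-private; the file paths are placeholders, and in practice both directions are normally driven through hdfs oiv -p XML and hdfs oiv -p ReverseXML:

package org.apache.hadoop.hdfs.tools.offlineImageViewer;

import java.io.PrintStream;
import java.io.RandomAccessFile;
import org.apache.hadoop.conf.Configuration;

public class FsImageRoundTripSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Binary fsimage -> XML dump, written with the shared tag-name constants.
    try (PrintStream out = new PrintStream("/tmp/fsimage.xml", "UTF-8");
         RandomAccessFile image = new RandomAccessFile("/tmp/fsimage", "r")) {
      new PBImageXmlWriter(conf, out).visit(image);
    }
    // XML -> binary fsimage; the reconstructor parses the same tag names.
    OfflineImageReconstructor.run("/tmp/fsimage.xml", "/tmp/fsimage.rebuilt");
  }
}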

View File

@ -77,6 +77,154 @@ import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.XATTR_
*/
@InterfaceAudience.Private
public final class PBImageXmlWriter {
public static final String NAME_SECTION_NAME = "NameSection";
public static final String INODE_SECTION_NAME = "INodeSection";
public static final String SECRET_MANAGER_SECTION_NAME =
"SecretManagerSection";
public static final String CACHE_MANAGER_SECTION_NAME = "CacheManagerSection";
public static final String SNAPSHOT_DIFF_SECTION_NAME = "SnapshotDiffSection";
public static final String INODE_REFERENCE_SECTION_NAME =
"INodeReferenceSection";
public static final String INODE_DIRECTORY_SECTION_NAME =
"INodeDirectorySection";
public static final String FILE_UNDER_CONSTRUCTION_SECTION_NAME =
"FileUnderConstructionSection";
public static final String SNAPSHOT_SECTION_NAME = "SnapshotSection";
public static final String SECTION_ID = "id";
public static final String SECTION_REPLICATION = "replication";
public static final String SECTION_PATH = "path";
public static final String SECTION_NAME = "name";
public static final String NAME_SECTION_NAMESPACE_ID = "namespaceId";
public static final String NAME_SECTION_GENSTAMPV1 = "genstampV1";
public static final String NAME_SECTION_GENSTAMPV2 = "genstampV2";
public static final String NAME_SECTION_GENSTAMPV1_LIMIT = "genstampV1Limit";
public static final String NAME_SECTION_LAST_ALLOCATED_BLOCK_ID =
"lastAllocatedBlockId";
public static final String NAME_SECTION_TXID = "txid";
public static final String NAME_SECTION_ROLLING_UPGRADE_START_TIME =
"rollingUpgradeStartTime";
public static final String NAME_SECTION_LAST_ALLOCATED_STRIPED_BLOCK_ID =
"lastAllocatedStripedBlockId";
public static final String INODE_SECTION_LAST_INODE_ID = "lastInodeId";
public static final String INODE_SECTION_NUM_INODES = "numInodes";
public static final String INODE_SECTION_TYPE = "type";
public static final String INODE_SECTION_MTIME = "mtime";
public static final String INODE_SECTION_ATIME = "atime";
public static final String INODE_SECTION_PREFERRED_BLOCK_SIZE =
"preferredBlockSize";
public static final String INODE_SECTION_PERMISSION = "permission";
public static final String INODE_SECTION_BLOCKS = "blocks";
public static final String INODE_SECTION_BLOCK = "block";
public static final String INODE_SECTION_GEMSTAMP = "genstamp";
public static final String INODE_SECTION_NUM_BYTES = "numBytes";
public static final String INODE_SECTION_FILE_UNDER_CONSTRUCTION =
"file-under-construction";
public static final String INODE_SECTION_CLIENT_NAME = "clientName";
public static final String INODE_SECTION_CLIENT_MACHINE = "clientMachine";
public static final String INODE_SECTION_ACL = "acl";
public static final String INODE_SECTION_ACLS = "acls";
public static final String INODE_SECTION_XATTR = "xattr";
public static final String INODE_SECTION_XATTRS = "xattrs";
public static final String INODE_SECTION_STORAGE_POLICY_ID =
"storagePolicyId";
public static final String INODE_SECTION_IS_STRIPED = "isStriped";
public static final String INODE_SECTION_NS_QUOTA = "nsquota";
public static final String INODE_SECTION_DS_QUOTA = "dsquota";
public static final String INODE_SECTION_TYPE_QUOTA = "typeQuota";
public static final String INODE_SECTION_QUOTA = "quota";
public static final String INODE_SECTION_TARGET = "target";
public static final String INODE_SECTION_NS = "ns";
public static final String INODE_SECTION_VAL = "val";
public static final String INODE_SECTION_VAL_HEX = "valHex";
public static final String INODE_SECTION_INODE = "inode";
public static final String SECRET_MANAGER_SECTION_CURRENT_ID = "currentId";
public static final String SECRET_MANAGER_SECTION_TOKEN_SEQUENCE_NUMBER =
"tokenSequenceNumber";
public static final String SECRET_MANAGER_SECTION_NUM_DELEGATION_KEYS =
"numDelegationKeys";
public static final String SECRET_MANAGER_SECTION_NUM_TOKENS = "numTokens";
public static final String SECRET_MANAGER_SECTION_EXPIRY = "expiry";
public static final String SECRET_MANAGER_SECTION_KEY = "key";
public static final String SECRET_MANAGER_SECTION_DELEGATION_KEY =
"delegationKey";
public static final String SECRET_MANAGER_SECTION_VERSION = "version";
public static final String SECRET_MANAGER_SECTION_OWNER = "owner";
public static final String SECRET_MANAGER_SECTION_RENEWER = "renewer";
public static final String SECRET_MANAGER_SECTION_REAL_USER = "realUser";
public static final String SECRET_MANAGER_SECTION_ISSUE_DATE = "issueDate";
public static final String SECRET_MANAGER_SECTION_MAX_DATE = "maxDate";
public static final String SECRET_MANAGER_SECTION_SEQUENCE_NUMBER =
"sequenceNumber";
public static final String SECRET_MANAGER_SECTION_MASTER_KEY_ID =
"masterKeyId";
public static final String SECRET_MANAGER_SECTION_EXPIRY_DATE = "expiryDate";
public static final String SECRET_MANAGER_SECTION_TOKEN = "token";
public static final String CACHE_MANAGER_SECTION_NEXT_DIRECTIVE_ID =
"nextDirectiveId";
public static final String CACHE_MANAGER_SECTION_NUM_POOLS = "numPools";
public static final String CACHE_MANAGER_SECTION_NUM_DIRECTIVES =
"numDirectives";
public static final String CACHE_MANAGER_SECTION_POOL_NAME = "poolName";
public static final String CACHE_MANAGER_SECTION_OWNER_NAME = "ownerName";
public static final String CACHE_MANAGER_SECTION_GROUP_NAME = "groupName";
public static final String CACHE_MANAGER_SECTION_MODE = "mode";
public static final String CACHE_MANAGER_SECTION_LIMIT = "limit";
public static final String CACHE_MANAGER_SECTION_MAX_RELATIVE_EXPIRY =
"maxRelativeExpiry";
public static final String CACHE_MANAGER_SECTION_POOL = "pool";
public static final String CACHE_MANAGER_SECTION_EXPIRATION = "expiration";
public static final String CACHE_MANAGER_SECTION_MILLIS = "millis";
public static final String CACHE_MANAGER_SECTION_RELATIVE = "relative";
public static final String CACHE_MANAGER_SECTION_DIRECTIVE = "directive";
public static final String SNAPSHOT_DIFF_SECTION_INODE_ID = "inodeId";
public static final String SNAPSHOT_DIFF_SECTION_COUNT = "count";
public static final String SNAPSHOT_DIFF_SECTION_SNAPSHOT_ID = "snapshotId";
public static final String SNAPSHOT_DIFF_SECTION_CHILDREN_SIZE =
"childrenSize";
public static final String SNAPSHOT_DIFF_SECTION_IS_SNAPSHOT_ROOT =
"isSnapshotRoot";
public static final String SNAPSHOT_DIFF_SECTION_CREATED_LIST_SIZE =
"createdListSize";
public static final String SNAPSHOT_DIFF_SECTION_DELETED_INODE =
"deletedInode";
public static final String SNAPSHOT_DIFF_SECTION_DELETED_INODE_REF =
"deletedInoderef";
public static final String SNAPSHOT_DIFF_SECTION_CREATED = "created";
public static final String SNAPSHOT_DIFF_SECTION_SIZE = "size";
public static final String SNAPSHOT_DIFF_SECTION_FILE_DIFF_ENTRY =
"fileDiffEntry";
public static final String SNAPSHOT_DIFF_SECTION_DIR_DIFF_ENTRY =
"dirDiffEntry";
public static final String SNAPSHOT_DIFF_SECTION_FILE_DIFF = "fileDiff";
public static final String SNAPSHOT_DIFF_SECTION_DIR_DIFF = "dirDiff";
public static final String INODE_REFERENCE_SECTION_REFERRED_ID = "referredId";
public static final String INODE_REFERENCE_SECTION_DST_SNAPSHOT_ID =
"dstSnapshotId";
public static final String INODE_REFERENCE_SECTION_LAST_SNAPSHOT_ID =
"lastSnapshotId";
public static final String INODE_REFERENCE_SECTION_REF = "ref";
public static final String INODE_DIRECTORY_SECTION_PARENT = "parent";
public static final String INODE_DIRECTORY_SECTION_CHILD = "child";
public static final String INODE_DIRECTORY_SECTION_REF_CHILD = "refChild";
public static final String INODE_DIRECTORY_SECTION_DIRECTORY = "directory";
public static final String SNAPSHOT_SECTION_SNAPSHOT_COUNTER =
"snapshotCounter";
public static final String SNAPSHOT_SECTION_NUM_SNAPSHOTS = "numSnapshots";
public static final String SNAPSHOT_SECTION_SNAPSHOT_TABLE_DIR =
"snapshottableDir";
public static final String SNAPSHOT_SECTION_DIR = "dir";
public static final String SNAPSHOT_SECTION_ROOT = "root";
public static final String SNAPSHOT_SECTION_SNAPSHOT = "snapshot";
private final Configuration conf;
private final PrintStream out;
private final SimpleDateFormat isoDateFormat;
@ -177,98 +325,106 @@ public final class PBImageXmlWriter {
}
private void dumpCacheManagerSection(InputStream is) throws IOException {
out.print("<CacheManagerSection>");
out.print("<" + CACHE_MANAGER_SECTION_NAME + ">");
CacheManagerSection s = CacheManagerSection.parseDelimitedFrom(is);
o("nextDirectiveId", s.getNextDirectiveId());
o("numDirectives", s.getNumDirectives());
o("numPools", s.getNumPools());
o(CACHE_MANAGER_SECTION_NEXT_DIRECTIVE_ID, s.getNextDirectiveId());
o(CACHE_MANAGER_SECTION_NUM_DIRECTIVES, s.getNumDirectives());
o(CACHE_MANAGER_SECTION_NUM_POOLS, s.getNumPools());
for (int i = 0; i < s.getNumPools(); ++i) {
CachePoolInfoProto p = CachePoolInfoProto.parseDelimitedFrom(is);
out.print("<pool>");
o("poolName", p.getPoolName()).o("ownerName", p.getOwnerName())
.o("groupName", p.getGroupName()).o("mode", p.getMode())
.o("limit", p.getLimit())
.o("maxRelativeExpiry", p.getMaxRelativeExpiry());
out.print("</pool>\n");
out.print("<" + CACHE_MANAGER_SECTION_POOL +">");
o(CACHE_MANAGER_SECTION_POOL_NAME, p.getPoolName()).
o(CACHE_MANAGER_SECTION_OWNER_NAME, p.getOwnerName())
.o(CACHE_MANAGER_SECTION_GROUP_NAME, p.getGroupName())
.o(CACHE_MANAGER_SECTION_MODE, p.getMode())
.o(CACHE_MANAGER_SECTION_LIMIT, p.getLimit())
.o(CACHE_MANAGER_SECTION_MAX_RELATIVE_EXPIRY,
p.getMaxRelativeExpiry());
out.print("</" + CACHE_MANAGER_SECTION_POOL + ">\n");
}
for (int i = 0; i < s.getNumDirectives(); ++i) {
CacheDirectiveInfoProto p = CacheDirectiveInfoProto
.parseDelimitedFrom(is);
out.print("<directive>");
o("id", p.getId()).o("path", p.getPath())
.o("replication", p.getReplication()).o("pool", p.getPool());
out.print("<expiration>");
out.print("<" + CACHE_MANAGER_SECTION_DIRECTIVE + ">");
o(SECTION_ID, p.getId()).o(SECTION_PATH, p.getPath())
.o(SECTION_REPLICATION, p.getReplication())
.o(CACHE_MANAGER_SECTION_POOL, p.getPool());
out.print("<" + CACHE_MANAGER_SECTION_EXPIRATION +">");
CacheDirectiveInfoExpirationProto e = p.getExpiration();
o("millis", e.getMillis()).o("relative", e.getIsRelative());
out.print("</expiration>\n");
out.print("</directive>\n");
o(CACHE_MANAGER_SECTION_MILLIS, e.getMillis())
.o(CACHE_MANAGER_SECTION_RELATIVE, e.getIsRelative());
out.print("</" + CACHE_MANAGER_SECTION_EXPIRATION+ ">\n");
out.print("</" + CACHE_MANAGER_SECTION_DIRECTIVE + ">\n");
}
out.print("</CacheManagerSection>\n");
out.print("</" + CACHE_MANAGER_SECTION_NAME + ">\n");
}
private void dumpFileUnderConstructionSection(InputStream in)
throws IOException {
out.print("<FileUnderConstructionSection>");
out.print("<" + FILE_UNDER_CONSTRUCTION_SECTION_NAME + ">");
while (true) {
FileUnderConstructionEntry e = FileUnderConstructionEntry
.parseDelimitedFrom(in);
if (e == null) {
break;
}
out.print("<inode>");
o("id", e.getInodeId()).o("path", e.getFullPath());
out.print("</inode>\n");
out.print("<" + INODE_SECTION_INODE + ">");
o(SECTION_ID, e.getInodeId())
.o(SECTION_PATH, e.getFullPath());
out.print("</" + INODE_SECTION_INODE + ">\n");
}
out.print("</FileUnderConstructionSection>\n");
out.print("</" + FILE_UNDER_CONSTRUCTION_SECTION_NAME + ">\n");
}
private void dumpXattrs(INodeSection.XAttrFeatureProto xattrs) {
out.print("<xattrs>");
out.print("<" + INODE_SECTION_XATTRS + ">");
for (INodeSection.XAttrCompactProto xattr : xattrs.getXAttrsList()) {
out.print("<xattr>");
out.print("<" + INODE_SECTION_XATTR + ">");
int encodedName = xattr.getName();
int ns = (XATTR_NAMESPACE_MASK & (encodedName >> XATTR_NAMESPACE_OFFSET)) |
((XATTR_NAMESPACE_EXT_MASK & (encodedName >> XATTR_NAMESPACE_EXT_OFFSET)) << 2);
o("ns", XAttrProtos.XAttrProto.
o(INODE_SECTION_NS, XAttrProtos.XAttrProto.
XAttrNamespaceProto.valueOf(ns).toString());
o("name", stringTable[XATTR_NAME_MASK & (encodedName >> XATTR_NAME_OFFSET)]);
o(SECTION_NAME,
stringTable[XATTR_NAME_MASK & (encodedName >> XATTR_NAME_OFFSET)]);
ByteString val = xattr.getValue();
if (val.isValidUtf8()) {
o("val", val.toStringUtf8());
o(INODE_SECTION_VAL, val.toStringUtf8());
} else {
o("valHex", Hex.encodeHexString(val.toByteArray()));
o(INODE_SECTION_VAL_HEX, Hex.encodeHexString(val.toByteArray()));
}
out.print("</xattr>");
out.print("</" + INODE_SECTION_XATTR + ">");
}
out.print("</xattrs>");
out.print("</" + INODE_SECTION_XATTRS + ">");
}
private void dumpINodeDirectory(INodeDirectory d) {
o("mtime", d.getModificationTime()).o("permission",
dumpPermission(d.getPermission()));
o(INODE_SECTION_MTIME, d.getModificationTime())
.o(INODE_SECTION_PERMISSION, dumpPermission(d.getPermission()));
if (d.hasXAttrs()) {
dumpXattrs(d.getXAttrs());
}
dumpAcls(d.getAcl());
if (d.hasDsQuota() && d.hasNsQuota()) {
o("nsquota", d.getNsQuota()).o("dsquota", d.getDsQuota());
o(INODE_SECTION_NS_QUOTA, d.getNsQuota())
.o(INODE_SECTION_DS_QUOTA, d.getDsQuota());
}
INodeSection.QuotaByStorageTypeFeatureProto typeQuotas =
d.getTypeQuotas();
if (typeQuotas != null) {
for (INodeSection.QuotaByStorageTypeEntryProto entry:
typeQuotas.getQuotasList()) {
out.print("<typeQuota>");
o("type", entry.getStorageType().toString());
o("quota", entry.getQuota());
out.print("</typeQuota>");
out.print("<" + INODE_SECTION_TYPE_QUOTA + ">");
o(INODE_SECTION_TYPE, entry.getStorageType().toString());
o(INODE_SECTION_QUOTA, entry.getQuota());
out.print("</" + INODE_SECTION_TYPE_QUOTA + ">");
}
}
}
private void dumpINodeDirectorySection(InputStream in) throws IOException {
out.print("<INodeDirectorySection>");
out.print("<" + INODE_DIRECTORY_SECTION_NAME + ">");
while (true) {
INodeDirectorySection.DirEntry e = INodeDirectorySection.DirEntry
.parseDelimitedFrom(in);
@ -276,21 +432,21 @@ public final class PBImageXmlWriter {
if (e == null) {
break;
}
out.print("<directory>");
o("parent", e.getParent());
out.print("<" + INODE_DIRECTORY_SECTION_DIRECTORY + ">");
o(INODE_DIRECTORY_SECTION_PARENT, e.getParent());
for (long id : e.getChildrenList()) {
o("child", id);
o(INODE_DIRECTORY_SECTION_CHILD, id);
}
for (int refId : e.getRefChildrenList()) {
o("refChild", refId);
o(INODE_DIRECTORY_SECTION_REF_CHILD, refId);
}
out.print("</directory>\n");
out.print("</" + INODE_DIRECTORY_SECTION_DIRECTORY + ">\n");
}
out.print("</INodeDirectorySection>\n");
out.print("</" + INODE_DIRECTORY_SECTION_NAME + ">\n");
}
private void dumpINodeReferenceSection(InputStream in) throws IOException {
out.print("<INodeReferenceSection>");
out.print("<" + INODE_REFERENCE_SECTION_NAME + ">");
while (true) {
INodeReferenceSection.INodeReference e = INodeReferenceSection
.INodeReference.parseDelimitedFrom(in);
@ -299,49 +455,53 @@ public final class PBImageXmlWriter {
}
dumpINodeReference(e);
}
out.print("</INodeReferenceSection>");
out.print("</" + INODE_REFERENCE_SECTION_NAME + ">");
}
private void dumpINodeReference(INodeReferenceSection.INodeReference r) {
out.print("<ref>");
o("referredId", r.getReferredId()).o("name", r.getName().toStringUtf8())
.o("dstSnapshotId", r.getDstSnapshotId())
.o("lastSnapshotId", r.getLastSnapshotId());
out.print("</ref>\n");
out.print("<" + INODE_REFERENCE_SECTION_REF + ">");
o(INODE_REFERENCE_SECTION_REFERRED_ID, r.getReferredId())
.o(SECTION_NAME, r.getName().toStringUtf8())
.o(INODE_REFERENCE_SECTION_DST_SNAPSHOT_ID, r.getDstSnapshotId())
.o(INODE_REFERENCE_SECTION_LAST_SNAPSHOT_ID,
r.getLastSnapshotId());
out.print("</" + INODE_REFERENCE_SECTION_REF + ">\n");
}
private void dumpINodeFile(INodeSection.INodeFile f) {
o("replication", f.getReplication()).o("mtime", f.getModificationTime())
.o("atime", f.getAccessTime())
.o("preferredBlockSize", f.getPreferredBlockSize())
.o("permission", dumpPermission(f.getPermission()));
o(SECTION_REPLICATION, f.getReplication())
.o(INODE_SECTION_MTIME, f.getModificationTime())
.o(INODE_SECTION_ATIME, f.getAccessTime())
.o(INODE_SECTION_PREFERRED_BLOCK_SIZE, f.getPreferredBlockSize())
.o(INODE_SECTION_PERMISSION, dumpPermission(f.getPermission()));
if (f.hasXAttrs()) {
dumpXattrs(f.getXAttrs());
}
dumpAcls(f.getAcl());
if (f.getBlocksCount() > 0) {
out.print("<blocks>");
out.print("<" + INODE_SECTION_BLOCKS + ">");
for (BlockProto b : f.getBlocksList()) {
out.print("<block>");
o("id", b.getBlockId()).o("genstamp", b.getGenStamp()).o("numBytes",
b.getNumBytes());
out.print("</block>\n");
out.print("<" + INODE_SECTION_BLOCK + ">");
o(SECTION_ID, b.getBlockId())
.o(INODE_SECTION_GEMSTAMP, b.getGenStamp())
.o(INODE_SECTION_NUM_BYTES, b.getNumBytes());
out.print("</" + INODE_SECTION_BLOCK + ">\n");
}
out.print("</blocks>\n");
out.print("</" + INODE_SECTION_BLOCKS + ">\n");
}
if (f.hasStoragePolicyID()) {
o("storagePolicyId", f.getStoragePolicyID());
o(INODE_SECTION_STORAGE_POLICY_ID, f.getStoragePolicyID());
}
if (f.getIsStriped()) {
out.print("<isStriped/>");
out.print("<" + INODE_SECTION_IS_STRIPED + "/>");
}
if (f.hasFileUC()) {
INodeSection.FileUnderConstructionFeature u = f.getFileUC();
out.print("<file-under-construction>");
o("clientName", u.getClientName()).o("clientMachine",
u.getClientMachine());
out.print("</file-under-construction>\n");
out.print("<" + INODE_SECTION_FILE_UNDER_CONSTRUCTION + ">");
o(INODE_SECTION_CLIENT_NAME, u.getClientName())
.o(INODE_SECTION_CLIENT_MACHINE, u.getClientMachine());
out.print("</" + INODE_SECTION_FILE_UNDER_CONSTRUCTION + ">\n");
}
}
@ -349,31 +509,31 @@ public final class PBImageXmlWriter {
ImmutableList<AclEntry> aclEntryList = FSImageFormatPBINode.Loader
.loadAclEntries(aclFeatureProto, stringTable);
if (aclEntryList.size() > 0) {
out.print("<acls>");
out.print("<" + INODE_SECTION_ACLS + ">");
for (AclEntry aclEntry : aclEntryList) {
o("acl", aclEntry.toString());
o(INODE_SECTION_ACL, aclEntry.toString());
}
out.print("</acls>");
out.print("</" + INODE_SECTION_ACLS + ">");
}
}
private void dumpINodeSection(InputStream in) throws IOException {
INodeSection s = INodeSection.parseDelimitedFrom(in);
out.print("<INodeSection>");
o("lastInodeId", s.getLastInodeId());
o("numInodes", s.getNumInodes());
out.print("<" + INODE_SECTION_NAME + ">");
o(INODE_SECTION_LAST_INODE_ID, s.getLastInodeId());
o(INODE_SECTION_NUM_INODES, s.getNumInodes());
for (int i = 0; i < s.getNumInodes(); ++i) {
INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
out.print("<inode>");
out.print("<" + INODE_SECTION_INODE + ">");
dumpINodeFields(p);
out.print("</inode>\n");
out.print("</" + INODE_SECTION_INODE + ">\n");
}
out.print("</INodeSection>\n");
out.print("</" + INODE_SECTION_NAME + ">\n");
}
private void dumpINodeFields(INodeSection.INode p) {
o("id", p.getId()).o("type", p.getType()).o("name",
p.getName().toStringUtf8());
o(SECTION_ID, p.getId()).o(INODE_SECTION_TYPE, p.getType())
.o(SECTION_NAME, p.getName().toStringUtf8());
if (p.hasFile()) {
dumpINodeFile(p.getFile());
} else if (p.hasDirectory()) {
@ -384,20 +544,23 @@ public final class PBImageXmlWriter {
}
private void dumpINodeSymlink(INodeSymlink s) {
o("permission", dumpPermission(s.getPermission()))
.o("target", s.getTarget().toStringUtf8())
.o("mtime", s.getModificationTime()).o("atime", s.getAccessTime());
o(INODE_SECTION_PERMISSION, dumpPermission(s.getPermission()))
.o(INODE_SECTION_TARGET, s.getTarget().toStringUtf8())
.o(INODE_SECTION_MTIME, s.getModificationTime())
.o(INODE_SECTION_ATIME, s.getAccessTime());
}
private void dumpNameSection(InputStream in) throws IOException {
NameSystemSection s = NameSystemSection.parseDelimitedFrom(in);
out.print("<NameSection>");
o("namespaceId", s.getNamespaceId());
o("genstampV1", s.getGenstampV1()).o("genstampV2", s.getGenstampV2())
.o("genstampV1Limit", s.getGenstampV1Limit())
.o("lastAllocatedBlockId", s.getLastAllocatedBlockId())
.o("txid", s.getTransactionId());
out.print("</NameSection>\n");
out.print("<" + NAME_SECTION_NAME + ">");
o(NAME_SECTION_NAMESPACE_ID, s.getNamespaceId());
o(NAME_SECTION_GENSTAMPV1, s.getGenstampV1())
.o(NAME_SECTION_GENSTAMPV2, s.getGenstampV2())
.o(NAME_SECTION_GENSTAMPV1_LIMIT, s.getGenstampV1Limit())
.o(NAME_SECTION_LAST_ALLOCATED_BLOCK_ID,
s.getLastAllocatedBlockId())
.o(NAME_SECTION_TXID, s.getTransactionId());
out.print("</" + NAME_SECTION_NAME + ">\n");
}
private String dumpPermission(long permission) {
@ -408,59 +571,63 @@ public final class PBImageXmlWriter {
}
private void dumpSecretManagerSection(InputStream is) throws IOException {
out.print("<SecretManagerSection>");
out.print("<" + SECRET_MANAGER_SECTION_NAME + ">");
SecretManagerSection s = SecretManagerSection.parseDelimitedFrom(is);
int expectedNumDelegationKeys = s.getNumKeys();
int expectedNumTokens = s.getNumTokens();
o("currentId", s.getCurrentId()).o("tokenSequenceNumber",
s.getTokenSequenceNumber()).
o("numDelegationKeys", expectedNumDelegationKeys).
o("numTokens", expectedNumTokens);
o(SECRET_MANAGER_SECTION_CURRENT_ID, s.getCurrentId())
.o(SECRET_MANAGER_SECTION_TOKEN_SEQUENCE_NUMBER,
s.getTokenSequenceNumber()).
o(SECRET_MANAGER_SECTION_NUM_DELEGATION_KEYS,
expectedNumDelegationKeys).
o(SECRET_MANAGER_SECTION_NUM_TOKENS, expectedNumTokens);
for (int i = 0; i < expectedNumDelegationKeys; i++) {
SecretManagerSection.DelegationKey dkey =
SecretManagerSection.DelegationKey.parseDelimitedFrom(is);
out.print("<delegationKey>");
o("id", dkey.getId());
o("key", Hex.encodeHexString(dkey.getKey().toByteArray()));
out.print("<" + SECRET_MANAGER_SECTION_DELEGATION_KEY + ">");
o(SECTION_ID, dkey.getId());
o(SECRET_MANAGER_SECTION_KEY,
Hex.encodeHexString(dkey.getKey().toByteArray()));
if (dkey.hasExpiryDate()) {
dumpDate("expiry", dkey.getExpiryDate());
dumpDate(SECRET_MANAGER_SECTION_EXPIRY, dkey.getExpiryDate());
}
out.print("</delegationKey>");
out.print("</" + SECRET_MANAGER_SECTION_DELEGATION_KEY + ">");
}
for (int i = 0; i < expectedNumTokens; i++) {
SecretManagerSection.PersistToken token =
SecretManagerSection.PersistToken.parseDelimitedFrom(is);
out.print("<token>");
out.print("<" + SECRET_MANAGER_SECTION_TOKEN + ">");
if (token.hasVersion()) {
o("version", token.getVersion());
o(SECRET_MANAGER_SECTION_VERSION, token.getVersion());
}
if (token.hasOwner()) {
o("owner", token.getOwner());
o(SECRET_MANAGER_SECTION_OWNER, token.getOwner());
}
if (token.hasRenewer()) {
o("renewer", token.getRenewer());
o(SECRET_MANAGER_SECTION_RENEWER, token.getRenewer());
}
if (token.hasRealUser()) {
o("realUser", token.getRealUser());
o(SECRET_MANAGER_SECTION_REAL_USER, token.getRealUser());
}
if (token.hasIssueDate()) {
dumpDate("issueDate", token.getIssueDate());
dumpDate(SECRET_MANAGER_SECTION_ISSUE_DATE, token.getIssueDate());
}
if (token.hasMaxDate()) {
dumpDate("maxDate", token.getMaxDate());
dumpDate(SECRET_MANAGER_SECTION_MAX_DATE, token.getMaxDate());
}
if (token.hasSequenceNumber()) {
o("sequenceNumber", token.getSequenceNumber());
o(SECRET_MANAGER_SECTION_SEQUENCE_NUMBER,
token.getSequenceNumber());
}
if (token.hasMasterKeyId()) {
o("masterKeyId", token.getMasterKeyId());
o(SECRET_MANAGER_SECTION_MASTER_KEY_ID, token.getMasterKeyId());
}
if (token.hasExpiryDate()) {
dumpDate("expiryDate", token.getExpiryDate());
dumpDate(SECRET_MANAGER_SECTION_EXPIRY_DATE, token.getExpiryDate());
}
out.print("</token>");
out.print("</" + SECRET_MANAGER_SECTION_TOKEN + ">");
}
out.print("</SecretManagerSection>");
out.print("</" + SECRET_MANAGER_SECTION_NAME + ">");
}
private void dumpDate(String tag, long date) {
@ -469,7 +636,7 @@ public final class PBImageXmlWriter {
}
private void dumpSnapshotDiffSection(InputStream in) throws IOException {
out.print("<SnapshotDiffSection>");
out.print("<" + SNAPSHOT_DIFF_SECTION_NAME + ">");
while (true) {
SnapshotDiffSection.DiffEntry e = SnapshotDiffSection.DiffEntry
.parseDelimitedFrom(in);
@ -478,52 +645,54 @@ public final class PBImageXmlWriter {
}
switch (e.getType()) {
case FILEDIFF:
out.print("<fileDiffEntry>");
out.print("<" + SNAPSHOT_DIFF_SECTION_FILE_DIFF_ENTRY + ">");
break;
case DIRECTORYDIFF:
out.print("<dirDiffEntry>");
out.print("<" + SNAPSHOT_DIFF_SECTION_DIR_DIFF_ENTRY + ">");
break;
default:
throw new IOException("unknown DiffEntry type " + e.getType());
}
o("inodeId", e.getInodeId());
o("count", e.getNumOfDiff());
o(SNAPSHOT_DIFF_SECTION_INODE_ID, e.getInodeId());
o(SNAPSHOT_DIFF_SECTION_COUNT, e.getNumOfDiff());
switch (e.getType()) {
case FILEDIFF: {
for (int i = 0; i < e.getNumOfDiff(); ++i) {
out.print("<fileDiff>");
out.print("<" + SNAPSHOT_DIFF_SECTION_FILE_DIFF + ">");
SnapshotDiffSection.FileDiff f = SnapshotDiffSection.FileDiff
.parseDelimitedFrom(in);
o("snapshotId", f.getSnapshotId()).o("size", f.getFileSize()).o(
"name", f.getName().toStringUtf8());
out.print("</fileDiff>\n");
o(SNAPSHOT_DIFF_SECTION_SNAPSHOT_ID, f.getSnapshotId())
.o(SNAPSHOT_DIFF_SECTION_SIZE, f.getFileSize())
.o(SECTION_NAME, f.getName().toStringUtf8());
out.print("</" + SNAPSHOT_DIFF_SECTION_FILE_DIFF + ">\n");
}
}
break;
case DIRECTORYDIFF: {
for (int i = 0; i < e.getNumOfDiff(); ++i) {
out.print("<dirDiff>");
out.print("<" + SNAPSHOT_DIFF_SECTION_DIR_DIFF + ">");
SnapshotDiffSection.DirectoryDiff d = SnapshotDiffSection.DirectoryDiff
.parseDelimitedFrom(in);
o("snapshotId", d.getSnapshotId())
.o("childrenSize", d.getChildrenSize())
.o("isSnapshotRoot", d.getIsSnapshotRoot())
.o("name", d.getName().toStringUtf8())
.o("createdListSize", d.getCreatedListSize());
o(SNAPSHOT_DIFF_SECTION_SNAPSHOT_ID, d.getSnapshotId())
.o(SNAPSHOT_DIFF_SECTION_CHILDREN_SIZE, d.getChildrenSize())
.o(SNAPSHOT_DIFF_SECTION_IS_SNAPSHOT_ROOT, d.getIsSnapshotRoot())
.o(SECTION_NAME, d.getName().toStringUtf8())
.o(SNAPSHOT_DIFF_SECTION_CREATED_LIST_SIZE,
d.getCreatedListSize());
for (long did : d.getDeletedINodeList()) {
o("deletedInode", did);
o(SNAPSHOT_DIFF_SECTION_DELETED_INODE, did);
}
for (int dRefid : d.getDeletedINodeRefList()) {
o("deletedInoderef", dRefid);
o(SNAPSHOT_DIFF_SECTION_DELETED_INODE_REF, dRefid);
}
for (int j = 0; j < d.getCreatedListSize(); ++j) {
SnapshotDiffSection.CreatedListEntry ce = SnapshotDiffSection.CreatedListEntry
.parseDelimitedFrom(in);
out.print("<created>");
o("name", ce.getName().toStringUtf8());
out.print("</created>\n");
out.print("<" + SNAPSHOT_DIFF_SECTION_CREATED + ">");
o(SECTION_NAME, ce.getName().toStringUtf8());
out.print("</" + SNAPSHOT_DIFF_SECTION_CREATED + ">\n");
}
out.print("</dirDiff>\n");
out.print("</" + SNAPSHOT_DIFF_SECTION_DIR_DIFF + ">\n");
}
break;
}
@ -532,41 +701,41 @@ public final class PBImageXmlWriter {
}
switch (e.getType()) {
case FILEDIFF:
out.print("</fileDiffEntry>");
out.print("</" + SNAPSHOT_DIFF_SECTION_FILE_DIFF_ENTRY + ">");
break;
case DIRECTORYDIFF:
out.print("</dirDiffEntry>");
out.print("</" + SNAPSHOT_DIFF_SECTION_DIR_DIFF_ENTRY + ">");
break;
default:
throw new IOException("unknown DiffEntry type " + e.getType());
}
}
out.print("</SnapshotDiffSection>\n");
out.print("</" + SNAPSHOT_DIFF_SECTION_NAME + ">\n");
}
private void dumpSnapshotSection(InputStream in) throws IOException {
out.print("<SnapshotSection>");
out.print("<" + SNAPSHOT_SECTION_NAME + ">");
SnapshotSection s = SnapshotSection.parseDelimitedFrom(in);
o("snapshotCounter", s.getSnapshotCounter());
o("numSnapshots", s.getNumSnapshots());
o(SNAPSHOT_SECTION_SNAPSHOT_COUNTER, s.getSnapshotCounter());
o(SNAPSHOT_SECTION_NUM_SNAPSHOTS, s.getNumSnapshots());
if (s.getSnapshottableDirCount() > 0) {
out.print("<snapshottableDir>");
out.print("<" + SNAPSHOT_SECTION_SNAPSHOT_TABLE_DIR + ">");
for (long id : s.getSnapshottableDirList()) {
o("dir", id);
o(SNAPSHOT_SECTION_DIR, id);
}
out.print("</snapshottableDir>\n");
out.print("</" + SNAPSHOT_SECTION_SNAPSHOT_TABLE_DIR + ">\n");
}
for (int i = 0; i < s.getNumSnapshots(); ++i) {
SnapshotSection.Snapshot pbs = SnapshotSection.Snapshot
.parseDelimitedFrom(in);
out.print("<snapshot>");
o("id", pbs.getSnapshotId());
out.print("<root>");
out.print("<" + SNAPSHOT_SECTION_SNAPSHOT + ">");
o(SECTION_ID, pbs.getSnapshotId());
out.print("<" + SNAPSHOT_SECTION_ROOT + ">");
dumpINodeFields(pbs.getRoot());
out.print("</root>");
out.print("</snapshot>");
out.print("</" + SNAPSHOT_SECTION_ROOT + ">");
out.print("</" + SNAPSHOT_SECTION_SNAPSHOT + ">");
}
out.print("</SnapshotSection>\n");
out.print("</" + SNAPSHOT_SECTION_NAME + ">\n");
}
private void loadStringTable(InputStream in) throws IOException {

View File

@ -2606,6 +2606,24 @@
</description>
</property>
<property>
<name>dfs.namenode.edekcacheloader.interval.ms</name>
<value>1000</value>
<description>When KeyProvider is configured, the interval at which the edek
cache is warmed up after the NN starts up or becomes active. All edeks will be
loaded from the KMS into the provider cache. The edek cache loader keeps
retrying until it succeeds or the NN leaves the active state.
</description>
</property>
<property>
<name>dfs.namenode.edekcacheloader.initial.delay.ms</name>
<value>3000</value>
<description>When KeyProvider is configured, the time delayed until the first
attempt to warm up edek cache on NN start up / become active.
</description>
</property>
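As a minimal sketch (not part of the patch), the two edek cache loader settings above could be tuned programmatically on a Configuration; the property names are the ones defined in this file, while the sample values and wrapper class are illustrative only:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class EdekCacheLoaderConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // Retry interval of the edek cache warm-up loop (default 1000 ms).
    conf.setInt("dfs.namenode.edekcacheloader.interval.ms", 2000);
    // Delay before the first warm-up attempt after the NN starts up or
    // becomes active (default 3000 ms); the tests in this change set it to 0
    // through DFSConfigKeys.DFS_NAMENODE_EDEKCACHELOADER_INITIAL_DELAY_MS_KEY.
    conf.setInt("dfs.namenode.edekcacheloader.initial.delay.ms", 0);
    System.out.println("interval="
        + conf.getInt("dfs.namenode.edekcacheloader.interval.ms", 1000)
        + " initialDelay="
        + conf.getInt("dfs.namenode.edekcacheloader.initial.delay.ms", 3000));
  }
}
```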
<property>
<name>dfs.namenode.inotify.max.events.per.rpc</name>
<value>1000</value>
@ -3004,4 +3022,18 @@
retries or failovers for WebHDFS client.
</description>
</property>
<property>
<name>dfs.namenode.hosts.provider.classname</name>
<value>org.apache.hadoop.hdfs.server.blockmanagement.HostFileManager</value>
<description>
The class that provides access to host files.
org.apache.hadoop.hdfs.server.blockmanagement.HostFileManager is used
by default; it loads the files specified by dfs.hosts and dfs.hosts.exclude.
If org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager is
used, it loads the JSON file defined by dfs.hosts.
Changing the class requires a NameNode restart; "dfsadmin -refreshNodes" only
re-reads the configuration files used by the configured class.
</description>
</property>
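A minimal sketch of selecting the JSON-based host provider, mirroring how the tests in this change configure it; the key and class names come from the patch, the wrapper class is illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager;
import org.apache.hadoop.hdfs.server.blockmanagement.HostConfigManager;

public class HostsProviderConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // Select the JSON-based provider instead of the default HostFileManager.
    // A NameNode restart is required for the change to take effect;
    // "dfsadmin -refreshNodes" only re-reads the files the provider uses.
    conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
        CombinedHostFileManager.class, HostConfigManager.class);
    System.out.println(
        conf.get(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY));
  }
}
```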
</configuration>

View File

@ -106,7 +106,8 @@
</div>
{/if}
{@if cond="{NumberOfMissingBlocks} > 0"}
{@eq key=nnstat.State value="active"}
{@if cond="{NumberOfMissingBlocks} > 0"}
<div class="alert alert-dismissable alert-warning">
<button type="button" class="close" data-dismiss="alert" aria-hidden="true">&times;</button>
@ -119,7 +120,8 @@
</div>
<p>Please check the logs or run fsck in order to identify the missing blocks.{@if cond="{NumberOfSnapshottableDirs} > 0"} Please run fsck with -includeSnapshots in order to see detailed reports about snapshots.{/if} See the Hadoop FAQ for common causes and potential solutions.</p>
</div>
{/if}
{/if}
{/eq}
{/nn}
<div class="page-header"><h1>Overview {#nnstat}<small>'{HostAndPort}' ({State})</small>{/nnstat}</h1></div>
@ -173,10 +175,13 @@
<tr><th><a href="#tab-datanode">Dead Nodes</a></th><td>{NumDeadDataNodes} (Decommissioned: {NumDecomDeadDataNodes})</td></tr>
<tr><th><a href="#tab-datanode">Decommissioning Nodes</a></th><td>{NumDecommissioningDataNodes}</td></tr>
<tr><th><a href="#tab-datanode-volume-failures">Total Datanode Volume Failures</a></th><td>{VolumeFailuresTotal} ({EstimatedCapacityLostTotal|fmt_bytes})</td></tr>
<tr><th title="Excludes missing blocks.">Number of Under-Replicated Blocks</th><td>{UnderReplicatedBlocks}</td></tr>
<tr><th>Number of Blocks Pending Deletion</th><td>{PendingDeletionBlocks}</td></tr>
{@eq key=nnstat.State value="active"}
<tr><th title="Excludes missing blocks.">Number of Under-Replicated Blocks</th><td>{UnderReplicatedBlocks}</td></tr>
<tr><th>Number of Blocks Pending Deletion</th><td>{PendingDeletionBlocks}</td></tr>
{/eq}
<tr><th>Block Deletion Start Time</th><td>{BlockDeletionStartTime|date_tostring}</td></tr>
{/fs}
</table>
<div class="page-header"><h1>NameNode Journal Status</h1></div>

View File

@ -142,12 +142,16 @@ The `bin/hdfs dfsadmin` command supports a few HDFS administration related opera
during last upgrade.
* `-refreshNodes`: Updates the namenode with the set of datanodes
allowed to connect to the namenode. Namenodes re-read datanode
allowed to connect to the namenode. By default, namenodes re-read datanode
hostnames in the files defined by `dfs.hosts` and `dfs.hosts.exclude`.
Hosts defined in `dfs.hosts` are the datanodes that are part of the
cluster. If there are entries in `dfs.hosts`, only the hosts in it
are allowed to register with the namenode. Entries in
`dfs.hosts.exclude` are datanodes that need to be decommissioned.
Alternatively if `dfs.namenode.hosts.provider.classname` is set to
`org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager`,
all include and exclude hosts are specified in the JSON file defined by
`dfs.hosts`.
Datanodes complete decommissioning when all the replicas from them
are replicated to other datanodes. Decommissioned nodes are not
automatically shutdown and are not chosen for writing for new
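To illustrate the CombinedHostFileManager path described above, here is a minimal sketch that uses the test-only HostsFileWriter helper and DatanodeAdminProperties from elsewhere in this change to generate the JSON file that `dfs.hosts` points at; the host name and port are hypothetical:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
import org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager;
import org.apache.hadoop.hdfs.server.blockmanagement.HostConfigManager;
import org.apache.hadoop.hdfs.util.HostsFileWriter;

public class CombinedHostsFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
        CombinedHostFileManager.class, HostConfigManager.class);

    // HostsFileWriter (a test utility in this change) writes the JSON hosts
    // file and points dfs.hosts at it in the given configuration.
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/refreshNodesSketch");

    DatanodeAdminProperties dn = new DatanodeAdminProperties();
    dn.setHostName("dn1.example.com"); // hypothetical datanode host name
    dn.setPort(50010);                 // hypothetical transfer port
    dn.setUpgradeDomain("ud1");
    hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] {dn});

    // A NameNode started with this conf loads include/exclude hosts from the
    // JSON file; "hdfs dfsadmin -refreshNodes" re-reads it after later edits.
    hostsFileWriter.cleanup();
  }
}
```

In production the JSON file would be edited directly and refreshed with `hdfs dfsadmin -refreshNodes`; HostsFileWriter is only a convenience for tests.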

View File

@ -65,7 +65,6 @@ WebHDFS REST API
* [Rename Snapshot](#Rename_Snapshot)
* [Delegation Token Operations](#Delegation_Token_Operations)
* [Get Delegation Token](#Get_Delegation_Token)
* [Get Delegation Tokens](#Get_Delegation_Tokens)
* [Renew Delegation Token](#Renew_Delegation_Token)
* [Cancel Delegation Token](#Cancel_Delegation_Token)
* [Error Responses](#Error_Responses)
@ -89,7 +88,6 @@ WebHDFS REST API
* [RemoteException JSON Schema](#RemoteException_JSON_Schema)
* [Token JSON Schema](#Token_JSON_Schema)
* [Token Properties](#Token_Properties)
* [Tokens JSON Schema](#Tokens_JSON_Schema)
* [HTTP Query Parameter Dictionary](#HTTP_Query_Parameter_Dictionary)
* [ACL Spec](#ACL_Spec)
* [XAttr Name](#XAttr_Name)
@ -148,7 +146,6 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
* [`GETFILECHECKSUM`](#Get_File_Checksum) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileChecksum)
* [`GETHOMEDIRECTORY`](#Get_Home_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getHomeDirectory)
* [`GETDELEGATIONTOKEN`](#Get_Delegation_Token) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getDelegationToken)
* [`GETDELEGATIONTOKENS`](#Get_Delegation_Tokens) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getDelegationTokens)
* [`GETXATTRS`](#Get_an_XAttr) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getXAttr)
* [`GETXATTRS`](#Get_multiple_XAttrs) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getXAttrs)
* [`GETXATTRS`](#Get_all_XAttrs) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getXAttrs)
@ -1033,32 +1030,6 @@ Delegation Token Operations
See also: [`renewer`](#Renewer), [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getDelegationToken, [`kind`](#Token_Kind), [`service`](#Token_Service)
### Get Delegation Tokens
* Submit a HTTP GET request.
curl -i "http://<HOST>:<PORT>/webhdfs/v1/?op=GETDELEGATIONTOKENS&renewer=<USER>"
The client receives a response with a [`Tokens` JSON object](#Tokens_JSON_Schema):
HTTP/1.1 200 OK
Content-Type: application/json
Transfer-Encoding: chunked
{
"Tokens":
{
"Token":
[
{
"urlString":"KAAKSm9i ..."
}
]
}
}
See also: [`renewer`](#Renewer), [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getDelegationTokens
### Renew Delegation Token
* Submit a HTTP PUT request.
@ -1655,7 +1626,7 @@ See also: [`Token` Properties](#Token_Properties), [`GETDELEGATIONTOKEN`](#Get_D
#### Token Properties
JavaScript syntax is used to define `tokenProperties` so that it can be referred in both `Token` and `Tokens` JSON schemas.
JavaScript syntax is used to define `tokenProperties` so that it can be referred to in the `Token` JSON schema.
```json
var tokenProperties =
@ -1673,33 +1644,7 @@ var tokenProperties =
}
```
### Tokens JSON Schema
A `Tokens` JSON object represents an array of `Token` JSON objects.
```json
{
"name" : "Tokens",
"properties":
{
"Tokens":
{
"type" : "object",
"properties":
{
"Token":
{
"description": "An array of Token",
"type" : "array",
"items" : "Token": tokenProperties //See Token Properties
}
}
}
}
}
```
See also: [`Token` Properties](#Token_Properties), [`GETDELEGATIONTOKENS`](#Get_Delegation_Tokens), the note in [Delegation](#Delegation).
See also: [`Token` Properties](#Token_Properties), the note in [Delegation](#Delegation).
HTTP Query Parameter Dictionary
-------------------------------
@ -2013,7 +1958,7 @@ See also: [`RENAME`](#Rename_a_FileDirectory)
| Valid Values | Any valid username. |
| Syntax | Any string. |
See also: [`GETDELEGATIONTOKEN`](#Get_Delegation_Token), [`GETDELEGATIONTOKENS`](#Get_Delegation_Tokens)
See also: [`GETDELEGATIONTOKEN`](#Get_Delegation_Token)
### Replication

View File

@ -358,7 +358,7 @@ public class TestEnhancedByteBufferAccess {
fsIn.close();
fsIn = fs.open(TEST_PATH);
final ShortCircuitCache cache = ClientContext.get(
CONTEXT, new DfsClientConf(conf)). getShortCircuitCache();
CONTEXT, conf).getShortCircuitCache();
cache.accept(new CountingVisitor(0, 5, 5, 0));
results[0] = fsIn.read(null, BLOCK_SIZE,
EnumSet.of(ReadOption.SKIP_CHECKSUMS));
@ -661,7 +661,7 @@ public class TestEnhancedByteBufferAccess {
final ExtendedBlock firstBlock =
DFSTestUtil.getFirstBlock(fs, TEST_PATH);
final ShortCircuitCache cache = ClientContext.get(
CONTEXT, new DfsClientConf(conf)). getShortCircuitCache();
CONTEXT, conf).getShortCircuitCache();
waitForReplicaAnchorStatus(cache, firstBlock, true, true, 1);
// Uncache the replica
fs.removeCacheDirective(directiveId);

View File

@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
import org.apache.hadoop.fs.FsTracer;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.apache.hadoop.util.Time;
@ -736,7 +737,8 @@ public class TestBlockReaderLocal {
byte original[] = new byte[BlockReaderLocalTest.TEST_LENGTH];
FileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster = new MiniDFSCluster.Builder(conf).
hosts(new String[] {NetUtils.getLocalHostname()}).build();
cluster.waitActive();
fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, TEST_PATH,

View File

@ -99,8 +99,6 @@ public class TestConnCache {
DFSClient client = new DFSClient(
new InetSocketAddress("localhost",
util.getCluster().getNameNodePort()), util.getConf());
ClientContext cacheContext =
ClientContext.get(contextName, client.getConf());
DFSInputStream in = client.open(testFile.toString());
LOG.info("opened " + testFile.toString());
byte[] dataBuf = new byte[BLOCK_SIZE];

View File

@ -29,11 +29,16 @@ import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager;
import org.apache.hadoop.hdfs.server.blockmanagement.HostConfigManager;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.hdfs.util.HostsFileWriter;
import org.junit.Test;
/**
@ -43,7 +48,57 @@ public class TestDatanodeReport {
static final Log LOG = LogFactory.getLog(TestDatanodeReport.class);
final static private Configuration conf = new HdfsConfiguration();
final static private int NUM_OF_DATANODES = 4;
/**
* This test verifies upgrade domain is set according to the JSON host file.
*/
@Test
public void testDatanodeReportWithUpgradeDomain() throws Exception {
conf.setInt(
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); // 0.5s
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
CombinedHostFileManager.class, HostConfigManager.class);
HostsFileWriter hostsFileWriter = new HostsFileWriter();
hostsFileWriter.initialize(conf, "temp/datanodeReport");
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
final DFSClient client = cluster.getFileSystem().dfs;
final String ud1 = "ud1";
final String ud2 = "ud2";
try {
//wait until the cluster is up
cluster.waitActive();
DatanodeAdminProperties datanode = new DatanodeAdminProperties();
datanode.setHostName(cluster.getDataNodes().get(0).getDatanodeId().getHostName());
datanode.setUpgradeDomain(ud1);
hostsFileWriter.initIncludeHosts(
new DatanodeAdminProperties[]{datanode});
client.refreshNodes();
DatanodeInfo[] all = client.datanodeReport(DatanodeReportType.ALL);
assertEquals(all[0].getUpgradeDomain(), ud1);
datanode.setUpgradeDomain(null);
hostsFileWriter.initIncludeHosts(
new DatanodeAdminProperties[]{datanode});
client.refreshNodes();
all = client.datanodeReport(DatanodeReportType.ALL);
assertEquals(all[0].getUpgradeDomain(), null);
datanode.setUpgradeDomain(ud2);
hostsFileWriter.initIncludeHosts(
new DatanodeAdminProperties[]{datanode});
client.refreshNodes();
all = client.datanodeReport(DatanodeReportType.ALL);
assertEquals(all[0].getUpgradeDomain(), ud2);
} finally {
cluster.shutdown();
}
}
/**
* This test attempts to different types of datanode report.
*/

View File

@ -70,6 +70,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.DataChecksum;
@ -651,6 +652,67 @@ public class TestDistributedFileSystem {
assertEquals(largeReadOps, DFSTestUtil.getStatistics(fs).getLargeReadOps());
}
/** Checks read statistics. */
private void checkReadStatistics(FileSystem fs, int distance, long expectedReadBytes) {
long bytesRead = DFSTestUtil.getStatistics(fs).
getBytesReadByDistance(distance);
assertEquals(expectedReadBytes, bytesRead);
}
@Test
public void testLocalHostReadStatistics() throws Exception {
testReadFileSystemStatistics(0);
}
@Test
public void testLocalRackReadStatistics() throws Exception {
testReadFileSystemStatistics(2);
}
@Test
public void testRemoteRackOfFirstDegreeReadStatistics() throws Exception {
testReadFileSystemStatistics(4);
}
/** expectedDistance is the expected distance between client and dn.
* 0 means local host.
* 2 means same rack.
* 4 means remote rack of first degree.
*/
private void testReadFileSystemStatistics(int expectedDistance)
throws IOException {
MiniDFSCluster cluster = null;
final Configuration conf = getTestConfiguration();
// create a cluster with a dn with the expected distance.
if (expectedDistance == 0) {
cluster = new MiniDFSCluster.Builder(conf).
hosts(new String[] {NetUtils.getLocalHostname()}).build();
} else if (expectedDistance == 2) {
cluster = new MiniDFSCluster.Builder(conf).
hosts(new String[] {"hostFoo"}).build();
} else if (expectedDistance == 4) {
cluster = new MiniDFSCluster.Builder(conf).
racks(new String[] {"/rackFoo"}).build();
}
// create a file, read the file and verify the metrics
try {
final FileSystem fs = cluster.getFileSystem();
DFSTestUtil.getStatistics(fs).reset();
Path dir = new Path("/test");
Path file = new Path(dir, "file");
String input = "hello world";
DFSTestUtil.writeFile(fs, file, input);
FSDataInputStream stm = fs.open(file);
byte[] actual = new byte[4096];
stm.read(actual);
checkReadStatistics(fs, expectedDistance, input.length());
} finally {
if (cluster != null) cluster.shutdown();
}
}
@Test
public void testFileChecksum() throws Exception {
final long seed = RAN.nextLong();

View File

@ -117,10 +117,10 @@ import javax.xml.parsers.SAXParserFactory;
public class TestEncryptionZones {
private Configuration conf;
protected Configuration conf;
private FileSystemTestHelper fsHelper;
private MiniDFSCluster cluster;
protected MiniDFSCluster cluster;
protected HdfsAdmin dfsAdmin;
protected DistributedFileSystem fs;
private File testRootDir;

View File

@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
import static org.junit.Assert.assertTrue;
import com.google.common.base.Supplier;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.apache.hadoop.crypto.key.kms.server.MiniKMS;
import org.apache.hadoop.security.Credentials;
@ -26,10 +27,12 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
import java.io.File;
import java.util.Arrays;
@ -71,8 +74,10 @@ public class TestEncryptionZonesWithKMS extends TestEncryptionZones {
final Path zonePath = new Path("/TestEncryptionZone");
fsWrapper.mkdir(zonePath, FsPermission.getDirDefault(), false);
dfsAdmin.createEncryptionZone(zonePath, TEST_KEY);
assertTrue(((KMSClientProvider)fs.getClient().getKeyProvider()).
getEncKeyQueueSize(TEST_KEY) > 0);
@SuppressWarnings("unchecked")
KMSClientProvider kcp = (KMSClientProvider) Whitebox
.getInternalState(cluster.getNamesystem().getProvider(), "extension");
assertTrue(kcp.getEncKeyQueueSize(TEST_KEY) > 0);
}
@Test(timeout = 120000)
@ -92,4 +97,31 @@ public class TestEncryptionZonesWithKMS extends TestEncryptionZones {
Assert.assertEquals(0, tokens.length);
Assert.assertEquals(2, creds.numberOfTokens());
}
@Test(timeout = 120000)
public void testWarmupEDEKCacheOnStartup() throws Exception {
final Path zonePath = new Path("/TestEncryptionZone");
fsWrapper.mkdir(zonePath, FsPermission.getDirDefault(), false);
dfsAdmin.createEncryptionZone(zonePath, TEST_KEY);
@SuppressWarnings("unchecked")
KMSClientProvider spy = (KMSClientProvider) Whitebox
.getInternalState(cluster.getNamesystem().getProvider(), "extension");
assertTrue("key queue is empty after creating encryption zone",
spy.getEncKeyQueueSize(TEST_KEY) > 0);
conf.setInt(
DFSConfigKeys.DFS_NAMENODE_EDEKCACHELOADER_INITIAL_DELAY_MS_KEY, 0);
cluster.restartNameNode(true);
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
final KMSClientProvider kspy = (KMSClientProvider) Whitebox
.getInternalState(cluster.getNamesystem().getProvider(),
"extension");
return kspy.getEncKeyQueueSize(TEST_KEY) > 0;
}
}, 1000, 60000);
}
}

View File

@ -28,6 +28,7 @@ import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.junit.Assert;
import org.junit.Test;
@ -246,6 +247,11 @@ public class TestExternalBlockReader {
return true;
}
@Override
public int getNetworkDistance() {
return 0;
}
synchronized String getError() {
return error;
}
@ -271,7 +277,7 @@ public class TestExternalBlockReader {
String uuid = UUID.randomUUID().toString();
conf.set(SYNTHETIC_BLOCK_READER_TEST_UUID_KEY, uuid);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(1)
.hosts(new String[] {NetUtils.getLocalHostname()})
.build();
final int TEST_LENGTH = 2047;
DistributedFileSystem dfs = cluster.getFileSystem();

View File

@ -0,0 +1,247 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
/**
* This test serves as a prototype to demo the idea proposed so far. It creates
* two files using the same data: one in replicated mode, the other in striped
* layout. For simplicity, it assumes both files have 6 data blocks and the
* same block size.
*/
public class TestFileChecksum {
public static final Log LOG = LogFactory.getLog(TestFileChecksum.class);
private int dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
private int parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private Configuration conf;
private DFSClient client;
private int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
private int stripesPerBlock = 6;
private int blockSize = cellSize * stripesPerBlock;
private int numBlockGroups = 10;
private int stripSize = cellSize * dataBlocks;
private int blockGroupSize = stripesPerBlock * stripSize;
private int fileSize = numBlockGroups * blockGroupSize;
private String ecDir = "/striped";
private String stripedFile1 = ecDir + "/stripedFileChecksum1";
private String stripedFile2 = ecDir + "/stripedFileChecksum2";
private String replicatedFile = "/replicatedFileChecksum";
@Before
public void setup() throws IOException {
int numDNs = dataBlocks + parityBlocks + 2;
conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
false);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
Path ecPath = new Path(ecDir);
cluster.getFileSystem().mkdir(ecPath, FsPermission.getDirDefault());
cluster.getFileSystem().getClient().setErasureCodingPolicy(ecDir, null);
fs = cluster.getFileSystem();
client = fs.getClient();
prepareTestFiles();
getDataNodeToKill(stripedFile1);
getDataNodeToKill(replicatedFile);
}
@After
public void tearDown() {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
@Test
public void testStripedFileChecksum1() throws Exception {
int length = 0;
testStripedFileChecksum(length, length + 10);
}
@Test
public void testStripedFileChecksum2() throws Exception {
int length = stripSize - 1;
testStripedFileChecksum(length, length - 10);
}
@Test
public void testStripedFileChecksum3() throws Exception {
int length = stripSize;
testStripedFileChecksum(length, length - 10);
}
@Test
public void testStripedFileChecksum4() throws Exception {
int length = stripSize + cellSize * 2;
testStripedFileChecksum(length, length - 10);
}
@Test
public void testStripedFileChecksum5() throws Exception {
int length = blockGroupSize;
testStripedFileChecksum(length, length - 10);
}
@Test
public void testStripedFileChecksum6() throws Exception {
int length = blockGroupSize + blockSize;
testStripedFileChecksum(length, length - 10);
}
@Test
public void testStripedFileChecksum7() throws Exception {
int length = -1; // whole file
testStripedFileChecksum(length, fileSize);
}
void testStripedFileChecksum(int range1, int range2) throws Exception {
FileChecksum stripedFileChecksum1 = getFileChecksum(stripedFile1,
range1, false);
FileChecksum stripedFileChecksum2 = getFileChecksum(stripedFile2,
range1, false);
FileChecksum stripedFileChecksum3 = getFileChecksum(stripedFile2,
range2, false);
LOG.info("stripedFileChecksum1:" + stripedFileChecksum1);
LOG.info("stripedFileChecksum2:" + stripedFileChecksum2);
LOG.info("stripedFileChecksum3:" + stripedFileChecksum3);
Assert.assertTrue(stripedFileChecksum1.equals(stripedFileChecksum2));
if (range1 >=0 && range1 != range2) {
Assert.assertFalse(stripedFileChecksum1.equals(stripedFileChecksum3));
}
}
@Test
public void testStripedAndReplicatedFileChecksum() throws Exception {
FileChecksum stripedFileChecksum1 = getFileChecksum(stripedFile1,
10, false);
FileChecksum replicatedFileChecksum = getFileChecksum(replicatedFile,
10, false);
Assert.assertFalse(stripedFileChecksum1.equals(replicatedFileChecksum));
}
/*
// TODO: allow datanode failure, HDFS-9833
@Test
public void testStripedAndReplicatedWithFailure() throws Exception {
FileChecksum stripedFileChecksum1 = getFileChecksum(stripedFile1,
10, true);
FileChecksum replicatedFileChecksum = getFileChecksum(replicatedFile,
10, true);
Assert.assertFalse(stripedFileChecksum1.equals(replicatedFileChecksum));
}*/
private FileChecksum getFileChecksum(String filePath, int range,
boolean killDn) throws Exception {
int dnIdxToDie = -1;
if (killDn) {
dnIdxToDie = getDataNodeToKill(filePath);
DataNode dnToDie = cluster.getDataNodes().get(dnIdxToDie);
shutdownDataNode(dnToDie);
}
Path testPath = new Path(filePath);
FileChecksum fc;
if (range >= 0) {
fc = fs.getFileChecksum(testPath, range);
} else {
fc = fs.getFileChecksum(testPath);
}
if (dnIdxToDie != -1) {
cluster.restartDataNode(dnIdxToDie, true);
}
return fc;
}
void prepareTestFiles() throws IOException {
byte[] fileData = StripedFileTestUtil.generateBytes(fileSize);
String[] filePaths = new String[] {
stripedFile1, stripedFile2, replicatedFile
};
for (String filePath : filePaths) {
Path testPath = new Path(filePath);
DFSTestUtil.writeFile(fs, testPath, fileData);
}
}
void shutdownDataNode(DataNode dataNode) throws IOException {
/*
* Kill the datanode which contains one replica
* We need to make sure it dead in namenode: clear its update time and
* trigger NN to check heartbeat.
*/
dataNode.shutdown();
cluster.setDataNodeDead(dataNode.getDatanodeId());
}
/**
* Determine the datanode that hosts the first block of the file. For
* simplicity, this just returns the first datanode, since it is tried first.
*/
int getDataNodeToKill(String filePath) throws IOException {
LocatedBlocks locatedBlocks = client.getLocatedBlocks(filePath, 0);
LocatedBlock locatedBlock = locatedBlocks.get(0);
DatanodeInfo[] datanodes = locatedBlock.getLocations();
DatanodeInfo chosenDn = datanodes[0];
int idx = 0;
for (DataNode dn : cluster.getDataNodes()) {
if (dn.getInfoPort() == chosenDn.getInfoPort()) {
return idx;
}
idx++;
}
return -1;
}
}

View File

@ -19,7 +19,6 @@
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertArrayEquals;
import java.util.ArrayList;
@ -39,6 +38,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.util.HostsFileWriter;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Test;
@ -385,17 +385,8 @@ public class TestBlocksWithNotEnoughRacks {
short REPLICATION_FACTOR = 2;
final Path filePath = new Path("/testFile");
// Configure an excludes file
FileSystem localFileSys = FileSystem.getLocal(conf);
Path workingDir = new Path(MiniDFSCluster.getBaseDirectory());
Path dir = new Path(workingDir, "temp/decommission");
Path excludeFile = new Path(dir, "exclude");
Path includeFile = new Path(dir, "include");
assertTrue(localFileSys.mkdirs(dir));
DFSTestUtil.writeFile(localFileSys, excludeFile, "");
DFSTestUtil.writeFile(localFileSys, includeFile, "");
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
HostsFileWriter hostsFileWriter = new HostsFileWriter();
hostsFileWriter.initialize(conf, "temp/decommission");
// Two blocks and four racks
String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
@ -416,7 +407,7 @@ public class TestBlocksWithNotEnoughRacks {
BlockLocation locs[] = fs.getFileBlockLocations(
fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
String name = locs[0].getNames()[0];
DFSTestUtil.writeFile(localFileSys, excludeFile, name);
hostsFileWriter.initExcludeHost(name);
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
DFSTestUtil.waitForDecommission(fs, name);
@ -424,6 +415,7 @@ public class TestBlocksWithNotEnoughRacks {
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
} finally {
cluster.shutdown();
hostsFileWriter.cleanup();
}
}
@ -438,17 +430,8 @@ public class TestBlocksWithNotEnoughRacks {
short REPLICATION_FACTOR = 5;
final Path filePath = new Path("/testFile");
// Configure an excludes file
FileSystem localFileSys = FileSystem.getLocal(conf);
Path workingDir = new Path(MiniDFSCluster.getBaseDirectory());
Path dir = new Path(workingDir, "temp/decommission");
Path excludeFile = new Path(dir, "exclude");
Path includeFile = new Path(dir, "include");
assertTrue(localFileSys.mkdirs(dir));
DFSTestUtil.writeFile(localFileSys, excludeFile, "");
DFSTestUtil.writeFile(localFileSys, includeFile, "");
conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
HostsFileWriter hostsFileWriter = new HostsFileWriter();
hostsFileWriter.initialize(conf, "temp/decommission");
// All hosts are on two racks, only one host on /rack2
String racks[] = {"/rack1", "/rack2", "/rack1", "/rack1", "/rack1"};
@ -474,7 +457,7 @@ public class TestBlocksWithNotEnoughRacks {
for (String top : locs[0].getTopologyPaths()) {
if (!top.startsWith("/rack2")) {
String name = top.substring("/rack1".length()+1);
DFSTestUtil.writeFile(localFileSys, excludeFile, name);
hostsFileWriter.initExcludeHost(name);
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
DFSTestUtil.waitForDecommission(fs, name);
break;
@ -486,6 +469,7 @@ public class TestBlocksWithNotEnoughRacks {
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
} finally {
cluster.shutdown();
hostsFileWriter.cleanup();
}
}
}

View File

@ -383,9 +383,9 @@ public class TestDatanodeManager {
DatanodeManager dm = mockDatanodeManager(fsn, new Configuration());
HostFileManager hm = new HostFileManager();
HostFileManager.HostSet noNodes = new HostFileManager.HostSet();
HostFileManager.HostSet oneNode = new HostFileManager.HostSet();
HostFileManager.HostSet twoNodes = new HostFileManager.HostSet();
HostSet noNodes = new HostSet();
HostSet oneNode = new HostSet();
HostSet twoNodes = new HostSet();
DatanodeRegistration dr1 = new DatanodeRegistration(
new DatanodeID("127.0.0.1", "127.0.0.1", "someStorageID-123",
12345, 12345, 12345, 12345),
@ -402,7 +402,7 @@ public class TestDatanodeManager {
oneNode.add(entry("127.0.0.1:23456"));
hm.refresh(twoNodes, noNodes);
Whitebox.setInternalState(dm, "hostFileManager", hm);
Whitebox.setInternalState(dm, "hostConfigManager", hm);
// Register two data nodes to simulate them coming up.
// We need to add two nodes, because if we have only one node, removing it

View File

@ -40,7 +40,7 @@ public class TestHostFileManager {
@Test
public void testDeduplication() {
HostFileManager.HostSet s = new HostFileManager.HostSet();
HostSet s = new HostSet();
// These entries will be de-duped, since they refer to the same IP
// address + port combo.
s.add(entry("127.0.0.1:12345"));
@ -60,7 +60,7 @@ public class TestHostFileManager {
@Test
public void testRelation() {
HostFileManager.HostSet s = new HostFileManager.HostSet();
HostSet s = new HostSet();
s.add(entry("127.0.0.1:123"));
Assert.assertTrue(s.match(entry("127.0.0.1:123")));
Assert.assertFalse(s.match(entry("127.0.0.1:12")));
@ -105,8 +105,8 @@ public class TestHostFileManager {
FSNamesystem fsn = mock(FSNamesystem.class);
Configuration conf = new Configuration();
HostFileManager hm = new HostFileManager();
HostFileManager.HostSet includedNodes = new HostFileManager.HostSet();
HostFileManager.HostSet excludedNodes = new HostFileManager.HostSet();
HostSet includedNodes = new HostSet();
HostSet excludedNodes = new HostSet();
includedNodes.add(entry("127.0.0.1:12345"));
includedNodes.add(entry("localhost:12345"));
@ -122,7 +122,7 @@ public class TestHostFileManager {
hm.refresh(includedNodes, excludedNodes);
DatanodeManager dm = new DatanodeManager(bm, fsn, conf);
Whitebox.setInternalState(dm, "hostFileManager", hm);
Whitebox.setInternalState(dm, "hostConfigManager", hm);
Map<String, DatanodeDescriptor> dnMap = (Map<String,
DatanodeDescriptor>) Whitebox.getInternalState(dm, "datanodeMap");

View File

@ -20,11 +20,10 @@ package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertTrue;
import java.lang.management.ManagementFactory;
import java.io.File;
import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
@ -34,7 +33,13 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager;
import org.apache.hadoop.hdfs.server.blockmanagement.HostConfigManager;
import org.apache.hadoop.hdfs.server.blockmanagement.HostFileManager;
import org.apache.hadoop.hdfs.util.HostsFileWriter;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import javax.management.MBeanServer;
import javax.management.ObjectName;
@ -43,9 +48,21 @@ import javax.management.ObjectName;
* DFS_HOSTS and DFS_HOSTS_EXCLUDE tests
*
*/
@RunWith(Parameterized.class)
public class TestHostsFiles {
private static final Log LOG =
LogFactory.getLog(TestHostsFiles.class.getName());
private Class hostFileMgrClass;
public TestHostsFiles(Class hostFileMgrClass) {
this.hostFileMgrClass = hostFileMgrClass;
}
@Parameterized.Parameters
public static Iterable<Object[]> data() {
return Arrays.asList(new Object[][]{
{HostFileManager.class}, {CombinedHostFileManager.class}});
}
/*
* Return a configuration object with low timeouts for testing and
@ -72,6 +89,10 @@ public class TestHostsFiles {
// Indicates we have multiple racks
conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, "xyz");
// Host file manager
conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
hostFileMgrClass, HostConfigManager.class);
return conf;
}
@ -80,18 +101,8 @@ public class TestHostsFiles {
Configuration conf = getConf();
short REPLICATION_FACTOR = 2;
final Path filePath = new Path("/testFile");
// Configure an excludes file
FileSystem localFileSys = FileSystem.getLocal(conf);
Path workingDir = new Path(MiniDFSCluster.getBaseDirectory());
Path dir = new Path(workingDir, "temp/decommission");
Path excludeFile = new Path(dir, "exclude");
Path includeFile = new Path(dir, "include");
assertTrue(localFileSys.mkdirs(dir));
DFSTestUtil.writeFile(localFileSys, excludeFile, "");
DFSTestUtil.writeFile(localFileSys, includeFile, "");
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
HostsFileWriter hostsFileWriter = new HostsFileWriter();
hostsFileWriter.initialize(conf, "temp/decommission");
// Two blocks and four racks
String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
@ -112,9 +123,8 @@ public class TestHostsFiles {
BlockLocation locs[] = fs.getFileBlockLocations(
fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
String name = locs[0].getNames()[0];
String names = name + "\n" + "localhost:42\n";
LOG.info("adding '" + names + "' to exclude file " + excludeFile.toUri().getPath());
DFSTestUtil.writeFile(localFileSys, excludeFile, name);
LOG.info("adding '" + name + "' to decommission");
hostsFileWriter.initExcludeHost(name);
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
DFSTestUtil.waitForDecommission(fs, name);
@ -131,9 +141,7 @@ public class TestHostsFiles {
if (cluster != null) {
cluster.shutdown();
}
if (localFileSys.exists(dir)) {
FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
}
hostsFileWriter.cleanup();
}
}
@ -141,20 +149,10 @@ public class TestHostsFiles {
public void testHostsIncludeForDeadCount() throws Exception {
Configuration conf = getConf();
// Configure an excludes file
FileSystem localFileSys = FileSystem.getLocal(conf);
Path workingDir = new Path(MiniDFSCluster.getBaseDirectory());
Path dir = new Path(workingDir, "temp/decommission");
Path excludeFile = new Path(dir, "exclude");
Path includeFile = new Path(dir, "include");
assertTrue(localFileSys.mkdirs(dir));
StringBuilder includeHosts = new StringBuilder();
includeHosts.append("localhost:52").append("\n").append("127.0.0.1:7777")
.append("\n");
DFSTestUtil.writeFile(localFileSys, excludeFile, "");
DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
HostsFileWriter hostsFileWriter = new HostsFileWriter();
hostsFileWriter.initialize(conf, "temp/decommission");
hostsFileWriter.initIncludeHosts(new String[]
{"localhost:52","127.0.0.1:7777"});
MiniDFSCluster cluster = null;
try {
@ -174,9 +172,7 @@ public class TestHostsFiles {
if (cluster != null) {
cluster.shutdown();
}
if (localFileSys.exists(dir)) {
FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
}
hostsFileWriter.cleanup();
}
}
}

View File

@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
import org.apache.hadoop.hdfs.util.HostsFileWriter;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
import org.apache.hadoop.net.ServerSocketUtil;
@ -44,9 +45,9 @@ import org.mortbay.util.ajax.JSON;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import java.io.File;
import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
@ -236,8 +237,8 @@ public class TestNameNodeMXBean {
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
MiniDFSCluster cluster = null;
FileSystem localFileSys = null;
Path dir = null;
HostsFileWriter hostsFileWriter = new HostsFileWriter();
hostsFileWriter.initialize(conf, "temp/TestNameNodeMXBean");
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
@ -249,18 +250,12 @@ public class TestNameNodeMXBean {
ObjectName mxbeanName = new ObjectName(
"Hadoop:service=NameNode,name=NameNodeInfo");
// Define include file to generate deadNodes metrics
localFileSys = FileSystem.getLocal(conf);
Path workingDir = localFileSys.getWorkingDirectory();
dir = new Path(workingDir,"build/test/data/temp/TestNameNodeMXBean");
Path includeFile = new Path(dir, "include");
assertTrue(localFileSys.mkdirs(dir));
StringBuilder includeHosts = new StringBuilder();
List<String> hosts = new ArrayList<>();
for(DataNode dn : cluster.getDataNodes()) {
includeHosts.append(dn.getDisplayName()).append("\n");
hosts.add(dn.getDisplayName());
}
DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
hostsFileWriter.initIncludeHosts(hosts.toArray(
new String[hosts.size()]));
fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
cluster.stopDataNode(0);
@ -282,12 +277,10 @@ public class TestNameNodeMXBean {
assertTrue(deadNode.containsKey("xferaddr"));
}
} finally {
if ((localFileSys != null) && localFileSys.exists(dir)) {
FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
}
if (cluster != null) {
cluster.shutdown();
}
hostsFileWriter.cleanup();
}
}

View File

@ -29,14 +29,12 @@ import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.net.InetAddress;
import java.net.URI;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
@ -56,6 +54,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.util.HostsFileWriter;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.test.GenericTestUtils;
@ -568,27 +567,15 @@ public class TestStartup {
@Test
public void testNNRestart() throws IOException, InterruptedException {
MiniDFSCluster cluster = null;
FileSystem localFileSys;
Path hostsFile;
Path excludeFile;
int HEARTBEAT_INTERVAL = 1; // heartbeat interval in seconds
// Set up the hosts/exclude files.
localFileSys = FileSystem.getLocal(config);
Path workingDir = localFileSys.getWorkingDirectory();
Path dir = new Path(workingDir, "build/test/data/work-dir/restartnn");
hostsFile = new Path(dir, "hosts");
excludeFile = new Path(dir, "exclude");
// Setup conf
config.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
writeConfigFile(localFileSys, excludeFile, null);
config.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
// write into hosts file
ArrayList<String>list = new ArrayList<String>();
HostsFileWriter hostsFileWriter = new HostsFileWriter();
hostsFileWriter.initialize(config, "work-dir/restartnn");
byte b[] = {127, 0, 0, 1};
InetAddress inetAddress = InetAddress.getByAddress(b);
list.add(inetAddress.getHostName());
writeConfigFile(localFileSys, hostsFile, list);
hostsFileWriter.initIncludeHosts(new String[] {inetAddress.getHostName()});
int numDatanodes = 1;
try {
@ -613,37 +600,12 @@ public class TestStartup {
fail(StringUtils.stringifyException(e));
throw e;
} finally {
cleanupFile(localFileSys, excludeFile.getParent());
if (cluster != null) {
cluster.shutdown();
}
hostsFileWriter.cleanup();
}
}
private void writeConfigFile(FileSystem localFileSys, Path name,
ArrayList<String> nodes) throws IOException {
// delete if it already exists
if (localFileSys.exists(name)) {
localFileSys.delete(name, true);
}
FSDataOutputStream stm = localFileSys.create(name);
if (nodes != null) {
for (Iterator<String> it = nodes.iterator(); it.hasNext();) {
String node = it.next();
stm.writeBytes(node);
stm.writeBytes("\n");
}
}
stm.close();
}
private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
assertTrue(fileSys.exists(name));
fileSys.delete(name, true);
assertTrue(!fileSys.exists(name));
}
@Test(timeout = 120000)
public void testXattrConfiguration() throws Exception {

View File

@ -0,0 +1,169 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithUpgradeDomain;
import org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager;
import org.apache.hadoop.hdfs.server.blockmanagement.HostConfigManager;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.util.HostsFileWriter;
import org.apache.hadoop.net.StaticMapping;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* End-to-end test case for upgrade domain.
* The test configures upgrade domains for nodes via the admin JSON
* config file and puts some nodes into the decommissioned state.
* The test then verifies replicas are placed on the nodes that
* satisfy the upgrade domain policy.
*
*/
public class TestUpgradeDomainBlockPlacementPolicy {
private static final short REPLICATION_FACTOR = (short) 3;
private static final int DEFAULT_BLOCK_SIZE = 1024;
static final String[] racks =
{ "/RACK1", "/RACK1", "/RACK1", "/RACK2", "/RACK2", "/RACK2" };
/**
* Use host names that can be resolved (
* InetSocketAddress#isUnresolved == false). Otherwise,
* CombinedHostFileManager won't allow those hosts.
*/
static final String[] hosts =
{ "127.0.0.1", "127.0.0.1", "127.0.0.1", "127.0.0.1",
"127.0.0.1", "127.0.0.1" };
static final String[] upgradeDomains =
{ "ud1", "ud2", "ud3", "ud1", "ud2", "ud3" };
static final Set<DatanodeID> expectedDatanodeIDs = new HashSet<>();
private MiniDFSCluster cluster = null;
private NamenodeProtocols nameNodeRpc = null;
private FSNamesystem namesystem = null;
private PermissionStatus perm = null;
@Before
public void setup() throws IOException {
StaticMapping.resetMap();
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
BlockPlacementPolicyWithUpgradeDomain.class,
BlockPlacementPolicy.class);
conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
CombinedHostFileManager.class, HostConfigManager.class);
HostsFileWriter hostsFileWriter = new HostsFileWriter();
hostsFileWriter.initialize(conf, "temp/upgradedomainpolicy");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(6).racks(racks)
.hosts(hosts).build();
cluster.waitActive();
nameNodeRpc = cluster.getNameNodeRpc();
namesystem = cluster.getNamesystem();
perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null,
FsPermission.getDefault());
refreshDatanodeAdminProperties(hostsFileWriter);
hostsFileWriter.cleanup();
}
@After
public void teardown() {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
/**
* Define admin properties for these datanodes as follows.
* dn0 and dn3 have upgrade domain ud1.
* dn1 and dn4 have upgrade domain ud2.
* dn2 and dn5 have upgrade domain ud3.
* dn0 and dn5 are decommissioned.
* Given that dn0, dn1 and dn2 are on rack1 and dn3, dn4 and dn5 are on
* rack2, any block's replicas should be on either
* {dn1, dn2, dn3} or {dn2, dn3, dn4}.
*/
private void refreshDatanodeAdminProperties(HostsFileWriter hostsFileWriter)
throws IOException {
DatanodeAdminProperties[] datanodes = new DatanodeAdminProperties[
hosts.length];
for (int i = 0; i < hosts.length; i++) {
datanodes[i] = new DatanodeAdminProperties();
DatanodeID datanodeID = cluster.getDataNodes().get(i).getDatanodeId();
datanodes[i].setHostName(datanodeID.getHostName());
datanodes[i].setPort(datanodeID.getXferPort());
datanodes[i].setUpgradeDomain(upgradeDomains[i]);
}
datanodes[0].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
datanodes[5].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
hostsFileWriter.initIncludeHosts(datanodes);
cluster.getFileSystem().refreshNodes();
expectedDatanodeIDs.add(cluster.getDataNodes().get(2).getDatanodeId());
expectedDatanodeIDs.add(cluster.getDataNodes().get(3).getDatanodeId());
}
@Test
public void testPlacement() throws Exception {
String clientMachine = "127.0.0.1";
for (int i = 0; i < 5; i++) {
String src = "/test-" + i;
// Create the file with client machine
HdfsFileStatus fileStatus = namesystem.startFile(src, perm,
clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true,
REPLICATION_FACTOR, DEFAULT_BLOCK_SIZE, null, false);
LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,
null, null, fileStatus.getFileId(), null);
assertEquals("Block should be allocated sufficient locations",
REPLICATION_FACTOR, locatedBlock.getLocations().length);
Set<DatanodeInfo> locs = new HashSet<>(Arrays.asList(
locatedBlock.getLocations()));
      for (DatanodeID datanodeID : expectedDatanodeIDs) {
        // Assert membership; a bare contains() call verifies nothing.
        assertTrue("Block should have a replica on " + datanodeID,
            locs.contains(datanodeID));
      }
nameNodeRpc.abandonBlock(locatedBlock.getBlock(), fileStatus.getFileId(),
src, clientMachine);
}
}
}
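The placement constraint described in the javadoc above can be checked by hand. Below is a minimal stand-alone sketch (not part of this patch; the class name and the brute-force enumeration are illustrative) that lists every 3-node set drawn from the non-decommissioned datanodes and keeps those with three distinct upgrade domains spread over at least two racks:

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class UpgradeDomainPlacementSketch {
  public static void main(String[] args) {
    // Same topology as the test: dn0-dn2 on /RACK1, dn3-dn5 on /RACK2.
    String[] racks = {"/RACK1", "/RACK1", "/RACK1", "/RACK2", "/RACK2", "/RACK2"};
    String[] upgradeDomains = {"ud1", "ud2", "ud3", "ud1", "ud2", "ud3"};
    // dn0 and dn5 are decommissioned, so only these four nodes remain.
    List<Integer> live = Arrays.asList(1, 2, 3, 4);

    for (int a = 0; a < live.size(); a++) {
      for (int b = a + 1; b < live.size(); b++) {
        for (int c = b + 1; c < live.size(); c++) {
          int i = live.get(a), j = live.get(b), k = live.get(c);
          Set<String> uds = new HashSet<>(Arrays.asList(
              upgradeDomains[i], upgradeDomains[j], upgradeDomains[k]));
          Set<String> rs = new HashSet<>(Arrays.asList(
              racks[i], racks[j], racks[k]));
          if (uds.size() == 3 && rs.size() >= 2) {
            // Prints {dn1, dn2, dn3} and {dn2, dn3, dn4}; dn2 and dn3 are in
            // both sets, which is why expectedDatanodeIDs holds exactly them.
            System.out.println("valid set: dn" + i + ", dn" + j + ", dn" + k);
          }
        }
      }
    }
  }
}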

View File

@ -0,0 +1,122 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.util;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.HostConfigManager;
import org.apache.hadoop.hdfs.server.blockmanagement.HostFileManager;
import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import static org.junit.Assert.assertTrue;
/**
 * Test helper that writes datanode host information either as legacy
 * include/exclude files or as a single combined JSON file, depending on
 * which HostConfigManager implementation the configuration selects.
 */
public class HostsFileWriter {
private FileSystem localFileSys;
private Path fullDir;
private Path excludeFile;
private Path includeFile;
private Path combinedFile;
private boolean isLegacyHostsFile = false;
public void initialize(Configuration conf, String dir) throws IOException {
localFileSys = FileSystem.getLocal(conf);
Path workingDir = new Path(MiniDFSCluster.getBaseDirectory());
this.fullDir = new Path(workingDir, dir);
assertTrue(localFileSys.mkdirs(this.fullDir));
if (conf.getClass(
DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
HostFileManager.class, HostConfigManager.class).equals(
HostFileManager.class)) {
isLegacyHostsFile = true;
}
if (isLegacyHostsFile) {
excludeFile = new Path(fullDir, "exclude");
includeFile = new Path(fullDir, "include");
DFSTestUtil.writeFile(localFileSys, excludeFile, "");
DFSTestUtil.writeFile(localFileSys, includeFile, "");
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
} else {
combinedFile = new Path(fullDir, "all");
conf.set(DFSConfigKeys.DFS_HOSTS, combinedFile.toString());
}
}
public void initExcludeHost(String hostNameAndPort) throws IOException {
if (isLegacyHostsFile) {
DFSTestUtil.writeFile(localFileSys, excludeFile, hostNameAndPort);
} else {
DatanodeAdminProperties dn = new DatanodeAdminProperties();
String [] hostAndPort = hostNameAndPort.split(":");
dn.setHostName(hostAndPort[0]);
dn.setPort(Integer.parseInt(hostAndPort[1]));
dn.setAdminState(AdminStates.DECOMMISSIONED);
HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
allDNs.add(dn);
CombinedHostsFileWriter.writeFile(combinedFile.toString(), allDNs);
}
}
public void initIncludeHosts(String[] hostNameAndPorts) throws IOException {
StringBuilder includeHosts = new StringBuilder();
if (isLegacyHostsFile) {
for(String hostNameAndPort : hostNameAndPorts) {
includeHosts.append(hostNameAndPort).append("\n");
}
DFSTestUtil.writeFile(localFileSys, includeFile,
includeHosts.toString());
} else {
HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
for(String hostNameAndPort : hostNameAndPorts) {
String[] hostAndPort = hostNameAndPort.split(":");
DatanodeAdminProperties dn = new DatanodeAdminProperties();
dn.setHostName(hostAndPort[0]);
dn.setPort(Integer.parseInt(hostAndPort[1]));
allDNs.add(dn);
}
CombinedHostsFileWriter.writeFile(combinedFile.toString(), allDNs);
}
}
  // Only valid when CombinedHostFileManager is configured; the legacy
  // include file cannot carry per-datanode admin properties.
  public void initIncludeHosts(DatanodeAdminProperties[] datanodes)
      throws IOException {
CombinedHostsFileWriter.writeFile(combinedFile.toString(),
new HashSet<>(Arrays.asList(datanodes)));
}
public void cleanup() throws IOException {
if (localFileSys.exists(fullDir)) {
FileUtils.deleteQuietly(new File(fullDir.toUri().getPath()));
}
}
}
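For reference, a minimal usage sketch of the helper above (not part of this patch; the class name, directory name and host:port value are illustrative assumptions). With CombinedHostFileManager configured, initialize() writes a single JSON file and points dfs.hosts at it; with the default HostFileManager it falls back to the legacy include/exclude files:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager;
import org.apache.hadoop.hdfs.server.blockmanagement.HostConfigManager;
import org.apache.hadoop.hdfs.util.HostsFileWriter;

public class HostsFileWriterUsageSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new HdfsConfiguration();
    // Select the JSON-based host provider so the helper writes one combined file.
    conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
        CombinedHostFileManager.class, HostConfigManager.class);

    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/hosts-writer-sketch");
    // Mark one datanode (illustrative host:port) as excluded/decommissioned.
    hostsFileWriter.initExcludeHost("127.0.0.1:9866");
    // A test would now start a MiniDFSCluster with conf and call refreshNodes().
    hostsFileWriter.cleanup();
  }
}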

View File

@ -0,0 +1,79 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.util;
import java.io.File;
import java.io.FileWriter;
import java.util.Set;
import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
import org.junit.Before;
import org.junit.After;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
/*
 * Test for the JSON-based CombinedHostsFileReader.
 */
public class TestCombinedHostsFileReader {
  // Temporary files are stored under the directory given by the
  // test.build.data property (default /tmp).
static final String HOSTS_TEST_DIR = new File(System.getProperty(
"test.build.data", "/tmp")).getAbsolutePath();
File NEW_FILE = new File(HOSTS_TEST_DIR, "dfs.hosts.new.json");
static final String TEST_CACHE_DATA_DIR =
System.getProperty("test.cache.data", "build/test/cache");
File EXISTING_FILE = new File(TEST_CACHE_DATA_DIR, "dfs.hosts.json");
@Before
public void setUp() throws Exception {
}
@After
public void tearDown() throws Exception {
// Delete test file after running tests
NEW_FILE.delete();
}
/*
* Load the existing test json file
*/
@Test
public void testLoadExistingJsonFile() throws Exception {
Set<DatanodeAdminProperties> all =
CombinedHostsFileReader.readFile(EXISTING_FILE.getAbsolutePath());
assertEquals(5, all.size());
}
/*
* Test empty json config file
*/
@Test
public void testEmptyCombinedHostsFileReader() throws Exception {
FileWriter hosts = new FileWriter(NEW_FILE);
hosts.write("");
hosts.close();
Set<DatanodeAdminProperties> all =
CombinedHostsFileReader.readFile(NEW_FILE.getAbsolutePath());
assertEquals(0, all.size());
}
}
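A hedged round-trip sketch (not part of this patch; the path and field values are illustrative) of the writer/reader pair that the test above exercises, using only the writeFile/readFile calls that appear elsewhere in this diff:

import java.util.Collections;
import java.util.Set;

import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
import org.apache.hadoop.hdfs.util.CombinedHostsFileReader;
import org.apache.hadoop.hdfs.util.CombinedHostsFileWriter;

public class CombinedHostsFileRoundTripSketch {
  public static void main(String[] args) throws Exception {
    String path = "/tmp/dfs.hosts.roundtrip.json";   // illustrative path

    DatanodeAdminProperties dn = new DatanodeAdminProperties();
    dn.setHostName("127.0.0.1");
    dn.setPort(9866);                                // illustrative port
    dn.setUpgradeDomain("ud1");

    CombinedHostsFileWriter.writeFile(path, Collections.singleton(dn));

    // The reader returns one DatanodeAdminProperties per JSON entry.
    Set<DatanodeAdminProperties> all = CombinedHostsFileReader.readFile(path);
    System.out.println("entries read back: " + all.size());  // expect 1
  }
}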

Some files were not shown because too many files have changed in this diff.