Merge branch 'trunk' into HDFS-7240

commit 30473ecfb4
@@ -252,7 +252,7 @@ Building distributions:
 
 Create binary distribution without native code and without documentation:
 
-  $ mvn package -Pdist -DskipTests -Dtar
+  $ mvn package -Pdist -DskipTests -Dtar -Dmaven.javadoc.skip=true
 
 Create binary distribution with native code and with documentation:
 
dev-support/bin/dist-tools-hooks-maker  (new executable file, 182 lines)
@@ -0,0 +1,182 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+VERSION=${1:-3.0.0-SNAPSHOT}
+TARGETDIR=${2:-/tmp/target}
+TOOLSDIR=${3:-/tmp/tools}
+
+function getfilename
+{
+  declare module=$1
+  declare modtype=$2
+
+  if [[ ${modtype} = builtin ]]; then
+    echo "${TARGETDIR}/hadoop-${VERSION}/libexec/tools/${module}.sh"
+  else
+    echo "${TARGETDIR}/hadoop-${VERSION}/libexec/shellprofile.d/${module}.sh"
+  fi
+}
+
+function header
+{
+  declare fn=$1
+
+  cat >>"${fn}" <<-'TOKEN'
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+#
+# IMPORTANT: This file is automatically generated by hadoop-dist at
+# -Pdist time.
+#
+#
+TOKEN
+
+}
+
+function optional_prologue
+{
+  declare fn=$1
+  declare module=$2
+
+  if [[ -z "${OPTMODS}" ]]; then
+    OPTMODS=${module}
+  else
+    OPTMODS="${OPTMODS},${module}"
+  fi
+
+  {
+    echo "if hadoop_verify_entry HADOOP_TOOLS_OPTIONS \"${module}\"; then"
+    echo "  hadoop_add_profile \"${module}\""
+    echo "fi"
+    echo ""
+    echo "function _${module}_hadoop_classpath"
+    echo "{"
+  } >> "${fn}"
+}
+
+function builtin_prologue
+{
+  declare fn=$1
+  declare module=$2
+
+  {
+    echo ""
+    echo "function hadoop_classpath_tools_${module}"
+    echo "{"
+  } >> "${fn}"
+}
+
+function dependencywork
+{
+  declare fn=$1
+  declare module=$2
+  declare depfn=$3
+
+  declare depline
+  declare jarname
+
+  while read -r depline; do
+    jarname=$(echo "${depline}" | awk -F: '{print $2"-"$4".jar"}')
+
+    if [[ -f "${TARGETDIR}/hadoop-${VERSION}/share/hadoop/tools/lib/${jarname}" ]]; then
+      {
+        echo "  if [[ -f \"\${HADOOP_TOOLS_HOME}/\${HADOOP_TOOLS_LIB_JARS_DIR}/${jarname}\" ]]; then"
+        echo "    hadoop_add_classpath \"\${HADOOP_TOOLS_HOME}/\${HADOOP_TOOLS_LIB_JARS_DIR}/${jarname}\""
+        echo "  fi"
+      } >> "${fn}"
+
+    elif [[ -f "${TARGETDIR}/hadoop-${VERSION}/share/hadoop/common/${jarname}"
+        || -f "${TARGETDIR}/hadoop-${VERSION}/share/hadoop/common/lib/${jarname}" ]]; then
+      true
+    else
+      echo "ERROR: ${module} has missing dependencies: ${jarname}"
+    fi
+  done < <(grep compile "${depfn}")
+
+  {
+    echo "  hadoop_add_classpath \"\${HADOOP_TOOLS_HOME}/\${HADOOP_TOOLS_LIB_JARS_DIR}/${module}-${VERSION}.jar\""
+    echo "}"
+    echo ""
+  } >> "${fn}"
+}
+
+function document_optionals
+{
+  echo "Rewriting ${TARGETDIR}/hadoop-${VERSION}/etc/hadoop/hadoop-env.sh"
+  sed -e "s^@@@HADOOP_OPTIONAL_TOOLS@@@^${OPTMODS}^" \
+    "${TARGETDIR}/hadoop-${VERSION}/etc/hadoop/hadoop-env.sh" \
+    > "${TARGETDIR}/hadoop-${VERSION}/etc/hadoop/hadoop-env.sh.new"
+  mv "${TARGETDIR}/hadoop-${VERSION}/etc/hadoop/hadoop-env.sh.new" \
+    "${TARGETDIR}/hadoop-${VERSION}/etc/hadoop/hadoop-env.sh"
+}
+
+function process
+{
+  declare fn
+  declare basefn
+  declare modtype
+  declare module
+  declare newfile
+  declare newdir
+
+  while read -r fn; do
+    basefn=${fn##*/}
+    module=$(echo "${basefn}" | cut -f1 -d.)
+    modtype=$(echo "${basefn}" | cut -f2 -d.)
+    modtype=${modtype##tools-}
+
+    newfile=$(getfilename "${module}" "${modtype}")
+    newdir=$(dirname "${newfile}")
+    mkdir -p "${newdir}"
+
+    if [[ -f "${newfile}" ]]; then
+      rm "${newfile}"
+    fi
+
+    touch "${newfile}"
+
+    header "${newfile}" "${module}"
+
+    "${modtype}_prologue" "${newfile}" "${module}"
+
+    dependencywork "${newfile}" "${module}" "${fn}"
+
+    chmod a+rx "${newfile}"
+
+  done < <(find "${TOOLSDIR}" -name '*.tools-builtin.txt' -o -name '*.tools-optional.txt')
+
+  document_optionals
+}
+
+process
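A rough usage sketch for the new hook maker (these are just the script's own positional defaults restated; in a real build, hadoop-dist invokes it automatically at -Pdist time rather than by hand):

    # Generate tools hooks for an already-built distribution tree.
    # Args: version, target dir, tools dir -- all optional, defaults shown.
    dev-support/bin/dist-tools-hooks-maker 3.0.0-SNAPSHOT /tmp/target /tmp/tools
    # The script scans the tools dir for *.tools-builtin.txt / *.tools-optional.txt
    # dependency listings, writes libexec/tools/<module>.sh or
    # libexec/shellprofile.d/<module>.sh under the distribution, and then rewrites
    # @@@HADOOP_OPTIONAL_TOOLS@@@ in etc/hadoop/hadoop-env.sh.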
@@ -45,6 +45,14 @@ RUN apt-get update && apt-get install --no-install-recommends -y \
 # See http://wiki.apache.org/commons/VfsProblems
 RUN cd /usr/share/maven/lib && ln -s ../../java/commons-lang.jar .
 
+######
+# Install ISA-L library
+######
+RUN curl -L http://http.us.debian.org/debian/pool/main/libi/libisal/libisal2_2.15.0-2_amd64.deb \
+      -o /opt/libisal2_2.15.0-2_amd64.deb && \
+    dpkg -i /opt/libisal2_2.15.0-2_amd64.deb
+
+
 #######
 # Oracle Java
 #######
@@ -26,20 +26,10 @@
 import java.util.Properties;
 import java.text.ParseException;
 
-import java.io.ByteArrayInputStream;
-import java.io.UnsupportedEncodingException;
-import java.security.PublicKey;
-import java.security.cert.CertificateFactory;
-import java.security.cert.X509Certificate;
-import java.security.cert.CertificateException;
 import java.security.interfaces.RSAPublicKey;
 
-import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.security.authentication.server.AltKerberosAuthenticationHandler;
-import org.apache.hadoop.security.authentication.server.AuthenticationToken;
 import org.apache.hadoop.security.authentication.util.CertificateUtil;
-import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -83,7 +73,8 @@ public class JWTRedirectAuthenticationHandler extends
   private static Logger LOG = LoggerFactory
       .getLogger(JWTRedirectAuthenticationHandler.class);
 
-  public static final String AUTHENTICATION_PROVIDER_URL = "authentication.provider.url";
+  public static final String AUTHENTICATION_PROVIDER_URL =
+      "authentication.provider.url";
   public static final String PUBLIC_KEY_PEM = "public.key.pem";
   public static final String EXPECTED_JWT_AUDIENCES = "expected.jwt.audiences";
   public static final String JWT_COOKIE_NAME = "jwt.cookie.name";
@@ -205,7 +196,6 @@ public AuthenticationToken alternateAuthenticate(HttpServletRequest request,
   protected String getJWTFromCookie(HttpServletRequest req) {
     String serializedJWT = null;
     Cookie[] cookies = req.getCookies();
-    String userName = null;
     if (cookies != null) {
       for (Cookie cookie : cookies) {
         if (cookieName.equals(cookie.getName())) {
@@ -350,7 +340,7 @@ protected boolean validateExpiration(SignedJWT jwtToken) {
     boolean valid = false;
     try {
       Date expires = jwtToken.getJWTClaimsSet().getExpirationTime();
-      if (expires != null && new Date().before(expires)) {
+      if (expires == null || new Date().before(expires)) {
         LOG.debug("JWT token expiration date has been "
             + "successfully validated");
         valid = true;
@@ -13,19 +13,15 @@
  */
 package org.apache.hadoop.security.authentication.server;
 
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.File;
-import java.net.MalformedURLException;
-import java.net.URL;
 import java.security.KeyPair;
 import java.security.KeyPairGenerator;
 import java.security.NoSuchAlgorithmException;
 import java.security.interfaces.RSAPrivateKey;
 import java.security.interfaces.RSAPublicKey;
-import java.util.Arrays;
 import java.util.List;
 import java.util.ArrayList;
 import java.util.Properties;
@@ -50,8 +46,6 @@
 import com.nimbusds.jwt.JWTClaimsSet;
 import com.nimbusds.jwt.SignedJWT;
 import com.nimbusds.jose.crypto.RSASSASigner;
-import com.nimbusds.jose.crypto.RSASSAVerifier;
-import com.nimbusds.jose.util.Base64URL;
 
 public class TestJWTRedirectAuthentictionHandler extends
     KerberosSecurityTestcase {
@@ -261,6 +255,36 @@ public void testExpiredJWT() throws Exception {
     }
   }
 
+  @Test
+  public void testNoExpirationJWT() throws Exception {
+    try {
+      handler.setPublicKey(publicKey);
+
+      Properties props = getProperties();
+      handler.init(props);
+
+      SignedJWT jwt = getJWT("bob", null, privateKey);
+
+      Cookie cookie = new Cookie("hadoop-jwt", jwt.serialize());
+      HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+      Mockito.when(request.getCookies()).thenReturn(new Cookie[] { cookie });
+      Mockito.when(request.getRequestURL()).thenReturn(
+          new StringBuffer(SERVICE_URL));
+      HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+      Mockito.when(response.encodeRedirectURL(SERVICE_URL)).thenReturn(
+          SERVICE_URL);
+
+      AuthenticationToken token = handler.alternateAuthenticate(request,
+          response);
+      Assert.assertNotNull("Token should not be null.", token);
+      Assert.assertEquals("bob", token.getUserName());
+    } catch (ServletException se) {
+      fail("alternateAuthentication should NOT have thrown a ServletException");
+    } catch (AuthenticationException ae) {
+      fail("alternateAuthentication should NOT have thrown a AuthenticationException");
+    }
+  }
+
   @Test
   public void testInvalidAudienceJWT() throws Exception {
     try {
@@ -442,7 +466,6 @@ protected SignedJWT getJWT(String sub, Date expires, RSAPrivateKey privateKey)
     JWSHeader header = new JWSHeader.Builder(JWSAlgorithm.RS256).build();
 
     SignedJWT signedJWT = new SignedJWT(header, claimsSet);
-    Base64URL sigInput = Base64URL.encode(signedJWT.getSigningInput());
     JWSSigner signer = new RSASSASigner(privateKey);
 
     signedJWT.sign(signer);
@@ -114,7 +114,7 @@ case ${COMMAND} in
     ;;
     archive)
       CLASS=org.apache.hadoop.tools.HadoopArchives
-      hadoop_add_to_classpath_toolspath
+      hadoop_add_to_classpath_tools hadoop-archives
     ;;
     checknative)
       CLASS=org.apache.hadoop.util.NativeLibraryChecker
@@ -133,11 +133,11 @@ case ${COMMAND} in
     ;;
     distch)
      CLASS=org.apache.hadoop.tools.DistCh
-      hadoop_add_to_classpath_toolspath
+      hadoop_add_to_classpath_tools hadoop-extras
    ;;
    distcp)
      CLASS=org.apache.hadoop.tools.DistCp
-      hadoop_add_to_classpath_toolspath
+      hadoop_add_to_classpath_tools hadoop-distcp
    ;;
    envvars)
      echo "JAVA_HOME='${JAVA_HOME}'"
@@ -146,7 +146,9 @@ case ${COMMAND} in
      echo "HADOOP_COMMON_LIB_JARS_DIR='${HADOOP_COMMON_LIB_JARS_DIR}'"
      echo "HADOOP_COMMON_LIB_NATIVE_DIR='${HADOOP_COMMON_LIB_NATIVE_DIR}'"
      echo "HADOOP_CONF_DIR='${HADOOP_CONF_DIR}'"
-      echo "HADOOP_TOOLS_PATH='${HADOOP_TOOLS_PATH}'"
+      echo "HADOOP_TOOLS_HOME='${HADOOP_TOOLS_HOME}'"
+      echo "HADOOP_TOOLS_DIR='${HADOOP_TOOLS_DIR}'"
+      echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'"
      exit 0
    ;;
    fs)
@@ -41,6 +41,44 @@ function hadoop_debug
   fi
 }
 
+## @description  Given variable $1 delete $2 from it
+## @audience     public
+## @stability    stable
+## @replaceable  no
+function hadoop_delete_entry
+{
+  if [[ ${!1} =~ \ ${2}\  ]] ; then
+    hadoop_debug "Removing ${2} from ${1}"
+    eval "${1}"=\""${!1// ${2} }"\"
+  fi
+}
+
+## @description  Given variable $1 add $2 to it
+## @audience     public
+## @stability    stable
+## @replaceable  no
+function hadoop_add_entry
+{
+  if [[ ! ${!1} =~ \ ${2}\  ]] ; then
+    hadoop_debug "Adding ${2} to ${1}"
+    #shellcheck disable=SC2140
+    eval "${1}"=\""${!1} ${2} "\"
+  fi
+}
+
+## @description  Given variable $1 determine if $2 is in it
+## @audience     public
+## @stability    stable
+## @replaceable  no
+## @return       0 = yes, 1 = no
+function hadoop_verify_entry
+{
+  # this unfortunately can't really be tested by bats. :(
+  # so if this changes, be aware that unit tests effectively
+  # do this function in them
+  [[ ${!1} =~ \ ${2}\  ]]
+}
+
 ## @description  Add a subcommand to the usage output
 ## @audience     private
 ## @stability    evolving
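A minimal sketch of how these entry helpers behave once hadoop-functions.sh is sourced (the variable and values below are made-up illustrations):

    MYSET=""
    hadoop_add_entry MYSET alpha      # adds " alpha " to the space-delimited set
    hadoop_add_entry MYSET beta
    hadoop_add_entry MYSET alpha      # already present, so this is a no-op

    if hadoop_verify_entry MYSET beta; then
      echo "beta is registered"
    fi

    hadoop_delete_entry MYSET alpha   # removes alpha from the set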
@@ -264,10 +302,9 @@ function hadoop_bootstrap
   YARN_LIB_JARS_DIR=${YARN_LIB_JARS_DIR:-"share/hadoop/yarn/lib"}
   MAPRED_DIR=${MAPRED_DIR:-"share/hadoop/mapreduce"}
   MAPRED_LIB_JARS_DIR=${MAPRED_LIB_JARS_DIR:-"share/hadoop/mapreduce/lib"}
-  # setup a default HADOOP_TOOLS_PATH
-  hadoop_deprecate_envvar TOOL_PATH HADOOP_TOOLS_PATH
-  HADOOP_TOOLS_PATH=${HADOOP_TOOLS_PATH:-${HADOOP_PREFIX}/share/hadoop/tools/lib/*}
+  HADOOP_TOOLS_HOME=${HADOOP_TOOLS_HOME:-${HADOOP_PREFIX}}
+  HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"}
+  HADOOP_TOOLS_LIB_JARS_DIR=${HADOOP_TOOLS_LIB_JARS_DIR:-"${HADOOP_TOOLS_DIR}/lib"}
 
   # usage output set to zero
   hadoop_reset_usage
@@ -322,6 +359,7 @@ function hadoop_exec_hadoopenv
   if [[ -z "${HADOOP_ENV_PROCESSED}" ]]; then
     if [[ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]]; then
       export HADOOP_ENV_PROCESSED=true
+      # shellcheck disable=SC1090
       . "${HADOOP_CONF_DIR}/hadoop-env.sh"
     fi
   fi
@@ -334,6 +372,7 @@ function hadoop_exec_hadoopenv
 function hadoop_exec_userfuncs
 {
   if [[ -e "${HADOOP_CONF_DIR}/hadoop-user-functions.sh" ]]; then
+    # shellcheck disable=SC1090
     . "${HADOOP_CONF_DIR}/hadoop-user-functions.sh"
   fi
 }
@@ -348,6 +387,7 @@ function hadoop_exec_hadooprc
 {
   if [[ -f "${HOME}/.hadooprc" ]]; then
     hadoop_debug "Applying the user's .hadooprc"
+    # shellcheck disable=SC1090
     . "${HOME}/.hadooprc"
   fi
 }
@@ -373,11 +413,22 @@ function hadoop_import_shellprofiles
     files2=(${HADOOP_CONF_DIR}/shellprofile.d/*.sh)
   fi
 
+  # enable bundled shellprofiles that come
+  # from hadoop-tools.  This converts the user-facing HADOOP_OPTIONAL_TOOLS
+  # to the HADOOP_TOOLS_OPTIONS that the shell profiles expect.
+  # See dist-tools-hooks-maker for how the example HADOOP_OPTIONAL_TOOLS
+  # gets populated into hadoop-env.sh
+
+  for i in ${HADOOP_OPTIONAL_TOOLS//,/ }; do
+    hadoop_add_entry HADOOP_TOOLS_OPTIONS "${i}"
+  done
+
   for i in "${files1[@]}" "${files2[@]}"
   do
     if [[ -n "${i}"
       && -f "${i}" ]]; then
       hadoop_debug "Profiles: importing ${i}"
+      # shellcheck disable=SC1090
       . "${i}"
     fi
   done
@@ -945,34 +996,25 @@ function hadoop_add_common_to_classpath
   hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}"'/*'
 }
 
-## @description  Add the HADOOP_TOOLS_PATH to the classpath
+## @description  Run libexec/tools/module.sh to add to the classpath
 ## @description  environment
 ## @audience     private
 ## @stability    evolving
 ## @replaceable  yes
-function hadoop_add_to_classpath_toolspath
+## @param        module
+function hadoop_add_to_classpath_tools
 {
-  declare -a array
-  declare -i c=0
-  declare -i j
-  declare -i i
-  declare idx
+  declare module=$1
 
-  if [[ -n "${HADOOP_TOOLS_PATH}" ]]; then
-    hadoop_debug "Adding HADOOP_TOOLS_PATH to CLASSPATH"
-    oldifs=${IFS}
-    IFS=:
-    for idx in ${HADOOP_TOOLS_PATH}; do
-      array[${c}]=${idx}
-      ((c=c+1))
-    done
-    IFS=${oldifs}
-    ((j=c-1)) || ${QATESTMODE}
-
-    for ((i=0; i<=j; i++)); do
-      hadoop_add_classpath "${array[$i]}" after
-    done
-
+  if [[ -f "${HADOOP_LIBEXEC_DIR}/tools/${module}.sh" ]]; then
+    # shellcheck disable=SC1090
+    . "${HADOOP_LIBEXEC_DIR}/tools/${module}.sh"
+  else
+    hadoop_error "ERROR: Tools helper ${HADOOP_LIBEXEC_DIR}/tools/${module}.sh was not found."
+  fi
+
+  if declare -f hadoop_classpath_tools_${module} >/dev/null 2>&1; then
+    "hadoop_classpath_tools_${module}"
   fi
 }
 
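hadoop_add_to_classpath_tools sources libexec/tools/<module>.sh and then calls hadoop_classpath_tools_<module> if that function exists. A hand-written sketch of roughly what dist-tools-hooks-maker generates for a builtin module (the module name and dependency jar below are hypothetical placeholders):

    # libexec/tools/examplemodule.sh (generated hook; names are illustrative)
    function hadoop_classpath_tools_examplemodule
    {
      if [[ -f "${HADOOP_TOOLS_HOME}/${HADOOP_TOOLS_LIB_JARS_DIR}/somedep-1.0.jar" ]]; then
        hadoop_add_classpath "${HADOOP_TOOLS_HOME}/${HADOOP_TOOLS_LIB_JARS_DIR}/somedep-1.0.jar"
      fi
      hadoop_add_classpath "${HADOOP_TOOLS_HOME}/${HADOOP_TOOLS_LIB_JARS_DIR}/examplemodule-3.0.0-SNAPSHOT.jar"
    }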
hadoop-common-project/hadoop-common/src/main/bin/hadoop-layout.sh.example  (10 changed lines; mode changed from normal file to executable file)
@@ -1,3 +1,4 @@
+#!/usr/bin/env bash
 # Copyright 2014 The Apache Software Foundation
 #
 # Licensed to the Apache Software Foundation (ASF) under one
@@ -87,7 +88,10 @@
 # Misc paths
 ####
 
-# setup a default HADOOP_TOOLS_PATH, where things like distcp lives
+# This is where things like distcp, S3, and other things live
 # note that this path only gets added for certain commands and not
-# part of the general classpath
-# export HADOOP_TOOLS_PATH="${HADOOP_PREFIX}/share/hadoop/tools/lib/*"
+# part of the general classpath unless HADOOP_OPTIONAL_TOOLS is used
+# to configure them in
+# export HADOOP_TOOLS_HOME=${HADOOP_PREFIX}
+# export HADOOP_TOOLS_DIR=${HADOOP_TOOLS_DIR:-"share/hadoop/tools"}
+# export HADOOP_TOOLS_LIB_JARS_DIR=${HADOOP_TOOLS_LIB_JARS_DIR:-"${HADOOP_TOOLS_DIR}/lib"}
@@ -49,7 +49,7 @@
 # preferred.  Many sites configure these options outside of Hadoop,
 # such as in /etc/profile.d
 
 # The java implementation to use. By default, this environment
 # variable is REQUIRED on ALL platforms except OS X!
 # export JAVA_HOME=
 
@@ -64,15 +64,15 @@
 # path.
 # export HADOOP_CONF_DIR=$HADOOP_PREFIX/etc/hadoop
 
 # The maximum amount of heap to use (Java -Xmx).  If no unit
 # is provided, it will be converted to MB.  Daemons will
 # prefer any Xmx setting in their respective _OPT variable.
 # There is no default; the JVM will autoscale based upon machine
 # memory size.
 # export HADOOP_HEAPSIZE_MAX=
 
 # The minimum amount of heap to use (Java -Xms).  If no unit
 # is provided, it will be converted to MB.  Daemons will
 # prefer any Xms setting in their respective _OPT variable.
 # There is no default; the JVM will autoscale based upon machine
 # memory size.
@@ -107,8 +107,8 @@ case ${HADOOP_OS_TYPE} in
 esac
 
 # Extra Java runtime options for some Hadoop commands
 # and clients (i.e., hdfs dfs -blah).  These get appended to HADOOP_OPTS for
 # such commands.  In most cases, # this should be left empty and
 # let users supply it on the command line.
 # export HADOOP_CLIENT_OPTS=""
 
@@ -146,6 +146,11 @@ esac
 # names starting with a '-' are treated as negative matches. For example,
 # export HADOOP_CLIENT_CLASSLOADER_SYSTEM_CLASSES="-org.apache.hadoop.UserClass,java.,javax.,org.apache.hadoop."
 
+# Enable optional, bundled Hadoop features
+# This is a comma delimited list.  It may NOT be overridden via .hadooprc
+# Entries may be added/removed as needed.
+# export HADOOP_OPTIONAL_TOOLS="@@@HADOOP_OPTIONAL_TOOLS@@@"
+
 ###
 # Options for remote shell connectivity
 ###
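A hedged sketch of how this new knob is consumed (the module names are placeholders; the real default list is substituted for @@@HADOOP_OPTIONAL_TOOLS@@@ by dist-tools-hooks-maker at -Pdist time):

    export HADOOP_OPTIONAL_TOOLS="examplemodule,anothermodule"
    # At startup, hadoop_import_shellprofiles splits this comma list and calls
    # hadoop_add_entry HADOOP_TOOLS_OPTIONS for each entry; the generated shell
    # profiles then check it with hadoop_verify_entry before enabling themselves.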
@@ -181,7 +186,7 @@ esac
 # non-secure)
 #
 
 # Where (primarily) daemon log files are stored.  # $HADOOP_PREFIX/logs
 # by default.
 # Java property: hadoop.log.dir
 # export HADOOP_LOG_DIR=${HADOOP_PREFIX}/logs
@@ -201,7 +206,7 @@ esac
 # Java property: hadoop.root.logger
 # export HADOOP_ROOT_LOGGER=INFO,console
 
 # Default log4j setting for daemons spawned explicitly by
 # --daemon option of hadoop, hdfs, mapred and yarn command.
 # Java property: hadoop.root.logger
 # export HADOOP_DAEMON_ROOT_LOGGER=INFO,RFA
@@ -849,12 +849,8 @@ public void drain(String keyName) {
   }
 
   @VisibleForTesting
-  public int getEncKeyQueueSize(String keyName) throws IOException {
-    try {
-      return encKeyVersionQueue.getSize(keyName);
-    } catch (ExecutionException e) {
-      throw new IOException(e);
-    }
+  public int getEncKeyQueueSize(String keyName) {
+    return encKeyVersionQueue.getSize(keyName);
   }
 
   @Override
@@ -18,9 +18,11 @@
 package org.apache.hadoop.crypto.key.kms;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Map;
 import java.util.Queue;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.LinkedBlockingQueue;
@@ -240,13 +242,19 @@ public void drain(String keyName ) {
   }
 
   /**
-   * Get size of the Queue for keyName
+   * Get size of the Queue for keyName. This is only used in unit tests.
    * @param keyName the key name
    * @return int queue size
-   * @throws ExecutionException
    */
-  public int getSize(String keyName) throws ExecutionException {
-    return keyQueues.get(keyName).size();
+  public int getSize(String keyName) {
+    // We can't do keyQueues.get(keyName).size() here,
+    // since that will have the side effect of populating the cache.
+    Map<String, LinkedBlockingQueue<E>> map =
+        keyQueues.getAllPresent(Arrays.asList(keyName));
+    if (map.get(keyName) == null) {
+      return 0;
+    }
+    return map.get(keyName).size();
   }
 
   /**
@@ -20,7 +20,7 @@
 import java.io.Closeable;
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.lang.ref.PhantomReference;
+import java.lang.ref.WeakReference;
 import java.lang.ref.ReferenceQueue;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -3023,11 +3023,15 @@ public static final class Statistics {
      * need.
      */
     public static class StatisticsData {
-      volatile long bytesRead;
-      volatile long bytesWritten;
-      volatile int readOps;
-      volatile int largeReadOps;
-      volatile int writeOps;
+      private volatile long bytesRead;
+      private volatile long bytesWritten;
+      private volatile int readOps;
+      private volatile int largeReadOps;
+      private volatile int writeOps;
+      private volatile long bytesReadLocalHost;
+      private volatile long bytesReadDistanceOfOneOrTwo;
+      private volatile long bytesReadDistanceOfThreeOrFour;
+      private volatile long bytesReadDistanceOfFiveOrLarger;
 
       /**
        * Add another StatisticsData object to this one.
@@ -3038,6 +3042,12 @@ void add(StatisticsData other) {
         this.readOps += other.readOps;
         this.largeReadOps += other.largeReadOps;
         this.writeOps += other.writeOps;
+        this.bytesReadLocalHost += other.bytesReadLocalHost;
+        this.bytesReadDistanceOfOneOrTwo += other.bytesReadDistanceOfOneOrTwo;
+        this.bytesReadDistanceOfThreeOrFour +=
+            other.bytesReadDistanceOfThreeOrFour;
+        this.bytesReadDistanceOfFiveOrLarger +=
+            other.bytesReadDistanceOfFiveOrLarger;
       }
 
       /**
@@ -3049,6 +3059,12 @@ void negate() {
         this.readOps = -this.readOps;
         this.largeReadOps = -this.largeReadOps;
         this.writeOps = -this.writeOps;
+        this.bytesReadLocalHost = -this.bytesReadLocalHost;
+        this.bytesReadDistanceOfOneOrTwo = -this.bytesReadDistanceOfOneOrTwo;
+        this.bytesReadDistanceOfThreeOrFour =
+            -this.bytesReadDistanceOfThreeOrFour;
+        this.bytesReadDistanceOfFiveOrLarger =
+            -this.bytesReadDistanceOfFiveOrLarger;
       }
 
       @Override
@@ -3077,6 +3093,22 @@ public int getLargeReadOps() {
       public int getWriteOps() {
         return writeOps;
       }
+
+      public long getBytesReadLocalHost() {
+        return bytesReadLocalHost;
+      }
+
+      public long getBytesReadDistanceOfOneOrTwo() {
+        return bytesReadDistanceOfOneOrTwo;
+      }
+
+      public long getBytesReadDistanceOfThreeOrFour() {
+        return bytesReadDistanceOfThreeOrFour;
+      }
+
+      public long getBytesReadDistanceOfFiveOrLarger() {
+        return bytesReadDistanceOfFiveOrLarger;
+      }
     }
 
     private interface StatisticsAggregator<T> {
@@ -3101,7 +3133,7 @@ private interface StatisticsAggregator<T> {
 
     /**
      * Set of all thread-local data areas. Protected by the Statistics lock.
-     * The references to the statistics data are kept using phantom references
+     * The references to the statistics data are kept using weak references
      * to the associated threads. Proper clean-up is performed by the cleaner
      * thread when the threads are garbage collected.
      */
@@ -3154,11 +3186,11 @@ public Void aggregate() {
     }
 
     /**
-     * A phantom reference to a thread that also includes the data associated
+     * A weak reference to a thread that also includes the data associated
      * with that thread. On the thread being garbage collected, it is enqueued
      * to the reference queue for clean-up.
      */
-    private class StatisticsDataReference extends PhantomReference<Thread> {
+    private class StatisticsDataReference extends WeakReference<Thread> {
       private final StatisticsData data;
 
       public StatisticsDataReference(StatisticsData data, Thread thread) {
@@ -3267,6 +3299,33 @@ public void incrementWriteOps(int count) {
       getThreadStatistics().writeOps += count;
     }
 
+    /**
+     * Increment the bytes read by the network distance in the statistics
+     * In the common network topology setup, distance value should be an even
+     * number such as 0, 2, 4, 6. To make it more general, we group distance
+     * by {1, 2}, {3, 4} and {5 and beyond} for accounting.
+     * @param distance the network distance
+     * @param newBytes the additional bytes read
+     */
+    public void incrementBytesReadByDistance(int distance, long newBytes) {
+      switch (distance) {
+      case 0:
+        getThreadStatistics().bytesReadLocalHost += newBytes;
+        break;
+      case 1:
+      case 2:
+        getThreadStatistics().bytesReadDistanceOfOneOrTwo += newBytes;
+        break;
+      case 3:
+      case 4:
+        getThreadStatistics().bytesReadDistanceOfThreeOrFour += newBytes;
+        break;
+      default:
+        getThreadStatistics().bytesReadDistanceOfFiveOrLarger += newBytes;
+        break;
+      }
+    }
+
     /**
      * Apply the given aggregator to all StatisticsData objects associated with
      * this Statistics object.
@@ -3384,6 +3443,55 @@ public Integer aggregate() {
       });
     }
 
+    /**
+     * In the common network topology setup, distance value should be an even
+     * number such as 0, 2, 4, 6. To make it more general, we group distance
+     * by {1, 2}, {3, 4} and {5 and beyond} for accounting. So if the caller
+     * ask for bytes read for distance 2, the function will return the value
+     * for group {1, 2}.
+     * @param distance the network distance
+     * @return the total number of bytes read by the network distance
+     */
+    public long getBytesReadByDistance(int distance) {
+      long bytesRead;
+      switch (distance) {
+      case 0:
+        bytesRead = getData().getBytesReadLocalHost();
+        break;
+      case 1:
+      case 2:
+        bytesRead = getData().getBytesReadDistanceOfOneOrTwo();
+        break;
+      case 3:
+      case 4:
+        bytesRead = getData().getBytesReadDistanceOfThreeOrFour();
+        break;
+      default:
+        bytesRead = getData().getBytesReadDistanceOfFiveOrLarger();
+        break;
+      }
+      return bytesRead;
+    }
+
+    /**
+     * Get all statistics data
+     * MR or other frameworks can use the method to get all statistics at once.
+     * @return the StatisticsData
+     */
+    public StatisticsData getData() {
+      return visitAll(new StatisticsAggregator<StatisticsData>() {
+        private StatisticsData all = new StatisticsData();
+
+        @Override
+        public void accept(StatisticsData data) {
+          all.add(data);
+        }
+
+        public StatisticsData aggregate() {
+          return all;
+        }
+      });
+    }
+
     @Override
     public String toString() {
@@ -638,13 +638,27 @@ public static String getHostNameOfIP(String ipPort) {
 
   /**
    * Return hostname without throwing exception.
+   * The returned hostname String format is "hostname".
+   * @return hostname
+   */
+  public static String getLocalHostname() {
+    try {
+      return InetAddress.getLocalHost().getHostName();
+    } catch(UnknownHostException uhe) {
+      return "" + uhe;
+    }
+  }
+
+  /**
+   * Return hostname without throwing exception.
+   * The returned hostname String format is "hostname/ip address".
    * @return hostname
    */
   public static String getHostname() {
     try {return "" + InetAddress.getLocalHost();}
     catch(UnknownHostException uhe) {return "" + uhe;}
   }
 
   /**
    * Compose a "host:port" string from the address.
    */
@@ -369,6 +369,16 @@ protected boolean areChildrenLeaves() {
     int getNumOfLeaves() {
       return numOfLeaves;
     }
+
+    @Override
+    public int hashCode() {
+      return super.hashCode();
+    }
+
+    @Override
+    public boolean equals(Object to) {
+      return super.equals(to);
+    }
   } // end of InnerNode
 
   /**
@@ -607,9 +617,14 @@ public int getNumOfLeaves() {
    * or {@link Integer#MAX_VALUE} if node1 or node2 do not belong to the cluster
    */
   public int getDistance(Node node1, Node node2) {
-    if (node1 == node2) {
+    if ((node1 != null && node1.equals(node2)) ||
+        (node1 == null && node2 == null)) {
       return 0;
     }
+    if (node1 == null || node2 == null) {
+      LOG.warn("One of the nodes is a null pointer");
+      return Integer.MAX_VALUE;
+    }
     Node n1=node1, n2=node2;
     int dis = 0;
     netlock.readLock().lock();
@@ -112,7 +112,23 @@ private void set(String name, String location) {
   public static String getPath(Node node) {
     return node.getNetworkLocation() + PATH_SEPARATOR_STR + node.getName();
   }
+
+  @Override
+  public boolean equals(Object to) {
+    if (this == to) {
+      return true;
+    }
+    if (!(to instanceof NodeBase)) {
+      return false;
+    }
+    return getPath(this).equals(getPath((NodeBase)to));
+  }
+
+  @Override
+  public int hashCode() {
+    return getPath(this).hashCode();
+  }
 
   /** @return this node's path as its string representation */
   @Override
   public String toString() {
@@ -52,7 +52,7 @@
   || defined(__ppc64__) || defined(__ppc64le__) \
   || defined(__PPC64__) || defined(__PPC64LE__) \
   || defined(__ia64) || defined(__itanium__) || defined(_M_IA64) \
-  || defined(__s390x__) )  /* Detects 64 bits mode */
+  || defined(__s390x__) || defined(_LP64))  /* Detects 64 bits mode */
 #  define LZ4_ARCH64 1
 #else
 #  define LZ4_ARCH64 0
@@ -1577,7 +1577,7 @@
   <value>DEFAULT</value>
   <description>
     The hostname verifier to provide for HttpsURLConnections.
-    Valid values are: DEFAULT, STRICT, STRICT_I6, DEFAULT_AND_LOCALHOST and
+    Valid values are: DEFAULT, STRICT, STRICT_IE6, DEFAULT_AND_LOCALHOST and
     ALLOW_ALL
   </description>
 </property>
@@ -15,14 +15,7 @@
 Hadoop Groups Mapping
 ===================
 
-* [Hadoop Groups Mapping](#Hadoop_Groups_Mapping)
-    * [Overview](#Overview)
-    * [LDAP Groups Mapping](#LDAP_Groups_Mapping)
-        * [Active Directory](#Active_Directory)
-        * [POSIX Groups](#POSIX_Groups)
-        * [SSL](#SSL)
-    * [Composite Groups Mapping](#Composite_Groups_Mapping)
-        * [Multiple group mapping providers configuration sample](#Multiple_group_mapping_providers_configuration_sample)
+<!-- MACRO{toc|fromDepth=0|toDepth=3} -->
 
 Overview
 --------
@@ -61,6 +54,17 @@ For HDFS, the mapping of users to groups is performed on the NameNode. Thus, the
 
 Note that HDFS stores the user and group of a file or directory as strings; there is no conversion from user and group identity numbers as is conventional in Unix.
 
+Static Mapping
+--------
+It is possible to statically map users to groups by defining the mapping in `hadoop.user.group.static.mapping.overrides` in the format `user1=group1,group2;user2=;user3=group2`.
+This property overrides any group mapping service provider. If a user's groups are defined in it, the groups are returned without more lookups; otherwise, the service provider defined in `hadoop.security.group.mapping` is used to look up the groups. By default, `dr.who=;` is defined, so the fake user dr.who will not have any groups.
+
+Caching/Negative caching
+--------
+Since the group mapping resolution relies on external mechanisms, the NameNode performance may be impacted. To reduce the impact due to repeated lookups, Hadoop caches the groups returned by the service provider. The cache invalidate is configurable via `hadoop.security.groups.cache.secs`, and the default is 300 seconds.
+
+To avoid spamming NameNode with unknown users, Hadoop employs negative caching so that if the result of the lookup is empty, return an empty group directly instead of performing more group mapping queries.
+The cache invalidation is configurable via `hadoop.security.groups.negative-cache.secs`. The default is 30 seconds, so if group mapping service providers returns no group for a user, no lookup will be performed for the same user within 30 seconds.
+
 LDAP Groups Mapping
 --------
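As a quick spot-check of the effective mapping from a shell (assuming a running cluster; the user names are illustrative):

    # Ask the NameNode which groups it resolves for these users.
    hdfs groups user1 user3
    # With the default static override "dr.who=;", the fake user resolves to no groups.
    hdfs groups dr.who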
@@ -85,9 +89,9 @@ in order to be considered a member.
 The default configuration supports LDAP group name resolution with an Active Directory server.
 
 ### POSIX Groups ###
-If the LDAP server supports POSIX group semantics, Hadoop can perform LDAP group resolution queries to the server by setting both
-`hadoop.security.group.mapping.ldap.search.filter.user` to `posixAccount` and
-`hadoop.security.group.mapping.ldap.search.filter.group` to `posixGroup`.
+If the LDAP server supports POSIX group semantics (RFC-2307), Hadoop can perform LDAP group resolution queries to the server by setting both
+`hadoop.security.group.mapping.ldap.search.filter.user` to `(&(objectClass=posixAccount)(uid={0}))` and
+`hadoop.security.group.mapping.ldap.search.filter.group` to `(objectClass=posixGroup)`.
 
 ### SSL ###
 To secure the connection, the implementation supports LDAP over SSL (LDAPS). SSL is enable by setting `hadoop.security.group.mapping.ldap.ssl` to `true`.
@@ -22,11 +22,11 @@ Introduction
 
 This document describes how to configure Hadoop HTTP web-consoles to require user authentication.
 
-By default Hadoop HTTP web-consoles (JobTracker, NameNode, TaskTrackers and DataNodes) allow access without any form of authentication.
+By default Hadoop HTTP web-consoles (ResourceManager, NameNode, NodeManagers and DataNodes) allow access without any form of authentication.
 
 Hadoop HTTP web-consoles can be configured to require Kerberos authentication using HTTP SPNEGO protocol (supported by browsers like Firefox and Internet Explorer).
 
-In addition, Hadoop HTTP web-consoles support the equivalent of Hadoop's Pseudo/Simple authentication. If this option is enabled, the user name must be specified in the first browser interaction using the user.name query string parameter. e.g. `http://localhost:50030/jobtracker.jsp?user.name=babu`.
+In addition, Hadoop HTTP web-consoles support the equivalent of Hadoop's Pseudo/Simple authentication. If this option is enabled, the user name must be specified in the first browser interaction using the user.name query string parameter. e.g. `http://localhost:8088/cluster?user.name=babu`.
 
 If a custom authentication mechanism is required for the HTTP web-consoles, it is possible to implement a plugin to support the alternate authentication mechanism (refer to Hadoop hadoop-auth for details on writing an `AuthenticatorHandler`).
 
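For example, with simple authentication enabled, the first request can carry the user name on the query string and later requests can reuse the returned authentication cookie (hostname, port and user are illustrative):

    curl -c cookies.txt "http://localhost:8088/cluster?user.name=babu"
    curl -b cookies.txt "http://localhost:8088/cluster"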
@@ -43,7 +43,7 @@ The following properties should be in the `core-site.xml` of all the nodes in th
 | `hadoop.http.authentication.type` | `simple` | Defines authentication used for the HTTP web-consoles. The supported values are: `simple` \| `kerberos` \| `#AUTHENTICATION_HANDLER_CLASSNAME#`. |
 | `hadoop.http.authentication.token.validity` | `36000` | Indicates how long (in seconds) an authentication token is valid before it has to be renewed. |
 | `hadoop.http.authentication.token.max-inactive-interval` | `-1` (disabled) | Specifies the time, in seconds, between client requests the server will invalidate the token. |
-| `hadoop.http.authentication.signature.secret.file` | `$user.home/hadoop-http-auth-signature-secret` | The signature secret file for signing the authentication tokens. The same secret should be used for all nodes in the cluster, JobTracker, NameNode, DataNode and TastTracker. This file should be readable only by the Unix user running the daemons. |
+| `hadoop.http.authentication.signature.secret.file` | `$user.home/hadoop-http-auth-signature-secret` | The signature secret file for signing the authentication tokens. The same secret should be used for all nodes in the cluster, ResourceManager, NameNode, DataNode and NodeManager. This file should be readable only by the Unix user running the daemons. |
 | `hadoop.http.authentication.cookie.domain` | | The domain to use for the HTTP cookie that stores the authentication token. For authentication to work correctly across all nodes in the cluster the domain must be correctly set. There is no default value, the HTTP cookie will not have a domain working only with the hostname issuing the HTTP cookie. |
 | `hadoop.http.authentication.cookie.persistent` | `false` (session cookie) | Specifies the persistence of the HTTP cookie. If the value is true, the cookie is a persistent one. Otherwise, it is a session cookie. *IMPORTANT*: when using IP addresses, browsers ignore cookies with domain settings. For this setting to work properly all nodes in the cluster must be configured to generate URLs with `hostname.domain` names on it. |
 | `hadoop.http.authentication.simple.anonymous.allowed` | `true` | Indicates whether anonymous requests are allowed when using 'simple' authentication. |
@@ -19,7 +19,6 @@
 
 import java.io.IOException;
 import java.util.Queue;
-import java.util.concurrent.ExecutionException;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.TimeUnit;
 
@@ -158,17 +157,12 @@ public void testgetAtMostPolicyALL() throws Exception {
     GenericTestUtils.waitFor(new Supplier<Boolean>() {
       @Override
       public Boolean get() {
-        try {
         int size = vq.getSize("k1");
         if (size != 10) {
           LOG.info("Current ValueQueue size is " + size);
           return false;
         }
         return true;
-        } catch (ExecutionException e) {
-          LOG.error("Exception when getSize.", e);
-          return false;
-        }
       }
     }, 100, 3000);
     Assert.assertEquals("Failed in async call.", 10, filler.getTop().num);
@ -29,6 +29,22 @@
|
|||||||
import org.apache.hadoop.security.token.SecretManager;
|
import org.apache.hadoop.security.token.SecretManager;
|
||||||
import org.junit.Assert;
|
import org.junit.Assert;
|
||||||
|
|
||||||
|
import org.apache.hadoop.io.Text;
|
||||||
|
import org.apache.hadoop.ipc.protobuf.TestProtos;
|
||||||
|
import org.apache.hadoop.ipc.protobuf.TestRpcServiceProtos;
|
||||||
|
import org.apache.hadoop.net.NetUtils;
|
||||||
|
import org.apache.hadoop.security.KerberosInfo;
|
||||||
|
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
|
||||||
|
import org.apache.hadoop.security.UserGroupInformation;
|
||||||
|
import org.apache.hadoop.security.token.SecretManager;
|
||||||
|
import org.apache.hadoop.security.token.Token;
|
||||||
|
import org.apache.hadoop.security.token.TokenIdentifier;
|
||||||
|
import org.apache.hadoop.security.token.TokenInfo;
|
||||||
|
import org.apache.hadoop.security.token.TokenSelector;
|
||||||
|
import org.junit.Assert;
|
||||||
|
|
||||||
|
import java.io.DataInput;
|
||||||
|
import java.io.DataOutput;
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.lang.management.ManagementFactory;
|
import java.lang.management.ManagementFactory;
|
||||||
import java.lang.management.ThreadInfo;
|
import java.lang.management.ThreadInfo;
|
||||||
@ -37,6 +53,8 @@
|
|||||||
import java.net.URISyntaxException;
|
import java.net.URISyntaxException;
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
import java.util.Arrays;
|
import java.util.Arrays;
|
||||||
|
import java.util.Collection;
|
||||||
|
import java.util.Collections;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.concurrent.CountDownLatch;
|
import java.util.concurrent.CountDownLatch;
|
||||||
|
|
||||||
@ -149,6 +167,89 @@ protected static int countThreads(String search) {
|
|||||||
return count;
|
return count;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public static class TestTokenIdentifier extends TokenIdentifier {
|
||||||
|
private Text tokenid;
|
||||||
|
private Text realUser;
|
||||||
|
final static Text KIND_NAME = new Text("test.token");
|
||||||
|
|
||||||
|
public TestTokenIdentifier() {
|
||||||
|
this(new Text(), new Text());
|
||||||
|
}
|
||||||
|
public TestTokenIdentifier(Text tokenid) {
|
||||||
|
this(tokenid, new Text());
|
||||||
|
}
|
||||||
|
public TestTokenIdentifier(Text tokenid, Text realUser) {
|
||||||
|
this.tokenid = tokenid == null ? new Text() : tokenid;
|
||||||
|
this.realUser = realUser == null ? new Text() : realUser;
|
||||||
|
}
|
||||||
|
@Override
|
||||||
|
public Text getKind() {
|
||||||
|
return KIND_NAME;
|
||||||
|
}
|
||||||
|
@Override
|
||||||
|
public UserGroupInformation getUser() {
|
||||||
|
if (realUser.toString().isEmpty()) {
|
||||||
|
return UserGroupInformation.createRemoteUser(tokenid.toString());
|
||||||
|
} else {
|
||||||
|
UserGroupInformation realUgi = UserGroupInformation
|
||||||
|
.createRemoteUser(realUser.toString());
|
||||||
|
return UserGroupInformation
|
||||||
|
.createProxyUser(tokenid.toString(), realUgi);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void readFields(DataInput in) throws IOException {
|
||||||
|
tokenid.readFields(in);
|
||||||
|
realUser.readFields(in);
|
||||||
|
}
|
||||||
|
@Override
|
||||||
|
public void write(DataOutput out) throws IOException {
|
||||||
|
tokenid.write(out);
|
||||||
|
realUser.write(out);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public static class TestTokenSecretManager extends
|
||||||
|
SecretManager<TestTokenIdentifier> {
|
||||||
|
@Override
|
||||||
|
public byte[] createPassword(TestTokenIdentifier id) {
|
||||||
|
return id.getBytes();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public byte[] retrievePassword(TestTokenIdentifier id)
|
||||||
|
throws InvalidToken {
|
||||||
|
return id.getBytes();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public TestTokenIdentifier createIdentifier() {
|
||||||
|
return new TestTokenIdentifier();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public static class TestTokenSelector implements
|
||||||
|
TokenSelector<TestTokenIdentifier> {
|
||||||
|
@SuppressWarnings("unchecked")
|
||||||
|
@Override
|
||||||
|
public Token<TestTokenIdentifier> selectToken(Text service,
|
||||||
|
Collection<Token<? extends TokenIdentifier>> tokens) {
|
||||||
|
if (service == null) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
for (Token<? extends TokenIdentifier> token : tokens) {
|
||||||
|
if (TestTokenIdentifier.KIND_NAME.equals(token.getKind())
|
||||||
|
&& service.equals(token.getService())) {
|
||||||
|
return (Token<TestTokenIdentifier>) token;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@KerberosInfo(serverPrincipal = SERVER_PRINCIPAL_KEY)
|
||||||
|
@TokenInfo(TestTokenSelector.class)
|
||||||
@ProtocolInfo(protocolName = "org.apache.hadoop.ipc.TestRpcBase$TestRpcService",
|
@ProtocolInfo(protocolName = "org.apache.hadoop.ipc.TestRpcBase$TestRpcService",
|
||||||
protocolVersion = 1)
|
protocolVersion = 1)
|
||||||
public interface TestRpcService
|
public interface TestRpcService
|
||||||
@ -267,12 +368,80 @@ public TestProtos.EmptyResponseProto sleep(
|
|||||||
} catch (InterruptedException ignore) {}
|
} catch (InterruptedException ignore) {}
|
||||||
return TestProtos.EmptyResponseProto.newBuilder().build();
|
return TestProtos.EmptyResponseProto.newBuilder().build();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public TestProtos.AuthMethodResponseProto getAuthMethod(
|
||||||
|
RpcController controller, TestProtos.EmptyRequestProto request)
|
||||||
|
throws ServiceException {
|
||||||
|
AuthMethod authMethod = null;
|
||||||
|
try {
|
||||||
|
authMethod = UserGroupInformation.getCurrentUser()
|
||||||
|
.getAuthenticationMethod().getAuthMethod();
|
||||||
|
} catch (IOException e) {
|
||||||
|
throw new ServiceException(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
return TestProtos.AuthMethodResponseProto.newBuilder()
|
||||||
|
.setCode(authMethod.code)
|
||||||
|
.setMechanismName(authMethod.getMechanismName())
|
||||||
|
.build();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public TestProtos.AuthUserResponseProto getAuthUser(
|
||||||
|
RpcController controller, TestProtos.EmptyRequestProto request)
|
||||||
|
throws ServiceException {
|
||||||
|
UserGroupInformation authUser = null;
|
||||||
|
try {
|
||||||
|
authUser = UserGroupInformation.getCurrentUser();
|
||||||
|
} catch (IOException e) {
|
||||||
|
throw new ServiceException(e);
|
||||||
|
}
|
||||||
|
|
||||||
|
return TestProtos.AuthUserResponseProto.newBuilder()
|
||||||
|
.setAuthUser(authUser.getUserName())
|
||||||
|
.build();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public TestProtos.EchoResponseProto echoPostponed(
|
||||||
|
RpcController controller, TestProtos.EchoRequestProto request)
|
||||||
|
throws ServiceException {
|
||||||
|
Server.Call call = Server.getCurCall().get();
|
||||||
|
call.postponeResponse();
|
||||||
|
postponedCalls.add(call);
|
||||||
|
|
||||||
|
return TestProtos.EchoResponseProto.newBuilder().setMessage(
|
||||||
|
request.getMessage())
|
||||||
|
.build();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public TestProtos.EmptyResponseProto sendPostponed(
|
||||||
|
RpcController controller, TestProtos.EmptyRequestProto request)
|
||||||
|
throws ServiceException {
|
||||||
|
Collections.shuffle(postponedCalls);
|
||||||
|
try {
|
||||||
|
for (Server.Call call : postponedCalls) {
|
||||||
|
call.sendResponse();
|
||||||
|
}
|
||||||
|
} catch (IOException e) {
|
||||||
|
throw new ServiceException(e);
|
||||||
|
}
|
||||||
|
postponedCalls.clear();
|
||||||
|
|
||||||
|
return TestProtos.EmptyResponseProto.newBuilder().build();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
protected static TestProtos.EmptyRequestProto newEmptyRequest() {
|
protected static TestProtos.EmptyRequestProto newEmptyRequest() {
|
||||||
return TestProtos.EmptyRequestProto.newBuilder().build();
|
return TestProtos.EmptyRequestProto.newBuilder().build();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
protected static TestProtos.EmptyResponseProto newEmptyResponse() {
|
||||||
|
return TestProtos.EmptyResponseProto.newBuilder().build();
|
||||||
|
}
|
||||||
|
|
||||||
protected static TestProtos.EchoRequestProto newEchoRequest(String msg) {
|
protected static TestProtos.EchoRequestProto newEchoRequest(String msg) {
|
||||||
return TestProtos.EchoRequestProto.newBuilder().setMessage(msg).build();
|
return TestProtos.EchoRequestProto.newBuilder().setMessage(msg).build();
|
||||||
}
|
}
|
||||||
@ -290,6 +459,27 @@ protected static TestProtos.SlowPingRequestProto newSlowPingRequest(
|
|||||||
protected static TestProtos.SleepRequestProto newSleepRequest(
|
protected static TestProtos.SleepRequestProto newSleepRequest(
|
||||||
int milliSeconds) {
|
int milliSeconds) {
|
||||||
return TestProtos.SleepRequestProto.newBuilder()
|
return TestProtos.SleepRequestProto.newBuilder()
|
||||||
.setMilliSeconds(milliSeconds).build();
|
.setMilliSeconds(milliSeconds).build();
|
||||||
|
}
|
||||||
|
|
||||||
|
protected static TestProtos.EchoResponseProto newEchoResponse(String msg) {
|
||||||
|
return TestProtos.EchoResponseProto.newBuilder().setMessage(msg).build();
|
||||||
|
}
|
||||||
|
|
||||||
|
protected static AuthMethod convert(
|
||||||
|
TestProtos.AuthMethodResponseProto authMethodResponse) {
|
||||||
|
String mechanism = authMethodResponse.getMechanismName();
|
||||||
|
if (mechanism.equals(AuthMethod.SIMPLE.getMechanismName())) {
|
||||||
|
return AuthMethod.SIMPLE;
|
||||||
|
} else if (mechanism.equals(AuthMethod.KERBEROS.getMechanismName())) {
|
||||||
|
return AuthMethod.KERBEROS;
|
||||||
|
} else if (mechanism.equals(AuthMethod.TOKEN.getMechanismName())) {
|
||||||
|
return AuthMethod.TOKEN;
|
||||||
|
}
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
|
protected static String convert(TestProtos.AuthUserResponseProto response) {
|
||||||
|
return response.getAuthUser();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -18,53 +18,7 @@
|
|||||||
|
|
||||||
package org.apache.hadoop.ipc;
|
package org.apache.hadoop.ipc;
|
||||||
|
|
||||||
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION;
|
import com.google.protobuf.ServiceException;
|
||||||
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
|
|
||||||
import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.KERBEROS;
|
|
||||||
import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.SIMPLE;
|
|
||||||
import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.TOKEN;
|
|
||||||
import static org.junit.Assert.assertEquals;
|
|
||||||
import static org.junit.Assert.assertFalse;
|
|
||||||
import static org.junit.Assert.assertNotNull;
|
|
||||||
import static org.junit.Assert.assertNotSame;
|
|
||||||
import static org.junit.Assert.assertNull;
|
|
||||||
import static org.junit.Assert.assertTrue;
|
|
||||||
|
|
||||||
import java.io.DataInput;
|
|
||||||
import java.io.DataOutput;
|
|
||||||
import java.io.IOException;
|
|
||||||
import java.lang.annotation.Annotation;
|
|
||||||
import java.net.InetAddress;
|
|
||||||
import java.net.InetSocketAddress;
|
|
||||||
import java.security.PrivilegedExceptionAction;
|
|
||||||
import java.security.Security;
|
|
||||||
import java.util.ArrayList;
|
|
||||||
import java.util.Collection;
|
|
||||||
import java.util.Collections;
|
|
||||||
import java.util.HashMap;
|
|
||||||
import java.util.List;
|
|
||||||
import java.util.Map;
|
|
||||||
import java.util.Set;
|
|
||||||
import java.util.concurrent.Callable;
|
|
||||||
import java.util.concurrent.ExecutorService;
|
|
||||||
import java.util.concurrent.Executors;
|
|
||||||
import java.util.concurrent.Future;
|
|
||||||
import java.util.concurrent.TimeUnit;
|
|
||||||
import java.util.concurrent.TimeoutException;
|
|
||||||
import java.util.concurrent.atomic.AtomicInteger;
|
|
||||||
import java.util.regex.Pattern;
|
|
||||||
|
|
||||||
import javax.security.auth.callback.Callback;
|
|
||||||
import javax.security.auth.callback.CallbackHandler;
|
|
||||||
import javax.security.auth.callback.NameCallback;
|
|
||||||
import javax.security.auth.callback.PasswordCallback;
|
|
||||||
import javax.security.auth.callback.UnsupportedCallbackException;
|
|
||||||
import javax.security.sasl.AuthorizeCallback;
|
|
||||||
import javax.security.sasl.Sasl;
|
|
||||||
import javax.security.sasl.SaslClient;
|
|
||||||
import javax.security.sasl.SaslException;
|
|
||||||
import javax.security.sasl.SaslServer;
|
|
||||||
|
|
||||||
import org.apache.commons.lang.StringUtils;
|
import org.apache.commons.lang.StringUtils;
|
||||||
import org.apache.commons.logging.Log;
|
import org.apache.commons.logging.Log;
|
||||||
import org.apache.commons.logging.LogFactory;
|
import org.apache.commons.logging.LogFactory;
|
||||||
@ -74,27 +28,13 @@
|
|||||||
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
|
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
|
||||||
import org.apache.hadoop.io.Text;
|
import org.apache.hadoop.io.Text;
|
||||||
import org.apache.hadoop.ipc.Client.ConnectionId;
|
import org.apache.hadoop.ipc.Client.ConnectionId;
|
||||||
import org.apache.hadoop.ipc.Server.Call;
|
|
||||||
import org.apache.hadoop.net.NetUtils;
|
import org.apache.hadoop.net.NetUtils;
|
||||||
import org.apache.hadoop.security.KerberosInfo;
|
import org.apache.hadoop.security.*;
|
||||||
import org.apache.hadoop.security.SaslInputStream;
|
|
||||||
import org.apache.hadoop.security.SaslPlainServer;
|
|
||||||
import org.apache.hadoop.security.SaslPropertiesResolver;
|
|
||||||
import org.apache.hadoop.security.SaslRpcClient;
|
|
||||||
import org.apache.hadoop.security.SaslRpcServer;
|
|
||||||
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
|
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
|
||||||
import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
|
import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
|
||||||
import org.apache.hadoop.security.SecurityInfo;
|
|
||||||
import org.apache.hadoop.security.SecurityUtil;
|
|
||||||
import org.apache.hadoop.security.TestUserGroupInformation;
|
|
||||||
import org.apache.hadoop.security.UserGroupInformation;
|
|
||||||
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
|
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
|
||||||
import org.apache.hadoop.security.token.SecretManager;
|
import org.apache.hadoop.security.token.*;
|
||||||
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
|
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
|
||||||
import org.apache.hadoop.security.token.Token;
|
|
||||||
import org.apache.hadoop.security.token.TokenIdentifier;
|
|
||||||
import org.apache.hadoop.security.token.TokenInfo;
|
|
||||||
import org.apache.hadoop.security.token.TokenSelector;
|
|
||||||
import org.apache.log4j.Level;
|
import org.apache.log4j.Level;
|
||||||
import org.junit.Assert;
|
import org.junit.Assert;
|
||||||
import org.junit.Before;
|
import org.junit.Before;
|
||||||
@ -104,9 +44,27 @@
|
|||||||
import org.junit.runners.Parameterized;
|
import org.junit.runners.Parameterized;
|
||||||
import org.junit.runners.Parameterized.Parameters;
|
import org.junit.runners.Parameterized.Parameters;
|
||||||
|
|
||||||
|
import javax.security.auth.callback.*;
|
||||||
|
import javax.security.sasl.*;
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.lang.annotation.Annotation;
|
||||||
|
import java.net.InetAddress;
|
||||||
|
import java.net.InetSocketAddress;
|
||||||
|
import java.security.PrivilegedExceptionAction;
|
||||||
|
import java.security.Security;
|
||||||
|
import java.util.*;
|
||||||
|
import java.util.concurrent.*;
|
||||||
|
import java.util.concurrent.atomic.AtomicInteger;
|
||||||
|
import java.util.regex.Pattern;
|
||||||
|
|
||||||
|
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION;
|
||||||
|
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
|
||||||
|
import static org.apache.hadoop.security.SaslRpcServer.AuthMethod.*;
|
||||||
|
import static org.junit.Assert.*;
|
||||||
|
|
||||||
/** Unit tests for using Sasl over RPC. */
|
/** Unit tests for using Sasl over RPC. */
|
||||||
@RunWith(Parameterized.class)
|
@RunWith(Parameterized.class)
|
||||||
public class TestSaslRPC {
|
public class TestSaslRPC extends TestRpcBase {
|
||||||
@Parameters
|
@Parameters
|
||||||
public static Collection<Object[]> data() {
|
public static Collection<Object[]> data() {
|
||||||
Collection<Object[]> params = new ArrayList<Object[]>();
|
Collection<Object[]> params = new ArrayList<Object[]>();
|
||||||
@ -135,18 +93,14 @@ public TestSaslRPC(QualityOfProtection[] qop,
|
|||||||
this.expectedQop = expectedQop;
|
this.expectedQop = expectedQop;
|
||||||
this.saslPropertiesResolver = saslPropertiesResolver;
|
this.saslPropertiesResolver = saslPropertiesResolver;
|
||||||
}
|
}
|
||||||
|
|
||||||
private static final String ADDRESS = "0.0.0.0";
|
|
||||||
|
|
||||||
public static final Log LOG =
|
public static final Log LOG =
|
||||||
LogFactory.getLog(TestSaslRPC.class);
|
LogFactory.getLog(TestSaslRPC.class);
|
||||||
|
|
||||||
static final String ERROR_MESSAGE = "Token is invalid";
|
static final String ERROR_MESSAGE = "Token is invalid";
|
||||||
static final String SERVER_PRINCIPAL_KEY = "test.ipc.server.principal";
|
|
||||||
static final String SERVER_KEYTAB_KEY = "test.ipc.server.keytab";
|
static final String SERVER_KEYTAB_KEY = "test.ipc.server.keytab";
|
||||||
static final String SERVER_PRINCIPAL_1 = "p1/foo@BAR";
|
static final String SERVER_PRINCIPAL_1 = "p1/foo@BAR";
|
||||||
static final String SERVER_PRINCIPAL_2 = "p2/foo@BAR";
|
|
||||||
private static Configuration conf;
|
|
||||||
// If this is set to true AND the auth-method is not simple, secretManager
|
// If this is set to true AND the auth-method is not simple, secretManager
|
||||||
// will be enabled.
|
// will be enabled.
|
||||||
static Boolean enableSecretManager = null;
|
static Boolean enableSecretManager = null;
|
||||||
@ -155,7 +109,7 @@ public TestSaslRPC(QualityOfProtection[] qop,
|
|||||||
static Boolean forceSecretManager = null;
|
static Boolean forceSecretManager = null;
|
||||||
static Boolean clientFallBackToSimpleAllowed = true;
|
static Boolean clientFallBackToSimpleAllowed = true;
|
||||||
|
|
||||||
static enum UseToken {
|
enum UseToken {
|
||||||
NONE(),
|
NONE(),
|
||||||
VALID(),
|
VALID(),
|
||||||
INVALID(),
|
INVALID(),
|
||||||
@ -174,6 +128,7 @@ public void setup() {
|
|||||||
LOG.info("---------------------------------");
|
LOG.info("---------------------------------");
|
||||||
LOG.info("Testing QOP:"+ getQOPNames(qop));
|
LOG.info("Testing QOP:"+ getQOPNames(qop));
|
||||||
LOG.info("---------------------------------");
|
LOG.info("---------------------------------");
|
||||||
|
|
||||||
conf = new Configuration();
|
conf = new Configuration();
|
||||||
// the specific tests for kerberos will enable kerberos. forcing it
|
// the specific tests for kerberos will enable kerberos. forcing it
|
||||||
// for all tests will cause tests to fail if the user has a TGT
|
// for all tests will cause tests to fail if the user has a TGT
|
||||||
@ -187,6 +142,9 @@ public void setup() {
|
|||||||
enableSecretManager = null;
|
enableSecretManager = null;
|
||||||
forceSecretManager = null;
|
forceSecretManager = null;
|
||||||
clientFallBackToSimpleAllowed = true;
|
clientFallBackToSimpleAllowed = true;
|
||||||
|
|
||||||
|
// Set RPC engine to protobuf RPC engine
|
||||||
|
RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
|
||||||
}
|
}
|
||||||
|
|
||||||
static String getQOPNames (QualityOfProtection[] qops){
|
static String getQOPNames (QualityOfProtection[] qops){
|
||||||
@ -210,68 +168,6 @@ static String getQOPNames (QualityOfProtection[] qops){
|
|||||||
((Log4JLogger) SecurityUtil.LOG).getLogger().setLevel(Level.ALL);
|
((Log4JLogger) SecurityUtil.LOG).getLogger().setLevel(Level.ALL);
|
||||||
}
|
}
|
||||||
|
|
||||||
public static class TestTokenIdentifier extends TokenIdentifier {
|
|
||||||
private Text tokenid;
|
|
||||||
private Text realUser;
|
|
||||||
final static Text KIND_NAME = new Text("test.token");
|
|
||||||
|
|
||||||
public TestTokenIdentifier() {
|
|
||||||
this(new Text(), new Text());
|
|
||||||
}
|
|
||||||
public TestTokenIdentifier(Text tokenid) {
|
|
||||||
this(tokenid, new Text());
|
|
||||||
}
|
|
||||||
public TestTokenIdentifier(Text tokenid, Text realUser) {
|
|
||||||
this.tokenid = tokenid == null ? new Text() : tokenid;
|
|
||||||
this.realUser = realUser == null ? new Text() : realUser;
|
|
||||||
}
|
|
||||||
@Override
|
|
||||||
public Text getKind() {
|
|
||||||
return KIND_NAME;
|
|
||||||
}
|
|
||||||
@Override
|
|
||||||
public UserGroupInformation getUser() {
|
|
||||||
if (realUser.toString().isEmpty()) {
|
|
||||||
return UserGroupInformation.createRemoteUser(tokenid.toString());
|
|
||||||
} else {
|
|
||||||
UserGroupInformation realUgi = UserGroupInformation
|
|
||||||
.createRemoteUser(realUser.toString());
|
|
||||||
return UserGroupInformation
|
|
||||||
.createProxyUser(tokenid.toString(), realUgi);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public void readFields(DataInput in) throws IOException {
|
|
||||||
tokenid.readFields(in);
|
|
||||||
realUser.readFields(in);
|
|
||||||
}
|
|
||||||
@Override
|
|
||||||
public void write(DataOutput out) throws IOException {
|
|
||||||
tokenid.write(out);
|
|
||||||
realUser.write(out);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
public static class TestTokenSecretManager extends
|
|
||||||
SecretManager<TestTokenIdentifier> {
|
|
||||||
@Override
|
|
||||||
public byte[] createPassword(TestTokenIdentifier id) {
|
|
||||||
return id.getBytes();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public byte[] retrievePassword(TestTokenIdentifier id)
|
|
||||||
throws InvalidToken {
|
|
||||||
return id.getBytes();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public TestTokenIdentifier createIdentifier() {
|
|
||||||
return new TestTokenIdentifier();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
public static class BadTokenSecretManager extends TestTokenSecretManager {
|
public static class BadTokenSecretManager extends TestTokenSecretManager {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
@ -281,64 +177,6 @@ public byte[] retrievePassword(TestTokenIdentifier id)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public static class TestTokenSelector implements
|
|
||||||
TokenSelector<TestTokenIdentifier> {
|
|
||||||
@SuppressWarnings("unchecked")
|
|
||||||
@Override
|
|
||||||
public Token<TestTokenIdentifier> selectToken(Text service,
|
|
||||||
Collection<Token<? extends TokenIdentifier>> tokens) {
|
|
||||||
if (service == null) {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
for (Token<? extends TokenIdentifier> token : tokens) {
|
|
||||||
if (TestTokenIdentifier.KIND_NAME.equals(token.getKind())
|
|
||||||
&& service.equals(token.getService())) {
|
|
||||||
return (Token<TestTokenIdentifier>) token;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@KerberosInfo(
|
|
||||||
serverPrincipal = SERVER_PRINCIPAL_KEY)
|
|
||||||
@TokenInfo(TestTokenSelector.class)
|
|
||||||
public interface TestSaslProtocol extends TestRPC.TestProtocol {
|
|
||||||
public AuthMethod getAuthMethod() throws IOException;
|
|
||||||
public String getAuthUser() throws IOException;
|
|
||||||
public String echoPostponed(String value) throws IOException;
|
|
||||||
public void sendPostponed() throws IOException;
|
|
||||||
}
|
|
||||||
|
|
||||||
public static class TestSaslImpl extends TestRPC.TestImpl implements
|
|
||||||
TestSaslProtocol {
|
|
||||||
private List<Call> postponedCalls = new ArrayList<Call>();
|
|
||||||
@Override
|
|
||||||
public AuthMethod getAuthMethod() throws IOException {
|
|
||||||
return UserGroupInformation.getCurrentUser()
|
|
||||||
.getAuthenticationMethod().getAuthMethod();
|
|
||||||
}
|
|
||||||
@Override
|
|
||||||
public String getAuthUser() throws IOException {
|
|
||||||
return UserGroupInformation.getCurrentUser().getUserName();
|
|
||||||
}
|
|
||||||
@Override
|
|
||||||
public String echoPostponed(String value) {
|
|
||||||
Call call = Server.getCurCall().get();
|
|
||||||
call.postponeResponse();
|
|
||||||
postponedCalls.add(call);
|
|
||||||
return value;
|
|
||||||
}
|
|
||||||
@Override
|
|
||||||
public void sendPostponed() throws IOException {
|
|
||||||
Collections.shuffle(postponedCalls);
|
|
||||||
for (Call call : postponedCalls) {
|
|
||||||
call.sendResponse();
|
|
||||||
}
|
|
||||||
postponedCalls.clear();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
public static class CustomSecurityInfo extends SecurityInfo {
|
public static class CustomSecurityInfo extends SecurityInfo {
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
@ -363,8 +201,8 @@ public String clientPrincipal() {
|
|||||||
public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
|
public TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
|
||||||
return new TokenInfo() {
|
return new TokenInfo() {
|
||||||
@Override
|
@Override
|
||||||
public Class<? extends TokenSelector<? extends
|
public Class<? extends TokenSelector<? extends
|
||||||
TokenIdentifier>> value() {
|
TokenIdentifier>> value() {
|
||||||
return TestTokenSelector.class;
|
return TestTokenSelector.class;
|
||||||
}
|
}
|
||||||
@Override
|
@Override
|
||||||
@ -378,10 +216,7 @@ public Class<? extends Annotation> annotationType() {
|
|||||||
@Test
|
@Test
|
||||||
public void testDigestRpc() throws Exception {
|
public void testDigestRpc() throws Exception {
|
||||||
TestTokenSecretManager sm = new TestTokenSecretManager();
|
TestTokenSecretManager sm = new TestTokenSecretManager();
|
||||||
final Server server = new RPC.Builder(conf)
|
final Server server = setupTestServer(conf, 5, sm);
|
||||||
.setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl())
|
|
||||||
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
|
|
||||||
.setSecretManager(sm).build();
|
|
||||||
|
|
||||||
doDigestRpc(server, sm);
|
doDigestRpc(server, sm);
|
||||||
}
|
}
|
||||||
@ -391,10 +226,7 @@ public void testDigestRpcWithoutAnnotation() throws Exception {
|
|||||||
TestTokenSecretManager sm = new TestTokenSecretManager();
|
TestTokenSecretManager sm = new TestTokenSecretManager();
|
||||||
try {
|
try {
|
||||||
SecurityUtil.setSecurityInfoProviders(new CustomSecurityInfo());
|
SecurityUtil.setSecurityInfoProviders(new CustomSecurityInfo());
|
||||||
final Server server = new RPC.Builder(conf)
|
final Server server = setupTestServer(conf, 5, sm);
|
||||||
.setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl())
|
|
||||||
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5)
|
|
||||||
.setVerbose(true).setSecretManager(sm).build();
|
|
||||||
doDigestRpc(server, sm);
|
doDigestRpc(server, sm);
|
||||||
} finally {
|
} finally {
|
||||||
SecurityUtil.setSecurityInfoProviders(new SecurityInfo[0]);
|
SecurityUtil.setSecurityInfoProviders(new SecurityInfo[0]);
|
||||||
@ -404,59 +236,47 @@ public void testDigestRpcWithoutAnnotation() throws Exception {
|
|||||||
@Test
|
@Test
|
||||||
public void testErrorMessage() throws Exception {
|
public void testErrorMessage() throws Exception {
|
||||||
BadTokenSecretManager sm = new BadTokenSecretManager();
|
BadTokenSecretManager sm = new BadTokenSecretManager();
|
||||||
final Server server = new RPC.Builder(conf)
|
final Server server = setupTestServer(conf, 5, sm);
|
||||||
.setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl())
|
|
||||||
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
|
|
||||||
.setSecretManager(sm).build();
|
|
||||||
|
|
||||||
boolean succeeded = false;
|
boolean succeeded = false;
|
||||||
try {
|
try {
|
||||||
doDigestRpc(server, sm);
|
doDigestRpc(server, sm);
|
||||||
} catch (RemoteException e) {
|
} catch (ServiceException e) {
|
||||||
LOG.info("LOGGING MESSAGE: " + e.getLocalizedMessage());
|
assertTrue(e.getCause() instanceof RemoteException);
|
||||||
assertEquals(ERROR_MESSAGE, e.getLocalizedMessage());
|
RemoteException re = (RemoteException) e.getCause();
|
||||||
assertTrue(e.unwrapRemoteException() instanceof InvalidToken);
|
LOG.info("LOGGING MESSAGE: " + re.getLocalizedMessage());
|
||||||
|
assertEquals(ERROR_MESSAGE, re.getLocalizedMessage());
|
||||||
|
assertTrue(re.unwrapRemoteException() instanceof InvalidToken);
|
||||||
succeeded = true;
|
succeeded = true;
|
||||||
}
|
}
|
||||||
assertTrue(succeeded);
|
assertTrue(succeeded);
|
||||||
}
|
}
|
||||||
|
|
||||||
private void doDigestRpc(Server server, TestTokenSecretManager sm
|
private void doDigestRpc(Server server, TestTokenSecretManager sm)
|
||||||
) throws Exception {
|
throws Exception {
|
||||||
server.start();
|
|
||||||
|
|
||||||
final UserGroupInformation current = UserGroupInformation.getCurrentUser();
|
final UserGroupInformation current = UserGroupInformation.getCurrentUser();
|
||||||
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
|
addr = NetUtils.getConnectAddress(server);
|
||||||
TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
|
TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
|
||||||
.getUserName()));
|
.getUserName()));
|
||||||
Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
|
Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId, sm);
|
||||||
sm);
|
|
||||||
SecurityUtil.setTokenService(token, addr);
|
SecurityUtil.setTokenService(token, addr);
|
||||||
current.addToken(token);
|
current.addToken(token);
|
||||||
|
|
||||||
TestSaslProtocol proxy = null;
|
TestRpcService proxy = null;
|
||||||
try {
|
try {
|
||||||
proxy = RPC.getProxy(TestSaslProtocol.class,
|
proxy = getClient(addr, conf);
|
||||||
TestSaslProtocol.versionID, addr, conf);
|
AuthMethod authMethod = convert(
|
||||||
AuthMethod authMethod = proxy.getAuthMethod();
|
proxy.getAuthMethod(null, newEmptyRequest()));
|
||||||
assertEquals(TOKEN, authMethod);
|
assertEquals(TOKEN, authMethod);
|
||||||
//QOP must be auth
|
//QOP must be auth
|
||||||
assertEquals(expectedQop.saslQop,
|
assertEquals(expectedQop.saslQop,
|
||||||
RPC.getConnectionIdForProxy(proxy).getSaslQop());
|
RPC.getConnectionIdForProxy(proxy).getSaslQop());
|
||||||
proxy.ping();
|
proxy.ping(null, newEmptyRequest());
|
||||||
} finally {
|
} finally {
|
||||||
server.stop();
|
stop(server, proxy);
|
||||||
if (proxy != null) {
|
|
||||||
RPC.stopProxy(proxy);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static ConnectionId getConnectionId(Configuration conf) throws IOException {
|
|
||||||
return ConnectionId.getConnectionId(new InetSocketAddress(0),
|
|
||||||
TestSaslProtocol.class, null, 0, null, conf);
|
|
||||||
}
|
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testPingInterval() throws Exception {
|
public void testPingInterval() throws Exception {
|
||||||
Configuration newConf = new Configuration(conf);
|
Configuration newConf = new Configuration(conf);
|
||||||
@ -466,29 +286,26 @@ public void testPingInterval() throws Exception {
|
|||||||
|
|
||||||
// set doPing to true
|
// set doPing to true
|
||||||
newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
|
newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
|
||||||
ConnectionId remoteId = getConnectionId(newConf);
|
ConnectionId remoteId = ConnectionId.getConnectionId(new InetSocketAddress(0),
|
||||||
|
TestRpcService.class, null, 0, null, newConf);
|
||||||
assertEquals(CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT,
|
assertEquals(CommonConfigurationKeys.IPC_PING_INTERVAL_DEFAULT,
|
||||||
remoteId.getPingInterval());
|
remoteId.getPingInterval());
|
||||||
// set doPing to false
|
// set doPing to false
|
||||||
newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, false);
|
newConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, false);
|
||||||
remoteId = getConnectionId(newConf);
|
remoteId = ConnectionId.getConnectionId(new InetSocketAddress(0),
|
||||||
|
TestRpcService.class, null, 0, null, newConf);
|
||||||
assertEquals(0, remoteId.getPingInterval());
|
assertEquals(0, remoteId.getPingInterval());
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testPerConnectionConf() throws Exception {
|
public void testPerConnectionConf() throws Exception {
|
||||||
TestTokenSecretManager sm = new TestTokenSecretManager();
|
TestTokenSecretManager sm = new TestTokenSecretManager();
|
||||||
final Server server = new RPC.Builder(conf)
|
final Server server = setupTestServer(conf, 5, sm);
|
||||||
.setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl())
|
|
||||||
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
|
|
||||||
.setSecretManager(sm).build();
|
|
||||||
server.start();
|
|
||||||
final UserGroupInformation current = UserGroupInformation.getCurrentUser();
|
final UserGroupInformation current = UserGroupInformation.getCurrentUser();
|
||||||
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
|
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
|
||||||
TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
|
TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
|
||||||
.getUserName()));
|
.getUserName()));
|
||||||
Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
|
Token<TestTokenIdentifier> token = new Token<>(tokenId, sm);
|
||||||
sm);
|
|
||||||
SecurityUtil.setTokenService(token, addr);
|
SecurityUtil.setTokenService(token, addr);
|
||||||
current.addToken(token);
|
current.addToken(token);
|
||||||
|
|
||||||
@ -497,28 +314,25 @@ public void testPerConnectionConf() throws Exception {
|
|||||||
HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, "");
|
HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, "");
|
||||||
|
|
||||||
Client client = null;
|
Client client = null;
|
||||||
TestSaslProtocol proxy1 = null;
|
TestRpcService proxy1 = null;
|
||||||
TestSaslProtocol proxy2 = null;
|
TestRpcService proxy2 = null;
|
||||||
TestSaslProtocol proxy3 = null;
|
TestRpcService proxy3 = null;
|
||||||
int timeouts[] = {111222, 3333333};
|
int timeouts[] = {111222, 3333333};
|
||||||
try {
|
try {
|
||||||
newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[0]);
|
newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[0]);
|
||||||
proxy1 = RPC.getProxy(TestSaslProtocol.class,
|
proxy1 = getClient(addr, newConf);
|
||||||
TestSaslProtocol.versionID, addr, newConf);
|
proxy1.getAuthMethod(null, newEmptyRequest());
|
||||||
proxy1.getAuthMethod();
|
client = ProtobufRpcEngine.getClient(newConf);
|
||||||
client = WritableRpcEngine.getClient(newConf);
|
|
||||||
Set<ConnectionId> conns = client.getConnectionIds();
|
Set<ConnectionId> conns = client.getConnectionIds();
|
||||||
assertEquals("number of connections in cache is wrong", 1, conns.size());
|
assertEquals("number of connections in cache is wrong", 1, conns.size());
|
||||||
// same conf, connection should be re-used
|
// same conf, connection should be re-used
|
||||||
proxy2 = RPC.getProxy(TestSaslProtocol.class,
|
proxy2 = getClient(addr, newConf);
|
||||||
TestSaslProtocol.versionID, addr, newConf);
|
proxy2.getAuthMethod(null, newEmptyRequest());
|
||||||
proxy2.getAuthMethod();
|
|
||||||
assertEquals("number of connections in cache is wrong", 1, conns.size());
|
assertEquals("number of connections in cache is wrong", 1, conns.size());
|
||||||
// different conf, new connection should be set up
|
// different conf, new connection should be set up
|
||||||
newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[1]);
|
newConf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, timeouts[1]);
|
||||||
proxy3 = RPC.getProxy(TestSaslProtocol.class,
|
proxy3 = getClient(addr, newConf);
|
||||||
TestSaslProtocol.versionID, addr, newConf);
|
proxy3.getAuthMethod(null, newEmptyRequest());
|
||||||
proxy3.getAuthMethod();
|
|
||||||
assertEquals("number of connections in cache is wrong", 2, conns.size());
|
assertEquals("number of connections in cache is wrong", 2, conns.size());
|
||||||
// now verify the proxies have the correct connection ids and timeouts
|
// now verify the proxies have the correct connection ids and timeouts
|
||||||
ConnectionId[] connsArray = {
|
ConnectionId[] connsArray = {
|
||||||
@ -551,24 +365,14 @@ static void testKerberosRpc(String principal, String keytab) throws Exception {
|
|||||||
UserGroupInformation current = UserGroupInformation.getCurrentUser();
|
UserGroupInformation current = UserGroupInformation.getCurrentUser();
|
||||||
System.out.println("UGI: " + current);
|
System.out.println("UGI: " + current);
|
||||||
|
|
||||||
Server server = new RPC.Builder(newConf)
|
Server server = setupTestServer(newConf, 5);
|
||||||
.setProtocol(TestSaslProtocol.class).setInstance(new TestSaslImpl())
|
TestRpcService proxy = null;
|
||||||
.setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
|
|
||||||
.build();
|
|
||||||
TestSaslProtocol proxy = null;
|
|
||||||
|
|
||||||
server.start();
|
|
||||||
|
|
||||||
InetSocketAddress addr = NetUtils.getConnectAddress(server);
|
|
||||||
try {
|
try {
|
||||||
proxy = RPC.getProxy(TestSaslProtocol.class,
|
proxy = getClient(addr, newConf);
|
||||||
TestSaslProtocol.versionID, addr, newConf);
|
proxy.ping(null, newEmptyRequest());
|
||||||
proxy.ping();
|
|
||||||
} finally {
|
} finally {
|
||||||
server.stop();
|
stop(server, proxy);
|
||||||
if (proxy != null) {
|
|
||||||
RPC.stopProxy(proxy);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
System.out.println("Test is successful.");
|
System.out.println("Test is successful.");
|
||||||
}
|
}
|
||||||
@ -887,14 +691,7 @@ public void testSaslResponseOrdering() throws Exception {
|
|||||||
UserGroupInformation.setConfiguration(conf);
|
UserGroupInformation.setConfiguration(conf);
|
||||||
|
|
||||||
TestTokenSecretManager sm = new TestTokenSecretManager();
|
TestTokenSecretManager sm = new TestTokenSecretManager();
|
||||||
Server server = new RPC.Builder(conf)
|
Server server = setupTestServer(conf, 1, sm);
|
||||||
.setProtocol(TestSaslProtocol.class)
|
|
||||||
.setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0)
|
|
||||||
.setNumHandlers(1) // prevents ordering issues when unblocking calls.
|
|
||||||
.setVerbose(true)
|
|
||||||
.setSecretManager(sm)
|
|
||||||
.build();
|
|
||||||
server.start();
|
|
||||||
try {
|
try {
|
||||||
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
|
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
|
||||||
final UserGroupInformation clientUgi =
|
final UserGroupInformation clientUgi =
|
||||||
@ -903,14 +700,13 @@ public void testSaslResponseOrdering() throws Exception {
|
|||||||
|
|
||||||
TestTokenIdentifier tokenId = new TestTokenIdentifier(
|
TestTokenIdentifier tokenId = new TestTokenIdentifier(
|
||||||
new Text(clientUgi.getUserName()));
|
new Text(clientUgi.getUserName()));
|
||||||
Token<?> token = new Token<TestTokenIdentifier>(tokenId, sm);
|
Token<?> token = new Token<>(tokenId, sm);
|
||||||
SecurityUtil.setTokenService(token, addr);
|
SecurityUtil.setTokenService(token, addr);
|
||||||
clientUgi.addToken(token);
|
clientUgi.addToken(token);
|
||||||
clientUgi.doAs(new PrivilegedExceptionAction<Void>() {
|
clientUgi.doAs(new PrivilegedExceptionAction<Void>() {
|
||||||
@Override
|
@Override
|
||||||
public Void run() throws Exception {
|
public Void run() throws Exception {
|
||||||
final TestSaslProtocol proxy = RPC.getProxy(TestSaslProtocol.class,
|
final TestRpcService proxy = getClient(addr, conf);
|
||||||
TestSaslProtocol.versionID, addr, conf);
|
|
||||||
final ExecutorService executor = Executors.newCachedThreadPool();
|
final ExecutorService executor = Executors.newCachedThreadPool();
|
||||||
final AtomicInteger count = new AtomicInteger();
|
final AtomicInteger count = new AtomicInteger();
|
||||||
try {
|
try {
|
||||||
@ -922,7 +718,8 @@ public Void run() throws Exception {
|
|||||||
@Override
|
@Override
|
||||||
public Void call() throws Exception {
|
public Void call() throws Exception {
|
||||||
String expect = "future"+count.getAndIncrement();
|
String expect = "future"+count.getAndIncrement();
|
||||||
String answer = proxy.echoPostponed(expect);
|
String answer = convert(proxy.echoPostponed(null,
|
||||||
|
newEchoRequest(expect)));
|
||||||
assertEquals(expect, answer);
|
assertEquals(expect, answer);
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
@ -939,7 +736,7 @@ public Void call() throws Exception {
|
|||||||
// only 1 handler ensures that the prior calls are already
|
// only 1 handler ensures that the prior calls are already
|
||||||
// postponed. 1 handler also ensures that this call will
|
// postponed. 1 handler also ensures that this call will
|
||||||
// timeout if the postponing doesn't work (ie. free up handler)
|
// timeout if the postponing doesn't work (ie. free up handler)
|
||||||
proxy.sendPostponed();
|
proxy.sendPostponed(null, newEmptyRequest());
|
||||||
for (int i=0; i < futures.length; i++) {
|
for (int i=0; i < futures.length; i++) {
|
||||||
LOG.info("waiting for future"+i);
|
LOG.info("waiting for future"+i);
|
||||||
futures[i].get();
|
futures[i].get();
|
||||||
@ -1009,14 +806,7 @@ private String internalGetAuthMethod(
|
|||||||
Server server = serverUgi.doAs(new PrivilegedExceptionAction<Server>() {
|
Server server = serverUgi.doAs(new PrivilegedExceptionAction<Server>() {
|
||||||
@Override
|
@Override
|
||||||
public Server run() throws IOException {
|
public Server run() throws IOException {
|
||||||
Server server = new RPC.Builder(serverConf)
|
return setupTestServer(serverConf, 5, serverSm);
|
||||||
.setProtocol(TestSaslProtocol.class)
|
|
||||||
.setInstance(new TestSaslImpl()).setBindAddress(ADDRESS).setPort(0)
|
|
||||||
.setNumHandlers(5).setVerbose(true)
|
|
||||||
.setSecretManager(serverSm)
|
|
||||||
.build();
|
|
||||||
server.start();
|
|
||||||
return server;
|
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
@ -1038,17 +828,17 @@ public Server run() throws IOException {
|
|||||||
Token<TestTokenIdentifier> token = null;
|
Token<TestTokenIdentifier> token = null;
|
||||||
switch (tokenType) {
|
switch (tokenType) {
|
||||||
case VALID:
|
case VALID:
|
||||||
token = new Token<TestTokenIdentifier>(tokenId, sm);
|
token = new Token<>(tokenId, sm);
|
||||||
SecurityUtil.setTokenService(token, addr);
|
SecurityUtil.setTokenService(token, addr);
|
||||||
break;
|
break;
|
||||||
case INVALID:
|
case INVALID:
|
||||||
token = new Token<TestTokenIdentifier>(
|
token = new Token<>(
|
||||||
tokenId.getBytes(), "bad-password!".getBytes(),
|
tokenId.getBytes(), "bad-password!".getBytes(),
|
||||||
tokenId.getKind(), null);
|
tokenId.getKind(), null);
|
||||||
SecurityUtil.setTokenService(token, addr);
|
SecurityUtil.setTokenService(token, addr);
|
||||||
break;
|
break;
|
||||||
case OTHER:
|
case OTHER:
|
||||||
token = new Token<TestTokenIdentifier>();
|
token = new Token<>();
|
||||||
break;
|
break;
|
||||||
case NONE: // won't get here
|
case NONE: // won't get here
|
||||||
}
|
}
|
||||||
@ -1060,19 +850,28 @@ public Server run() throws IOException {
|
|||||||
return clientUgi.doAs(new PrivilegedExceptionAction<String>() {
|
return clientUgi.doAs(new PrivilegedExceptionAction<String>() {
|
||||||
@Override
|
@Override
|
||||||
public String run() throws IOException {
|
public String run() throws IOException {
|
||||||
TestSaslProtocol proxy = null;
|
TestRpcService proxy = null;
|
||||||
try {
|
try {
|
||||||
proxy = RPC.getProxy(TestSaslProtocol.class,
|
proxy = getClient(addr, clientConf);
|
||||||
TestSaslProtocol.versionID, addr, clientConf);
|
|
||||||
|
proxy.ping(null, newEmptyRequest());
|
||||||
proxy.ping();
|
|
||||||
// make sure the other side thinks we are who we said we are!!!
|
// make sure the other side thinks we are who we said we are!!!
|
||||||
assertEquals(clientUgi.getUserName(), proxy.getAuthUser());
|
assertEquals(clientUgi.getUserName(),
|
||||||
AuthMethod authMethod = proxy.getAuthMethod();
|
convert(proxy.getAuthUser(null, newEmptyRequest())));
|
||||||
|
AuthMethod authMethod =
|
||||||
|
convert(proxy.getAuthMethod(null, newEmptyRequest()));
|
||||||
// verify sasl completed with correct QOP
|
// verify sasl completed with correct QOP
|
||||||
assertEquals((authMethod != SIMPLE) ? expectedQop.saslQop : null,
|
assertEquals((authMethod != SIMPLE) ? expectedQop.saslQop : null,
|
||||||
RPC.getConnectionIdForProxy(proxy).getSaslQop());
|
RPC.getConnectionIdForProxy(proxy).getSaslQop());
|
||||||
return authMethod.toString();
|
return authMethod.toString();
|
||||||
|
} catch (ServiceException se) {
|
||||||
|
if (se.getCause() instanceof RemoteException) {
|
||||||
|
throw (RemoteException) se.getCause();
|
||||||
|
} else if (se.getCause() instanceof IOException) {
|
||||||
|
throw (IOException) se.getCause();
|
||||||
|
} else {
|
||||||
|
throw new RuntimeException(se.getCause());
|
||||||
|
}
|
||||||
} finally {
|
} finally {
|
||||||
if (proxy != null) {
|
if (proxy != null) {
|
||||||
RPC.stopProxy(proxy);
|
RPC.stopProxy(proxy);
|
||||||
|
@ -41,9 +41,9 @@
|
|||||||
import org.apache.hadoop.security.token.TokenInfo;
|
import org.apache.hadoop.security.token.TokenInfo;
|
||||||
import org.junit.Before;
|
import org.junit.Before;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
import org.apache.hadoop.ipc.TestSaslRPC.TestTokenSecretManager;
|
import org.apache.hadoop.ipc.TestRpcBase.TestTokenSecretManager;
|
||||||
import org.apache.hadoop.ipc.TestSaslRPC.TestTokenIdentifier;
|
import org.apache.hadoop.ipc.TestRpcBase.TestTokenIdentifier;
|
||||||
import org.apache.hadoop.ipc.TestSaslRPC.TestTokenSelector;
|
import org.apache.hadoop.ipc.TestRpcBase.TestTokenSelector;
|
||||||
import org.apache.commons.logging.*;
|
import org.apache.commons.logging.*;
|
||||||
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
|
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
|
||||||
|
|
||||||
|
@ -81,4 +81,13 @@ message ExchangeRequestProto {
|
|||||||
|
|
||||||
message ExchangeResponseProto {
|
message ExchangeResponseProto {
|
||||||
repeated int32 values = 1;
|
repeated int32 values = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthMethodResponseProto {
|
||||||
|
required int32 code = 1;
|
||||||
|
required string mechanismName = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthUserResponseProto {
|
||||||
|
required string authUser = 1;
|
||||||
}
|
}
|
@ -39,6 +39,10 @@ service TestProtobufRpcProto {
|
|||||||
rpc testServerGet(EmptyRequestProto) returns (EmptyResponseProto);
|
rpc testServerGet(EmptyRequestProto) returns (EmptyResponseProto);
|
||||||
rpc exchange(ExchangeRequestProto) returns (ExchangeResponseProto);
|
rpc exchange(ExchangeRequestProto) returns (ExchangeResponseProto);
|
||||||
rpc sleep(SleepRequestProto) returns (EmptyResponseProto);
|
rpc sleep(SleepRequestProto) returns (EmptyResponseProto);
|
||||||
|
rpc getAuthMethod(EmptyRequestProto) returns (AuthMethodResponseProto);
|
||||||
|
rpc getAuthUser(EmptyRequestProto) returns (AuthUserResponseProto);
|
||||||
|
rpc echoPostponed(EchoRequestProto) returns (EchoResponseProto);
|
||||||
|
rpc sendPostponed(EmptyRequestProto) returns (EmptyResponseProto);
|
||||||
}
|
}
|
||||||
|
|
||||||
service TestProtobufRpc2Proto {
|
service TestProtobufRpc2Proto {
|
||||||
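The RPCs added to `TestProtobufRpcProto` above (`getAuthMethod`, `getAuthUser`, `echoPostponed`, `sendPostponed`) are exercised from `TestSaslRPC` through the generated blocking stub. The following is a hypothetical, minimal sketch (not part of this commit) of that call pattern; it assumes the `TestRpcBase` helpers shown in this change set (`setupTestServer`, `getClient`, `newEmptyRequest`, `convert`, `stop`):

```java
package org.apache.hadoop.ipc;

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.junit.Test;

// Hypothetical illustration: drives the new auth-introspection RPCs through the
// protobuf blocking stub, reusing the helpers this commit adds to TestRpcBase.
public class TestAuthRpcSketch extends TestRpcBase {
  @Test
  public void testAuthIntrospection() throws Exception {
    Configuration conf = new Configuration();
    RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);
    Server server = setupTestServer(conf, 1);
    TestRpcService proxy = null;
    try {
      InetSocketAddress addr = NetUtils.getConnectAddress(server);
      proxy = getClient(addr, conf);
      // The server reports the negotiated auth method and authenticated user for
      // this connection; with a plain Configuration this is SIMPLE and the local user.
      AuthMethod method = convert(proxy.getAuthMethod(null, newEmptyRequest()));
      String user = convert(proxy.getAuthUser(null, newEmptyRequest()));
      System.out.println("negotiated " + method + " as " + user);
    } finally {
      stop(server, proxy);
    }
  }
}
```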
|
@ -11,5 +11,5 @@
|
|||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
#
|
#
|
||||||
org.apache.hadoop.ipc.TestSaslRPC$TestTokenIdentifier
|
org.apache.hadoop.ipc.TestRpcBase$TestTokenIdentifier
|
||||||
org.apache.hadoop.security.token.delegation.TestDelegationToken$TestDelegationTokenIdentifier
|
org.apache.hadoop.security.token.delegation.TestDelegationToken$TestDelegationTokenIdentifier
|
||||||
|
@ -0,0 +1,47 @@
|
|||||||
|
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||||
|
# contributor license agreements. See the NOTICE file distributed with
|
||||||
|
# this work for additional information regarding copyright ownership.
|
||||||
|
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||||
|
# (the "License"); you may not use this file except in compliance with
|
||||||
|
# the License. You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
load hadoop-functions_test_helper
|
||||||
|
|
||||||
|
toolsetup () {
|
||||||
|
HADOOP_LIBEXEC_DIR="${TMP}/libexec"
|
||||||
|
mkdir -p "${HADOOP_LIBEXEC_DIR}/tools"
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "hadoop_classpath_tools (load)" {
|
||||||
|
toolsetup
|
||||||
|
echo "unittest=libexec" > "${HADOOP_LIBEXEC_DIR}/tools/test.sh"
|
||||||
|
hadoop_add_to_classpath_tools test
|
||||||
|
[ -n "${unittest}" ]
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@test "hadoop_classpath_tools (not exist)" {
|
||||||
|
toolsetup
|
||||||
|
hadoop_add_to_classpath_tools test
|
||||||
|
[ -z "${unittest}" ]
|
||||||
|
}
|
||||||
|
|
||||||
|
@test "hadoop_classpath_tools (function)" {
|
||||||
|
toolsetup
|
||||||
|
{
|
||||||
|
echo "function hadoop_classpath_tools_test {"
|
||||||
|
echo " unittest=libexec"
|
||||||
|
echo " }"
|
||||||
|
} > "${HADOOP_LIBEXEC_DIR}/tools/test.sh"
|
||||||
|
hadoop_add_to_classpath_tools test
|
||||||
|
declare -f
|
||||||
|
[ -n "${unittest}" ]
|
||||||
|
}
|
@ -1,74 +0,0 @@
|
|||||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
|
||||||
# contributor license agreements. See the NOTICE file distributed with
|
|
||||||
# this work for additional information regarding copyright ownership.
|
|
||||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
|
||||||
# (the "License"); you may not use this file except in compliance with
|
|
||||||
# the License. You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
load hadoop-functions_test_helper
|
|
||||||
|
|
||||||
freetheclasses () {
|
|
||||||
local j
|
|
||||||
|
|
||||||
for j in HADOOP_TOOLS_PATH \
|
|
||||||
CLASSPATH; do
|
|
||||||
unset ${j}
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
createdirs () {
|
|
||||||
local j
|
|
||||||
|
|
||||||
for j in new old foo bar baz; do
|
|
||||||
mkdir -p "${TMP}/${j}"
|
|
||||||
done
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hadoop_add_to_classpath_toolspath (nothing)" {
|
|
||||||
freetheclasses
|
|
||||||
hadoop_add_to_classpath_toolspath
|
|
||||||
[ -z "${CLASSPATH}" ]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hadoop_add_to_classpath_toolspath (none)" {
|
|
||||||
freetheclasses
|
|
||||||
CLASSPATH=test
|
|
||||||
hadoop_add_to_classpath_toolspath
|
|
||||||
[ "${CLASSPATH}" = "test" ]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hadoop_add_to_classpath_toolspath (only)" {
|
|
||||||
freetheclasses
|
|
||||||
createdirs
|
|
||||||
HADOOP_TOOLS_PATH="${TMP}/new"
|
|
||||||
hadoop_add_to_classpath_toolspath
|
|
||||||
[ "${CLASSPATH}" = "${TMP}/new" ]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hadoop_add_to_classpath_toolspath (1+1)" {
|
|
||||||
freetheclasses
|
|
||||||
createdirs
|
|
||||||
CLASSPATH=${TMP}/foo
|
|
||||||
HADOOP_TOOLS_PATH=${TMP}/foo
|
|
||||||
hadoop_add_to_classpath_toolspath
|
|
||||||
echo ">${CLASSPATH}<"
|
|
||||||
[ ${CLASSPATH} = "${TMP}/foo" ]
|
|
||||||
}
|
|
||||||
|
|
||||||
@test "hadoop_add_to_classpath_toolspath (3+2)" {
|
|
||||||
freetheclasses
|
|
||||||
createdirs
|
|
||||||
CLASSPATH=${TMP}/foo:${TMP}/bar:${TMP}/baz
|
|
||||||
HADOOP_TOOLS_PATH=${TMP}/new:${TMP}/old
|
|
||||||
hadoop_add_to_classpath_toolspath
|
|
||||||
echo ">${CLASSPATH}<"
|
|
||||||
[ ${CLASSPATH} = "${TMP}/foo:${TMP}/bar:${TMP}/baz:${TMP}/new:${TMP}/old" ]
|
|
||||||
}
|
|
@ -38,7 +38,8 @@ basicinitsetup () {
|
|||||||
dirvars="HADOOP_COMMON_HOME \
|
dirvars="HADOOP_COMMON_HOME \
|
||||||
HADOOP_MAPRED_HOME \
|
HADOOP_MAPRED_HOME \
|
||||||
HADOOP_HDFS_HOME \
|
HADOOP_HDFS_HOME \
|
||||||
HADOOP_YARN_HOME"
|
HADOOP_YARN_HOME \
|
||||||
|
HADOOP_TOOLS_HOME"
|
||||||
|
|
||||||
for j in ${testvars}; do
|
for j in ${testvars}; do
|
||||||
unset ${j}
|
unset ${j}
|
||||||
|
@ -15,13 +15,13 @@
|
|||||||
|
|
||||||
load hadoop-functions_test_helper
|
load hadoop-functions_test_helper
|
||||||
|
|
||||||
@test "hadoop_deprecate_envvar (no libexec)" {
|
@test "hadoop_bootstrap (no libexec)" {
|
||||||
unset HADOOP_LIBEXEC_DIR
|
unset HADOOP_LIBEXEC_DIR
|
||||||
run hadoop_bootstrap
|
run hadoop_bootstrap
|
||||||
[ "${status}" -eq 1 ]
|
[ "${status}" -eq 1 ]
|
||||||
}
|
}
|
||||||
|
|
||||||
@test "hadoop_deprecate_envvar (libexec)" {
|
@test "hadoop_bootstrap (libexec)" {
|
||||||
unset HADOOP_PREFIX
|
unset HADOOP_PREFIX
|
||||||
unset HADOOP_COMMON_DIR
|
unset HADOOP_COMMON_DIR
|
||||||
unset HADOOP_COMMON_LIB_JARS_DIR
|
unset HADOOP_COMMON_LIB_JARS_DIR
|
||||||
@ -31,7 +31,9 @@ load hadoop-functions_test_helper
|
|||||||
unset YARN_LIB_JARS_DIR
|
unset YARN_LIB_JARS_DIR
|
||||||
unset MAPRED_DIR
|
unset MAPRED_DIR
|
||||||
unset MAPRED_LIB_JARS_DIR
|
unset MAPRED_LIB_JARS_DIR
|
||||||
unset TOOL_PATH
|
unset HADOOP_TOOLS_HOME
|
||||||
|
unset HADOOP_TOOLS_DIR
|
||||||
|
unset HADOOP_TOOLS_LIB_JARS_DIR
|
||||||
unset HADOOP_OS_TYPE
|
unset HADOOP_OS_TYPE
|
||||||
|
|
||||||
hadoop_bootstrap
|
hadoop_bootstrap
|
||||||
@ -46,6 +48,9 @@ load hadoop-functions_test_helper
|
|||||||
[ -n ${YARN_LIB_JARS_DIR} ]
|
[ -n ${YARN_LIB_JARS_DIR} ]
|
||||||
[ -n ${MAPRED_DIR} ]
|
[ -n ${MAPRED_DIR} ]
|
||||||
[ -n ${MAPRED_LIB_JARS_DIR} ]
|
[ -n ${MAPRED_LIB_JARS_DIR} ]
|
||||||
[ -n ${TOOL_PATH} ]
|
|
||||||
[ -n ${HADOOP_OS_TYPE} ]
|
[ -n ${HADOOP_OS_TYPE} ]
|
||||||
}
|
[ -n ${HADOOP_TOOLS_PATH} ]
|
||||||
|
[ -n ${HADOOP_TOOLS_HOME} ]
|
||||||
|
[ -n ${HADOOP_TOOLS_DIR} ]
|
||||||
|
[ -n ${HADOOP_TOOLS_LIB_JARS_DIR} ]
|
||||||
|
}
|
||||||
|
@@ -0,0 +1,49 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

load hadoop-functions_test_helper

@test "hadoop_add_entry (positive 1)" {
  hadoop_add_entry testvar foo
  echo ">${testvar}<"
  [ "${testvar}" = " foo " ]
}

@test "hadoop_add_entry (negative)" {
  hadoop_add_entry testvar foo
  hadoop_add_entry testvar foo
  echo ">${testvar}<"
  [ "${testvar}" = " foo " ]
}

@test "hadoop_add_entry (positive 2)" {
  hadoop_add_entry testvar foo
  hadoop_add_entry testvar foo
  hadoop_add_entry testvar bar
  echo ">${testvar}<"
  [ "${testvar}" = " foo bar " ]
}

@test "hadoop_add_entry (positive 3)" {
  hadoop_add_entry testvar foo
  hadoop_add_entry testvar foo
  hadoop_add_entry testvar bar
  hadoop_add_entry testvar bar
  hadoop_add_entry testvar baz
  hadoop_add_entry testvar baz

  echo ">${testvar}<"
  [ "${testvar}" = " foo bar baz " ]
}
@@ -66,6 +66,13 @@ _test_hadoop_finalize () {
  [ -z "${unittest}" ]
}

@test "hadoop_import_shellprofiles (H_O_T)" {
  HADOOP_OPTIONAL_TOOLS=1,2
  shellprofilesetup
  hadoop_import_shellprofiles
  [ "${HADOOP_TOOLS_OPTIONS}" == " 1 2 " ]
}

@test "hadoop_add_profile+hadoop_shellprofiles_init" {
  hadoop_add_profile test
  hadoop_shellprofiles_init
@@ -41,10 +41,10 @@
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.UriBuilder;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.LinkedList;
|
|||||||
keyVersion.getVersionName(), null);
|
keyVersion.getVersionName(), null);
|
||||||
}
|
}
|
||||||
|
|
||||||
private static URI getKeyURI(String name) throws URISyntaxException {
|
private static URI getKeyURI(String domain, String keyName) {
|
||||||
return new URI(KMSRESTConstants.SERVICE_VERSION + "/" +
|
return UriBuilder.fromPath("{a}/{b}/{c}")
|
||||||
KMSRESTConstants.KEY_RESOURCE + "/" + name);
|
.build(domain, KMSRESTConstants.KEY_RESOURCE, keyName);
|
||||||
}
|
}
|
||||||
|
|
||||||
@POST
|
@POST
|
||||||
@ -151,9 +151,9 @@ public KeyVersion run() throws Exception {
|
|||||||
String requestURL = KMSMDCFilter.getURL();
|
String requestURL = KMSMDCFilter.getURL();
|
||||||
int idx = requestURL.lastIndexOf(KMSRESTConstants.KEYS_RESOURCE);
|
int idx = requestURL.lastIndexOf(KMSRESTConstants.KEYS_RESOURCE);
|
||||||
requestURL = requestURL.substring(0, idx);
|
requestURL = requestURL.substring(0, idx);
|
||||||
String keyURL = requestURL + KMSRESTConstants.KEY_RESOURCE + "/" + name;
|
return Response.created(getKeyURI(KMSRESTConstants.SERVICE_VERSION, name))
|
||||||
return Response.created(getKeyURI(name)).type(MediaType.APPLICATION_JSON).
|
.type(MediaType.APPLICATION_JSON)
|
||||||
header("Location", keyURL).entity(json).build();
|
.header("Location", getKeyURI(requestURL, name)).entity(json).build();
|
||||||
}
|
}
|
||||||
|
|
||||||
@DELETE
|
@DELETE
|
||||||
|
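Note on the getKeyURI rewrite above: switching from string concatenation into new URI(...) to javax.ws.rs.core.UriBuilder lets key names that contain reserved characters be percent-encoded instead of failing with URISyntaxException, which is what the testSpecialKeyNames test below exercises. The following is a minimal, self-contained sketch of that behaviour, not KMS code; the "keys" segment stands in for KMSRESTConstants.KEY_RESOURCE, and running it requires a JAX-RS implementation (for example Jersey) on the classpath.

import java.net.URI;
import javax.ws.rs.core.UriBuilder;

public class KeyUriSketch {
  // Template values passed to build() are percent-encoded per path-segment rules,
  // so a name such as "key %^[" becomes a legal URI path segment.
  static URI keyUri(String domain, String keyName) {
    return UriBuilder.fromPath("{a}/{b}/{c}").build(domain, "keys", keyName);
  }

  public static void main(String[] args) {
    // Prints something like v1/keys/key%20%25%5E%5B (exact escaping depends on the segment rules).
    System.out.println(keyUri("v1", "key %^["));
  }
}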
@@ -39,11 +39,15 @@
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.login.AppConfigurationEntry;
@@ -69,12 +73,14 @@
import java.util.concurrent.Callable;

public class TestKMS {
  private static final Logger LOG = LoggerFactory.getLogger(TestKMS.class);

  @Before
  public void cleanUp() {
    // resetting kerberos security
    Configuration conf = new Configuration();
    UserGroupInformation.setConfiguration(conf);
    GenericTestUtils.setLogLevel(LOG, Level.INFO);
  }

  public static File getTestDir() throws Exception {
|
|||||||
testStartStop(true, true);
|
testStartStop(true, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Test(timeout = 30000)
|
||||||
|
public void testSpecialKeyNames() throws Exception {
|
||||||
|
final String specialKey = "key %^[\n{]}|\"<>\\";
|
||||||
|
Configuration conf = new Configuration();
|
||||||
|
conf.set("hadoop.security.authentication", "kerberos");
|
||||||
|
UserGroupInformation.setConfiguration(conf);
|
||||||
|
File confDir = getTestDir();
|
||||||
|
conf = createBaseKMSConf(confDir);
|
||||||
|
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + specialKey + ".ALL", "*");
|
||||||
|
writeConf(confDir, conf);
|
||||||
|
|
||||||
|
runServer(null, null, confDir, new KMSCallable<Void>() {
|
||||||
|
@Override
|
||||||
|
public Void call() throws Exception {
|
||||||
|
Configuration conf = new Configuration();
|
||||||
|
URI uri = createKMSUri(getKMSUrl());
|
||||||
|
KeyProvider kp = createProvider(uri, conf);
|
||||||
|
Assert.assertTrue(kp.getKeys().isEmpty());
|
||||||
|
Assert.assertEquals(0, kp.getKeysMetadata().length);
|
||||||
|
|
||||||
|
KeyProvider.Options options = new KeyProvider.Options(conf);
|
||||||
|
options.setCipher("AES/CTR/NoPadding");
|
||||||
|
options.setBitLength(128);
|
||||||
|
options.setDescription("l1");
|
||||||
|
LOG.info("Creating key with name '{}'", specialKey);
|
||||||
|
|
||||||
|
KeyProvider.KeyVersion kv0 = kp.createKey(specialKey, options);
|
||||||
|
Assert.assertNotNull(kv0);
|
||||||
|
Assert.assertEquals(specialKey, kv0.getName());
|
||||||
|
Assert.assertNotNull(kv0.getVersionName());
|
||||||
|
Assert.assertNotNull(kv0.getMaterial());
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testKMSProvider() throws Exception {
|
public void testKMSProvider() throws Exception {
|
||||||
Configuration conf = new Configuration();
|
Configuration conf = new Configuration();
|
||||||
|
@@ -103,6 +103,24 @@
            </arguments>
          </configuration>
        </execution>
        <execution>
          <id>toolshooks</id>
          <phase>prepare-package</phase>
          <goals>
            <goal>exec</goal>
          </goals>
          <configuration>
            <executable>${shell-executable}</executable>
            <workingDirectory>${basedir}</workingDirectory>
            <requiresOnline>false</requiresOnline>
            <arguments>
              <argument>${basedir}/../dev-support/bin/dist-tools-hooks-maker</argument>
              <argument>${project.version}</argument>
              <argument>${project.build.directory}</argument>
              <argument>${basedir}/../hadoop-tools</argument>
            </arguments>
          </configuration>
        </execution>
        <execution>
          <id>tar</id>
          <phase>package</phase>
@@ -8,6 +8,7 @@
      <Class name="org.apache.hadoop.hdfs.protocol.LocatedBlock"/>
      <Class name="org.apache.hadoop.hdfs.protocol.BlockStoragePolicy"/>
      <Class name="org.apache.hadoop.hdfs.protocol.CorruptFileBlocks"/>
      <Class name="org.apache.hadoop.hdfs.protocol.StripedBlockInfo"/>
      <Class name="org.apache.hadoop.hdfs.protocol.DirectoryListing"/>
      <Class name="org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier"/>
      <Class name="org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey"/>
@@ -83,11 +83,6 @@ public interface BlockReader extends ByteBufferReadable, Closeable {
   */
  int readAll(byte[] buf, int offset, int len) throws IOException;

  /**
   * @return              true only if this is a local read.
   */
  boolean isLocal();

  /**
   * @return              true only if this is a short-circuit read.
   *                      All short-circuit reads are also local.
@@ -107,4 +102,9 @@ public interface BlockReader extends ByteBufferReadable, Closeable {
   * @return              The DataChecksum used by the read block
   */
  DataChecksum getDataChecksum();

  /**
   * Return the network distance between local machine and the remote machine.
   */
  int getNetworkDistance();
}
@@ -833,16 +833,19 @@ private static boolean isSecurityException(IOException ioe) {

  @SuppressWarnings("deprecation")
  private BlockReader getRemoteBlockReader(Peer peer) throws IOException {
    int networkDistance = clientContext.getNetworkDistance(datanode);
    if (conf.getShortCircuitConf().isUseLegacyBlockReader()) {
      return RemoteBlockReader.newBlockReader(fileName,
          block, token, startOffset, length, conf.getIoBufferSize(),
          verifyChecksum, clientName, peer, datanode,
          clientContext.getPeerCache(), cachingStrategy, tracer);
          clientContext.getPeerCache(), cachingStrategy, tracer,
          networkDistance);
    } else {
      return RemoteBlockReader2.newBlockReader(
          fileName, block, token, startOffset, length,
          verifyChecksum, clientName, peer, datanode,
          clientContext.getPeerCache(), cachingStrategy, tracer);
          clientContext.getPeerCache(), cachingStrategy, tracer,
          networkDistance);
    }
  }

@@ -640,11 +640,6 @@ public synchronized int readAll(byte[] buf, int off, int len)
    return BlockReaderUtil.readAll(this, buf, off, len);
  }

  @Override
  public boolean isLocal() {
    return true;
  }

  @Override
  public boolean isShortCircuit() {
    return true;
@@ -721,4 +716,9 @@ void forceUnanchorable() {
  public DataChecksum getDataChecksum() {
    return checksum;
  }

  @Override
  public int getNetworkDistance() {
    return 0;
  }
}
@@ -722,11 +722,6 @@ public int available() {
    return Integer.MAX_VALUE;
  }

  @Override
  public boolean isLocal() {
    return true;
  }

  @Override
  public boolean isShortCircuit() {
    return true;
@@ -741,4 +736,9 @@ public ClientMmap getClientMmap(EnumSet<ReadOption> opts) {
  public DataChecksum getDataChecksum() {
    return checksum;
  }

  @Override
  public int getNetworkDistance() {
    return 0;
  }
}
@@ -17,16 +17,28 @@
 */
package org.apache.hadoop.hdfs;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.shortcircuit.DomainSocketFactory;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache;
import org.apache.hadoop.hdfs.util.ByteArrayManager;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.net.ScriptBasedMapping;
import org.apache.hadoop.util.ReflectionUtils;

import com.google.common.annotations.VisibleForTesting;

@@ -101,7 +113,12 @@ public class ClientContext {
   */
  private boolean printedConfWarning = false;

  private ClientContext(String name, DfsClientConf conf) {
  private final NetworkTopology topology;
  private final NodeBase clientNode;
  private final Map<NodeBase, Integer> nodeToDistance;

  private ClientContext(String name, DfsClientConf conf,
      Configuration config) {
    final ShortCircuitConf scConf = conf.getShortCircuitConf();

    this.name = name;
@@ -116,14 +133,28 @@ private ClientContext(String name, DfsClientConf conf) {

    this.byteArrayManager = ByteArrayManager.newInstance(
        conf.getWriteByteArrayManagerConf());

    DNSToSwitchMapping dnsToSwitchMapping = ReflectionUtils.newInstance(
        config.getClass(
            CommonConfigurationKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
            ScriptBasedMapping.class, DNSToSwitchMapping.class), config);
    List<String> nodes = new ArrayList<>();
    String clientHostName = NetUtils.getLocalHostname();
    nodes.add(clientHostName);
    clientNode = new NodeBase(clientHostName,
        dnsToSwitchMapping.resolve(nodes).get(0));
    this.topology = NetworkTopology.getInstance(config);
    this.topology.add(clientNode);
    this.nodeToDistance = new ConcurrentHashMap<>();
  }

  public static ClientContext get(String name, DfsClientConf conf) {
  public static ClientContext get(String name, DfsClientConf conf,
      Configuration config) {
    ClientContext context;
    synchronized(ClientContext.class) {
      context = CACHES.get(name);
      if (context == null) {
        context = new ClientContext(name, conf);
        context = new ClientContext(name, conf, config);
        CACHES.put(name, context);
      } else {
        context.printConfWarningIfNeeded(conf);
@@ -132,6 +163,10 @@ public static ClientContext get(String name, DfsClientConf conf) {
    return context;
  }

  public static ClientContext get(String name, Configuration config) {
    return get(name, new DfsClientConf(config), config);
  }

  /**
   * Get a client context, from a Configuration object.
   *
@@ -141,8 +176,7 @@ public static ClientContext get(String name, DfsClientConf conf) {
  @VisibleForTesting
  public static ClientContext getFromConf(Configuration conf) {
    return get(conf.get(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT,
        HdfsClientConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT),
        HdfsClientConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT), conf);
        new DfsClientConf(conf));
  }

  private void printConfWarningIfNeeded(DfsClientConf conf) {
@@ -193,4 +227,16 @@ public DomainSocketFactory getDomainSocketFactory() {
  public ByteArrayManager getByteArrayManager() {
    return byteArrayManager;
  }

  public int getNetworkDistance(DatanodeInfo datanodeInfo) {
    NodeBase node = new NodeBase(datanodeInfo.getHostName(),
        datanodeInfo.getNetworkLocation());
    Integer distance = nodeToDistance.get(node);
    if (distance == null) {
      topology.add(node);
      distance = topology.getDistance(clientNode, node);
      nodeToDistance.put(node, distance);
    }
    return distance;
  }
}
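Note on the ClientContext changes above: the client resolves its own rack through the configured DNSToSwitchMapping, builds a NetworkTopology, and caches the distance to each datanode it talks to. A minimal, self-contained sketch of how NetworkTopology distances come out, assuming a flat /datacenter/rack location scheme (hostnames and rack paths here are made up):

import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NodeBase;

public class DistanceSketch {
  public static void main(String[] args) {
    NetworkTopology topology = new NetworkTopology();
    NodeBase client   = new NodeBase("client-host", "/dc1/rack1");
    NodeBase sameRack = new NodeBase("dn1", "/dc1/rack1");
    NodeBase offRack  = new NodeBase("dn2", "/dc1/rack2");
    topology.add(client);
    topology.add(sameRack);
    topology.add(offRack);

    System.out.println(topology.getDistance(client, client));   // 0: same node
    System.out.println(topology.getDistance(client, sameRack)); // 2: same rack
    System.out.println(topology.getDistance(client, offRack));  // 4: different rack
  }
}

These are the distance values that the new read statistics below bucket on: 0 means the block reader is node-local, 2 rack-local, larger values farther away.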
@@ -212,7 +212,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
  final String clientName;
  final SocketFactory socketFactory;
  final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
  final FileSystem.Statistics stats;
  private final FileSystem.Statistics stats;
  private final String authority;
  private final Random r = new Random();
  private SocketAddress[] localInterfaceAddrs;
@@ -357,7 +357,7 @@ public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
        new CachingStrategy(writeDropBehind, readahead);
    this.clientContext = ClientContext.get(
        conf.get(DFS_CLIENT_CONTEXT, DFS_CLIENT_CONTEXT_DEFAULT),
        dfsClientConf);
        dfsClientConf, conf);

    if (dfsClientConf.getHedgedReadThreadpoolSize() > 0) {
      this.initThreadsNumForHedgedReads(dfsClientConf.
@@ -1704,7 +1704,10 @@ public DataEncryptionKey newDataEncryptionKey() throws IOException {

  /**
   * Get the checksum of the whole file or a range of the file. Note that the
   * range always starts from the beginning of the file.
   * range always starts from the beginning of the file. The file can be
   * in replicated form, or striped mode. It can be used to checksum and compare
   * two replicated files, or two striped files, but not applicable for two
   * files of different block layout forms.
   * @param src The file path
   * @param length the length of the range, i.e., the range is [0, length]
   * @return The checksum
@@ -1717,7 +1720,11 @@ public MD5MD5CRC32FileChecksum getFileChecksum(String src, long length)

    LocatedBlocks blockLocations = getBlockLocations(src, length);

    FileChecksumHelper.FileChecksumComputer maker =
    FileChecksumHelper.FileChecksumComputer maker;
    ErasureCodingPolicy ecPolicy = blockLocations.getErasureCodingPolicy();
    maker = ecPolicy != null ?
        new FileChecksumHelper.StripedFileNonStripedChecksumComputer(src,
            length, blockLocations, namenode, this, ecPolicy) :
        new FileChecksumHelper.ReplicatedFileChecksumComputer(src, length,
            blockLocations, namenode, this);

@@ -2740,6 +2747,13 @@ public Peer newConnectedPeer(InetSocketAddress addr,
    }
  }

  void updateFileSystemReadStats(int distance, int nRead) {
    if (stats != null) {
      stats.incrementBytesRead(nRead);
      stats.incrementBytesReadByDistance(distance, nRead);
    }
  }

  /**
   * Create hedged reads thread pool, HEDGED_READ_THREAD_POOL, if
   * it does not already exist.
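Note on updateFileSystemReadStats above: every read now updates both the aggregate bytes-read counter and a per-network-distance counter on FileSystem.Statistics. A hedged sketch of how an application could read those counters back, assuming the companion getBytesReadByDistance accessor is available on this branch (the distance buckets follow the 0/2/4 convention shown earlier):

import org.apache.hadoop.fs.FileSystem;

public class ReadStatsSketch {
  public static void main(String[] args) {
    // Assumes some reads have already been performed through an HDFS FileSystem
    // in this JVM; statistics are aggregated per scheme.
    for (FileSystem.Statistics stats : FileSystem.getAllStatistics()) {
      System.out.println(stats.getScheme()
          + " total="     + stats.getBytesRead()
          + " local="     + stats.getBytesReadByDistance(0)
          + " same-rack=" + stats.getBytesReadByDistance(2)
          + " off-rack="  + stats.getBytesReadByDistance(4));
    }
  }
}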
@@ -775,7 +775,7 @@ protected void updateReadStatistics(ReadStatistics readStatistics,
    synchronized(infoLock) {
      if (blockReader.isShortCircuit()) {
        readStatistics.addShortCircuitBytes(nRead);
      } else if (blockReader.isLocal()) {
      } else if (blockReader.getNetworkDistance() == 0) {
        readStatistics.addLocalBytes(nRead);
      } else {
        readStatistics.addRemoteBytes(nRead);
@@ -798,6 +798,8 @@ public int doRead(BlockReader blockReader, int off, int len)
      throws IOException {
    int nRead = blockReader.read(buf, off, len);
    updateReadStatistics(readStatistics, nRead, blockReader);
    dfsClient.updateFileSystemReadStats(blockReader.getNetworkDistance(),
        nRead);
    return nRead;
  }

@@ -828,6 +830,8 @@ public int doRead(BlockReader blockReader, int off, int len)
    int ret = blockReader.read(buf);
    success = true;
    updateReadStatistics(readStatistics, ret, blockReader);
    dfsClient.updateFileSystemReadStats(blockReader.getNetworkDistance(),
        ret);
    if (ret == 0) {
      DFSClient.LOG.warn("zero");
    }
@@ -939,9 +943,6 @@ protected synchronized int readWithStrategy(ReaderStrategy strategy, int off,
          // got a EOS from reader though we expect more data on it.
          throw new IOException("Unexpected EOS from the reader");
        }
        if (dfsClient.stats != null) {
          dfsClient.stats.incrementBytesRead(result);
        }
        return result;
      } catch (ChecksumException ce) {
        throw ce;
@@ -1194,6 +1195,8 @@ void actualGetFromOneDataNode(final DNAddrPair datanode, LocatedBlock block,
            datanode.storageType, datanode.info);
        int nread = reader.readAll(buf, offset, len);
        updateReadStatistics(readStatistics, nread, reader);
        dfsClient.updateFileSystemReadStats(
            reader.getNetworkDistance(), nread);
        if (nread != len) {
          throw new IOException("truncated return from reader.read(): " +
              "excpected " + len + ", got " + nread);
@@ -1276,7 +1279,7 @@ private void hedgedFetchBlockByteRange(LocatedBlock block, long start,
        // chooseDataNode is a commitment. If no node, we go to
        // the NN to reget block locations. Only go here on first read.
        chosenNode = chooseDataNode(block, ignored);
        bb = ByteBuffer.wrap(buf, offset, len);
        bb = ByteBuffer.allocate(len);
        Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(
            chosenNode, block, start, end, bb,
            corruptedBlocks, hedgedReadId++);
@@ -1287,7 +1290,9 @@ private void hedgedFetchBlockByteRange(LocatedBlock block, long start,
          Future<ByteBuffer> future = hedgedService.poll(
              conf.getHedgedReadThresholdMillis(), TimeUnit.MILLISECONDS);
          if (future != null) {
            future.get();
            ByteBuffer result = future.get();
            System.arraycopy(result.array(), result.position(), buf, offset,
                len);
            return;
          }
          DFSClient.LOG.debug("Waited {}ms to read from {}; spawning hedged "
@@ -1325,13 +1330,9 @@ private void hedgedFetchBlockByteRange(LocatedBlock block, long start,
          ByteBuffer result = getFirstToComplete(hedgedService, futures);
          // cancel the rest.
          cancelAll(futures);
          if (result.array() != buf) { // compare the array pointers
          dfsClient.getHedgedReadMetrics().incHedgedReadWins();
            dfsClient.getHedgedReadMetrics().incHedgedReadWins();
          System.arraycopy(result.array(), result.position(), buf, offset,
            System.arraycopy(result.array(), result.position(), buf, offset,
              len);
                len);
          } else {
            dfsClient.getHedgedReadMetrics().incHedgedReadOps();
          }
          return;
        } catch (InterruptedException ie) {
          // Ignore and retry
@@ -1479,9 +1480,6 @@ private int pread(long position, byte[] buffer, int offset, int length)
      offset += bytesToRead;
    }
    assert remaining == 0 : "Wrong number of bytes read.";
    if (dfsClient.stats != null) {
      dfsClient.stats.incrementBytesRead(realLen);
    }
    return realLen;
  }

@@ -447,9 +447,6 @@ protected synchronized int readWithStrategy(ReaderStrategy strategy,
        result += ret;
        pos += ret;
      }
      if (dfsClient.stats != null) {
        dfsClient.stats.incrementBytesRead(result);
      }
      return result;
    } finally {
      // Check if need to report block replicas corruption either read
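Note on the hedged-read hunks above: each hedged attempt now reads into its own ByteBuffer.allocate(len) and only the winning attempt's bytes are copied into the caller's array, rather than letting racing attempts write into a wrapped view of the same caller buffer. This is a generic, self-contained sketch of that copy-the-winner pattern (it is not the HDFS code; names are illustrative):

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class HedgedCopySketch {
  // Each attempt fills a private buffer; only the first to finish is copied into dst.
  static void hedgedRead(List<Callable<ByteBuffer>> attempts,
      byte[] dst, int offset, int len) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(attempts.size());
    CompletionService<ByteBuffer> cs = new ExecutorCompletionService<>(pool);
    List<Future<ByteBuffer>> futures = new ArrayList<>();
    try {
      for (Callable<ByteBuffer> attempt : attempts) {
        futures.add(cs.submit(attempt));
      }
      ByteBuffer winner = cs.take().get();  // first attempt to complete
      System.arraycopy(winner.array(), winner.position(), dst, offset, len);
    } finally {
      for (Future<ByteBuffer> f : futures) {
        f.cancel(true);                     // losers are cancelled, their buffers discarded
      }
      pool.shutdownNow();
    }
  }
}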
@@ -109,11 +109,6 @@ public int readAll(byte[] buf, int offset, int len) throws IOException {
    return BlockReaderUtil.readAll(this, buf, offset, len);
  }

  @Override
  public boolean isLocal() {
    return accessor.isLocal();
  }

  @Override
  public boolean isShortCircuit() {
    return accessor.isShortCircuit();
@@ -129,4 +124,9 @@ public ClientMmap getClientMmap(EnumSet<ReadOption> opts) {
  public DataChecksum getDataChecksum() {
    return null;
  }

  @Override
  public int getNetworkDistance() {
    return accessor.getNetworkDistance();
  }
}
@@ -22,10 +22,13 @@
import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
import org.apache.hadoop.hdfs.protocol.StripedBlockInfo;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
@@ -75,7 +78,7 @@ static abstract class FileChecksumComputer {
    private int bytesPerCRC = -1;
    private DataChecksum.Type crcType = DataChecksum.Type.DEFAULT;
    private long crcPerBlock = 0;
    private boolean refetchBlocks = false;
    private boolean isRefetchBlocks = false;
    private int lastRetriedIndex = -1;

    /**
@@ -127,8 +130,11 @@ LocatedBlocks getBlockLocations() {
      return blockLocations;
    }

    void setBlockLocations(LocatedBlocks blockLocations) {
    void refetchBlocks() throws IOException {
      this.blockLocations = blockLocations;
      this.blockLocations = getClient().getBlockLocations(getSrc(),
          getLength());
      this.locatedBlocks = getBlockLocations().getLocatedBlocks();
      this.isRefetchBlocks = false;
    }

    int getTimeout() {
@@ -143,10 +149,6 @@ List<LocatedBlock> getLocatedBlocks() {
      return locatedBlocks;
    }

    void setLocatedBlocks(List<LocatedBlock> locatedBlocks) {
      this.locatedBlocks = locatedBlocks;
    }

    long getRemaining() {
      return remaining;
    }
@@ -180,11 +182,11 @@ void setCrcPerBlock(long crcPerBlock) {
    }

    boolean isRefetchBlocks() {
      return refetchBlocks;
      return isRefetchBlocks;
    }

    void setRefetchBlocks(boolean refetchBlocks) {
      this.refetchBlocks = refetchBlocks;
      this.isRefetchBlocks = refetchBlocks;
    }

    int getLastRetriedIndex() {
@@ -278,10 +280,7 @@ void checksumBlocks() throws IOException {
          blockIdx < getLocatedBlocks().size() && getRemaining() >= 0;
          blockIdx++) {
        if (isRefetchBlocks()) {  // refetch to get fresh tokens
          setBlockLocations(getClient().getBlockLocations(getSrc(),
          refetchBlocks();
              getLength()));
          setLocatedBlocks(getBlockLocations().getLocatedBlocks());
          setRefetchBlocks(false);
        }

        LocatedBlock locatedBlock = getLocatedBlocks().get(blockIdx);
@@ -380,15 +379,13 @@ private void tryDatanode(LocatedBlock locatedBlock,
      }

      //read md5
      final MD5Hash md5 = new MD5Hash(
      final MD5Hash md5 = new MD5Hash(checksumData.getMd5().toByteArray());
          checksumData.getMd5().toByteArray());
      md5.write(getMd5out());

      // read crc-type
      final DataChecksum.Type ct;
      if (checksumData.hasCrcType()) {
        ct = PBHelperClient.convert(checksumData
        ct = PBHelperClient.convert(checksumData.getCrcType());
            .getCrcType());
      } else {
        LOG.debug("Retrieving checksum from an earlier-version DataNode: " +
            "inferring checksum by reading first byte");
@@ -413,4 +410,160 @@ && getCrcType() != ct) {
      }
    }
  }

  /**
   * Striped file checksum computing.
   */
  static class StripedFileNonStripedChecksumComputer
      extends FileChecksumComputer {
    private final ErasureCodingPolicy ecPolicy;
    private int bgIdx;

    StripedFileNonStripedChecksumComputer(String src, long length,
                                          LocatedBlocks blockLocations,
                                          ClientProtocol namenode,
                                          DFSClient client,
                                          ErasureCodingPolicy ecPolicy)
        throws IOException {
      super(src, length, blockLocations, namenode, client);

      this.ecPolicy = ecPolicy;
    }

    @Override
    void checksumBlocks() throws IOException {
      int tmpTimeout = 3000 * 1 + getClient().getConf().getSocketTimeout();
      setTimeout(tmpTimeout);

      for (bgIdx = 0;
           bgIdx < getLocatedBlocks().size() && getRemaining() >= 0; bgIdx++) {
        if (isRefetchBlocks()) {  // refetch to get fresh tokens
          refetchBlocks();
        }

        LocatedBlock locatedBlock = getLocatedBlocks().get(bgIdx);
        LocatedStripedBlock blockGroup = (LocatedStripedBlock) locatedBlock;

        if (!checksumBlockGroup(blockGroup)) {
          throw new IOException("Fail to get block MD5 for " + locatedBlock);
        }
      }
    }


    private boolean checksumBlockGroup(
        LocatedStripedBlock blockGroup) throws IOException {
      ExtendedBlock block = blockGroup.getBlock();
      if (getRemaining() < block.getNumBytes()) {
        block.setNumBytes(getRemaining());
      }
      setRemaining(getRemaining() - block.getNumBytes());

      StripedBlockInfo stripedBlockInfo = new StripedBlockInfo(block,
          blockGroup.getLocations(), blockGroup.getBlockTokens(), ecPolicy);
      DatanodeInfo[] datanodes = blockGroup.getLocations();

      //try each datanode in the block group.
      boolean done = false;
      for (int j = 0; !done && j < datanodes.length; j++) {
        try {
          tryDatanode(blockGroup, stripedBlockInfo, datanodes[j]);
          done = true;
        } catch (InvalidBlockTokenException ibte) {
          if (bgIdx > getLastRetriedIndex()) {
            LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
                + "for file {} for block {} from datanode {}. Will retry "
                + "the block once.",
                getSrc(), block, datanodes[j]);
            setLastRetriedIndex(bgIdx);
            done = true; // actually it's not done; but we'll retry
            bgIdx--; // repeat at bgIdx-th block
            setRefetchBlocks(true);
          }
        } catch (IOException ie) {
          LOG.warn("src={}" + ", datanodes[{}]={}",
              getSrc(), j, datanodes[j], ie);
        }
      }

      return done;
    }

    /**
     * Return true when sounds good to continue or retry, false when severe
     * condition or totally failed.
     */
    private void tryDatanode(LocatedStripedBlock blockGroup,
                             StripedBlockInfo stripedBlockInfo,
                             DatanodeInfo datanode) throws IOException {

      try (IOStreamPair pair = getClient().connectToDN(datanode,
          getTimeout(), blockGroup.getBlockToken())) {

        LOG.debug("write to {}: {}, blockGroup={}",
            datanode, Op.BLOCK_GROUP_CHECKSUM, blockGroup);

        // get block MD5
        createSender(pair).blockGroupChecksum(stripedBlockInfo,
            blockGroup.getBlockToken());

        BlockOpResponseProto reply = BlockOpResponseProto.parseFrom(
            PBHelperClient.vintPrefixed(pair.in));

        String logInfo = "for blockGroup " + blockGroup +
            " from datanode " + datanode;
        DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);

        OpBlockChecksumResponseProto checksumData = reply.getChecksumResponse();

        //read byte-per-checksum
        final int bpc = checksumData.getBytesPerCrc();
        if (bgIdx == 0) { //first block
          setBytesPerCRC(bpc);
        } else {
          if (bpc != getBytesPerCRC()) {
            throw new IOException("Byte-per-checksum not matched: bpc=" + bpc
                + " but bytesPerCRC=" + getBytesPerCRC());
          }
        }

        //read crc-per-block
        final long cpb = checksumData.getCrcPerBlock();
        if (getLocatedBlocks().size() > 1 && bgIdx == 0) { // first block
          setCrcPerBlock(cpb);
        }

        //read md5
        final MD5Hash md5 = new MD5Hash(
            checksumData.getMd5().toByteArray());
        md5.write(getMd5out());

        // read crc-type
        final DataChecksum.Type ct;
        if (checksumData.hasCrcType()) {
          ct = PBHelperClient.convert(checksumData.getCrcType());
        } else {
          LOG.debug("Retrieving checksum from an earlier-version DataNode: " +
              "inferring checksum by reading first byte");
          ct = getClient().inferChecksumTypeByReading(blockGroup, datanode);
        }

        if (bgIdx == 0) {
          setCrcType(ct);
        } else if (getCrcType() != DataChecksum.Type.MIXED &&
            getCrcType() != ct) {
          // if crc types are mixed in a file
          setCrcType(DataChecksum.Type.MIXED);
        }

        if (LOG.isDebugEnabled()) {
          if (bgIdx == 0) {
            LOG.debug("set bytesPerCRC=" + getBytesPerCRC()
                + ", crcPerBlock=" + getCrcPerBlock());
          }
          LOG.debug("got reply from " + datanode + ": md5=" + md5);
        }
      }
    }
  }
}
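Note on the FileChecksumHelper hunks above: the striped-file computer is chosen inside DFSClient#getFileChecksum when the file carries an erasure-coding policy, so applications do not call it directly. A hedged usage sketch through the public FileSystem API, which exercises whichever computer the client selects (the path argument is assumed to be supplied by the caller):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ChecksumSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Works for both replicated and striped files; the client picks the
    // matching FileChecksumComputer internally. Checksums of files with
    // different block layouts are not comparable, per the javadoc change above.
    FileChecksum sum = fs.getFileChecksum(new Path(args[0]));
    System.out.println(sum.getAlgorithmName() + " " + sum);
  }
}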
@@ -44,7 +44,6 @@
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import org.apache.htrace.core.TraceScope;
@@ -93,11 +92,6 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
   */
  private final long bytesNeededToFinish;

  /**
   * True if we are reading from a local DataNode.
   */
  private final boolean isLocal;

  private boolean eos = false;
  private boolean sentStatusCode = false;

@@ -109,6 +103,8 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {

  private final Tracer tracer;

  private final int networkDistance;

  /* FSInputChecker interface */

  /* same interface as inputStream java.io.InputStream#read()
@@ -342,7 +338,8 @@ private synchronized int readChunkImpl(long pos, byte[] buf, int offset,
  private RemoteBlockReader(String file, String bpid, long blockId,
      DataInputStream in, DataChecksum checksum, boolean verifyChecksum,
      long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
      DatanodeID datanodeID, PeerCache peerCache, Tracer tracer) {
      DatanodeID datanodeID, PeerCache peerCache, Tracer tracer,
      int networkDistance) {
    // Path is used only for printing block and file information in debug
    super(new Path("/" + Block.BLOCK_FILE_PREFIX + blockId +
        ":" + bpid + ":of:"+ file)/*too non path-like?*/,
@@ -351,9 +348,6 @@ private RemoteBlockReader(String file, String bpid, long blockId,
        checksum.getBytesPerChecksum(),
        checksum.getChecksumSize());

    this.isLocal = DFSUtilClient.isLocalAddress(NetUtils.
        createSocketAddr(datanodeID.getXferAddr()));

    this.peer = peer;
    this.datanodeID = datanodeID;
    this.in = in;
@@ -375,6 +369,7 @@ private RemoteBlockReader(String file, String bpid, long blockId,
    checksumSize = this.checksum.getChecksumSize();
    this.peerCache = peerCache;
    this.tracer = tracer;
    this.networkDistance = networkDistance;
  }

  /**
@@ -400,7 +395,7 @@ public static RemoteBlockReader newBlockReader(String file,
                                     DatanodeID datanodeID,
                                     PeerCache peerCache,
                                     CachingStrategy cachingStrategy,
                                     Tracer tracer)
                                     Tracer tracer, int networkDistance)
                                       throws IOException {
    // in and out will be closed when sock is closed (by the caller)
    final DataOutputStream out =
@@ -436,7 +431,7 @@ public static RemoteBlockReader newBlockReader(String file,

    return new RemoteBlockReader(file, block.getBlockPoolId(), block.getBlockId(),
        in, checksum, verifyChecksum, startOffset, firstChunkOffset, len,
        peer, datanodeID, peerCache, tracer);
        peer, datanodeID, peerCache, tracer, networkDistance);
  }

  @Override
@@ -493,11 +488,6 @@ public int available() {
    return RemoteBlockReader2.TCP_WINDOW_SIZE;
  }

  @Override
  public boolean isLocal() {
    return isLocal;
  }

  @Override
  public boolean isShortCircuit() {
    return false;
@@ -512,4 +502,9 @@ public ClientMmap getClientMmap(EnumSet<ReadOption> opts) {
  public DataChecksum getDataChecksum() {
    return checksum;
  }

  @Override
  public int getNetworkDistance() {
    return networkDistance;
  }
}
@@ -45,7 +45,6 @@
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import org.apache.htrace.core.TraceScope;
@@ -116,17 +115,14 @@ public class RemoteBlockReader2 implements BlockReader {
   */
  private long bytesNeededToFinish;

  /**
   * True if we are reading from a local DataNode.
   */
  private final boolean isLocal;

  private final boolean verifyChecksum;

  private boolean sentStatusCode = false;

  private final Tracer tracer;

  private final int networkDistance;

  @VisibleForTesting
  public Peer getPeer() {
    return peer;
@@ -280,9 +276,8 @@ private void readTrailingEmptyPacket() throws IOException {
  protected RemoteBlockReader2(String file, long blockId,
      DataChecksum checksum, boolean verifyChecksum,
      long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
      DatanodeID datanodeID, PeerCache peerCache, Tracer tracer) {
      DatanodeID datanodeID, PeerCache peerCache, Tracer tracer,
    this.isLocal = DFSUtilClient.isLocalAddress(NetUtils.
      int networkDistance) {
        createSocketAddr(datanodeID.getXferAddr()));
    // Path is used only for printing block and file information in debug
    this.peer = peer;
    this.datanodeID = datanodeID;
@@ -302,6 +297,7 @@ protected RemoteBlockReader2(String file, long blockId,
    bytesPerChecksum = this.checksum.getBytesPerChecksum();
    checksumSize = this.checksum.getChecksumSize();
    this.tracer = tracer;
    this.networkDistance = networkDistance;
  }


@@ -397,7 +393,8 @@ public static BlockReader newBlockReader(String file,
      Peer peer, DatanodeID datanodeID,
      PeerCache peerCache,
      CachingStrategy cachingStrategy,
      Tracer tracer) throws IOException {
      Tracer tracer,
      int networkDistance) throws IOException {
    // in and out will be closed when sock is closed (by the caller)
    final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
        peer.getOutputStream()));
@@ -430,7 +427,7 @@ public static BlockReader newBlockReader(String file,

    return new RemoteBlockReader2(file, block.getBlockId(), checksum,
        verifyChecksum, startOffset, firstChunkOffset, len, peer, datanodeID,
        peerCache, tracer);
        peerCache, tracer, networkDistance);
  }

  static void checkSuccess(
@@ -453,11 +450,6 @@ public int available() {
    return TCP_WINDOW_SIZE;
  }

  @Override
  public boolean isLocal() {
    return isLocal;
  }

  @Override
  public boolean isShortCircuit() {
    return false;
@@ -472,4 +464,9 @@ public ClientMmap getClientMmap(EnumSet<ReadOption> opts) {
  public DataChecksum getDataChecksum() {
    return checksum;
  }

  @Override
  public int getNetworkDistance() {
    return networkDistance;
  }
}
@@ -87,4 +87,11 @@ public abstract int read(long pos, byte[] buf, int off, int len)
   * short-circuit byte count statistics.
   */
  public abstract boolean isShortCircuit();

  /**
   * Return the network distance between local machine and the remote machine.
   */
  public int getNetworkDistance() {
    return isLocal() ? 0 : Integer.MAX_VALUE;
  }
}
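Note on the ReplicaAccessor hunk above: getNetworkDistance() is added with a default body so that existing external replica plugins keep compiling; they inherit a mapping of isLocal() to distance 0 and everything else to Integer.MAX_VALUE. A hedged, hypothetical sketch of a plugin that relies on that default (the class name is made up; the remaining abstract methods are left to a concrete subclass):

import org.apache.hadoop.hdfs.ReplicaAccessor;

// Hypothetical accessor that only serves node-local replicas. It overrides
// nothing related to distance: the inherited getNetworkDistance() returns 0
// because isLocal() is true.
public abstract class LocalOnlyAccessor extends ReplicaAccessor {
  @Override
  public boolean isLocal() {
    return true;
  }
}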
@@ -0,0 +1,100 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under the Apache License,
 * Version 2.0; see http://www.apache.org/licenses/LICENSE-2.0 (standard ASF header).
 */
package org.apache.hadoop.hdfs.protocol;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;

/**
 * The class describes the configured admin properties for a datanode.
 *
 * It is the static configuration specified by administrators via dfsadmin
 * command; different from the runtime state. CombinedHostFileManager uses
 * the class to deserialize the configurations from json-based file format.
 *
 * To decommission a node, use AdminStates.DECOMMISSIONED.
 */
public class DatanodeAdminProperties {
  private String hostName;
  private int port;
  private String upgradeDomain;
  private AdminStates adminState = AdminStates.NORMAL;

  /**
   * Return the host name of the datanode.
   * @return the host name of the datanode.
   */
  public String getHostName() {
    return hostName;
  }

  /**
   * Set the host name of the datanode.
   * @param hostName the host name of the datanode.
   */
  public void setHostName(final String hostName) {
    this.hostName = hostName;
  }

  /**
   * Get the port number of the datanode.
   * @return the port number of the datanode.
   */
  public int getPort() {
    return port;
  }

  /**
   * Set the port number of the datanode.
   * @param port the port number of the datanode.
   */
  public void setPort(final int port) {
    this.port = port;
  }

  /**
   * Get the upgrade domain of the datanode.
   * @return the upgrade domain of the datanode.
   */
  public String getUpgradeDomain() {
    return upgradeDomain;
  }

  /**
   * Set the upgrade domain of the datanode.
   * @param upgradeDomain the upgrade domain of the datanode.
   */
  public void setUpgradeDomain(final String upgradeDomain) {
    this.upgradeDomain = upgradeDomain;
  }

  /**
   * Get the admin state of the datanode.
   * @return the admin state of the datanode.
   */
  public AdminStates getAdminState() {
    return adminState;
  }

  /**
   * Set the admin state of the datanode.
   * @param adminState the admin state of the datanode.
   */
  public void setAdminState(final AdminStates adminState) {
    this.adminState = adminState;
  }
}
@@ -24,6 +24,8 @@
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;

+import java.net.InetSocketAddress;
+
/**
 * This class represents the primary identifier for a Datanode.
 * Datanodes are identified by how they can be contacted (hostname
@@ -327,4 +329,8 @@ public HdfsProtos.DatanodeIDProto getProtoBufMessage() {
        .setContainerPort(this.getContainerPort())
        .build();
  }
+
+ public InetSocketAddress getResolvedAddress() {
+   return new InetSocketAddress(this.getIpAddr(), this.getXferPort());
+ }
}
@@ -0,0 +1,61 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under the Apache License,
 * Version 2.0; see http://www.apache.org/licenses/LICENSE-2.0 (standard ASF header).
 */
package org.apache.hadoop.hdfs.protocol;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.security.token.Token;

/**
 * Striped block info that can be sent elsewhere to do block group level things,
 * like checksum, and etc.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class StripedBlockInfo {
  private final ExtendedBlock block;
  private final DatanodeInfo[] datanodes;
  private final Token<BlockTokenIdentifier>[] blockTokens;
  private final ErasureCodingPolicy ecPolicy;

  public StripedBlockInfo(ExtendedBlock block, DatanodeInfo[] datanodes,
      Token<BlockTokenIdentifier>[] blockTokens,
      ErasureCodingPolicy ecPolicy) {
    this.block = block;
    this.datanodes = datanodes;
    this.blockTokens = blockTokens;
    this.ecPolicy = ecPolicy;
  }

  public ExtendedBlock getBlock() {
    return block;
  }

  public DatanodeInfo[] getDatanodes() {
    return datanodes;
  }

  public Token<BlockTokenIdentifier>[] getBlockTokens() {
    return blockTokens;
  }

  public ErasureCodingPolicy getErasureCodingPolicy() {
    return ecPolicy;
  }
}
@@ -24,6 +24,7 @@
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.StripedBlockInfo;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
@@ -197,6 +198,17 @@ void copyBlock(final ExtendedBlock blk,
   * @param blockToken security token for accessing the block.
   * @throws IOException
   */
- void blockChecksum(final ExtendedBlock blk,
-     final Token<BlockTokenIdentifier> blockToken) throws IOException;
+ void blockChecksum(ExtendedBlock blk,
+     Token<BlockTokenIdentifier> blockToken) throws IOException;
+
+ /**
+  * Get striped block group checksum (MD5 of CRC32).
+  *
+  * @param stripedBlockInfo a striped block info.
+  * @param blockToken security token for accessing the block.
+  * @throws IOException
+  */
+ void blockGroupChecksum(StripedBlockInfo stripedBlockInfo,
+     Token<BlockTokenIdentifier> blockToken) throws IOException;
}
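As a rough, hypothetical caller sketch (not part of this patch), the new operation would be driven from the client side through the Sender shown further below; it assumes Sender keeps its usual DataOutputStream constructor, that "out" is the stream of an already-connected DataNode peer, and that the block group, datanodes, per-block tokens and EC policy were taken from a LocatedStripedBlock beforehand:

    // Hypothetical usage sketch; variable names are illustrative only.
    StripedBlockInfo stripedBlockInfo = new StripedBlockInfo(
        blockGroup, datanodes, blockTokens, ecPolicy);
    new Sender(out).blockGroupChecksum(stripedBlockInfo, blockToken);
    // The datanode replies on the response stream, as with Op.BLOCK_CHECKSUM.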
@@ -38,6 +38,7 @@ public enum Op {
  REQUEST_SHORT_CIRCUIT_FDS((byte)87),
  RELEASE_SHORT_CIRCUIT_FDS((byte)88),
  REQUEST_SHORT_CIRCUIT_SHM((byte)89),
+ BLOCK_GROUP_CHECKSUM((byte)90),
  CUSTOM((byte)127);

  /** The code for this operation. */
@@ -28,11 +28,13 @@
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.StripedBlockInfo;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto;
@@ -261,4 +263,21 @@ public void blockChecksum(final ExtendedBlock blk,

    send(out, Op.BLOCK_CHECKSUM, proto);
  }
+
+ @Override
+ public void blockGroupChecksum(StripedBlockInfo stripedBlockInfo,
+     Token<BlockTokenIdentifier> blockToken) throws IOException {
+   OpBlockGroupChecksumProto proto = OpBlockGroupChecksumProto.newBuilder()
+       .setHeader(DataTransferProtoUtil.buildBaseHeader(
+           stripedBlockInfo.getBlock(), blockToken))
+       .setDatanodes(PBHelperClient.convertToProto(
+           stripedBlockInfo.getDatanodes()))
+       .addAllBlockTokens(PBHelperClient.convert(
+           stripedBlockInfo.getBlockTokens()))
+       .setEcPolicy(PBHelperClient.convertErasureCodingPolicy(
+           stripedBlockInfo.getErasureCodingPolicy()))
+       .build();
+
+   send(out, Op.BLOCK_GROUP_CHECKSUM, proto);
+ }
}
@@ -553,10 +553,8 @@ public static LocatedBlock convertLocatedBlockProto(LocatedBlockProto proto) {
        proto.getCorrupt(),
        cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
    List<TokenProto> tokenProtos = proto.getBlockTokensList();
-   Token<BlockTokenIdentifier>[] blockTokens = new Token[indices.length];
-   for (int i = 0; i < indices.length; i++) {
-     blockTokens[i] = convert(tokenProtos.get(i));
-   }
+   Token<BlockTokenIdentifier>[] blockTokens =
+       convertTokens(tokenProtos);
    ((LocatedStripedBlock) lb).setBlockTokens(blockTokens);
  }
  lb.setBlockToken(convert(proto.getBlockToken()));
@@ -564,6 +562,18 @@ public static LocatedBlock convertLocatedBlockProto(LocatedBlockProto proto) {
    return lb;
  }

+ static public Token<BlockTokenIdentifier>[] convertTokens(
+     List<TokenProto> tokenProtos) {
+
+   @SuppressWarnings("unchecked")
+   Token<BlockTokenIdentifier>[] blockTokens = new Token[tokenProtos.size()];
+   for (int i = 0; i < blockTokens.length; i++) {
+     blockTokens[i] = convert(tokenProtos.get(i));
+   }
+
+   return blockTokens;
+ }
+
  static public DatanodeInfo convert(DatanodeInfoProto di) {
    if (di == null) return null;
    return new DatanodeInfo(
@@ -815,9 +825,7 @@ public static LocatedBlockProto convertLocatedBlock(LocatedBlock b) {
      byte[] indices = sb.getBlockIndices();
      builder.setBlockIndices(PBHelperClient.getByteString(indices));
      Token<BlockTokenIdentifier>[] blockTokens = sb.getBlockTokens();
-     for (int i = 0; i < indices.length; i++) {
-       builder.addBlockTokens(PBHelperClient.convert(blockTokens[i]));
-     }
+     builder.addAllBlockTokens(convert(blockTokens));
    }

    return builder.setB(PBHelperClient.convert(b.getBlock()))
@@ -825,6 +833,16 @@ public static LocatedBlockProto convertLocatedBlock(LocatedBlock b) {
        .setCorrupt(b.isCorrupt()).setOffset(b.getStartOffset()).build();
  }

+ public static List<TokenProto> convert(
+     Token<BlockTokenIdentifier>[] blockTokens) {
+   List<TokenProto> results = new ArrayList<>(blockTokens.length);
+   for (Token<BlockTokenIdentifier> bt : blockTokens) {
+     results.add(convert(bt));
+   }
+
+   return results;
+ }
+
  public static BlockStoragePolicy convert(BlockStoragePolicyProto proto) {
    List<StorageTypeProto> cList = proto.getCreationPolicy()
        .getStorageTypesList();
@@ -2500,4 +2518,14 @@ public static ErasureCodingPolicyProto convertErasureCodingPolicy(
        .setId(policy.getId());
    return builder.build();
  }
+
+ public static HdfsProtos.DatanodeInfosProto convertToProto(
+     DatanodeInfo[] datanodeInfos) {
+   HdfsProtos.DatanodeInfosProto.Builder builder =
+       HdfsProtos.DatanodeInfosProto.newBuilder();
+   for (DatanodeInfo datanodeInfo : datanodeInfos) {
+     builder.addDatanodes(PBHelperClient.convert(datanodeInfo));
+   }
+   return builder.build();
+ }
}
@@ -0,0 +1,76 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under the Apache License,
 * Version 2.0; see http://www.apache.org/licenses/LICENSE-2.0 (standard ASF header).
 */

package org.apache.hadoop.hdfs.util;

import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.io.IOException;
import java.io.Reader;

import java.util.Iterator;
import java.util.Set;
import java.util.HashSet;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.map.ObjectMapper;

import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;

/**
 * Reader support for JSON based datanode configuration, an alternative
 * to the exclude/include files configuration.
 * The JSON file format is the array of elements where each element
 * in the array describes the properties of a datanode. The properties of
 * a datanode is defined in {@link DatanodeAdminProperties}. For example,
 *
 * {"hostName": "host1"}
 * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"}
 * {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
 */
@InterfaceAudience.LimitedPrivate({"HDFS"})
@InterfaceStability.Unstable
public final class CombinedHostsFileReader {
  private CombinedHostsFileReader() {
  }

  /**
   * Deserialize a set of DatanodeAdminProperties from a json file.
   * @param hostsFile the input json file to read from.
   * @return the set of DatanodeAdminProperties
   * @throws IOException
   */
  public static Set<DatanodeAdminProperties>
      readFile(final String hostsFile) throws IOException {
    HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
    ObjectMapper mapper = new ObjectMapper();
    try (Reader input =
        new InputStreamReader(new FileInputStream(hostsFile), "UTF-8")) {
      Iterator<DatanodeAdminProperties> iterator =
          mapper.readValues(new JsonFactory().createJsonParser(input),
              DatanodeAdminProperties.class);
      while (iterator.hasNext()) {
        DatanodeAdminProperties properties = iterator.next();
        allDNs.add(properties);
      }
    }
    return allDNs;
  }
}
@@ -0,0 +1,69 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under the Apache License,
 * Version 2.0; see http://www.apache.org/licenses/LICENSE-2.0 (standard ASF header).
 */

package org.apache.hadoop.hdfs.util;

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;

import java.util.Set;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.codehaus.jackson.map.ObjectMapper;

import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;

/**
 * Writer support for JSON based datanode configuration, an alternative
 * to the exclude/include files configuration.
 * The JSON file format is the array of elements where each element
 * in the array describes the properties of a datanode. The properties of
 * a datanode is defined in {@link DatanodeAdminProperties}. For example,
 *
 * {"hostName": "host1"}
 * {"hostName": "host2", "port": 50, "upgradeDomain": "ud0"}
 * {"hostName": "host3", "port": 0, "adminState": "DECOMMISSIONED"}
 */
@InterfaceAudience.LimitedPrivate({"HDFS"})
@InterfaceStability.Unstable
public final class CombinedHostsFileWriter {
  private CombinedHostsFileWriter() {
  }

  /**
   * Serialize a set of DatanodeAdminProperties to a json file.
   * @param hostsFile the json file name.
   * @param allDNs the set of DatanodeAdminProperties
   * @throws IOException
   */
  public static void writeFile(final String hostsFile,
      final Set<DatanodeAdminProperties> allDNs) throws IOException {
    StringBuilder configs = new StringBuilder();
    try (Writer output =
        new OutputStreamWriter(new FileOutputStream(hostsFile), "UTF-8")) {
      for (DatanodeAdminProperties datanodeAdminProperties: allDNs) {
        ObjectMapper mapper = new ObjectMapper();
        configs.append(mapper.writeValueAsString(datanodeAdminProperties));
      }
      output.write(configs.toString());
    }
  }
}
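A minimal, hypothetical round trip through the two utility classes above (the file name and property values are made up for illustration, and the calling method is assumed to declare IOException):

    // Build one entry and write it out as a combined hosts file.
    Set<DatanodeAdminProperties> dns = new HashSet<>();
    DatanodeAdminProperties dn = new DatanodeAdminProperties();
    dn.setHostName("host2");
    dn.setPort(50);
    dn.setUpgradeDomain("ud0");
    dns.add(dn);

    CombinedHostsFileWriter.writeFile("/tmp/dfs.hosts.json", dns);
    // Reading the same file back yields an equivalent set of properties.
    Set<DatanodeAdminProperties> readBack =
        CombinedHostsFileReader.readFile("/tmp/dfs.hosts.json");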
@@ -75,6 +75,18 @@ public class StripedBlockUtil {

  public static final Logger LOG = LoggerFactory.getLogger(StripedBlockUtil.class);

+ /**
+  * Parses a striped block group into individual blocks.
+  * @param bg The striped block group
+  * @param ecPolicy The erasure coding policy
+  * @return An array of the blocks in the group
+  */
+ public static LocatedBlock[] parseStripedBlockGroup(LocatedStripedBlock bg,
+     ErasureCodingPolicy ecPolicy) {
+   return parseStripedBlockGroup(bg, ecPolicy.getCellSize(),
+       ecPolicy.getNumDataUnits(), ecPolicy.getNumParityUnits());
+ }
+
  /**
   * This method parses a striped block group into individual blocks.
   *
@@ -74,7 +74,6 @@ message OpReadBlockProto {
  optional CachingStrategyProto cachingStrategy = 5;
}
-

message ChecksumProto {
  required ChecksumTypeProto type = 1;
  required uint32 bytesPerChecksum = 2;
@@ -149,6 +148,14 @@ message OpBlockChecksumProto {
  required BaseHeaderProto header = 1;
}

+message OpBlockGroupChecksumProto {
+  required BaseHeaderProto header = 1;
+  required DatanodeInfosProto datanodes = 2;
+  // each internal block has a block token
+  repeated hadoop.common.TokenProto blockTokens = 3;
+  required ErasureCodingPolicyProto ecPolicy = 4;
+}
+
/**
 * An ID uniquely identifying a shared memory segment.
 */
@@ -570,6 +570,11 @@ int fuseConnectAsThreadUid(struct hdfsConn **conn)

  ctx = fuse_get_context();
  usrname = getUsername(ctx->uid);
+ if (!usrname) {
+   ERROR("fuseConnectAsThreadUid(): failed to get username for uid %"PRId64
+         "\n", (uint64_t)ctx->uid);
+   return EIO;
+ }
  ret = fuseConnect(usrname, ctx, conn);
  free(usrname);
  return ret;
@@ -142,7 +142,9 @@ case ${COMMAND} in
    echo "HDFS_DIR='${HDFS_DIR}'"
    echo "HDFS_LIB_JARS_DIR='${HDFS_LIB_JARS_DIR}'"
    echo "HADOOP_CONF_DIR='${HADOOP_CONF_DIR}'"
-   echo "HADOOP_TOOLS_PATH='${HADOOP_TOOLS_PATH}'"
+   echo "HADOOP_TOOLS_HOME='${HADOOP_TOOLS_HOME}'"
+   echo "HADOOP_TOOLS_DIR='${HADOOP_TOOLS_DIR}'"
+   echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'"
    exit 0
  ;;
  erasurecode)
@@ -166,7 +168,6 @@ case ${COMMAND} in
  ;;
  haadmin)
    CLASS=org.apache.hadoop.hdfs.tools.DFSHAAdmin
-   hadoop_add_to_classpath_toolspath
    hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
  ;;
@@ -415,12 +415,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final String DFS_METRICS_PERCENTILES_INTERVALS_KEY = "dfs.metrics.percentiles.intervals";
  public static final String DFS_DATANODE_HOST_NAME_KEY =
      HdfsClientConfigKeys.DeprecatedKeys.DFS_DATANODE_HOST_NAME_KEY;
- public static final String DFS_NAMENODE_HOSTS_KEY = "dfs.namenode.hosts";
- public static final String DFS_NAMENODE_HOSTS_EXCLUDE_KEY = "dfs.namenode.hosts.exclude";
  public static final String DFS_NAMENODE_CHECKPOINT_DIR_KEY =
      HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY;
  public static final String DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY =
      HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY;
+ public static final String DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY =
+     "dfs.namenode.hosts.provider.classname";
  public static final String DFS_HOSTS = "dfs.hosts";
  public static final String DFS_HOSTS_EXCLUDE = "dfs.hosts.exclude";
  public static final String DFS_NAMENODE_AUDIT_LOGGERS_KEY = "dfs.namenode.audit.loggers";
@@ -750,6 +750,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final String DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES = "dfs.namenode.list.encryption.zones.num.responses";
  public static final String DFS_ENCRYPTION_KEY_PROVIDER_URI =
      HdfsClientConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI;
+ public static final String DFS_NAMENODE_EDEKCACHELOADER_INTERVAL_MS_KEY = "dfs.namenode.edekcacheloader.interval.ms";
+ public static final int DFS_NAMENODE_EDEKCACHELOADER_INTERVAL_MS_DEFAULT = 1000;
+ public static final String DFS_NAMENODE_EDEKCACHELOADER_INITIAL_DELAY_MS_KEY = "dfs.namenode.edekcacheloader.initial.delay.ms";
+ public static final int DFS_NAMENODE_EDEKCACHELOADER_INITIAL_DELAY_MS_DEFAULT = 3000;

  // Journal-node related configs. These are read on the JN side.
  public static final String DFS_JOURNALNODE_EDITS_DIR_KEY = "dfs.journalnode.edits.dir";
@@ -26,11 +26,13 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.StripedBlockInfo;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockGroupChecksumProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReplaceBlockProto;
@@ -111,6 +113,9 @@ protected final void processOp(Op op) throws IOException {
    case BLOCK_CHECKSUM:
      opBlockChecksum(in);
      break;
+   case BLOCK_GROUP_CHECKSUM:
+     opStripedBlockChecksum(in);
+     break;
    case TRANSFER_BLOCK:
      opTransferBlock(in);
      break;
@@ -290,4 +295,27 @@ private void opBlockChecksum(DataInputStream in) throws IOException {
      if (traceScope != null) traceScope.close();
    }
  }
+
+ /** Receive OP_STRIPED_BLOCK_CHECKSUM. */
+ private void opStripedBlockChecksum(DataInputStream dis) throws IOException {
+   OpBlockGroupChecksumProto proto =
+       OpBlockGroupChecksumProto.parseFrom(vintPrefixed(dis));
+   TraceScope traceScope = continueTraceSpan(proto.getHeader(),
+       proto.getClass().getSimpleName());
+   StripedBlockInfo stripedBlockInfo = new StripedBlockInfo(
+       PBHelperClient.convert(proto.getHeader().getBlock()),
+       PBHelperClient.convert(proto.getDatanodes()),
+       PBHelperClient.convertTokens(proto.getBlockTokensList()),
+       PBHelperClient.convertErasureCodingPolicy(proto.getEcPolicy())
+   );
+
+   try {
+     blockGroupChecksum(stripedBlockInfo,
+         PBHelperClient.convert(proto.getHeader().getToken()));
+   } finally {
+     if (traceScope != null) {
+       traceScope.close();
+     }
+   }
+ }
}
@@ -0,0 +1,250 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under the Apache License,
 * Version 2.0; see http://www.apache.org/licenses/LICENSE-2.0 (standard ASF header).
 */
package org.apache.hadoop.hdfs.server.blockmanagement;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import com.google.common.collect.UnmodifiableIterator;
import com.google.common.collect.Iterables;
import com.google.common.collect.Collections2;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;

import java.io.IOException;

import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.Collection;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;

import com.google.common.base.Predicate;

import org.apache.hadoop.hdfs.util.CombinedHostsFileReader;

/**
 * This class manages datanode configuration using a json file.
 * Please refer to {@link CombinedHostsFileReader} for the json format.
 * <p/>
 * Entries may or may not specify a port. If they don't, we consider
 * them to apply to every DataNode on that host. The code canonicalizes the
 * entries into IP addresses.
 * <p/>
 * The code ignores all entries that the DNS fails to resolve their IP
 * addresses. This is okay because by default the NN rejects the registrations
 * of DNs when it fails to do a forward and reverse lookup. Note that DNS
 * resolutions are only done during the loading time to minimize the latency.
 */
public class CombinedHostFileManager extends HostConfigManager {
  private static final Logger LOG = LoggerFactory.getLogger(
      CombinedHostFileManager.class);
  private Configuration conf;
  private HostProperties hostProperties = new HostProperties();

  static class HostProperties {
    private Multimap<InetAddress, DatanodeAdminProperties> allDNs =
        HashMultimap.create();
    // optimization. If every node in the file isn't in service, it implies
    // any node is allowed to register with nn. This is equivalent to having
    // an empty "include" file.
    private boolean emptyInServiceNodeLists = true;
    synchronized void add(InetAddress addr,
        DatanodeAdminProperties properties) {
      allDNs.put(addr, properties);
      if (properties.getAdminState().equals(
          AdminStates.NORMAL)) {
        emptyInServiceNodeLists = false;
      }
    }

    // If the includes list is empty, act as if everything is in the
    // includes list.
    synchronized boolean isIncluded(final InetSocketAddress address) {
      return emptyInServiceNodeLists || Iterables.any(
          allDNs.get(address.getAddress()),
          new Predicate<DatanodeAdminProperties>() {
            public boolean apply(DatanodeAdminProperties input) {
              return input.getPort() == 0 ||
                  input.getPort() == address.getPort();
            }
          });
    }

    synchronized boolean isExcluded(final InetSocketAddress address) {
      return Iterables.any(allDNs.get(address.getAddress()),
          new Predicate<DatanodeAdminProperties>() {
            public boolean apply(DatanodeAdminProperties input) {
              return input.getAdminState().equals(
                  AdminStates.DECOMMISSIONED) &&
                  (input.getPort() == 0 ||
                  input.getPort() == address.getPort());
            }
          });
    }

    synchronized String getUpgradeDomain(final InetSocketAddress address) {
      Iterable<DatanodeAdminProperties> datanode = Iterables.filter(
          allDNs.get(address.getAddress()),
          new Predicate<DatanodeAdminProperties>() {
            public boolean apply(DatanodeAdminProperties input) {
              return (input.getPort() == 0 ||
                  input.getPort() == address.getPort());
            }
          });
      return datanode.iterator().hasNext() ?
          datanode.iterator().next().getUpgradeDomain() : null;
    }

    Iterable<InetSocketAddress> getIncludes() {
      return new Iterable<InetSocketAddress>() {
        @Override
        public Iterator<InetSocketAddress> iterator() {
          return new HostIterator(allDNs.entries());
        }
      };
    }

    Iterable<InetSocketAddress> getExcludes() {
      return new Iterable<InetSocketAddress>() {
        @Override
        public Iterator<InetSocketAddress> iterator() {
          return new HostIterator(
              Collections2.filter(allDNs.entries(),
                  new Predicate<java.util.Map.Entry<InetAddress,
                      DatanodeAdminProperties>>() {
                    public boolean apply(java.util.Map.Entry<InetAddress,
                        DatanodeAdminProperties> entry) {
                      return entry.getValue().getAdminState().equals(
                          AdminStates.DECOMMISSIONED);
                    }
                  }
              ));
        }
      };
    }

    static class HostIterator extends UnmodifiableIterator<InetSocketAddress> {
      private final Iterator<Map.Entry<InetAddress,
          DatanodeAdminProperties>> it;
      public HostIterator(Collection<java.util.Map.Entry<InetAddress,
          DatanodeAdminProperties>> nodes) {
        this.it = nodes.iterator();
      }
      @Override
      public boolean hasNext() {
        return it.hasNext();
      }

      @Override
      public InetSocketAddress next() {
        Map.Entry<InetAddress, DatanodeAdminProperties> e = it.next();
        return new InetSocketAddress(e.getKey(), e.getValue().getPort());
      }
    }
  }

  @Override
  public Iterable<InetSocketAddress> getIncludes() {
    return hostProperties.getIncludes();
  }

  @Override
  public Iterable<InetSocketAddress> getExcludes() {
    return hostProperties.getExcludes();
  }

  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
  }

  @Override
  public Configuration getConf() {
    return conf;
  }

  @Override
  public void refresh() throws IOException {
    refresh(conf.get(DFSConfigKeys.DFS_HOSTS, ""));
  }
  private void refresh(final String hostsFile) throws IOException {
    HostProperties hostProps = new HostProperties();
    Set<DatanodeAdminProperties> all =
        CombinedHostsFileReader.readFile(hostsFile);
    for(DatanodeAdminProperties properties : all) {
      InetSocketAddress addr = parseEntry(hostsFile,
          properties.getHostName(), properties.getPort());
      if (addr != null) {
        hostProps.add(addr.getAddress(), properties);
      }
    }
    refresh(hostProps);
  }

  @VisibleForTesting
  static InetSocketAddress parseEntry(final String fn, final String hostName,
      final int port) {
    InetSocketAddress addr = new InetSocketAddress(hostName, port);
    if (addr.isUnresolved()) {
      LOG.warn("Failed to resolve {} in {}. ", hostName, fn);
      return null;
    }
    return addr;
  }

  @Override
  public synchronized boolean isIncluded(final DatanodeID dn) {
    return hostProperties.isIncluded(dn.getResolvedAddress());
  }

  @Override
  public synchronized boolean isExcluded(final DatanodeID dn) {
    return isExcluded(dn.getResolvedAddress());
  }

  private boolean isExcluded(final InetSocketAddress address) {
    return hostProperties.isExcluded(address);
  }

  @Override
  public synchronized String getUpgradeDomain(final DatanodeID dn) {
    return hostProperties.getUpgradeDomain(dn.getResolvedAddress());
  }

  /**
   * Set the properties lists by the new instances. The
   * old instance is discarded.
   * @param hostProperties the new properties list
   */
  @VisibleForTesting
  private void refresh(final HostProperties hostProperties) {
    synchronized (this) {
      this.hostProperties = hostProperties;
    }
  }
}
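To actually use the manager above instead of the default include/exclude files, a cluster would point the new provider key at it and dfs.hosts at the JSON file. A hedged sketch, not part of the patch (the file path is illustrative, and refresh() is normally driven by the NameNode rather than called by hand):

    Configuration conf = new HdfsConfiguration();
    conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
        CombinedHostFileManager.class, HostConfigManager.class);
    conf.set(DFSConfigKeys.DFS_HOSTS, "/etc/hadoop/dfs.hosts.json");
    // DatanodeManager (below) instantiates the configured HostConfigManager via
    // ReflectionUtils and calls refresh(), which reads the JSON file through
    // CombinedHostsFileReader.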
@@ -111,7 +111,7 @@ public class DatanodeManager {
  private final int defaultIpcPort;

  /** Read include/exclude files. */
- private final HostFileManager hostFileManager = new HostFileManager();
+ private HostConfigManager hostConfigManager;

  /** The period to wait for datanode heartbeat.*/
  private long heartbeatExpireInterval;
@@ -204,9 +204,11 @@ public class DatanodeManager {
    this.defaultIpcPort = NetUtils.createSocketAddr(
        conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
+   this.hostConfigManager = ReflectionUtils.newInstance(
+       conf.getClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
+           HostFileManager.class, HostConfigManager.class), conf);
    try {
-     this.hostFileManager.refresh(conf.get(DFSConfigKeys.DFS_HOSTS, ""),
-         conf.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, ""));
+     this.hostConfigManager.refresh();
    } catch (IOException e) {
      LOG.error("error reading hosts files: ", e);
    }
@@ -224,7 +226,7 @@ public class DatanodeManager {
    // in the cache; so future calls to resolve will be fast.
    if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
      final ArrayList<String> locations = new ArrayList<>();
-     for (InetSocketAddress addr : hostFileManager.getIncludes()) {
+     for (InetSocketAddress addr : hostConfigManager.getIncludes()) {
        locations.add(addr.getAddress().getHostAddress());
      }
      dnsToSwitchMapping.resolve(locations);
@@ -337,8 +339,8 @@ public DecommissionManager getDecomManager() {
    return decomManager;
  }

- HostFileManager getHostFileManager() {
-   return hostFileManager;
+ public HostConfigManager getHostConfigManager() {
+   return hostConfigManager;
  }

  @VisibleForTesting
@@ -632,6 +634,7 @@ void addDatanode(final DatanodeDescriptor node) {
    networktopology.add(node); // may throw InvalidTopologyException
    host2DatanodeMap.add(node);
    checkIfClusterIsNowMultiRack(node);
+   resolveUpgradeDomain(node);

    if (LOG.isDebugEnabled()) {
      LOG.debug(getClass().getSimpleName() + ".addDatanode: "
@@ -704,7 +707,14 @@ public HashMap<String, Integer> getDatanodesSoftwareVersions() {
      return new HashMap<> (this.datanodesSoftwareVersions);
    }
  }

+ void resolveUpgradeDomain(DatanodeDescriptor node) {
+   String upgradeDomain = hostConfigManager.getUpgradeDomain(node);
+   if (upgradeDomain != null && upgradeDomain.length() > 0) {
+     node.setUpgradeDomain(upgradeDomain);
+   }
+ }
+
  /**
   * Resolve a node's network location. If the DNS to switch mapping fails
   * then this method guarantees default rack location.
@@ -831,7 +841,7 @@ private static void removeDecomNodeFromList(
   */
  void startDecommissioningIfExcluded(DatanodeDescriptor nodeReg) {
    // If the registered node is in exclude list, then decommission it
-   if (getHostFileManager().isExcluded(nodeReg)) {
+   if (getHostConfigManager().isExcluded(nodeReg)) {
      decomManager.startDecommission(nodeReg);
    }
  }
@@ -871,7 +881,7 @@ public void registerDatanode(DatanodeRegistration nodeReg)

    // Checks if the node is not on the hosts list. If it is not, then
    // it will be disallowed from registering.
-   if (!hostFileManager.isIncluded(nodeReg)) {
+   if (!hostConfigManager.isIncluded(nodeReg)) {
      throw new DisallowedDatanodeException(nodeReg);
    }

@@ -939,7 +949,8 @@ nodes with its data cleared (or user can just remove the StorageID
          getNetworkDependenciesWithDefault(nodeS));
    }
    getNetworkTopology().add(nodeS);
+   resolveUpgradeDomain(nodeS);

    // also treat the registration message as a heartbeat
    heartbeatManager.register(nodeS);
    incrementVersionCount(nodeS.getSoftwareVersion());
@@ -971,7 +982,8 @@ nodes with its data cleared (or user can just remove the StorageID
    }
    networktopology.add(nodeDescr);
    nodeDescr.setSoftwareVersion(nodeReg.getSoftwareVersion());
+   resolveUpgradeDomain(nodeDescr);

    // register new datanode
    addDatanode(nodeDescr);
    blockManager.getBlockReportLeaseManager().register(nodeDescr);
@@ -1026,9 +1038,9 @@ private void refreshHostsReader(Configuration conf) throws IOException {
    // Update the file names and refresh internal includes and excludes list.
    if (conf == null) {
      conf = new HdfsConfiguration();
+     this.hostConfigManager.setConf(conf);
    }
-   this.hostFileManager.refresh(conf.get(DFSConfigKeys.DFS_HOSTS, ""),
-       conf.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, ""));
+   this.hostConfigManager.refresh();
  }

  /**
@@ -1044,15 +1056,16 @@ private void refreshDatanodes() {
    }
    for (DatanodeDescriptor node : copy.values()) {
      // Check if not include.
-     if (!hostFileManager.isIncluded(node)) {
+     if (!hostConfigManager.isIncluded(node)) {
        node.setDisallowed(true); // case 2.
      } else {
-       if (hostFileManager.isExcluded(node)) {
+       if (hostConfigManager.isExcluded(node)) {
          decomManager.startDecommission(node); // case 3.
        } else {
          decomManager.stopDecommission(node); // case 4.
        }
      }
+     node.setUpgradeDomain(hostConfigManager.getUpgradeDomain(node));
    }
  }

@@ -1268,9 +1281,9 @@ public List<DatanodeDescriptor> getDatanodeListForReport(
        type == DatanodeReportType.DECOMMISSIONING;

    ArrayList<DatanodeDescriptor> nodes;
-   final HostFileManager.HostSet foundNodes = new HostFileManager.HostSet();
-   final HostFileManager.HostSet includedNodes = hostFileManager.getIncludes();
-   final HostFileManager.HostSet excludedNodes = hostFileManager.getExcludes();
+   final HostSet foundNodes = new HostSet();
+   final Iterable<InetSocketAddress> includedNodes =
+       hostConfigManager.getIncludes();

    synchronized(this) {
      nodes = new ArrayList<>(datanodeMap.size());
@@ -1281,11 +1294,11 @@ public List<DatanodeDescriptor> getDatanodeListForReport(
        if (((listLiveNodes && !isDead) ||
            (listDeadNodes && isDead) ||
            (listDecommissioningNodes && isDecommissioning)) &&
-           hostFileManager.isIncluded(dn)) {
+           hostConfigManager.isIncluded(dn)) {
          nodes.add(dn);
        }

-       foundNodes.add(HostFileManager.resolvedAddressFromDatanodeID(dn));
+       foundNodes.add(dn.getResolvedAddress());
      }
    }
    Collections.sort(nodes);
@@ -1309,7 +1322,7 @@ public List<DatanodeDescriptor> getDatanodeListForReport(
            addr.getPort() == 0 ? defaultXferPort : addr.getPort(),
            defaultInfoPort, defaultInfoSecurePort, defaultIpcPort));
        setDatanodeDead(dn);
-       if (excludedNodes.match(addr)) {
+       if (hostConfigManager.isExcluded(dn)) {
          dn.setDecommissioned();
        }
        nodes.add(dn);
@@ -1318,8 +1331,8 @@ public List<DatanodeDescriptor> getDatanodeListForReport(

    if (LOG.isDebugEnabled()) {
      LOG.debug("getDatanodeListForReport with " +
-         "includedNodes = " + hostFileManager.getIncludes() +
-         ", excludedNodes = " + hostFileManager.getExcludes() +
+         "includedNodes = " + hostConfigManager.getIncludes() +
+         ", excludedNodes = " + hostConfigManager.getExcludes() +
          ", foundNodes = " + foundNodes +
          ", nodes = " + nodes);
    }
@ -0,0 +1,80 @@
|
|||||||
|
/**
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.hdfs.server.blockmanagement;
|
||||||
|
|
||||||
|
import org.apache.hadoop.classification.InterfaceAudience;
|
||||||
|
import org.apache.hadoop.classification.InterfaceStability;
|
||||||
|
import org.apache.hadoop.conf.Configurable;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.net.InetSocketAddress;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This interface abstracts how datanode configuration is managed.
|
||||||
|
*
|
||||||
|
* Each implementation defines its own way to persist the configuration.
|
||||||
|
* For example, it can use one JSON file to store the configs for all
|
||||||
|
* datanodes; or it can use one file to store in-service datanodes and another
|
||||||
|
* file to store decommission-requested datanodes.
|
||||||
|
*
|
||||||
|
* These files control which DataNodes the NameNode expects to see in the
|
||||||
|
* cluster.
|
||||||
|
*/
|
||||||
|
@InterfaceAudience.Private
|
||||||
|
@InterfaceStability.Unstable
|
||||||
|
public abstract class HostConfigManager implements Configurable {
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Return all the datanodes that are allowed to connect to the namenode.
|
||||||
|
* @return Iterable of all datanodes
|
||||||
|
*/
|
||||||
|
public abstract Iterable<InetSocketAddress> getIncludes();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Return all datanodes that should be in decommissioned state.
|
||||||
|
* @return Iterable of those datanodes
|
||||||
|
*/
|
||||||
|
public abstract Iterable<InetSocketAddress> getExcludes();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if a datanode is allowed to connect to the namenode.
|
||||||
|
* @param dn the DatanodeID of the datanode
|
||||||
|
* @return true if dn is allowed to connect to the namenode.
|
||||||
|
*/
|
||||||
|
public abstract boolean isIncluded(DatanodeID dn);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Check if a datanode needs to be decommissioned.
|
||||||
|
* @param dn the DatanodeID of the datanode
|
||||||
|
* @return true if dn needs to be decommissioned.
|
||||||
|
*/
|
||||||
|
public abstract boolean isExcluded(DatanodeID dn);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Reload the configuration.
|
||||||
|
*/
|
||||||
|
public abstract void refresh() throws IOException;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Get the upgrade domain of a datanode.
|
||||||
|
* @param dn the DatanodeID of the datanode
|
||||||
|
* @return the upgrade domain of dn.
|
||||||
|
*/
|
||||||
|
public abstract String getUpgradeDomain(DatanodeID dn);
|
||||||
|
}
|
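For orientation, here is a minimal sketch of what an alternative HostConfigManager implementation could look like. The class below is hypothetical and is not part of this change; it keeps exact host:port entries in plain sets rather than HostFileManager's wildcard-aware HostSet.

package org.apache.hadoop.hdfs.server.blockmanagement;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.DatanodeID;

// Hypothetical example only; not part of this patch.
public class InMemoryHostConfigManager extends HostConfigManager {
  private Configuration conf;
  // Exact host:port entries; no port-0 wildcard handling in this sketch.
  private final Set<InetSocketAddress> includes = new HashSet<>();
  private final Set<InetSocketAddress> excludes = new HashSet<>();

  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
  }

  @Override
  public Configuration getConf() {
    return conf;
  }

  @Override
  public Iterable<InetSocketAddress> getIncludes() {
    return includes;
  }

  @Override
  public Iterable<InetSocketAddress> getExcludes() {
    return excludes;
  }

  @Override
  public boolean isIncluded(DatanodeID dn) {
    // An empty include list admits every datanode, matching HostFileManager.
    return includes.isEmpty() || includes.contains(address(dn));
  }

  @Override
  public boolean isExcluded(DatanodeID dn) {
    return excludes.contains(address(dn));
  }

  @Override
  public void refresh() throws IOException {
    // A real implementation would reload its backing store here.
  }

  @Override
  public String getUpgradeDomain(DatanodeID dn) {
    return null; // upgrade domains are not tracked in this sketch
  }

  private static InetSocketAddress address(DatanodeID dn) {
    return new InetSocketAddress(dn.getIpAddr(), dn.getXferPort());
  }
}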
@ -18,28 +18,18 @@
|
|||||||
package org.apache.hadoop.hdfs.server.blockmanagement;
|
package org.apache.hadoop.hdfs.server.blockmanagement;
|
||||||
|
|
||||||
import com.google.common.annotations.VisibleForTesting;
|
import com.google.common.annotations.VisibleForTesting;
|
||||||
import com.google.common.base.Function;
|
|
||||||
import com.google.common.base.Joiner;
|
|
||||||
import com.google.common.base.Preconditions;
|
|
||||||
import com.google.common.collect.HashMultimap;
|
|
||||||
import com.google.common.collect.Iterators;
|
|
||||||
import com.google.common.collect.Multimap;
|
|
||||||
import com.google.common.collect.UnmodifiableIterator;
|
|
||||||
import org.apache.commons.logging.Log;
|
import org.apache.commons.logging.Log;
|
||||||
import org.apache.commons.logging.LogFactory;
|
import org.apache.commons.logging.LogFactory;
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||||
import org.apache.hadoop.util.HostsFileReader;
|
import org.apache.hadoop.util.HostsFileReader;
|
||||||
|
|
||||||
import javax.annotation.Nullable;
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.net.InetAddress;
|
|
||||||
import java.net.InetSocketAddress;
|
import java.net.InetSocketAddress;
|
||||||
import java.net.URI;
|
import java.net.URI;
|
||||||
import java.net.URISyntaxException;
|
import java.net.URISyntaxException;
|
||||||
import java.util.Collection;
|
|
||||||
import java.util.HashSet;
|
import java.util.HashSet;
|
||||||
import java.util.Iterator;
|
|
||||||
import java.util.Map;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* This class manages the include and exclude files for HDFS.
|
* This class manages the include and exclude files for HDFS.
|
||||||
@ -59,11 +49,27 @@
|
|||||||
* of DNs when it fails to do a forward and reverse lookup. Note that DNS
|
* of DNs when it fails to do a forward and reverse lookup. Note that DNS
|
||||||
* resolutions are only done during the loading time to minimize the latency.
|
* resolutions are only done during the loading time to minimize the latency.
|
||||||
*/
|
*/
|
||||||
class HostFileManager {
|
public class HostFileManager extends HostConfigManager {
|
||||||
private static final Log LOG = LogFactory.getLog(HostFileManager.class);
|
private static final Log LOG = LogFactory.getLog(HostFileManager.class);
|
||||||
|
private Configuration conf;
|
||||||
private HostSet includes = new HostSet();
|
private HostSet includes = new HostSet();
|
||||||
private HostSet excludes = new HostSet();
|
private HostSet excludes = new HostSet();
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void setConf(Configuration conf) {
|
||||||
|
this.conf = conf;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Configuration getConf() {
|
||||||
|
return conf;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void refresh() throws IOException {
|
||||||
|
refresh(conf.get(DFSConfigKeys.DFS_HOSTS, ""),
|
||||||
|
conf.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, ""));
|
||||||
|
}
|
||||||
private static HostSet readFile(String type, String filename)
|
private static HostSet readFile(String type, String filename)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
HostSet res = new HostSet();
|
HostSet res = new HostSet();
|
||||||
@ -99,31 +105,37 @@ static InetSocketAddress parseEntry(String type, String fn, String line) {
|
|||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
static InetSocketAddress resolvedAddressFromDatanodeID(DatanodeID id) {
|
@Override
|
||||||
return new InetSocketAddress(id.getIpAddr(), id.getXferPort());
|
public synchronized HostSet getIncludes() {
|
||||||
}
|
|
||||||
|
|
||||||
synchronized HostSet getIncludes() {
|
|
||||||
return includes;
|
return includes;
|
||||||
}
|
}
|
||||||
|
|
||||||
synchronized HostSet getExcludes() {
|
@Override
|
||||||
|
public synchronized HostSet getExcludes() {
|
||||||
return excludes;
|
return excludes;
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the includes list is empty, act as if everything is in the
|
// If the includes list is empty, act as if everything is in the
|
||||||
// includes list.
|
// includes list.
|
||||||
synchronized boolean isIncluded(DatanodeID dn) {
|
@Override
|
||||||
return includes.isEmpty() || includes.match
|
public synchronized boolean isIncluded(DatanodeID dn) {
|
||||||
(resolvedAddressFromDatanodeID(dn));
|
return includes.isEmpty() || includes.match(dn.getResolvedAddress());
|
||||||
}
|
}
|
||||||
|
|
||||||
synchronized boolean isExcluded(DatanodeID dn) {
|
@Override
|
||||||
return excludes.match(resolvedAddressFromDatanodeID(dn));
|
public synchronized boolean isExcluded(DatanodeID dn) {
|
||||||
|
return isExcluded(dn.getResolvedAddress());
|
||||||
}
|
}
|
||||||
|
|
||||||
synchronized boolean hasIncludes() {
|
private boolean isExcluded(InetSocketAddress address) {
|
||||||
return !includes.isEmpty();
|
return excludes.match(address);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public synchronized String getUpgradeDomain(final DatanodeID dn) {
|
||||||
|
// The include/exclude files based config doesn't support upgrade domain
|
||||||
|
// config.
|
||||||
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -133,7 +145,8 @@ synchronized boolean hasIncludes() {
|
|||||||
* @param excludeFile the path to the new excludes list
|
* @param excludeFile the path to the new excludes list
|
||||||
* @throws IOException thrown if there is a problem reading one of the files
|
* @throws IOException thrown if there is a problem reading one of the files
|
||||||
*/
|
*/
|
||||||
void refresh(String includeFile, String excludeFile) throws IOException {
|
private void refresh(String includeFile, String excludeFile)
|
||||||
|
throws IOException {
|
||||||
HostSet newIncludes = readFile("included", includeFile);
|
HostSet newIncludes = readFile("included", includeFile);
|
||||||
HostSet newExcludes = readFile("excluded", excludeFile);
|
HostSet newExcludes = readFile("excluded", excludeFile);
|
||||||
|
|
||||||
@ -153,84 +166,4 @@ void refresh(HostSet newIncludes, HostSet newExcludes) {
|
|||||||
excludes = newExcludes;
|
excludes = newExcludes;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* The HostSet allows efficient queries on matching wildcard addresses.
|
|
||||||
* <p/>
|
|
||||||
* For InetSocketAddress A and B with the same host address,
|
|
||||||
* we define a partial order between A and B, A <= B iff A.getPort() == B
|
|
||||||
* .getPort() || B.getPort() == 0.
|
|
||||||
*/
|
|
||||||
static class HostSet implements Iterable<InetSocketAddress> {
|
|
||||||
// Host -> lists of ports
|
|
||||||
private final Multimap<InetAddress, Integer> addrs = HashMultimap.create();
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The function that checks whether there exists an entry foo in the set
|
|
||||||
* so that foo <= addr.
|
|
||||||
*/
|
|
||||||
boolean matchedBy(InetSocketAddress addr) {
|
|
||||||
Collection<Integer> ports = addrs.get(addr.getAddress());
|
|
||||||
return addr.getPort() == 0 ? !ports.isEmpty() : ports.contains(addr
|
|
||||||
.getPort());
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* The function that checks whether there exists an entry foo in the set
|
|
||||||
* so that addr <= foo.
|
|
||||||
*/
|
|
||||||
boolean match(InetSocketAddress addr) {
|
|
||||||
int port = addr.getPort();
|
|
||||||
Collection<Integer> ports = addrs.get(addr.getAddress());
|
|
||||||
boolean exactMatch = ports.contains(port);
|
|
||||||
boolean genericMatch = ports.contains(0);
|
|
||||||
return exactMatch || genericMatch;
|
|
||||||
}
|
|
||||||
|
|
||||||
boolean isEmpty() {
|
|
||||||
return addrs.isEmpty();
|
|
||||||
}
|
|
||||||
|
|
||||||
int size() {
|
|
||||||
return addrs.size();
|
|
||||||
}
|
|
||||||
|
|
||||||
void add(InetSocketAddress addr) {
|
|
||||||
Preconditions.checkArgument(!addr.isUnresolved());
|
|
||||||
addrs.put(addr.getAddress(), addr.getPort());
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public Iterator<InetSocketAddress> iterator() {
|
|
||||||
return new UnmodifiableIterator<InetSocketAddress>() {
|
|
||||||
private final Iterator<Map.Entry<InetAddress,
|
|
||||||
Integer>> it = addrs.entries().iterator();
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public boolean hasNext() {
|
|
||||||
return it.hasNext();
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public InetSocketAddress next() {
|
|
||||||
Map.Entry<InetAddress, Integer> e = it.next();
|
|
||||||
return new InetSocketAddress(e.getKey(), e.getValue());
|
|
||||||
}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
@Override
|
|
||||||
public String toString() {
|
|
||||||
StringBuilder sb = new StringBuilder("HostSet(");
|
|
||||||
Joiner.on(",").appendTo(sb, Iterators.transform(iterator(),
|
|
||||||
new Function<InetSocketAddress, String>() {
|
|
||||||
@Override
|
|
||||||
public String apply(@Nullable InetSocketAddress addr) {
|
|
||||||
assert addr != null;
|
|
||||||
return addr.getAddress().getHostAddress() + ":" + addr.getPort();
|
|
||||||
}
|
|
||||||
}));
|
|
||||||
return sb.append(")").toString();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
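As a usage illustration of the refreshed HostFileManager (not part of the patch), the sketch below drives it directly through the dfs.hosts / dfs.hosts.exclude keys that refresh() reads above. The file paths are placeholders for an operator's include/exclude lists.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.blockmanagement.HostFileManager;

// Illustrative only; the paths below are placeholders.
public class HostFileManagerDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_HOSTS, "/etc/hadoop/dfs.include");
    conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, "/etc/hadoop/dfs.exclude");

    HostFileManager manager = new HostFileManager();
    manager.setConf(conf);
    manager.refresh();  // reads both files; hostnames are resolved once, here

    System.out.println("includes = " + manager.getIncludes());
    System.out.println("excludes = " + manager.getExcludes());
  }
}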
@ -0,0 +1,114 @@
|
|||||||
|
/**
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.hdfs.server.blockmanagement;
|
||||||
|
|
||||||
|
import com.google.common.base.Function;
|
||||||
|
import com.google.common.base.Joiner;
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import com.google.common.collect.HashMultimap;
|
||||||
|
import com.google.common.collect.Iterators;
|
||||||
|
import com.google.common.collect.Multimap;
|
||||||
|
import com.google.common.collect.UnmodifiableIterator;
|
||||||
|
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
import java.net.InetAddress;
|
||||||
|
import java.net.InetSocketAddress;
|
||||||
|
import java.util.Collection;
|
||||||
|
import java.util.Iterator;
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The HostSet allows efficient queries on matching wildcard addresses.
|
||||||
|
* <p/>
|
||||||
|
* For InetSocketAddress A and B with the same host address,
|
||||||
|
* we define a partial order between A and B, A <= B iff A.getPort() == B
|
||||||
|
* .getPort() || B.getPort() == 0.
|
||||||
|
*/
|
||||||
|
public class HostSet implements Iterable<InetSocketAddress> {
|
||||||
|
// Host -> lists of ports
|
||||||
|
private final Multimap<InetAddress, Integer> addrs = HashMultimap.create();
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function that checks whether there exists an entry foo in the set
|
||||||
|
* so that foo <= addr.
|
||||||
|
*/
|
||||||
|
boolean matchedBy(InetSocketAddress addr) {
|
||||||
|
Collection<Integer> ports = addrs.get(addr.getAddress());
|
||||||
|
return addr.getPort() == 0 ? !ports.isEmpty() : ports.contains(addr
|
||||||
|
.getPort());
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The function that checks whether there exists an entry foo in the set
|
||||||
|
* so that addr <= foo.
|
||||||
|
*/
|
||||||
|
boolean match(InetSocketAddress addr) {
|
||||||
|
int port = addr.getPort();
|
||||||
|
Collection<Integer> ports = addrs.get(addr.getAddress());
|
||||||
|
boolean exactMatch = ports.contains(port);
|
||||||
|
boolean genericMatch = ports.contains(0);
|
||||||
|
return exactMatch || genericMatch;
|
||||||
|
}
|
||||||
|
|
||||||
|
boolean isEmpty() {
|
||||||
|
return addrs.isEmpty();
|
||||||
|
}
|
||||||
|
|
||||||
|
int size() {
|
||||||
|
return addrs.size();
|
||||||
|
}
|
||||||
|
|
||||||
|
void add(InetSocketAddress addr) {
|
||||||
|
Preconditions.checkArgument(!addr.isUnresolved());
|
||||||
|
addrs.put(addr.getAddress(), addr.getPort());
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public Iterator<InetSocketAddress> iterator() {
|
||||||
|
return new UnmodifiableIterator<InetSocketAddress>() {
|
||||||
|
private final Iterator<Map.Entry<InetAddress,
|
||||||
|
Integer>> it = addrs.entries().iterator();
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public boolean hasNext() {
|
||||||
|
return it.hasNext();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public InetSocketAddress next() {
|
||||||
|
Map.Entry<InetAddress, Integer> e = it.next();
|
||||||
|
return new InetSocketAddress(e.getKey(), e.getValue());
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
StringBuilder sb = new StringBuilder("HostSet(");
|
||||||
|
Joiner.on(",").appendTo(sb, Iterators.transform(iterator(),
|
||||||
|
new Function<InetSocketAddress, String>() {
|
||||||
|
@Override
|
||||||
|
public String apply(@Nullable InetSocketAddress addr) {
|
||||||
|
assert addr != null;
|
||||||
|
return addr.getAddress().getHostAddress() + ":" + addr.getPort();
|
||||||
|
}
|
||||||
|
}));
|
||||||
|
return sb.append(")").toString();
|
||||||
|
}
|
||||||
|
}
|
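To make the port-0 wildcard semantics of the partial order concrete, here is a small illustrative example (not from the source) of how match() behaves once entries are added. The addresses are arbitrary documentation addresses, and the demo sits in the same package because add() and match() are package-private.

package org.apache.hadoop.hdfs.server.blockmanagement;

import java.net.InetSocketAddress;

// Illustrative only: port 0 acts as a wildcard for every port on that host.
public class HostSetDemo {
  public static void main(String[] args) {
    HostSet set = new HostSet();
    set.add(new InetSocketAddress("192.0.2.10", 0));      // wildcard entry
    set.add(new InetSocketAddress("192.0.2.11", 50010));  // exact entry

    // Any port on 192.0.2.10 satisfies the wildcard entry: prints true.
    System.out.println(set.match(new InetSocketAddress("192.0.2.10", 50010)));
    // Only port 50010 matches for 192.0.2.11: prints false.
    System.out.println(set.match(new InetSocketAddress("192.0.2.11", 9866)));
  }
}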
@ -19,16 +19,30 @@
|
|||||||
|
|
||||||
import com.google.common.base.Preconditions;
|
import com.google.common.base.Preconditions;
|
||||||
import org.apache.hadoop.hdfs.DFSUtilClient;
|
import org.apache.hadoop.hdfs.DFSUtilClient;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
|
||||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.StripedBlockInfo;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos;
|
||||||
|
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
|
||||||
|
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||||
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
|
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
|
||||||
|
import org.apache.hadoop.hdfs.util.StripedBlockUtil;
|
||||||
|
import org.apache.hadoop.io.DataOutputBuffer;
|
||||||
import org.apache.hadoop.io.IOUtils;
|
import org.apache.hadoop.io.IOUtils;
|
||||||
import org.apache.hadoop.io.MD5Hash;
|
import org.apache.hadoop.io.MD5Hash;
|
||||||
|
import org.apache.hadoop.security.token.Token;
|
||||||
import org.apache.hadoop.util.DataChecksum;
|
import org.apache.hadoop.util.DataChecksum;
|
||||||
import org.slf4j.Logger;
|
import org.slf4j.Logger;
|
||||||
import org.slf4j.LoggerFactory;
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
import java.io.BufferedInputStream;
|
import java.io.BufferedInputStream;
|
||||||
import java.io.DataInputStream;
|
import java.io.DataInputStream;
|
||||||
|
import java.io.DataOutputStream;
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.io.InputStream;
|
import java.io.InputStream;
|
||||||
import java.security.MessageDigest;
|
import java.security.MessageDigest;
|
||||||
@ -41,13 +55,87 @@ final class BlockChecksumHelper {
|
|||||||
|
|
||||||
static final Logger LOG = LoggerFactory.getLogger(BlockChecksumHelper.class);
|
static final Logger LOG = LoggerFactory.getLogger(BlockChecksumHelper.class);
|
||||||
|
|
||||||
private BlockChecksumHelper() {}
|
private BlockChecksumHelper() {
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* The abstract base block checksum computer.
|
* The abstract base block checksum computer.
|
||||||
*/
|
*/
|
||||||
static abstract class BlockChecksumComputer {
|
static abstract class AbstractBlockChecksumComputer {
|
||||||
private final DataNode datanode;
|
private final DataNode datanode;
|
||||||
|
|
||||||
|
private byte[] outBytes;
|
||||||
|
private int bytesPerCRC = -1;
|
||||||
|
private DataChecksum.Type crcType = null;
|
||||||
|
private long crcPerBlock = -1;
|
||||||
|
private int checksumSize = -1;
|
||||||
|
|
||||||
|
AbstractBlockChecksumComputer(DataNode datanode) throws IOException {
|
||||||
|
this.datanode = datanode;
|
||||||
|
}
|
||||||
|
|
||||||
|
abstract void compute() throws IOException;
|
||||||
|
|
||||||
|
Sender createSender(IOStreamPair pair) {
|
||||||
|
DataOutputStream out = (DataOutputStream) pair.out;
|
||||||
|
return new Sender(out);
|
||||||
|
}
|
||||||
|
|
||||||
|
DataNode getDatanode() {
|
||||||
|
return datanode;
|
||||||
|
}
|
||||||
|
|
||||||
|
InputStream getBlockInputStream(ExtendedBlock block, long seekOffset)
|
||||||
|
throws IOException {
|
||||||
|
return datanode.data.getBlockInputStream(block, seekOffset);
|
||||||
|
}
|
||||||
|
|
||||||
|
void setOutBytes(byte[] bytes) {
|
||||||
|
this.outBytes = bytes;
|
||||||
|
}
|
||||||
|
|
||||||
|
byte[] getOutBytes() {
|
||||||
|
return outBytes;
|
||||||
|
}
|
||||||
|
|
||||||
|
int getBytesPerCRC() {
|
||||||
|
return bytesPerCRC;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setBytesPerCRC(int bytesPerCRC) {
|
||||||
|
this.bytesPerCRC = bytesPerCRC;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setCrcType(DataChecksum.Type crcType) {
|
||||||
|
this.crcType = crcType;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setCrcPerBlock(long crcPerBlock) {
|
||||||
|
this.crcPerBlock = crcPerBlock;
|
||||||
|
}
|
||||||
|
|
||||||
|
public void setChecksumSize(int checksumSize) {
|
||||||
|
this.checksumSize = checksumSize;
|
||||||
|
}
|
||||||
|
|
||||||
|
DataChecksum.Type getCrcType() {
|
||||||
|
return crcType;
|
||||||
|
}
|
||||||
|
|
||||||
|
long getCrcPerBlock() {
|
||||||
|
return crcPerBlock;
|
||||||
|
}
|
||||||
|
|
||||||
|
int getChecksumSize() {
|
||||||
|
return checksumSize;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* The abstract base block checksum computer.
|
||||||
|
*/
|
||||||
|
static abstract class BlockChecksumComputer
|
||||||
|
extends AbstractBlockChecksumComputer {
|
||||||
private final ExtendedBlock block;
|
private final ExtendedBlock block;
|
||||||
// client side now can specify a range of the block for checksum
|
// client side now can specify a range of the block for checksum
|
||||||
private final long requestLength;
|
private final long requestLength;
|
||||||
@ -56,17 +144,12 @@ static abstract class BlockChecksumComputer {
|
|||||||
private final long visibleLength;
|
private final long visibleLength;
|
||||||
private final boolean partialBlk;
|
private final boolean partialBlk;
|
||||||
|
|
||||||
private byte[] outBytes;
|
|
||||||
private int bytesPerCRC = -1;
|
|
||||||
private DataChecksum.Type crcType = null;
|
|
||||||
private long crcPerBlock = -1;
|
|
||||||
private int checksumSize = -1;
|
|
||||||
private BlockMetadataHeader header;
|
private BlockMetadataHeader header;
|
||||||
private DataChecksum checksum;
|
private DataChecksum checksum;
|
||||||
|
|
||||||
BlockChecksumComputer(DataNode datanode,
|
BlockChecksumComputer(DataNode datanode,
|
||||||
ExtendedBlock block) throws IOException {
|
ExtendedBlock block) throws IOException {
|
||||||
this.datanode = datanode;
|
super(datanode);
|
||||||
this.block = block;
|
this.block = block;
|
||||||
this.requestLength = block.getNumBytes();
|
this.requestLength = block.getNumBytes();
|
||||||
Preconditions.checkArgument(requestLength >= 0);
|
Preconditions.checkArgument(requestLength >= 0);
|
||||||
@ -81,98 +164,80 @@ static abstract class BlockChecksumComputer {
|
|||||||
new BufferedInputStream(metadataIn, ioFileBufferSize));
|
new BufferedInputStream(metadataIn, ioFileBufferSize));
|
||||||
}
|
}
|
||||||
|
|
||||||
protected DataNode getDatanode() {
|
Sender createSender(IOStreamPair pair) {
|
||||||
return datanode;
|
DataOutputStream out = (DataOutputStream) pair.out;
|
||||||
|
return new Sender(out);
|
||||||
}
|
}
|
||||||
|
|
||||||
protected ExtendedBlock getBlock() {
|
|
||||||
|
ExtendedBlock getBlock() {
|
||||||
return block;
|
return block;
|
||||||
}
|
}
|
||||||
|
|
||||||
protected long getRequestLength() {
|
long getRequestLength() {
|
||||||
return requestLength;
|
return requestLength;
|
||||||
}
|
}
|
||||||
|
|
||||||
protected LengthInputStream getMetadataIn() {
|
LengthInputStream getMetadataIn() {
|
||||||
return metadataIn;
|
return metadataIn;
|
||||||
}
|
}
|
||||||
|
|
||||||
protected DataInputStream getChecksumIn() {
|
DataInputStream getChecksumIn() {
|
||||||
return checksumIn;
|
return checksumIn;
|
||||||
}
|
}
|
||||||
|
|
||||||
protected long getVisibleLength() {
|
long getVisibleLength() {
|
||||||
return visibleLength;
|
return visibleLength;
|
||||||
}
|
}
|
||||||
|
|
||||||
protected boolean isPartialBlk() {
|
boolean isPartialBlk() {
|
||||||
return partialBlk;
|
return partialBlk;
|
||||||
}
|
}
|
||||||
|
|
||||||
protected void setOutBytes(byte[] bytes) {
|
BlockMetadataHeader getHeader() {
|
||||||
this.outBytes = bytes;
|
|
||||||
}
|
|
||||||
|
|
||||||
protected byte[] getOutBytes() {
|
|
||||||
return outBytes;
|
|
||||||
}
|
|
||||||
|
|
||||||
protected int getBytesPerCRC() {
|
|
||||||
return bytesPerCRC;
|
|
||||||
}
|
|
||||||
|
|
||||||
protected DataChecksum.Type getCrcType() {
|
|
||||||
return crcType;
|
|
||||||
}
|
|
||||||
|
|
||||||
protected long getCrcPerBlock() {
|
|
||||||
return crcPerBlock;
|
|
||||||
}
|
|
||||||
|
|
||||||
protected int getChecksumSize() {
|
|
||||||
return checksumSize;
|
|
||||||
}
|
|
||||||
|
|
||||||
protected BlockMetadataHeader getHeader() {
|
|
||||||
return header;
|
return header;
|
||||||
}
|
}
|
||||||
|
|
||||||
protected DataChecksum getChecksum() {
|
DataChecksum getChecksum() {
|
||||||
return checksum;
|
return checksum;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Perform the block checksum computing.
|
* Perform the block checksum computing.
|
||||||
|
*
|
||||||
* @throws IOException
|
* @throws IOException
|
||||||
*/
|
*/
|
||||||
abstract void compute() throws IOException;
|
abstract void compute() throws IOException;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Read block metadata header.
|
* Read block metadata header.
|
||||||
|
*
|
||||||
* @throws IOException
|
* @throws IOException
|
||||||
*/
|
*/
|
||||||
protected void readHeader() throws IOException {
|
void readHeader() throws IOException {
|
||||||
//read metadata file
|
//read metadata file
|
||||||
header = BlockMetadataHeader.readHeader(checksumIn);
|
header = BlockMetadataHeader.readHeader(checksumIn);
|
||||||
checksum = header.getChecksum();
|
checksum = header.getChecksum();
|
||||||
checksumSize = checksum.getChecksumSize();
|
setChecksumSize(checksum.getChecksumSize());
|
||||||
bytesPerCRC = checksum.getBytesPerChecksum();
|
setBytesPerCRC(checksum.getBytesPerChecksum());
|
||||||
crcPerBlock = checksumSize <= 0 ? 0 :
|
long crcPerBlock = checksum.getChecksumSize() <= 0 ? 0 :
|
||||||
(metadataIn.getLength() -
|
(metadataIn.getLength() -
|
||||||
BlockMetadataHeader.getHeaderSize()) / checksumSize;
|
BlockMetadataHeader.getHeaderSize()) / checksum.getChecksumSize();
|
||||||
crcType = checksum.getChecksumType();
|
setCrcPerBlock(crcPerBlock);
|
||||||
|
setCrcType(checksum.getChecksumType());
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Calculate partial block checksum.
|
* Calculate partial block checksum.
|
||||||
|
*
|
||||||
* @return
|
* @return
|
||||||
* @throws IOException
|
* @throws IOException
|
||||||
*/
|
*/
|
||||||
protected byte[] crcPartialBlock() throws IOException {
|
byte[] crcPartialBlock() throws IOException {
|
||||||
int partialLength = (int) (requestLength % bytesPerCRC);
|
int partialLength = (int) (requestLength % getBytesPerCRC());
|
||||||
if (partialLength > 0) {
|
if (partialLength > 0) {
|
||||||
byte[] buf = new byte[partialLength];
|
byte[] buf = new byte[partialLength];
|
||||||
final InputStream blockIn = datanode.data.getBlockInputStream(block,
|
final InputStream blockIn = getBlockInputStream(block,
|
||||||
requestLength - partialLength);
|
requestLength - partialLength);
|
||||||
try {
|
try {
|
||||||
// Get the CRC of the partialLength.
|
// Get the CRC of the partialLength.
|
||||||
@ -181,7 +246,7 @@ protected byte[] crcPartialBlock() throws IOException {
|
|||||||
IOUtils.closeStream(blockIn);
|
IOUtils.closeStream(blockIn);
|
||||||
}
|
}
|
||||||
checksum.update(buf, 0, partialLength);
|
checksum.update(buf, 0, partialLength);
|
||||||
byte[] partialCrc = new byte[checksumSize];
|
byte[] partialCrc = new byte[getChecksumSize()];
|
||||||
checksum.writeValue(partialCrc, 0, true);
|
checksum.writeValue(partialCrc, 0, true);
|
||||||
return partialCrc;
|
return partialCrc;
|
||||||
}
|
}
|
||||||
@ -229,7 +294,7 @@ private MD5Hash checksumWholeBlock() throws IOException {
|
|||||||
}
|
}
|
||||||
|
|
||||||
private MD5Hash checksumPartialBlock() throws IOException {
|
private MD5Hash checksumPartialBlock() throws IOException {
|
||||||
byte[] buffer = new byte[4*1024];
|
byte[] buffer = new byte[4 * 1024];
|
||||||
MessageDigest digester = MD5Hash.getDigester();
|
MessageDigest digester = MD5Hash.getDigester();
|
||||||
|
|
||||||
long remaining = (getRequestLength() / getBytesPerCRC())
|
long remaining = (getRequestLength() / getBytesPerCRC())
|
||||||
@ -251,4 +316,115 @@ private MD5Hash checksumPartialBlock() throws IOException {
|
|||||||
return new MD5Hash(digester.digest());
|
return new MD5Hash(digester.digest());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
/**
|
||||||
|
* Non-striped block group checksum computer for striped blocks.
|
||||||
|
*/
|
||||||
|
static class BlockGroupNonStripedChecksumComputer
|
||||||
|
extends AbstractBlockChecksumComputer {
|
||||||
|
|
||||||
|
private final ExtendedBlock blockGroup;
|
||||||
|
private final ErasureCodingPolicy ecPolicy;
|
||||||
|
private final DatanodeInfo[] datanodes;
|
||||||
|
private final Token<BlockTokenIdentifier>[] blockTokens;
|
||||||
|
|
||||||
|
private final DataOutputBuffer md5writer = new DataOutputBuffer();
|
||||||
|
|
||||||
|
BlockGroupNonStripedChecksumComputer(DataNode datanode,
|
||||||
|
StripedBlockInfo stripedBlockInfo)
|
||||||
|
throws IOException {
|
||||||
|
super(datanode);
|
||||||
|
this.blockGroup = stripedBlockInfo.getBlock();
|
||||||
|
this.ecPolicy = stripedBlockInfo.getErasureCodingPolicy();
|
||||||
|
this.datanodes = stripedBlockInfo.getDatanodes();
|
||||||
|
this.blockTokens = stripedBlockInfo.getBlockTokens();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
void compute() throws IOException {
|
||||||
|
for (int idx = 0; idx < ecPolicy.getNumDataUnits(); idx++) {
|
||||||
|
ExtendedBlock block =
|
||||||
|
StripedBlockUtil.constructInternalBlock(blockGroup,
|
||||||
|
ecPolicy.getCellSize(), ecPolicy.getNumDataUnits(), idx);
|
||||||
|
DatanodeInfo targetDatanode = datanodes[idx];
|
||||||
|
Token<BlockTokenIdentifier> blockToken = blockTokens[idx];
|
||||||
|
checksumBlock(block, idx, blockToken, targetDatanode);
|
||||||
|
}
|
||||||
|
|
||||||
|
MD5Hash md5out = MD5Hash.digest(md5writer.getData());
|
||||||
|
setOutBytes(md5out.getDigest());
|
||||||
|
}
|
||||||
|
|
||||||
|
private void checksumBlock(ExtendedBlock block, int blockIdx,
|
||||||
|
Token<BlockTokenIdentifier> blockToken,
|
||||||
|
DatanodeInfo targetDatanode) throws IOException {
|
||||||
|
int timeout = 3000;
|
||||||
|
try (IOStreamPair pair = getDatanode().connectToDN(targetDatanode,
|
||||||
|
timeout, block, blockToken)) {
|
||||||
|
|
||||||
|
LOG.debug("write to {}: {}, block={}",
|
||||||
|
getDatanode(), Op.BLOCK_CHECKSUM, block);
|
||||||
|
|
||||||
|
// get block MD5
|
||||||
|
createSender(pair).blockChecksum(block, blockToken);
|
||||||
|
|
||||||
|
final DataTransferProtos.BlockOpResponseProto reply =
|
||||||
|
DataTransferProtos.BlockOpResponseProto.parseFrom(
|
||||||
|
PBHelperClient.vintPrefixed(pair.in));
|
||||||
|
|
||||||
|
String logInfo = "for block " + block
|
||||||
|
+ " from datanode " + targetDatanode;
|
||||||
|
DataTransferProtoUtil.checkBlockOpStatus(reply, logInfo);
|
||||||
|
|
||||||
|
DataTransferProtos.OpBlockChecksumResponseProto checksumData =
|
||||||
|
reply.getChecksumResponse();
|
||||||
|
|
||||||
|
//read byte-per-checksum
|
||||||
|
final int bpc = checksumData.getBytesPerCrc();
|
||||||
|
if (blockIdx == 0) { //first block
|
||||||
|
setBytesPerCRC(bpc);
|
||||||
|
} else if (bpc != getBytesPerCRC()) {
|
||||||
|
throw new IOException("Byte-per-checksum not matched: bpc=" + bpc
|
||||||
|
+ " but bytesPerCRC=" + getBytesPerCRC());
|
||||||
|
}
|
||||||
|
|
||||||
|
//read crc-per-block
|
||||||
|
final long cpb = checksumData.getCrcPerBlock();
|
||||||
|
if (blockIdx == 0) {
|
||||||
|
setCrcPerBlock(cpb);
|
||||||
|
}
|
||||||
|
|
||||||
|
//read md5
|
||||||
|
final MD5Hash md5 = new MD5Hash(
|
||||||
|
checksumData.getMd5().toByteArray());
|
||||||
|
md5.write(md5writer);
|
||||||
|
|
||||||
|
// read crc-type
|
||||||
|
final DataChecksum.Type ct;
|
||||||
|
if (checksumData.hasCrcType()) {
|
||||||
|
ct = PBHelperClient.convert(checksumData.getCrcType());
|
||||||
|
} else {
|
||||||
|
LOG.debug("Retrieving checksum from an earlier-version DataNode: " +
|
||||||
|
"inferring checksum by reading first byte");
|
||||||
|
ct = DataChecksum.Type.DEFAULT;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (blockIdx == 0) { // first block
|
||||||
|
setCrcType(ct);
|
||||||
|
} else if (getCrcType() != DataChecksum.Type.MIXED &&
|
||||||
|
getCrcType() != ct) {
|
||||||
|
// if crc types are mixed in a file
|
||||||
|
setCrcType(DataChecksum.Type.MIXED);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (LOG.isDebugEnabled()) {
|
||||||
|
if (blockIdx == 0) {
|
||||||
|
LOG.debug("set bytesPerCRC=" + getBytesPerCRC()
|
||||||
|
+ ", crcPerBlock=" + getCrcPerBlock());
|
||||||
|
}
|
||||||
|
LOG.debug("got reply from " + targetDatanode + ": md5=" + md5);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@ -1157,7 +1157,7 @@ public String toString() {
|
|||||||
|
|
||||||
final StringBuilder b = new StringBuilder(getClass().getSimpleName())
|
final StringBuilder b = new StringBuilder(getClass().getSimpleName())
|
||||||
.append(": ").append(block).append(", type=").append(type);
|
.append(": ").append(block).append(", type=").append(type);
|
||||||
if (type != PacketResponderType.HAS_DOWNSTREAM_IN_PIPELINE) {
|
if (type == PacketResponderType.HAS_DOWNSTREAM_IN_PIPELINE) {
|
||||||
b.append(", downstreams=").append(downstreams.length)
|
b.append(", downstreams=").append(downstreams.length)
|
||||||
.append(":").append(Arrays.asList(downstreams));
|
.append(":").append(Arrays.asList(downstreams));
|
||||||
}
|
}
|
||||||
|
@ -28,6 +28,7 @@
|
|||||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||||
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.StripedBlockInfo;
|
||||||
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
|
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
|
||||||
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
|
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
|
||||||
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
|
import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
|
||||||
@ -46,7 +47,9 @@
|
|||||||
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
|
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
|
||||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||||
import org.apache.hadoop.hdfs.server.datanode.BlockChecksumHelper.BlockChecksumComputer;
|
import org.apache.hadoop.hdfs.server.datanode.BlockChecksumHelper.BlockChecksumComputer;
|
||||||
|
import org.apache.hadoop.hdfs.server.datanode.BlockChecksumHelper.AbstractBlockChecksumComputer;
|
||||||
import org.apache.hadoop.hdfs.server.datanode.BlockChecksumHelper.ReplicatedBlockChecksumComputer;
|
import org.apache.hadoop.hdfs.server.datanode.BlockChecksumHelper.ReplicatedBlockChecksumComputer;
|
||||||
|
import org.apache.hadoop.hdfs.server.datanode.BlockChecksumHelper.BlockGroupNonStripedChecksumComputer;
|
||||||
import org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsUnsupportedException;
|
import org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsUnsupportedException;
|
||||||
import org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsVersionException;
|
import org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsVersionException;
|
||||||
import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.NewShmInfo;
|
import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.NewShmInfo;
|
||||||
@ -923,6 +926,46 @@ public void blockChecksum(ExtendedBlock block,
|
|||||||
datanode.metrics.addBlockChecksumOp(elapsed());
|
datanode.metrics.addBlockChecksumOp(elapsed());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void blockGroupChecksum(final StripedBlockInfo stripedBlockInfo,
|
||||||
|
final Token<BlockTokenIdentifier> blockToken)
|
||||||
|
throws IOException {
|
||||||
|
updateCurrentThreadName("Getting checksum for block group" +
|
||||||
|
stripedBlockInfo.getBlock());
|
||||||
|
final DataOutputStream out = new DataOutputStream(getOutputStream());
|
||||||
|
checkAccess(out, true, stripedBlockInfo.getBlock(), blockToken,
|
||||||
|
Op.BLOCK_GROUP_CHECKSUM, BlockTokenIdentifier.AccessMode.READ);
|
||||||
|
|
||||||
|
AbstractBlockChecksumComputer maker =
|
||||||
|
new BlockGroupNonStripedChecksumComputer(datanode, stripedBlockInfo);
|
||||||
|
|
||||||
|
try {
|
||||||
|
maker.compute();
|
||||||
|
|
||||||
|
//write reply
|
||||||
|
BlockOpResponseProto.newBuilder()
|
||||||
|
.setStatus(SUCCESS)
|
||||||
|
.setChecksumResponse(OpBlockChecksumResponseProto.newBuilder()
|
||||||
|
.setBytesPerCrc(maker.getBytesPerCRC())
|
||||||
|
.setCrcPerBlock(maker.getCrcPerBlock())
|
||||||
|
.setMd5(ByteString.copyFrom(maker.getOutBytes()))
|
||||||
|
.setCrcType(PBHelperClient.convert(maker.getCrcType())))
|
||||||
|
.build()
|
||||||
|
.writeDelimitedTo(out);
|
||||||
|
out.flush();
|
||||||
|
} catch (IOException ioe) {
|
||||||
|
LOG.info("blockChecksum " + stripedBlockInfo.getBlock() +
|
||||||
|
" received exception " + ioe);
|
||||||
|
incrDatanodeNetworkErrors();
|
||||||
|
throw ioe;
|
||||||
|
} finally {
|
||||||
|
IOUtils.closeStream(out);
|
||||||
|
}
|
||||||
|
|
||||||
|
//update metrics
|
||||||
|
datanode.metrics.addBlockChecksumOp(elapsed());
|
||||||
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public void copyBlock(final ExtendedBlock block,
|
public void copyBlock(final ExtendedBlock block,
|
||||||
final Token<BlockTokenIdentifier> blockToken) throws IOException {
|
final Token<BlockTokenIdentifier> blockToken) throws IOException {
|
||||||
|
@ -849,12 +849,13 @@ private BlockReader newBlockReader(final ExtendedBlock block,
|
|||||||
* read directly from DN and need to check the replica is FINALIZED
|
* read directly from DN and need to check the replica is FINALIZED
|
||||||
* state, notice we should not use short-circuit local read which
|
* state, notice we should not use short-circuit local read which
|
||||||
* requires config for domain-socket in UNIX or legacy config in Windows.
|
* requires config for domain-socket in UNIX or legacy config in Windows.
|
||||||
|
* The network distance value isn't used for this scenario.
|
||||||
*/
|
*/
|
||||||
return RemoteBlockReader2.newBlockReader(
|
return RemoteBlockReader2.newBlockReader(
|
||||||
"dummy", block, blockToken, offsetInBlock,
|
"dummy", block, blockToken, offsetInBlock,
|
||||||
block.getNumBytes() - offsetInBlock, true,
|
block.getNumBytes() - offsetInBlock, true,
|
||||||
"", newConnectedPeer(block, dnAddr, blockToken, dnInfo), dnInfo,
|
"", newConnectedPeer(block, dnAddr, blockToken, dnInfo), dnInfo,
|
||||||
null, cachingStrategy, datanode.getTracer());
|
null, cachingStrategy, datanode.getTracer(), -1);
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
LOG.debug("Exception while creating remote block reader, datanode {}",
|
LOG.debug("Exception while creating remote block reader, datanode {}",
|
||||||
dnInfo, e);
|
dnInfo, e);
|
||||||
|
@ -21,6 +21,7 @@
|
|||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.util.EnumSet;
|
import java.util.EnumSet;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
import java.util.Map;
|
||||||
import java.util.NavigableMap;
|
import java.util.NavigableMap;
|
||||||
import java.util.TreeMap;
|
import java.util.TreeMap;
|
||||||
|
|
||||||
@ -380,4 +381,18 @@ BatchedListEntries<EncryptionZone> listEncryptionZones(long prevId)
|
|||||||
public int getNumEncryptionZones() {
|
public int getNumEncryptionZones() {
|
||||||
return encryptionZones.size();
|
return encryptionZones.size();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @return a list of all key names.
|
||||||
|
*/
|
||||||
|
String[] getKeyNames() {
|
||||||
|
assert dir.hasReadLock();
|
||||||
|
String[] ret = new String[encryptionZones.size()];
|
||||||
|
int index = 0;
|
||||||
|
for (Map.Entry<Long, EncryptionZoneInt> entry : encryptionZones
|
||||||
|
.entrySet()) {
|
||||||
|
ret[index] = entry.getValue().getKeyName();
|
||||||
|
}
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@ -23,6 +23,7 @@
|
|||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.security.GeneralSecurityException;
|
import java.security.GeneralSecurityException;
|
||||||
import java.util.AbstractMap;
|
import java.util.AbstractMap;
|
||||||
|
import java.util.concurrent.ExecutorService;
|
||||||
import java.util.EnumSet;
|
import java.util.EnumSet;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
@ -304,4 +305,86 @@ static boolean isInAnEZ(final FSDirectory fsd, final INodesInPath iip)
|
|||||||
fsd.readUnlock();
|
fsd.readUnlock();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Proactively warm up the edek cache. We'll get all the edek key names,
|
||||||
|
* then launch a separate thread to warm them up.
|
||||||
|
*/
|
||||||
|
static void warmUpEdekCache(final ExecutorService executor,
|
||||||
|
final FSDirectory fsd, final int delay, final int interval) {
|
||||||
|
fsd.readLock();
|
||||||
|
try {
|
||||||
|
String[] edeks = fsd.ezManager.getKeyNames();
|
||||||
|
executor.execute(
|
||||||
|
new EDEKCacheLoader(edeks, fsd.getProvider(), delay, interval));
|
||||||
|
} finally {
|
||||||
|
fsd.readUnlock();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* EDEKCacheLoader is being run in a separate thread to loop through all the
|
||||||
|
* EDEKs and warm them up in the KMS cache.
|
||||||
|
*/
|
||||||
|
static class EDEKCacheLoader implements Runnable {
|
||||||
|
private final String[] keyNames;
|
||||||
|
private final KeyProviderCryptoExtension kp;
|
||||||
|
private int initialDelay;
|
||||||
|
private int retryInterval;
|
||||||
|
|
||||||
|
EDEKCacheLoader(final String[] names, final KeyProviderCryptoExtension kp,
|
||||||
|
final int delay, final int interval) {
|
||||||
|
this.keyNames = names;
|
||||||
|
this.kp = kp;
|
||||||
|
this.initialDelay = delay;
|
||||||
|
this.retryInterval = interval;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void run() {
|
||||||
|
NameNode.LOG.info("Warming up {} EDEKs... (initialDelay={}, "
|
||||||
|
+ "retryInterval={})", keyNames.length, initialDelay, retryInterval);
|
||||||
|
try {
|
||||||
|
Thread.sleep(initialDelay);
|
||||||
|
} catch (InterruptedException ie) {
|
||||||
|
NameNode.LOG.info("EDEKCacheLoader interrupted before warming up.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
final int logCoolDown = 10000; // periodically print error log (if any)
|
||||||
|
int sinceLastLog = logCoolDown; // always print the first failure
|
||||||
|
boolean success = false;
|
||||||
|
IOException lastSeenIOE = null;
|
||||||
|
while (true) {
|
||||||
|
try {
|
||||||
|
kp.warmUpEncryptedKeys(keyNames);
|
||||||
|
NameNode.LOG
|
||||||
|
.info("Successfully warmed up {} EDEKs.", keyNames.length);
|
||||||
|
success = true;
|
||||||
|
break;
|
||||||
|
} catch (IOException ioe) {
|
||||||
|
lastSeenIOE = ioe;
|
||||||
|
if (sinceLastLog >= logCoolDown) {
|
||||||
|
NameNode.LOG.info("Failed to warm up EDEKs.", ioe);
|
||||||
|
sinceLastLog = 0;
|
||||||
|
} else {
|
||||||
|
NameNode.LOG.debug("Failed to warm up EDEKs.", ioe);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
try {
|
||||||
|
Thread.sleep(retryInterval);
|
||||||
|
} catch (InterruptedException ie) {
|
||||||
|
NameNode.LOG.info("EDEKCacheLoader interrupted during retry.");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
sinceLastLog += retryInterval;
|
||||||
|
}
|
||||||
|
if (!success) {
|
||||||
|
NameNode.LOG.warn("Unable to warm up EDEKs.");
|
||||||
|
if (lastSeenIOE != null) {
|
||||||
|
NameNode.LOG.warn("Last seen exception:", lastSeenIOE);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
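Below is a reduced, illustrative sketch of the retry-with-log-cooldown pattern that EDEKCacheLoader uses. warmUp() is a placeholder for the real kp.warmUpEncryptedKeys() call; everything here is standalone and not the class above.

import java.io.IOException;

// Illustrative only: retry until success, logging a full error at most
// every logCooldownMs so transient KMS failures do not flood the log.
public class RetryWithCooldown implements Runnable {
  private final int retryIntervalMs;
  private final int logCooldownMs = 10000;

  RetryWithCooldown(int retryIntervalMs) {
    this.retryIntervalMs = retryIntervalMs;
  }

  void warmUp() throws IOException {
    // Placeholder for the real warm-up call, assumed to fail transiently.
  }

  @Override
  public void run() {
    int sinceLastLog = logCooldownMs;   // always log the first failure
    while (true) {
      try {
        warmUp();
        return;                         // success: stop retrying
      } catch (IOException ioe) {
        if (sinceLastLog >= logCooldownMs) {
          System.err.println("warm-up failed: " + ioe);
          sinceLastLog = 0;
        }
      }
      try {
        Thread.sleep(retryIntervalMs);
      } catch (InterruptedException ie) {
        return;                         // interrupted: give up quietly
      }
      sinceLastLog += retryIntervalMs;
    }
  }
}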
@ -116,6 +116,8 @@
|
|||||||
import java.util.Map.Entry;
|
import java.util.Map.Entry;
|
||||||
import java.util.Set;
|
import java.util.Set;
|
||||||
import java.util.TreeMap;
|
import java.util.TreeMap;
|
||||||
|
import java.util.concurrent.ExecutorService;
|
||||||
|
import java.util.concurrent.Executors;
|
||||||
import java.util.concurrent.TimeUnit;
|
import java.util.concurrent.TimeUnit;
|
||||||
import java.util.concurrent.locks.Condition;
|
import java.util.concurrent.locks.Condition;
|
||||||
import java.util.concurrent.locks.ReentrantLock;
|
import java.util.concurrent.locks.ReentrantLock;
|
||||||
@ -283,6 +285,7 @@
|
|||||||
import com.google.common.base.Preconditions;
|
import com.google.common.base.Preconditions;
|
||||||
import com.google.common.collect.ImmutableMap;
|
import com.google.common.collect.ImmutableMap;
|
||||||
import com.google.common.collect.Lists;
|
import com.google.common.collect.Lists;
|
||||||
|
import com.google.common.util.concurrent.ThreadFactoryBuilder;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* FSNamesystem is a container of both transient
|
* FSNamesystem is a container of both transient
|
||||||
@ -425,6 +428,12 @@ private void logAuditEvent(boolean succeeded,
|
|||||||
// A daemon to periodically clean up corrupt lazyPersist files
|
// A daemon to periodically clean up corrupt lazyPersist files
|
||||||
// from the name space.
|
// from the name space.
|
||||||
Daemon lazyPersistFileScrubber = null;
|
Daemon lazyPersistFileScrubber = null;
|
||||||
|
|
||||||
|
// Executor to warm up EDEK cache
|
||||||
|
private ExecutorService edekCacheLoader = null;
|
||||||
|
private final int edekCacheLoaderDelay;
|
||||||
|
private final int edekCacheLoaderInterval;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* When an active namenode will roll its own edit log, in # edits
|
* When an active namenode will roll its own edit log, in # edits
|
||||||
*/
|
*/
|
||||||
@ -787,6 +796,13 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException {
|
|||||||
+ " must be zero (for disable) or greater than zero.");
|
+ " must be zero (for disable) or greater than zero.");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
this.edekCacheLoaderDelay = conf.getInt(
|
||||||
|
DFSConfigKeys.DFS_NAMENODE_EDEKCACHELOADER_INITIAL_DELAY_MS_KEY,
|
||||||
|
DFSConfigKeys.DFS_NAMENODE_EDEKCACHELOADER_INITIAL_DELAY_MS_DEFAULT);
|
||||||
|
this.edekCacheLoaderInterval = conf.getInt(
|
||||||
|
DFSConfigKeys.DFS_NAMENODE_EDEKCACHELOADER_INTERVAL_MS_KEY,
|
||||||
|
DFSConfigKeys.DFS_NAMENODE_EDEKCACHELOADER_INTERVAL_MS_DEFAULT);
|
||||||
|
|
||||||
// For testing purposes, allow the DT secret manager to be started regardless
|
// For testing purposes, allow the DT secret manager to be started regardless
|
||||||
// of whether security is enabled.
|
// of whether security is enabled.
|
||||||
alwaysUseDelegationTokensForTests = conf.getBoolean(
|
alwaysUseDelegationTokensForTests = conf.getBoolean(
|
||||||
@ -1128,6 +1144,14 @@ void startActiveServices() throws IOException {
|
|||||||
|
|
||||||
cacheManager.startMonitorThread();
|
cacheManager.startMonitorThread();
|
||||||
blockManager.getDatanodeManager().setShouldSendCachingCommands(true);
|
blockManager.getDatanodeManager().setShouldSendCachingCommands(true);
|
||||||
|
if (provider != null) {
|
||||||
|
edekCacheLoader = Executors.newSingleThreadExecutor(
|
||||||
|
new ThreadFactoryBuilder().setDaemon(true)
|
||||||
|
.setNameFormat("Warm Up EDEK Cache Thread #%d")
|
||||||
|
.build());
|
||||||
|
FSDirEncryptionZoneOp.warmUpEdekCache(edekCacheLoader, dir,
|
||||||
|
edekCacheLoaderDelay, edekCacheLoaderInterval);
|
||||||
|
}
|
||||||
} finally {
|
} finally {
|
||||||
startingActiveService = false;
|
startingActiveService = false;
|
||||||
writeUnlock();
|
writeUnlock();
|
||||||
@ -1162,6 +1186,9 @@ void stopActiveServices() {
|
|||||||
((NameNodeResourceMonitor) nnrmthread.getRunnable()).stopMonitor();
|
((NameNodeResourceMonitor) nnrmthread.getRunnable()).stopMonitor();
|
||||||
nnrmthread.interrupt();
|
nnrmthread.interrupt();
|
||||||
}
|
}
|
||||||
|
if (edekCacheLoader != null) {
|
||||||
|
edekCacheLoader.shutdownNow();
|
||||||
|
}
|
||||||
if (nnEditLogRoller != null) {
|
if (nnEditLogRoller != null) {
|
||||||
((NameNodeEditLogRoller)nnEditLogRoller.getRunnable()).stop();
|
((NameNodeEditLogRoller)nnEditLogRoller.getRunnable()).stop();
|
||||||
nnEditLogRoller.interrupt();
|
nnEditLogRoller.interrupt();
|
||||||
|
@ -22,6 +22,7 @@
|
|||||||
import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.XATTR_NAMESPACE_OFFSET;
|
import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.XATTR_NAMESPACE_OFFSET;
|
||||||
import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.XATTR_NAMESPACE_EXT_OFFSET;
|
import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.XATTR_NAMESPACE_EXT_OFFSET;
|
||||||
import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.XATTR_NAMESPACE_EXT_MASK;
|
import static org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode.XATTR_NAMESPACE_EXT_MASK;
|
||||||
|
import static org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter.*;
|
||||||
|
|
||||||
import java.io.BufferedOutputStream;
|
import java.io.BufferedOutputStream;
|
||||||
import java.io.File;
|
import java.io.File;
|
||||||
@ -434,36 +435,38 @@ public void process() throws IOException {
|
|||||||
Node node = new Node();
|
Node node = new Node();
|
||||||
loadNodeChildren(node, "NameSection fields");
|
loadNodeChildren(node, "NameSection fields");
|
||||||
NameSystemSection.Builder b = NameSystemSection.newBuilder();
|
NameSystemSection.Builder b = NameSystemSection.newBuilder();
|
||||||
Integer namespaceId = node.removeChildInt("namespaceId");
|
Integer namespaceId = node.removeChildInt(NAME_SECTION_NAMESPACE_ID);
|
||||||
if (namespaceId == null) {
|
if (namespaceId == null) {
|
||||||
throw new IOException("<NameSection> is missing <namespaceId>");
|
throw new IOException("<NameSection> is missing <namespaceId>");
|
||||||
}
|
}
|
||||||
b.setNamespaceId(namespaceId);
|
b.setNamespaceId(namespaceId);
|
||||||
Long lval = node.removeChildLong("genstampV1");
|
Long lval = node.removeChildLong(NAME_SECTION_GENSTAMPV1);
|
||||||
if (lval != null) {
|
if (lval != null) {
|
||||||
b.setGenstampV1(lval);
|
b.setGenstampV1(lval);
|
||||||
}
|
}
|
||||||
lval = node.removeChildLong("genstampV2");
|
lval = node.removeChildLong(NAME_SECTION_GENSTAMPV2);
|
||||||
if (lval != null) {
|
if (lval != null) {
|
||||||
b.setGenstampV2(lval);
|
b.setGenstampV2(lval);
|
||||||
}
|
}
|
||||||
lval = node.removeChildLong("genstampV1Limit");
|
lval = node.removeChildLong(NAME_SECTION_GENSTAMPV1_LIMIT);
|
||||||
if (lval != null) {
|
if (lval != null) {
|
||||||
b.setGenstampV1Limit(lval);
|
b.setGenstampV1Limit(lval);
|
||||||
}
|
}
|
||||||
lval = node.removeChildLong("lastAllocatedBlockId");
|
lval = node.removeChildLong(NAME_SECTION_LAST_ALLOCATED_BLOCK_ID);
|
||||||
if (lval != null) {
|
if (lval != null) {
|
||||||
b.setLastAllocatedBlockId(lval);
|
b.setLastAllocatedBlockId(lval);
|
||||||
}
|
}
|
||||||
lval = node.removeChildLong("txid");
|
lval = node.removeChildLong(NAME_SECTION_TXID);
|
||||||
if (lval != null) {
|
if (lval != null) {
|
||||||
b.setTransactionId(lval);
|
b.setTransactionId(lval);
|
||||||
}
|
}
|
||||||
lval = node.removeChildLong("rollingUpgradeStartTime");
|
lval = node.removeChildLong(
|
||||||
|
NAME_SECTION_ROLLING_UPGRADE_START_TIME);
|
||||||
if (lval != null) {
|
if (lval != null) {
|
||||||
b.setRollingUpgradeStartTime(lval);
|
b.setRollingUpgradeStartTime(lval);
|
||||||
}
|
}
|
||||||
lval = node.removeChildLong("lastAllocatedStripedBlockId");
|
lval = node.removeChildLong(
|
||||||
|
NAME_SECTION_LAST_ALLOCATED_STRIPED_BLOCK_ID);
|
||||||
if (lval != null) {
|
if (lval != null) {
|
||||||
b.setLastAllocatedStripedBlockId(lval);
|
b.setLastAllocatedStripedBlockId(lval);
|
||||||
}
|
}
|
||||||
@ -486,11 +489,12 @@ public void process() throws IOException {
|
|||||||
Node headerNode = new Node();
|
Node headerNode = new Node();
|
||||||
loadNodeChildren(headerNode, "INodeSection fields", "inode");
|
loadNodeChildren(headerNode, "INodeSection fields", "inode");
|
||||||
INodeSection.Builder b = INodeSection.newBuilder();
|
INodeSection.Builder b = INodeSection.newBuilder();
|
||||||
Long lval = headerNode.removeChildLong("lastInodeId");
|
Long lval = headerNode.removeChildLong(INODE_SECTION_LAST_INODE_ID);
|
||||||
if (lval != null) {
|
if (lval != null) {
|
||||||
b.setLastInodeId(lval);
|
b.setLastInodeId(lval);
|
||||||
}
|
}
|
||||||
Integer expectedNumINodes = headerNode.removeChildInt("numInodes");
|
Integer expectedNumINodes =
|
||||||
|
headerNode.removeChildInt(INODE_SECTION_NUM_INODES);
|
||||||
if (expectedNumINodes == null) {
|
if (expectedNumINodes == null) {
|
||||||
throw new IOException("Failed to find <numInodes> in INodeSection.");
|
throw new IOException("Failed to find <numInodes> in INodeSection.");
|
||||||
}
|
}
|
||||||
@ -501,7 +505,7 @@ public void process() throws IOException {
|
|||||||
int actualNumINodes = 0;
|
int actualNumINodes = 0;
|
||||||
while (actualNumINodes < expectedNumINodes) {
|
while (actualNumINodes < expectedNumINodes) {
|
||||||
try {
|
try {
|
||||||
expectTag("inode", false);
|
expectTag(INODE_SECTION_INODE, false);
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
throw new IOException("Only found " + actualNumINodes +
|
throw new IOException("Only found " + actualNumINodes +
|
||||||
" <inode> entries out of " + expectedNumINodes, e);
|
" <inode> entries out of " + expectedNumINodes, e);
|
||||||
@ -512,24 +516,24 @@ public void process() throws IOException {
|
|||||||
INodeSection.INode.Builder inodeBld = processINodeXml(inode);
|
INodeSection.INode.Builder inodeBld = processINodeXml(inode);
|
||||||
inodeBld.build().writeDelimitedTo(out);
|
inodeBld.build().writeDelimitedTo(out);
|
||||||
}
|
}
|
||||||
expectTagEnd("INodeSection");
|
expectTagEnd(INODE_SECTION_NAME);
|
||||||
recordSectionLength(SectionName.INODE.name());
|
recordSectionLength(SectionName.INODE.name());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private INodeSection.INode.Builder processINodeXml(Node node)
|
private INodeSection.INode.Builder processINodeXml(Node node)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
String type = node.removeChildStr("type");
|
String type = node.removeChildStr(INODE_SECTION_TYPE);
|
||||||
if (type == null) {
|
if (type == null) {
|
||||||
throw new IOException("INode XML found with no <type> tag.");
|
throw new IOException("INode XML found with no <type> tag.");
|
||||||
}
|
}
|
||||||
INodeSection.INode.Builder inodeBld = INodeSection.INode.newBuilder();
|
INodeSection.INode.Builder inodeBld = INodeSection.INode.newBuilder();
|
||||||
Long id = node.removeChildLong("id");
|
Long id = node.removeChildLong(SECTION_ID);
|
||||||
if (id == null) {
|
if (id == null) {
|
||||||
throw new IOException("<inode> found without <id>");
|
throw new IOException("<inode> found without <id>");
|
||||||
}
|
}
|
||||||
inodeBld.setId(id);
|
inodeBld.setId(id);
|
||||||
String name = node.removeChildStr("name");
|
String name = node.removeChildStr(SECTION_NAME);
|
||||||
if (name != null) {
|
if (name != null) {
|
||||||
inodeBld.setName(ByteString.copyFrom(name, "UTF8"));
|
inodeBld.setName(ByteString.copyFrom(name, "UTF8"));
|
||||||
}
|
}
|
||||||
@@ -555,46 +559,46 @@ private void processFileXml(Node node, INodeSection.INode.Builder inodeBld)
 throws IOException {
 inodeBld.setType(INodeSection.INode.Type.FILE);
 INodeSection.INodeFile.Builder bld = INodeSection.INodeFile.newBuilder();
-Integer ival = node.removeChildInt("replication");
+Integer ival = node.removeChildInt(SECTION_REPLICATION);
 if (ival != null) {
 bld.setReplication(ival);
 }
-Long lval = node.removeChildLong("mtime");
+Long lval = node.removeChildLong(INODE_SECTION_MTIME);
 if (lval != null) {
 bld.setModificationTime(lval);
 }
-lval = node.removeChildLong("atime");
+lval = node.removeChildLong(INODE_SECTION_ATIME);
 if (lval != null) {
 bld.setAccessTime(lval);
 }
-lval = node.removeChildLong("preferredBlockSize");
+lval = node.removeChildLong(INODE_SECTION_PREFERRED_BLOCK_SIZE);
 if (lval != null) {
 bld.setPreferredBlockSize(lval);
 }
-String perm = node.removeChildStr("permission");
+String perm = node.removeChildStr(INODE_SECTION_PERMISSION);
 if (perm != null) {
 bld.setPermission(permissionXmlToU64(perm));
 }
-Node blocks = node.removeChild("blocks");
+Node blocks = node.removeChild(INODE_SECTION_BLOCKS);
 if (blocks != null) {
 while (true) {
-Node block = blocks.removeChild("block");
+Node block = blocks.removeChild(INODE_SECTION_BLOCK);
 if (block == null) {
 break;
 }
 HdfsProtos.BlockProto.Builder blockBld =
 HdfsProtos.BlockProto.newBuilder();
-Long id = block.removeChildLong("id");
+Long id = block.removeChildLong(SECTION_ID);
 if (id == null) {
 throw new IOException("<block> found without <id>");
 }
 blockBld.setBlockId(id);
-Long genstamp = block.removeChildLong("genstamp");
+Long genstamp = block.removeChildLong(INODE_SECTION_GEMSTAMP);
 if (genstamp == null) {
 throw new IOException("<block> found without <genstamp>");
 }
 blockBld.setGenStamp(genstamp);
-Long numBytes = block.removeChildLong("numBytes");
+Long numBytes = block.removeChildLong(INODE_SECTION_NUM_BYTES);
 if (numBytes == null) {
 throw new IOException("<block> found without <numBytes>");
 }
@@ -602,19 +606,21 @@ private void processFileXml(Node node, INodeSection.INode.Builder inodeBld)
 bld.addBlocks(blockBld);
 }
 }
-Node fileUnderConstruction = node.removeChild("file-under-construction");
+Node fileUnderConstruction =
+node.removeChild(INODE_SECTION_FILE_UNDER_CONSTRUCTION);
 if (fileUnderConstruction != null) {
 INodeSection.FileUnderConstructionFeature.Builder fb =
 INodeSection.FileUnderConstructionFeature.newBuilder();
 String clientName =
-fileUnderConstruction.removeChildStr("clientName");
+fileUnderConstruction.removeChildStr(INODE_SECTION_CLIENT_NAME);
 if (clientName == null) {
 throw new IOException("<file-under-construction> found without " +
 "<clientName>");
 }
 fb.setClientName(clientName);
 String clientMachine =
-fileUnderConstruction.removeChildStr("clientMachine");
+fileUnderConstruction
+.removeChildStr(INODE_SECTION_CLIENT_MACHINE);
 if (clientMachine == null) {
 throw new IOException("<file-under-construction> found without " +
 "<clientMachine>");
@@ -622,19 +628,19 @@ private void processFileXml(Node node, INodeSection.INode.Builder inodeBld)
 fb.setClientMachine(clientMachine);
 bld.setFileUC(fb);
 }
-Node acls = node.removeChild("acls");
+Node acls = node.removeChild(INODE_SECTION_ACLS);
 if (acls != null) {
 bld.setAcl(aclXmlToProto(acls));
 }
-Node xattrs = node.removeChild("xattrs");
+Node xattrs = node.removeChild(INODE_SECTION_XATTRS);
 if (xattrs != null) {
 bld.setXAttrs(xattrsXmlToProto(xattrs));
 }
-ival = node.removeChildInt("storagePolicyId");
+ival = node.removeChildInt(INODE_SECTION_STORAGE_POLICY_ID);
 if (ival != null) {
 bld.setStoragePolicyID(ival);
 }
-Boolean bval = node.removeChildBool("isStriped");
+Boolean bval = node.removeChildBool(INODE_SECTION_IS_STRIPED);
 bld.setIsStriped(bval);
 inodeBld.setFile(bld);
 // Will check remaining keys and serialize in processINodeXml
@@ -645,40 +651,40 @@ private void processDirectoryXml(Node node,
 inodeBld.setType(INodeSection.INode.Type.DIRECTORY);
 INodeSection.INodeDirectory.Builder bld =
 INodeSection.INodeDirectory.newBuilder();
-Long lval = node.removeChildLong("mtime");
+Long lval = node.removeChildLong(INODE_SECTION_MTIME);
 if (lval != null) {
 bld.setModificationTime(lval);
 }
-lval = node.removeChildLong("nsquota");
+lval = node.removeChildLong(INODE_SECTION_NS_QUOTA);
 if (lval != null) {
 bld.setNsQuota(lval);
 }
-lval = node.removeChildLong("dsquota");
+lval = node.removeChildLong(INODE_SECTION_DS_QUOTA);
 if (lval != null) {
 bld.setDsQuota(lval);
 }
-String perm = node.removeChildStr("permission");
+String perm = node.removeChildStr(INODE_SECTION_PERMISSION);
 if (perm != null) {
 bld.setPermission(permissionXmlToU64(perm));
 }
-Node acls = node.removeChild("acls");
+Node acls = node.removeChild(INODE_SECTION_ACLS);
 if (acls != null) {
 bld.setAcl(aclXmlToProto(acls));
 }
-Node xattrs = node.removeChild("xattrs");
+Node xattrs = node.removeChild(INODE_SECTION_XATTRS);
 if (xattrs != null) {
 bld.setXAttrs(xattrsXmlToProto(xattrs));
 }
 INodeSection.QuotaByStorageTypeFeatureProto.Builder qf =
 INodeSection.QuotaByStorageTypeFeatureProto.newBuilder();
 while (true) {
-Node typeQuota = node.removeChild("typeQuota");
+Node typeQuota = node.removeChild(INODE_SECTION_TYPE_QUOTA);
 if (typeQuota == null) {
 break;
 }
 INodeSection.QuotaByStorageTypeEntryProto.Builder qbld =
 INodeSection.QuotaByStorageTypeEntryProto.newBuilder();
-String type = typeQuota.removeChildStr("type");
+String type = typeQuota.removeChildStr(INODE_SECTION_TYPE);
 if (type == null) {
 throw new IOException("<typeQuota> was missing <type>");
 }
@@ -688,7 +694,7 @@ private void processDirectoryXml(Node node,
 throw new IOException("<typeQuota> had unknown <type> " + type);
 }
 qbld.setStorageType(storageType);
-Long quota = typeQuota.removeChildLong("quota");
+Long quota = typeQuota.removeChildLong(INODE_SECTION_QUOTA);
 if (quota == null) {
 throw new IOException("<typeQuota> was missing <quota>");
 }
@@ -705,19 +711,19 @@ private void processSymlinkXml(Node node,
 inodeBld.setType(INodeSection.INode.Type.SYMLINK);
 INodeSection.INodeSymlink.Builder bld =
 INodeSection.INodeSymlink.newBuilder();
-String perm = node.removeChildStr("permission");
+String perm = node.removeChildStr(INODE_SECTION_PERMISSION);
 if (perm != null) {
 bld.setPermission(permissionXmlToU64(perm));
 }
-String target = node.removeChildStr("target");
+String target = node.removeChildStr(INODE_SECTION_TARGET);
 if (target != null) {
 bld.setTarget(ByteString.copyFrom(target, "UTF8"));
 }
-Long lval = node.removeChildLong("mtime");
+Long lval = node.removeChildLong(INODE_SECTION_MTIME);
 if (lval != null) {
 bld.setModificationTime(lval);
 }
-lval = node.removeChildLong("atime");
+lval = node.removeChildLong(INODE_SECTION_ATIME);
 if (lval != null) {
 bld.setAccessTime(lval);
 }
@@ -736,23 +742,23 @@ private INodeSection.XAttrFeatureProto.Builder xattrsXmlToProto(Node xattrs)
 INodeSection.XAttrFeatureProto.Builder bld =
 INodeSection.XAttrFeatureProto.newBuilder();
 while (true) {
-Node xattr = xattrs.removeChild("xattr");
+Node xattr = xattrs.removeChild(INODE_SECTION_XATTR);
 if (xattr == null) {
 break;
 }
 INodeSection.XAttrCompactProto.Builder b =
 INodeSection.XAttrCompactProto.newBuilder();
-String ns = xattr.removeChildStr("ns");
+String ns = xattr.removeChildStr(INODE_SECTION_NS);
 if (ns == null) {
 throw new IOException("<xattr> had no <ns> entry.");
 }
 int nsIdx = XAttrProtos.XAttrProto.
 XAttrNamespaceProto.valueOf(ns).ordinal();
-String name = xattr.removeChildStr("name");
+String name = xattr.removeChildStr(SECTION_NAME);
-String valStr = xattr.removeChildStr("val");
+String valStr = xattr.removeChildStr(INODE_SECTION_VAL);
 byte[] val = null;
 if (valStr == null) {
-String valHex = xattr.removeChildStr("valHex");
+String valHex = xattr.removeChildStr(INODE_SECTION_VAL_HEX);
 if (valHex == null) {
 throw new IOException("<xattr> had no <val> or <valHex> entry.");
 }
@@ -787,24 +793,28 @@ public void process() throws IOException {
 loadNodeChildren(secretHeader, "SecretManager fields",
 "delegationKey", "token");
 SecretManagerSection.Builder b = SecretManagerSection.newBuilder();
-Integer currentId = secretHeader.removeChildInt("currentId");
+Integer currentId =
+secretHeader.removeChildInt(SECRET_MANAGER_SECTION_CURRENT_ID);
 if (currentId == null) {
 throw new IOException("SecretManager section had no <currentId>");
 }
 b.setCurrentId(currentId);
-Integer tokenSequenceNumber = secretHeader.removeChildInt("tokenSequenceNumber");
+Integer tokenSequenceNumber = secretHeader.removeChildInt(
+SECRET_MANAGER_SECTION_TOKEN_SEQUENCE_NUMBER);
 if (tokenSequenceNumber == null) {
 throw new IOException("SecretManager section had no " +
 "<tokenSequenceNumber>");
 }
 b.setTokenSequenceNumber(tokenSequenceNumber);
-Integer expectedNumKeys = secretHeader.removeChildInt("numDelegationKeys");
+Integer expectedNumKeys = secretHeader.removeChildInt(
+SECRET_MANAGER_SECTION_NUM_DELEGATION_KEYS);
 if (expectedNumKeys == null) {
 throw new IOException("SecretManager section had no " +
 "<numDelegationKeys>");
 }
 b.setNumKeys(expectedNumKeys);
-Integer expectedNumTokens = secretHeader.removeChildInt("numTokens");
+Integer expectedNumTokens =
+secretHeader.removeChildInt(SECRET_MANAGER_SECTION_NUM_TOKENS);
 if (expectedNumTokens == null) {
 throw new IOException("SecretManager section had no " +
 "<numTokens>");
@@ -815,7 +825,7 @@ public void process() throws IOException {
 for (int actualNumKeys = 0; actualNumKeys < expectedNumKeys;
 actualNumKeys++) {
 try {
-expectTag("delegationKey", false);
+expectTag(SECRET_MANAGER_SECTION_DELEGATION_KEY, false);
 } catch (IOException e) {
 throw new IOException("Only read " + actualNumKeys +
 " delegation keys out of " + expectedNumKeys, e);
@@ -824,32 +834,32 @@ public void process() throws IOException {
 SecretManagerSection.DelegationKey.newBuilder();
 Node dkey = new Node();
 loadNodeChildren(dkey, "Delegation key fields");
-Integer id = dkey.removeChildInt("id");
+Integer id = dkey.removeChildInt(SECTION_ID);
 if (id == null) {
 throw new IOException("Delegation key stanza <delegationKey> " +
 "lacked an <id> field.");
 }
 dbld.setId(id);
-String expiry = dkey.removeChildStr("expiry");
+String expiry = dkey.removeChildStr(SECRET_MANAGER_SECTION_EXPIRY);
 if (expiry == null) {
 throw new IOException("Delegation key stanza <delegationKey> " +
 "lacked an <expiry> field.");
 }
 dbld.setExpiryDate(dateStrToLong(expiry));
-String keyHex = dkey.removeChildStr("key");
+String keyHex = dkey.removeChildStr(SECRET_MANAGER_SECTION_KEY);
 if (keyHex == null) {
 throw new IOException("Delegation key stanza <delegationKey> " +
 "lacked a <key> field.");
 }
 byte[] key = new HexBinaryAdapter().unmarshal(keyHex);
-dkey.verifyNoRemainingKeys("delegationKey");
+dkey.verifyNoRemainingKeys(SECRET_MANAGER_SECTION_DELEGATION_KEY);
 dbld.setKey(ByteString.copyFrom(key));
 dbld.build().writeDelimitedTo(out);
 }
 for (int actualNumTokens = 0; actualNumTokens < expectedNumTokens;
 actualNumTokens++) {
 try {
-expectTag("token", false);
+expectTag(SECRET_MANAGER_SECTION_TOKEN, false);
 } catch (IOException e) {
 throw new IOException("Only read " + actualNumTokens +
 " tokens out of " + expectedNumTokens, e);
@@ -858,46 +868,54 @@ public void process() throws IOException {
 SecretManagerSection.PersistToken.newBuilder();
 Node token = new Node();
 loadNodeChildren(token, "PersistToken key fields");
-Integer version = token.removeChildInt("version");
+Integer version =
+token.removeChildInt(SECRET_MANAGER_SECTION_VERSION);
 if (version != null) {
 tbld.setVersion(version);
 }
-String owner = token.removeChildStr("owner");
+String owner = token.removeChildStr(SECRET_MANAGER_SECTION_OWNER);
 if (owner != null) {
 tbld.setOwner(owner);
 }
-String renewer = token.removeChildStr("renewer");
+String renewer =
+token.removeChildStr(SECRET_MANAGER_SECTION_RENEWER);
 if (renewer != null) {
 tbld.setRenewer(renewer);
 }
-String realUser = token.removeChildStr("realUser");
+String realUser =
+token.removeChildStr(SECRET_MANAGER_SECTION_REAL_USER);
 if (realUser != null) {
 tbld.setRealUser(realUser);
 }
-String issueDateStr = token.removeChildStr("issueDate");
+String issueDateStr =
+token.removeChildStr(SECRET_MANAGER_SECTION_ISSUE_DATE);
 if (issueDateStr != null) {
 tbld.setIssueDate(dateStrToLong(issueDateStr));
 }
-String maxDateStr = token.removeChildStr("maxDate");
+String maxDateStr =
+token.removeChildStr(SECRET_MANAGER_SECTION_MAX_DATE);
 if (maxDateStr != null) {
 tbld.setMaxDate(dateStrToLong(maxDateStr));
 }
-Integer seqNo = token.removeChildInt("sequenceNumber");
+Integer seqNo =
+token.removeChildInt(SECRET_MANAGER_SECTION_SEQUENCE_NUMBER);
 if (seqNo != null) {
 tbld.setSequenceNumber(seqNo);
 }
-Integer masterKeyId = token.removeChildInt("masterKeyId");
+Integer masterKeyId =
+token.removeChildInt(SECRET_MANAGER_SECTION_MASTER_KEY_ID);
 if (masterKeyId != null) {
 tbld.setMasterKeyId(masterKeyId);
 }
-String expiryDateStr = token.removeChildStr("expiryDate");
+String expiryDateStr =
+token.removeChildStr(SECRET_MANAGER_SECTION_EXPIRY_DATE);
 if (expiryDateStr != null) {
 tbld.setExpiryDate(dateStrToLong(expiryDateStr));
 }
 token.verifyNoRemainingKeys("token");
 tbld.build().writeDelimitedTo(out);
 }
-expectTagEnd("SecretManagerSection");
+expectTagEnd(SECRET_MANAGER_SECTION_NAME);
 recordSectionLength(SectionName.SECRET_MANAGER.name());
 }

@@ -919,17 +937,20 @@ public void process() throws IOException {
 Node node = new Node();
 loadNodeChildren(node, "CacheManager fields", "pool", "directive");
 CacheManagerSection.Builder b = CacheManagerSection.newBuilder();
-Long nextDirectiveId = node.removeChildLong("nextDirectiveId");
+Long nextDirectiveId =
+node.removeChildLong(CACHE_MANAGER_SECTION_NEXT_DIRECTIVE_ID);
 if (nextDirectiveId == null) {
 throw new IOException("CacheManager section had no <nextDirectiveId>");
 }
 b.setNextDirectiveId(nextDirectiveId);
-Integer expectedNumPools = node.removeChildInt("numPools");
+Integer expectedNumPools =
+node.removeChildInt(CACHE_MANAGER_SECTION_NUM_POOLS);
 if (expectedNumPools == null) {
 throw new IOException("CacheManager section had no <numPools>");
 }
 b.setNumPools(expectedNumPools);
-Integer expectedNumDirectives = node.removeChildInt("numDirectives");
+Integer expectedNumDirectives =
+node.removeChildInt(CACHE_MANAGER_SECTION_NUM_DIRECTIVES);
 if (expectedNumDirectives == null) {
 throw new IOException("CacheManager section had no <numDirectives>");
 }
@@ -938,7 +959,7 @@ public void process() throws IOException {
 long actualNumPools = 0;
 while (actualNumPools < expectedNumPools) {
 try {
-expectTag("pool", false);
+expectTag(CACHE_MANAGER_SECTION_POOL, false);
 } catch (IOException e) {
 throw new IOException("Only read " + actualNumPools +
 " cache pools out of " + expectedNumPools, e);
@@ -951,7 +972,7 @@ public void process() throws IOException {
 long actualNumDirectives = 0;
 while (actualNumDirectives < expectedNumDirectives) {
 try {
-expectTag("directive", false);
+expectTag(CACHE_MANAGER_SECTION_DIRECTIVE, false);
 } catch (IOException e) {
 throw new IOException("Only read " + actualNumDirectives +
 " cache pools out of " + expectedNumDirectives, e);
@@ -961,38 +982,42 @@ public void process() throws IOException {
 loadNodeChildren(pool, "directive fields", "");
 processDirectiveXml(node);
 }
-expectTagEnd("CacheManagerSection");
+expectTagEnd(CACHE_MANAGER_SECTION_NAME);
 recordSectionLength(SectionName.CACHE_MANAGER.name());
 }

 private void processPoolXml(Node pool) throws IOException {
 CachePoolInfoProto.Builder bld = CachePoolInfoProto.newBuilder();
-String poolName = pool.removeChildStr("poolName");
+String poolName =
+pool.removeChildStr(CACHE_MANAGER_SECTION_POOL_NAME);
 if (poolName == null) {
 throw new IOException("<pool> found without <poolName>");
 }
 bld.setPoolName(poolName);
-String ownerName = pool.removeChildStr("ownerName");
+String ownerName =
+pool.removeChildStr(CACHE_MANAGER_SECTION_OWNER_NAME);
 if (ownerName == null) {
 throw new IOException("<pool> found without <ownerName>");
 }
 bld.setOwnerName(ownerName);
-String groupName = pool.removeChildStr("groupName");
+String groupName =
+pool.removeChildStr(CACHE_MANAGER_SECTION_GROUP_NAME);
 if (groupName == null) {
 throw new IOException("<pool> found without <groupName>");
 }
 bld.setGroupName(groupName);
-Integer mode = pool.removeChildInt("mode");
+Integer mode = pool.removeChildInt(CACHE_MANAGER_SECTION_MODE);
 if (mode == null) {
 throw new IOException("<pool> found without <mode>");
 }
 bld.setMode(mode);
-Long limit = pool.removeChildLong("limit");
+Long limit = pool.removeChildLong(CACHE_MANAGER_SECTION_LIMIT);
 if (limit == null) {
 throw new IOException("<pool> found without <limit>");
 }
 bld.setLimit(limit);
-Long maxRelativeExpiry = pool.removeChildLong("maxRelativeExpiry");
+Long maxRelativeExpiry =
+pool.removeChildLong(CACHE_MANAGER_SECTION_MAX_RELATIVE_EXPIRY);
 if (maxRelativeExpiry == null) {
 throw new IOException("<pool> found without <maxRelativeExpiry>");
 }
@@ -1004,37 +1029,39 @@ private void processPoolXml(Node pool) throws IOException {
 private void processDirectiveXml(Node directive) throws IOException {
 CacheDirectiveInfoProto.Builder bld =
 CacheDirectiveInfoProto.newBuilder();
-Long id = directive.removeChildLong("id");
+Long id = directive.removeChildLong(SECTION_ID);
 if (id == null) {
 throw new IOException("<directive> found without <id>");
 }
 bld.setId(id);
-String path = directive.removeChildStr("path");
+String path = directive.removeChildStr(SECTION_PATH);
 if (path == null) {
 throw new IOException("<directive> found without <path>");
 }
 bld.setPath(path);
-Integer replication = directive.removeChildInt("replication");
+Integer replication = directive.removeChildInt(SECTION_REPLICATION);
 if (replication == null) {
 throw new IOException("<directive> found without <replication>");
 }
 bld.setReplication(replication);
-String pool = directive.removeChildStr("pool");
+String pool = directive.removeChildStr(CACHE_MANAGER_SECTION_POOL);
 if (path == null) {
 throw new IOException("<directive> found without <pool>");
 }
 bld.setPool(pool);
-Node expiration = directive.removeChild("expiration");
+Node expiration =
+directive.removeChild(CACHE_MANAGER_SECTION_EXPIRATION);
 if (expiration != null) {
 CacheDirectiveInfoExpirationProto.Builder ebld =
 CacheDirectiveInfoExpirationProto.newBuilder();
-Long millis = expiration.removeChildLong("millis");
+Long millis =
+expiration.removeChildLong(CACHE_MANAGER_SECTION_MILLIS);
 if (millis == null) {
 throw new IOException("cache directive <expiration> found " +
 "without <millis>");
 }
 ebld.setMillis(millis);
-if (expiration.removeChildBool("relative")) {
+if (expiration.removeChildBool(CACHE_MANAGER_SECTION_RELATIVE)) {
 ebld.setIsRelative(true);
 } else {
 ebld.setIsRelative(false);
@@ -1054,7 +1081,7 @@ public void process() throws IOException {
 // There is no header for this section.
 // We process the repeated <ref> elements.
 while (true) {
-XMLEvent ev = expectTag("ref", true);
+XMLEvent ev = expectTag(INODE_REFERENCE_SECTION_REF, true);
 if (ev.isEndElement()) {
 break;
 }
@@ -1062,7 +1089,8 @@ public void process() throws IOException {
 FsImageProto.INodeReferenceSection.INodeReference.Builder bld =
 FsImageProto.INodeReferenceSection.INodeReference.newBuilder();
 loadNodeChildren(inodeRef, "INodeReference");
-Long referredId = inodeRef.removeChildLong("referredId");
+Long referredId =
+inodeRef.removeChildLong(INODE_REFERENCE_SECTION_REFERRED_ID);
 if (referredId != null) {
 bld.setReferredId(referredId);
 }
@@ -1070,11 +1098,13 @@ public void process() throws IOException {
 if (name != null) {
 bld.setName(ByteString.copyFrom(name, "UTF8"));
 }
-Integer dstSnapshotId = inodeRef.removeChildInt("dstSnapshotId");
+Integer dstSnapshotId = inodeRef.removeChildInt(
+INODE_REFERENCE_SECTION_DST_SNAPSHOT_ID);
 if (dstSnapshotId != null) {
 bld.setDstSnapshotId(dstSnapshotId);
 }
-Integer lastSnapshotId = inodeRef.removeChildInt("lastSnapshotId");
+Integer lastSnapshotId = inodeRef.removeChildInt(
+INODE_REFERENCE_SECTION_LAST_SNAPSHOT_ID);
 if (lastSnapshotId != null) {
 bld.setLastSnapshotId(lastSnapshotId);
 }
@@ -1093,7 +1123,7 @@ public void process() throws IOException {
 // No header for this section
 // Process the repeated <directory> elements.
 while (true) {
-XMLEvent ev = expectTag("directory", true);
+XMLEvent ev = expectTag(INODE_DIRECTORY_SECTION_DIRECTORY, true);
 if (ev.isEndElement()) {
 break;
 }
@@ -1101,19 +1131,22 @@ public void process() throws IOException {
 FsImageProto.INodeDirectorySection.DirEntry.Builder bld =
 FsImageProto.INodeDirectorySection.DirEntry.newBuilder();
 loadNodeChildren(directory, "directory");
-Long parent = directory.removeChildLong("parent");
+Long parent = directory.removeChildLong(
+INODE_DIRECTORY_SECTION_PARENT);
 if (parent != null) {
 bld.setParent(parent);
 }
 while (true) {
-Node child = directory.removeChild("child");
+Node child = directory.removeChild(
+INODE_DIRECTORY_SECTION_CHILD);
 if (child == null) {
 break;
 }
 bld.addChildren(Long.parseLong(child.getVal()));
 }
 while (true) {
-Node refChild = directory.removeChild("refChild");
+Node refChild = directory.removeChild(
+INODE_DIRECTORY_SECTION_REF_CHILD);
 if (refChild == null) {
 break;
 }
@@ -1135,7 +1168,7 @@ public void process() throws IOException {
 // No header for this section type.
 // Process the repeated files under construction elements.
 while (true) {
-XMLEvent ev = expectTag("inode", true);
+XMLEvent ev = expectTag(INODE_SECTION_INODE, true);
 if (ev.isEndElement()) {
 break;
 }
@@ -1143,11 +1176,12 @@ public void process() throws IOException {
 loadNodeChildren(fileUnderConstruction, "file under construction");
 FileUnderConstructionEntry.Builder bld =
 FileUnderConstructionEntry.newBuilder();
-Long id = fileUnderConstruction.removeChildLong("id");
+Long id = fileUnderConstruction.removeChildLong(SECTION_ID);
 if (id != null) {
 bld.setInodeId(id);
 }
-String fullpath = fileUnderConstruction.removeChildStr("path");
+String fullpath =
+fileUnderConstruction.removeChildStr(SECTION_PATH);
 if (fullpath != null) {
 bld.setFullPath(fullpath);
 }
@@ -1167,24 +1201,26 @@ public void process() throws IOException {
 FsImageProto.SnapshotSection.newBuilder();
 Node header = new Node();
 loadNodeChildren(header, "SnapshotSection fields", "snapshot");
-Integer snapshotCounter = header.removeChildInt("snapshotCounter");
+Integer snapshotCounter = header.removeChildInt(
+SNAPSHOT_SECTION_SNAPSHOT_COUNTER);
 if (snapshotCounter == null) {
 throw new IOException("No <snapshotCounter> entry found in " +
 "SnapshotSection header");
 }
 bld.setSnapshotCounter(snapshotCounter);
-Integer expectedNumSnapshots = header.removeChildInt("numSnapshots");
+Integer expectedNumSnapshots = header.removeChildInt(
+SNAPSHOT_SECTION_NUM_SNAPSHOTS);
 if (expectedNumSnapshots == null) {
 throw new IOException("No <numSnapshots> entry found in " +
 "SnapshotSection header");
 }
 bld.setNumSnapshots(expectedNumSnapshots);
 while (true) {
-Node sd = header.removeChild("snapshottableDir");
+Node sd = header.removeChild(SNAPSHOT_SECTION_SNAPSHOT_TABLE_DIR);
 if (sd == null) {
 break;
 }
-Long dir = sd.removeChildLong("dir");
+Long dir = sd.removeChildLong(SNAPSHOT_SECTION_DIR);
 sd.verifyNoRemainingKeys("<dir>");
 bld.addSnapshottableDir(dir);
 }
@@ -1193,7 +1229,7 @@ public void process() throws IOException {
 int actualNumSnapshots = 0;
 while (actualNumSnapshots < expectedNumSnapshots) {
 try {
-expectTag("snapshot", false);
+expectTag(SNAPSHOT_SECTION_SNAPSHOT, false);
 } catch (IOException e) {
 throw new IOException("Only read " + actualNumSnapshots +
 " <snapshot> entries out of " + expectedNumSnapshots, e);
@@ -1203,17 +1239,17 @@ public void process() throws IOException {
 loadNodeChildren(snapshot, "snapshot fields");
 FsImageProto.SnapshotSection.Snapshot.Builder s =
 FsImageProto.SnapshotSection.Snapshot.newBuilder();
-Integer snapshotId = snapshot.removeChildInt("id");
+Integer snapshotId = snapshot.removeChildInt(SECTION_ID);
 if (snapshotId == null) {
 throw new IOException("<snapshot> section was missing <id>");
 }
 s.setSnapshotId(snapshotId);
-Node snapshotRoot = snapshot.removeChild("root");
+Node snapshotRoot = snapshot.removeChild(SNAPSHOT_SECTION_ROOT);
 INodeSection.INode.Builder inodeBld = processINodeXml(snapshotRoot);
 s.setRoot(inodeBld);
 s.build().writeDelimitedTo(out);
 }
-expectTagEnd("SnapshotSection");
+expectTagEnd(SNAPSHOT_SECTION_NAME);
 recordSectionLength(SectionName.SNAPSHOT.name());
 }
 }
@@ -1229,15 +1265,15 @@ public void process() throws IOException {
 XMLEvent ev = expectTag("[diff start tag]", true);
 if (ev.isEndElement()) {
 String name = ev.asEndElement().getName().getLocalPart();
-if (name.equals("SnapshotDiffSection")) {
+if (name.equals(SNAPSHOT_DIFF_SECTION_NAME)) {
 break;
 }
 throw new IOException("Got unexpected end tag for " + name);
 }
 String tagName = ev.asStartElement().getName().getLocalPart();
-if (tagName.equals("dirDiffEntry")) {
+if (tagName.equals(SNAPSHOT_DIFF_SECTION_DIR_DIFF_ENTRY)) {
 processDirDiffEntry();
-} else if (tagName.equals("fileDiffEntry")) {
+} else if (tagName.equals(SNAPSHOT_DIFF_SECTION_FILE_DIFF_ENTRY)) {
 processFileDiffEntry();
 } else {
 throw new IOException("SnapshotDiffSection contained unexpected " +
@@ -1253,12 +1289,14 @@ private void processDirDiffEntry() throws IOException {
 headerBld.setType(DiffEntry.Type.DIRECTORYDIFF);
 Node dirDiffHeader = new Node();
 loadNodeChildren(dirDiffHeader, "dirDiffEntry fields", "dirDiff");
-Long inodeId = dirDiffHeader.removeChildLong("inodeId");
+Long inodeId = dirDiffHeader.removeChildLong(
+SNAPSHOT_DIFF_SECTION_INODE_ID);
 if (inodeId == null) {
 throw new IOException("<dirDiffEntry> contained no <inodeId> entry.");
 }
 headerBld.setInodeId(inodeId);
-Integer expectedDiffs = dirDiffHeader.removeChildInt("count");
+Integer expectedDiffs = dirDiffHeader.removeChildInt(
+SNAPSHOT_DIFF_SECTION_COUNT);
 if (expectedDiffs == null) {
 throw new IOException("<dirDiffEntry> contained no <count> entry.");
 }
@@ -1267,7 +1305,7 @@ private void processDirDiffEntry() throws IOException {
 headerBld.build().writeDelimitedTo(out);
 for (int actualDiffs = 0; actualDiffs < expectedDiffs; actualDiffs++) {
 try {
-expectTag("dirDiff", false);
+expectTag(SNAPSHOT_DIFF_SECTION_DIR_DIFF, false);
 } catch (IOException e) {
 throw new IOException("Only read " + (actualDiffs + 1) +
 " diffs out of " + expectedDiffs, e);
@@ -1276,38 +1314,43 @@ private void processDirDiffEntry() throws IOException {
 loadNodeChildren(dirDiff, "dirDiff fields");
 FsImageProto.SnapshotDiffSection.DirectoryDiff.Builder bld =
 FsImageProto.SnapshotDiffSection.DirectoryDiff.newBuilder();
-Integer snapshotId = dirDiff.removeChildInt("snapshotId");
+Integer snapshotId = dirDiff.removeChildInt(
+SNAPSHOT_DIFF_SECTION_SNAPSHOT_ID);
 if (snapshotId != null) {
 bld.setSnapshotId(snapshotId);
 }
-Integer childrenSize = dirDiff.removeChildInt("childrenSize");
+Integer childrenSize = dirDiff.removeChildInt(
+SNAPSHOT_DIFF_SECTION_CHILDREN_SIZE);
 if (childrenSize == null) {
 throw new IOException("Expected to find <childrenSize> in " +
 "<dirDiff> section.");
 }
-bld.setIsSnapshotRoot(dirDiff.removeChildBool("isSnapshotRoot"));
+bld.setIsSnapshotRoot(dirDiff.removeChildBool(
+SNAPSHOT_DIFF_SECTION_IS_SNAPSHOT_ROOT));
 bld.setChildrenSize(childrenSize);
-String name = dirDiff.removeChildStr("name");
+String name = dirDiff.removeChildStr(SECTION_NAME);
 if (name != null) {
 bld.setName(ByteString.copyFrom(name, "UTF8"));
 }
 // TODO: add missing snapshotCopy field to XML
-Integer expectedCreatedListSize =
-dirDiff.removeChildInt("createdListSize");
+Integer expectedCreatedListSize = dirDiff.removeChildInt(
+SNAPSHOT_DIFF_SECTION_CREATED_LIST_SIZE);
 if (expectedCreatedListSize == null) {
 throw new IOException("Expected to find <createdListSize> in " +
 "<dirDiff> section.");
 }
 bld.setCreatedListSize(expectedCreatedListSize);
 while (true) {
-Node deleted = dirDiff.removeChild("deletedInode");
+Node deleted = dirDiff.removeChild(
+SNAPSHOT_DIFF_SECTION_DELETED_INODE);
 if (deleted == null){
 break;
 }
 bld.addDeletedINode(Long.parseLong(deleted.getVal()));
 }
 while (true) {
-Node deleted = dirDiff.removeChild("deletedInoderef");
+Node deleted = dirDiff.removeChild(
+SNAPSHOT_DIFF_SECTION_DELETED_INODE_REF);
 if (deleted == null){
 break;
 }
@@ -1317,11 +1360,12 @@ private void processDirDiffEntry() throws IOException {
 // After the DirectoryDiff header comes a list of CreatedListEntry PBs.
 int actualCreatedListSize = 0;
 while (true) {
-Node created = dirDiff.removeChild("created");
+Node created = dirDiff.removeChild(
+SNAPSHOT_DIFF_SECTION_CREATED);
 if (created == null){
 break;
 }
-String cleName = created.removeChildStr("name");
+String cleName = created.removeChildStr(SECTION_NAME);
 if (cleName == null) {
 throw new IOException("Expected <created> entry to have " +
 "a <name> field");
@@ -1339,7 +1383,7 @@ private void processDirDiffEntry() throws IOException {
 }
 dirDiff.verifyNoRemainingKeys("dirDiff");
 }
-expectTagEnd("dirDiffEntry");
+expectTagEnd(SNAPSHOT_DIFF_SECTION_DIR_DIFF_ENTRY);
 }

 private void processFileDiffEntry() throws IOException {
@@ -1348,12 +1392,14 @@ private void processFileDiffEntry() throws IOException {
 headerBld.setType(DiffEntry.Type.FILEDIFF);
 Node fileDiffHeader = new Node();
 loadNodeChildren(fileDiffHeader, "fileDiffEntry fields", "fileDiff");
-Long inodeId = fileDiffHeader.removeChildLong("inodeid");
+Long inodeId = fileDiffHeader.removeChildLong(
+SNAPSHOT_DIFF_SECTION_INODE_ID);
 if (inodeId == null) {
 throw new IOException("<fileDiffEntry> contained no <inodeid> entry.");
 }
 headerBld.setInodeId(inodeId);
-Integer expectedDiffs = fileDiffHeader.removeChildInt("count");
+Integer expectedDiffs = fileDiffHeader.removeChildInt(
+SNAPSHOT_DIFF_SECTION_COUNT);
 if (expectedDiffs == null) {
 throw new IOException("<fileDiffEntry> contained no <count> entry.");
 }
@@ -1362,7 +1408,7 @@ private void processFileDiffEntry() throws IOException {
 headerBld.build().writeDelimitedTo(out);
 for (int actualDiffs = 0; actualDiffs < expectedDiffs; actualDiffs++) {
 try {
-expectTag("fileDiff", false);
+expectTag(SNAPSHOT_DIFF_SECTION_FILE_DIFF, false);
 } catch (IOException e) {
 throw new IOException("Only read " + (actualDiffs + 1) +
 " diffs out of " + expectedDiffs, e);
@@ -1371,15 +1417,17 @@ private void processFileDiffEntry() throws IOException {
 loadNodeChildren(fileDiff, "fileDiff fields");
 FsImageProto.SnapshotDiffSection.FileDiff.Builder bld =
 FsImageProto.SnapshotDiffSection.FileDiff.newBuilder();
-Integer snapshotId = fileDiff.removeChildInt("snapshotId");
+Integer snapshotId = fileDiff.removeChildInt(
+SNAPSHOT_DIFF_SECTION_SNAPSHOT_ID);
 if (snapshotId != null) {
 bld.setSnapshotId(snapshotId);
 }
-Long size = fileDiff.removeChildLong("size");
+Long size = fileDiff.removeChildLong(
+SNAPSHOT_DIFF_SECTION_SIZE);
 if (size != null) {
 bld.setFileSize(size);
 }
-String name = fileDiff.removeChildStr("name");
+String name = fileDiff.removeChildStr(SECTION_NAME);
 if (name != null) {
 bld.setName(ByteString.copyFrom(name, "UTF8"));
 }
@@ -1388,7 +1436,7 @@ private void processFileDiffEntry() throws IOException {
 fileDiff.verifyNoRemainingKeys("fileDiff");
 bld.build().writeDelimitedTo(out);
 }
-expectTagEnd("fileDiffEntry");
+expectTagEnd(SNAPSHOT_DIFF_SECTION_FILE_DIFF_ENTRY);
 }
 }

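The hunks above and the constant definitions below follow a single pattern: every XML tag literal that the image writer emits and the reconstructor parses is replaced by one shared constant. A minimal, self-contained sketch of that pattern, using a hypothetical TagSharingDemo class rather than the actual Hadoop sources:

// Illustrative sketch only (hypothetical class, not the Hadoop code):
// writer and reader share one constant per XML tag instead of repeating
// string literals such as "mtime" on both sides.
import java.util.HashMap;
import java.util.Map;

public final class TagSharingDemo {
  // One canonical spelling of the tag, used by writer and reader alike.
  static final String INODE_SECTION_MTIME = "mtime";

  public static void main(String[] args) {
    // Writer side: emit the element using the constant.
    String xml = "<" + INODE_SECTION_MTIME + ">12345</" + INODE_SECTION_MTIME + ">";

    // Reader side: look the child up by the same constant, so the two sides
    // cannot drift apart on the literal spelling of the tag.
    Map<String, String> children = new HashMap<>();
    children.put(INODE_SECTION_MTIME, "12345");
    long mtime = Long.parseLong(children.remove(INODE_SECTION_MTIME));

    System.out.println(xml + " -> mtime=" + mtime);
  }
}

A misspelled tag then fails at compile time instead of silently producing XML that the reverse path cannot round-trip.
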
@@ -77,6 +77,154 @@
 */
 @InterfaceAudience.Private
 public final class PBImageXmlWriter {
+public static final String NAME_SECTION_NAME = "NameSection";
+public static final String INODE_SECTION_NAME = "INodeSection";
+public static final String SECRET_MANAGER_SECTION_NAME =
+"SecretManagerSection";
+public static final String CACHE_MANAGER_SECTION_NAME = "CacheManagerSection";
+public static final String SNAPSHOT_DIFF_SECTION_NAME = "SnapshotDiffSection";
+public static final String INODE_REFERENCE_SECTION_NAME =
+"INodeReferenceSection";
+public static final String INODE_DIRECTORY_SECTION_NAME =
+"INodeDirectorySection";
+public static final String FILE_UNDER_CONSTRUCTION_SECTION_NAME =
+"FileUnderConstructionSection";
+public static final String SNAPSHOT_SECTION_NAME = "SnapshotSection";
+
+public static final String SECTION_ID = "id";
+public static final String SECTION_REPLICATION = "replication";
+public static final String SECTION_PATH = "path";
+public static final String SECTION_NAME = "name";
+
+public static final String NAME_SECTION_NAMESPACE_ID = "namespaceId";
+public static final String NAME_SECTION_GENSTAMPV1 = "genstampV1";
+public static final String NAME_SECTION_GENSTAMPV2 = "genstampV2";
+public static final String NAME_SECTION_GENSTAMPV1_LIMIT = "genstampV1Limit";
+public static final String NAME_SECTION_LAST_ALLOCATED_BLOCK_ID =
+"lastAllocatedBlockId";
+public static final String NAME_SECTION_TXID = "txid";
+public static final String NAME_SECTION_ROLLING_UPGRADE_START_TIME =
+"rollingUpgradeStartTime";
+public static final String NAME_SECTION_LAST_ALLOCATED_STRIPED_BLOCK_ID =
+"lastAllocatedStripedBlockId";
+
+public static final String INODE_SECTION_LAST_INODE_ID = "lastInodeId";
+public static final String INODE_SECTION_NUM_INODES = "numInodes";
+public static final String INODE_SECTION_TYPE = "type";
+public static final String INODE_SECTION_MTIME = "mtime";
+public static final String INODE_SECTION_ATIME = "atime";
+public static final String INODE_SECTION_PREFERRED_BLOCK_SIZE =
+"preferredBlockSize";
+public static final String INODE_SECTION_PERMISSION = "permission";
+public static final String INODE_SECTION_BLOCKS = "blocks";
+public static final String INODE_SECTION_BLOCK = "block";
+public static final String INODE_SECTION_GEMSTAMP = "genstamp";
+public static final String INODE_SECTION_NUM_BYTES = "numBytes";
+public static final String INODE_SECTION_FILE_UNDER_CONSTRUCTION =
+"file-under-construction";
+public static final String INODE_SECTION_CLIENT_NAME = "clientName";
+public static final String INODE_SECTION_CLIENT_MACHINE = "clientMachine";
+public static final String INODE_SECTION_ACL = "acl";
+public static final String INODE_SECTION_ACLS = "acls";
+public static final String INODE_SECTION_XATTR = "xattr";
+public static final String INODE_SECTION_XATTRS = "xattrs";
+public static final String INODE_SECTION_STORAGE_POLICY_ID =
+"storagePolicyId";
+public static final String INODE_SECTION_IS_STRIPED = "isStriped";
+public static final String INODE_SECTION_NS_QUOTA = "nsquota";
+public static final String INODE_SECTION_DS_QUOTA = "dsquota";
+public static final String INODE_SECTION_TYPE_QUOTA = "typeQuota";
+public static final String INODE_SECTION_QUOTA = "quota";
+public static final String INODE_SECTION_TARGET = "target";
+public static final String INODE_SECTION_NS = "ns";
+public static final String INODE_SECTION_VAL = "val";
+public static final String INODE_SECTION_VAL_HEX = "valHex";
+public static final String INODE_SECTION_INODE = "inode";
+
+public static final String SECRET_MANAGER_SECTION_CURRENT_ID = "currentId";
+public static final String SECRET_MANAGER_SECTION_TOKEN_SEQUENCE_NUMBER =
+"tokenSequenceNumber";
+public static final String SECRET_MANAGER_SECTION_NUM_DELEGATION_KEYS =
+"numDelegationKeys";
+public static final String SECRET_MANAGER_SECTION_NUM_TOKENS = "numTokens";
+public static final String SECRET_MANAGER_SECTION_EXPIRY = "expiry";
+public static final String SECRET_MANAGER_SECTION_KEY = "key";
+public static final String SECRET_MANAGER_SECTION_DELEGATION_KEY =
+"delegationKey";
+public static final String SECRET_MANAGER_SECTION_VERSION = "version";
+public static final String SECRET_MANAGER_SECTION_OWNER = "owner";
+public static final String SECRET_MANAGER_SECTION_RENEWER = "renewer";
+public static final String SECRET_MANAGER_SECTION_REAL_USER = "realUser";
+public static final String SECRET_MANAGER_SECTION_ISSUE_DATE = "issueDate";
+public static final String SECRET_MANAGER_SECTION_MAX_DATE = "maxDate";
+public static final String SECRET_MANAGER_SECTION_SEQUENCE_NUMBER =
+"sequenceNumber";
+public static final String SECRET_MANAGER_SECTION_MASTER_KEY_ID =
+"masterKeyId";
+public static final String SECRET_MANAGER_SECTION_EXPIRY_DATE = "expiryDate";
+public static final String SECRET_MANAGER_SECTION_TOKEN = "token";
+
+public static final String CACHE_MANAGER_SECTION_NEXT_DIRECTIVE_ID =
+"nextDirectiveId";
+public static final String CACHE_MANAGER_SECTION_NUM_POOLS = "numPools";
+public static final String CACHE_MANAGER_SECTION_NUM_DIRECTIVES =
+"numDirectives";
+public static final String CACHE_MANAGER_SECTION_POOL_NAME = "poolName";
+public static final String CACHE_MANAGER_SECTION_OWNER_NAME = "ownerName";
+public static final String CACHE_MANAGER_SECTION_GROUP_NAME = "groupName";
+public static final String CACHE_MANAGER_SECTION_MODE = "mode";
+public static final String CACHE_MANAGER_SECTION_LIMIT = "limit";
+public static final String CACHE_MANAGER_SECTION_MAX_RELATIVE_EXPIRY =
+"maxRelativeExpiry";
+public static final String CACHE_MANAGER_SECTION_POOL = "pool";
+public static final String CACHE_MANAGER_SECTION_EXPIRATION = "expiration";
+public static final String CACHE_MANAGER_SECTION_MILLIS = "millis";
+public static final String CACHE_MANAGER_SECTION_RELATIVE = "relative";
+public static final String CACHE_MANAGER_SECTION_DIRECTIVE = "directive";
+
+public static final String SNAPSHOT_DIFF_SECTION_INODE_ID = "inodeId";
+public static final String SNAPSHOT_DIFF_SECTION_COUNT = "count";
+public static final String SNAPSHOT_DIFF_SECTION_SNAPSHOT_ID = "snapshotId";
+public static final String SNAPSHOT_DIFF_SECTION_CHILDREN_SIZE =
+"childrenSize";
+public static final String SNAPSHOT_DIFF_SECTION_IS_SNAPSHOT_ROOT =
+"isSnapshotRoot";
+public static final String SNAPSHOT_DIFF_SECTION_CREATED_LIST_SIZE =
+"createdListSize";
+public static final String SNAPSHOT_DIFF_SECTION_DELETED_INODE =
+"deletedInode";
+public static final String SNAPSHOT_DIFF_SECTION_DELETED_INODE_REF =
+"deletedInoderef";
+public static final String SNAPSHOT_DIFF_SECTION_CREATED = "created";
+public static final String SNAPSHOT_DIFF_SECTION_SIZE = "size";
+public static final String SNAPSHOT_DIFF_SECTION_FILE_DIFF_ENTRY =
+"fileDiffEntry";
|
public static final String SNAPSHOT_DIFF_SECTION_DIR_DIFF_ENTRY =
|
||||||
|
"dirDiffEntry";
|
||||||
|
public static final String SNAPSHOT_DIFF_SECTION_FILE_DIFF = "fileDiff";
|
||||||
|
public static final String SNAPSHOT_DIFF_SECTION_DIR_DIFF = "dirDiff";
|
||||||
|
|
||||||
|
public static final String INODE_REFERENCE_SECTION_REFERRED_ID = "referredId";
|
||||||
|
public static final String INODE_REFERENCE_SECTION_DST_SNAPSHOT_ID =
|
||||||
|
"dstSnapshotId";
|
||||||
|
public static final String INODE_REFERENCE_SECTION_LAST_SNAPSHOT_ID =
|
||||||
|
"lastSnapshotId";
|
||||||
|
public static final String INODE_REFERENCE_SECTION_REF = "ref";
|
||||||
|
|
||||||
|
public static final String INODE_DIRECTORY_SECTION_PARENT = "parent";
|
||||||
|
public static final String INODE_DIRECTORY_SECTION_CHILD = "child";
|
||||||
|
public static final String INODE_DIRECTORY_SECTION_REF_CHILD = "refChild";
|
||||||
|
public static final String INODE_DIRECTORY_SECTION_DIRECTORY = "directory";
|
||||||
|
|
||||||
|
public static final String SNAPSHOT_SECTION_SNAPSHOT_COUNTER =
|
||||||
|
"snapshotCounter";
|
||||||
|
public static final String SNAPSHOT_SECTION_NUM_SNAPSHOTS = "numSnapshots";
|
||||||
|
public static final String SNAPSHOT_SECTION_SNAPSHOT_TABLE_DIR =
|
||||||
|
"snapshottableDir";
|
||||||
|
public static final String SNAPSHOT_SECTION_DIR = "dir";
|
||||||
|
public static final String SNAPSHOT_SECTION_ROOT = "root";
|
||||||
|
public static final String SNAPSHOT_SECTION_SNAPSHOT = "snapshot";
|
||||||
|
|
||||||
private final Configuration conf;
|
private final Configuration conf;
|
||||||
private final PrintStream out;
|
private final PrintStream out;
|
||||||
private final SimpleDateFormat isoDateFormat;
|
private final SimpleDateFormat isoDateFormat;
|
||||||
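Throughout the hunks below, each of these constants is paired with a value and emitted through a chained `o(...)` helper. The helper itself is outside this hunk, so the following is only a minimal sketch of such an element writer, assuming a plain `PrintStream` sink; the method name and the chained-return style are taken from the calls in the diff, everything else is an assumption.

```java
// Illustrative only; the real PBImageXmlWriter helper may differ (e.g. it may
// escape values or handle nulls). Shown to clarify the o(name, value) calls
// used throughout the hunks below.
import java.io.PrintStream;

class XmlElementWriter {
  private final PrintStream out;

  XmlElementWriter(PrintStream out) {
    this.out = out;
  }

  // Emits <name>value</name> and returns this so calls can be chained,
  // e.g. o(SECTION_ID, id).o(SECTION_PATH, path).
  XmlElementWriter o(String name, Object value) {
    out.print("<" + name + ">" + value + "</" + name + ">");
    return this;
  }
}
```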
@@ -177,98 +325,106 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {
   }

   private void dumpCacheManagerSection(InputStream is) throws IOException {
-    out.print("<CacheManagerSection>");
+    out.print("<" + CACHE_MANAGER_SECTION_NAME + ">");
     CacheManagerSection s = CacheManagerSection.parseDelimitedFrom(is);
-    o("nextDirectiveId", s.getNextDirectiveId());
+    o(CACHE_MANAGER_SECTION_NEXT_DIRECTIVE_ID, s.getNextDirectiveId());
-    o("numDirectives", s.getNumDirectives());
+    o(CACHE_MANAGER_SECTION_NUM_DIRECTIVES, s.getNumDirectives());
-    o("numPools", s.getNumPools());
+    o(CACHE_MANAGER_SECTION_NUM_POOLS, s.getNumPools());
     for (int i = 0; i < s.getNumPools(); ++i) {
       CachePoolInfoProto p = CachePoolInfoProto.parseDelimitedFrom(is);
-      out.print("<pool>");
+      out.print("<" + CACHE_MANAGER_SECTION_POOL + ">");
-      o("poolName", p.getPoolName()).o("ownerName", p.getOwnerName())
-          .o("groupName", p.getGroupName()).o("mode", p.getMode())
-          .o("limit", p.getLimit())
-          .o("maxRelativeExpiry", p.getMaxRelativeExpiry());
-      out.print("</pool>\n");
+      o(CACHE_MANAGER_SECTION_POOL_NAME, p.getPoolName()).
+          o(CACHE_MANAGER_SECTION_OWNER_NAME, p.getOwnerName())
+          .o(CACHE_MANAGER_SECTION_GROUP_NAME, p.getGroupName())
+          .o(CACHE_MANAGER_SECTION_MODE, p.getMode())
+          .o(CACHE_MANAGER_SECTION_LIMIT, p.getLimit())
+          .o(CACHE_MANAGER_SECTION_MAX_RELATIVE_EXPIRY,
+              p.getMaxRelativeExpiry());
+      out.print("</" + CACHE_MANAGER_SECTION_POOL + ">\n");
     }
     for (int i = 0; i < s.getNumDirectives(); ++i) {
       CacheDirectiveInfoProto p = CacheDirectiveInfoProto
           .parseDelimitedFrom(is);
-      out.print("<directive>");
+      out.print("<" + CACHE_MANAGER_SECTION_DIRECTIVE + ">");
-      o("id", p.getId()).o("path", p.getPath())
-          .o("replication", p.getReplication()).o("pool", p.getPool());
+      o(SECTION_ID, p.getId()).o(SECTION_PATH, p.getPath())
+          .o(SECTION_REPLICATION, p.getReplication())
+          .o(CACHE_MANAGER_SECTION_POOL, p.getPool());
-      out.print("<expiration>");
+      out.print("<" + CACHE_MANAGER_SECTION_EXPIRATION + ">");
       CacheDirectiveInfoExpirationProto e = p.getExpiration();
-      o("millis", e.getMillis()).o("relative", e.getIsRelative());
+      o(CACHE_MANAGER_SECTION_MILLIS, e.getMillis())
+          .o(CACHE_MANAGER_SECTION_RELATIVE, e.getIsRelative());
-      out.print("</expiration>\n");
+      out.print("</" + CACHE_MANAGER_SECTION_EXPIRATION + ">\n");
-      out.print("</directive>\n");
+      out.print("</" + CACHE_MANAGER_SECTION_DIRECTIVE + ">\n");
     }
-    out.print("</CacheManagerSection>\n");
+    out.print("</" + CACHE_MANAGER_SECTION_NAME + ">\n");
   }

   private void dumpFileUnderConstructionSection(InputStream in)
       throws IOException {
-    out.print("<FileUnderConstructionSection>");
+    out.print("<" + FILE_UNDER_CONSTRUCTION_SECTION_NAME + ">");
     while (true) {
       FileUnderConstructionEntry e = FileUnderConstructionEntry
           .parseDelimitedFrom(in);
       if (e == null) {
         break;
       }
-      out.print("<inode>");
+      out.print("<" + INODE_SECTION_INODE + ">");
-      o("id", e.getInodeId()).o("path", e.getFullPath());
+      o(SECTION_ID, e.getInodeId())
+          .o(SECTION_PATH, e.getFullPath());
-      out.print("</inode>\n");
+      out.print("</" + INODE_SECTION_INODE + ">\n");
     }
-    out.print("</FileUnderConstructionSection>\n");
+    out.print("</" + FILE_UNDER_CONSTRUCTION_SECTION_NAME + ">\n");
   }

   private void dumpXattrs(INodeSection.XAttrFeatureProto xattrs) {
-    out.print("<xattrs>");
+    out.print("<" + INODE_SECTION_XATTRS + ">");
     for (INodeSection.XAttrCompactProto xattr : xattrs.getXAttrsList()) {
-      out.print("<xattr>");
+      out.print("<" + INODE_SECTION_XATTR + ">");
       int encodedName = xattr.getName();
       int ns = (XATTR_NAMESPACE_MASK & (encodedName >> XATTR_NAMESPACE_OFFSET)) |
           ((XATTR_NAMESPACE_EXT_MASK & (encodedName >> XATTR_NAMESPACE_EXT_OFFSET)) << 2);
-      o("ns", XAttrProtos.XAttrProto.
+      o(INODE_SECTION_NS, XAttrProtos.XAttrProto.
           XAttrNamespaceProto.valueOf(ns).toString());
-      o("name", stringTable[XATTR_NAME_MASK & (encodedName >> XATTR_NAME_OFFSET)]);
+      o(SECTION_NAME,
+          stringTable[XATTR_NAME_MASK & (encodedName >> XATTR_NAME_OFFSET)]);
       ByteString val = xattr.getValue();
       if (val.isValidUtf8()) {
-        o("val", val.toStringUtf8());
+        o(INODE_SECTION_VAL, val.toStringUtf8());
       } else {
-        o("valHex", Hex.encodeHexString(val.toByteArray()));
+        o(INODE_SECTION_VAL_HEX, Hex.encodeHexString(val.toByteArray()));
       }
-      out.print("</xattr>");
+      out.print("</" + INODE_SECTION_XATTR + ">");
     }
-    out.print("</xattrs>");
+    out.print("</" + INODE_SECTION_XATTRS + ">");
   }

   private void dumpINodeDirectory(INodeDirectory d) {
-    o("mtime", d.getModificationTime()).o("permission",
-        dumpPermission(d.getPermission()));
+    o(INODE_SECTION_MTIME, d.getModificationTime())
+        .o(INODE_SECTION_PERMISSION, dumpPermission(d.getPermission()));
     if (d.hasXAttrs()) {
       dumpXattrs(d.getXAttrs());
     }
     dumpAcls(d.getAcl());
     if (d.hasDsQuota() && d.hasNsQuota()) {
-      o("nsquota", d.getNsQuota()).o("dsquota", d.getDsQuota());
+      o(INODE_SECTION_NS_QUOTA, d.getNsQuota())
+          .o(INODE_SECTION_DS_QUOTA, d.getDsQuota());
     }
     INodeSection.QuotaByStorageTypeFeatureProto typeQuotas =
         d.getTypeQuotas();
     if (typeQuotas != null) {
       for (INodeSection.QuotaByStorageTypeEntryProto entry:
           typeQuotas.getQuotasList()) {
-        out.print("<typeQuota>");
+        out.print("<" + INODE_SECTION_TYPE_QUOTA + ">");
-        o("type", entry.getStorageType().toString());
+        o(INODE_SECTION_TYPE, entry.getStorageType().toString());
-        o("quota", entry.getQuota());
+        o(INODE_SECTION_QUOTA, entry.getQuota());
-        out.print("</typeQuota>");
+        out.print("</" + INODE_SECTION_TYPE_QUOTA + ">");
       }
     }
   }

   private void dumpINodeDirectorySection(InputStream in) throws IOException {
-    out.print("<INodeDirectorySection>");
+    out.print("<" + INODE_DIRECTORY_SECTION_NAME + ">");
     while (true) {
       INodeDirectorySection.DirEntry e = INodeDirectorySection.DirEntry
           .parseDelimitedFrom(in);
@@ -276,21 +432,21 @@ private void dumpINodeDirectorySection(InputStream in) throws IOException {
       if (e == null) {
         break;
       }
-      out.print("<directory>");
+      out.print("<" + INODE_DIRECTORY_SECTION_DIRECTORY + ">");
-      o("parent", e.getParent());
+      o(INODE_DIRECTORY_SECTION_PARENT, e.getParent());
       for (long id : e.getChildrenList()) {
-        o("child", id);
+        o(INODE_DIRECTORY_SECTION_CHILD, id);
       }
       for (int refId : e.getRefChildrenList()) {
-        o("refChild", refId);
+        o(INODE_DIRECTORY_SECTION_REF_CHILD, refId);
       }
-      out.print("</directory>\n");
+      out.print("</" + INODE_DIRECTORY_SECTION_DIRECTORY + ">\n");
     }
-    out.print("</INodeDirectorySection>\n");
+    out.print("</" + INODE_DIRECTORY_SECTION_NAME + ">\n");
   }

   private void dumpINodeReferenceSection(InputStream in) throws IOException {
-    out.print("<INodeReferenceSection>");
+    out.print("<" + INODE_REFERENCE_SECTION_NAME + ">");
     while (true) {
       INodeReferenceSection.INodeReference e = INodeReferenceSection
           .INodeReference.parseDelimitedFrom(in);
@@ -299,49 +455,53 @@ private void dumpINodeReferenceSection(InputStream in) throws IOException {
       }
       dumpINodeReference(e);
     }
-    out.print("</INodeReferenceSection>");
+    out.print("</" + INODE_REFERENCE_SECTION_NAME + ">");
   }

   private void dumpINodeReference(INodeReferenceSection.INodeReference r) {
-    out.print("<ref>");
+    out.print("<" + INODE_REFERENCE_SECTION_REF + ">");
-    o("referredId", r.getReferredId()).o("name", r.getName().toStringUtf8())
-        .o("dstSnapshotId", r.getDstSnapshotId())
-        .o("lastSnapshotId", r.getLastSnapshotId());
+    o(INODE_REFERENCE_SECTION_REFERRED_ID, r.getReferredId())
+        .o(SECTION_NAME, r.getName().toStringUtf8())
+        .o(INODE_REFERENCE_SECTION_DST_SNAPSHOT_ID, r.getDstSnapshotId())
+        .o(INODE_REFERENCE_SECTION_LAST_SNAPSHOT_ID,
+            r.getLastSnapshotId());
-    out.print("</ref>\n");
+    out.print("</" + INODE_REFERENCE_SECTION_REF + ">\n");
   }

   private void dumpINodeFile(INodeSection.INodeFile f) {
-    o("replication", f.getReplication()).o("mtime", f.getModificationTime())
-        .o("atime", f.getAccessTime())
-        .o("preferredBlockSize", f.getPreferredBlockSize())
-        .o("permission", dumpPermission(f.getPermission()));
+    o(SECTION_REPLICATION, f.getReplication())
+        .o(INODE_SECTION_MTIME, f.getModificationTime())
+        .o(INODE_SECTION_ATIME, f.getAccessTime())
+        .o(INODE_SECTION_PREFERRED_BLOCK_SIZE, f.getPreferredBlockSize())
+        .o(INODE_SECTION_PERMISSION, dumpPermission(f.getPermission()));
     if (f.hasXAttrs()) {
       dumpXattrs(f.getXAttrs());
     }
     dumpAcls(f.getAcl());
     if (f.getBlocksCount() > 0) {
-      out.print("<blocks>");
+      out.print("<" + INODE_SECTION_BLOCKS + ">");
       for (BlockProto b : f.getBlocksList()) {
-        out.print("<block>");
+        out.print("<" + INODE_SECTION_BLOCK + ">");
-        o("id", b.getBlockId()).o("genstamp", b.getGenStamp()).o("numBytes",
-            b.getNumBytes());
+        o(SECTION_ID, b.getBlockId())
+            .o(INODE_SECTION_GEMSTAMP, b.getGenStamp())
+            .o(INODE_SECTION_NUM_BYTES, b.getNumBytes());
-        out.print("</block>\n");
+        out.print("</" + INODE_SECTION_BLOCK + ">\n");
       }
-      out.print("</blocks>\n");
+      out.print("</" + INODE_SECTION_BLOCKS + ">\n");
     }
     if (f.hasStoragePolicyID()) {
-      o("storagePolicyId", f.getStoragePolicyID());
+      o(INODE_SECTION_STORAGE_POLICY_ID, f.getStoragePolicyID());
     }
     if (f.getIsStriped()) {
-      out.print("<isStriped/>");
+      out.print("<" + INODE_SECTION_IS_STRIPED + "/>");
     }

     if (f.hasFileUC()) {
       INodeSection.FileUnderConstructionFeature u = f.getFileUC();
-      out.print("<file-under-construction>");
+      out.print("<" + INODE_SECTION_FILE_UNDER_CONSTRUCTION + ">");
-      o("clientName", u.getClientName()).o("clientMachine",
-          u.getClientMachine());
+      o(INODE_SECTION_CLIENT_NAME, u.getClientName())
+          .o(INODE_SECTION_CLIENT_MACHINE, u.getClientMachine());
-      out.print("</file-under-construction>\n");
+      out.print("</" + INODE_SECTION_FILE_UNDER_CONSTRUCTION + ">\n");
     }
   }

@@ -349,31 +509,31 @@ private void dumpAcls(AclFeatureProto aclFeatureProto) {
     ImmutableList<AclEntry> aclEntryList = FSImageFormatPBINode.Loader
         .loadAclEntries(aclFeatureProto, stringTable);
     if (aclEntryList.size() > 0) {
-      out.print("<acls>");
+      out.print("<" + INODE_SECTION_ACLS + ">");
       for (AclEntry aclEntry : aclEntryList) {
-        o("acl", aclEntry.toString());
+        o(INODE_SECTION_ACL, aclEntry.toString());
       }
-      out.print("</acls>");
+      out.print("</" + INODE_SECTION_ACLS + ">");
     }
   }

   private void dumpINodeSection(InputStream in) throws IOException {
     INodeSection s = INodeSection.parseDelimitedFrom(in);
-    out.print("<INodeSection>");
+    out.print("<" + INODE_SECTION_NAME + ">");
-    o("lastInodeId", s.getLastInodeId());
+    o(INODE_SECTION_LAST_INODE_ID, s.getLastInodeId());
-    o("numInodes", s.getNumInodes());
+    o(INODE_SECTION_NUM_INODES, s.getNumInodes());
     for (int i = 0; i < s.getNumInodes(); ++i) {
       INodeSection.INode p = INodeSection.INode.parseDelimitedFrom(in);
-      out.print("<inode>");
+      out.print("<" + INODE_SECTION_INODE + ">");
       dumpINodeFields(p);
-      out.print("</inode>\n");
+      out.print("</" + INODE_SECTION_INODE + ">\n");
     }
-    out.print("</INodeSection>\n");
+    out.print("</" + INODE_SECTION_NAME + ">\n");
   }

   private void dumpINodeFields(INodeSection.INode p) {
-    o("id", p.getId()).o("type", p.getType()).o("name",
-        p.getName().toStringUtf8());
+    o(SECTION_ID, p.getId()).o(INODE_SECTION_TYPE, p.getType())
+        .o(SECTION_NAME, p.getName().toStringUtf8());
     if (p.hasFile()) {
       dumpINodeFile(p.getFile());
     } else if (p.hasDirectory()) {
@@ -384,20 +544,23 @@ private void dumpINodeFields(INodeSection.INode p) {
   }

   private void dumpINodeSymlink(INodeSymlink s) {
-    o("permission", dumpPermission(s.getPermission()))
-        .o("target", s.getTarget().toStringUtf8())
-        .o("mtime", s.getModificationTime()).o("atime", s.getAccessTime());
+    o(INODE_SECTION_PERMISSION, dumpPermission(s.getPermission()))
+        .o(INODE_SECTION_TARGET, s.getTarget().toStringUtf8())
+        .o(INODE_SECTION_MTIME, s.getModificationTime())
+        .o(INODE_SECTION_ATIME, s.getAccessTime());
   }

   private void dumpNameSection(InputStream in) throws IOException {
     NameSystemSection s = NameSystemSection.parseDelimitedFrom(in);
-    out.print("<NameSection>");
+    out.print("<" + NAME_SECTION_NAME + ">");
-    o("namespaceId", s.getNamespaceId());
+    o(NAME_SECTION_NAMESPACE_ID, s.getNamespaceId());
-    o("genstampV1", s.getGenstampV1()).o("genstampV2", s.getGenstampV2())
-        .o("genstampV1Limit", s.getGenstampV1Limit())
-        .o("lastAllocatedBlockId", s.getLastAllocatedBlockId())
-        .o("txid", s.getTransactionId());
+    o(NAME_SECTION_GENSTAMPV1, s.getGenstampV1())
+        .o(NAME_SECTION_GENSTAMPV2, s.getGenstampV2())
+        .o(NAME_SECTION_GENSTAMPV1_LIMIT, s.getGenstampV1Limit())
+        .o(NAME_SECTION_LAST_ALLOCATED_BLOCK_ID,
+            s.getLastAllocatedBlockId())
+        .o(NAME_SECTION_TXID, s.getTransactionId());
-    out.print("</NameSection>\n");
+    out.print("</" + NAME_SECTION_NAME + ">\n");
   }

   private String dumpPermission(long permission) {
@@ -408,59 +571,63 @@ private String dumpPermission(long permission) {
   }

   private void dumpSecretManagerSection(InputStream is) throws IOException {
-    out.print("<SecretManagerSection>");
+    out.print("<" + SECRET_MANAGER_SECTION_NAME + ">");
     SecretManagerSection s = SecretManagerSection.parseDelimitedFrom(is);
     int expectedNumDelegationKeys = s.getNumKeys();
     int expectedNumTokens = s.getNumTokens();
-    o("currentId", s.getCurrentId()).o("tokenSequenceNumber",
-        s.getTokenSequenceNumber()).
-        o("numDelegationKeys", expectedNumDelegationKeys).
-        o("numTokens", expectedNumTokens);
+    o(SECRET_MANAGER_SECTION_CURRENT_ID, s.getCurrentId())
+        .o(SECRET_MANAGER_SECTION_TOKEN_SEQUENCE_NUMBER,
+            s.getTokenSequenceNumber()).
+        o(SECRET_MANAGER_SECTION_NUM_DELEGATION_KEYS,
+            expectedNumDelegationKeys).
+        o(SECRET_MANAGER_SECTION_NUM_TOKENS, expectedNumTokens);
     for (int i = 0; i < expectedNumDelegationKeys; i++) {
       SecretManagerSection.DelegationKey dkey =
           SecretManagerSection.DelegationKey.parseDelimitedFrom(is);
-      out.print("<delegationKey>");
+      out.print("<" + SECRET_MANAGER_SECTION_DELEGATION_KEY + ">");
-      o("id", dkey.getId());
+      o(SECTION_ID, dkey.getId());
-      o("key", Hex.encodeHexString(dkey.getKey().toByteArray()));
+      o(SECRET_MANAGER_SECTION_KEY,
+          Hex.encodeHexString(dkey.getKey().toByteArray()));
       if (dkey.hasExpiryDate()) {
-        dumpDate("expiry", dkey.getExpiryDate());
+        dumpDate(SECRET_MANAGER_SECTION_EXPIRY, dkey.getExpiryDate());
       }
-      out.print("</delegationKey>");
+      out.print("</" + SECRET_MANAGER_SECTION_DELEGATION_KEY + ">");
     }
     for (int i = 0; i < expectedNumTokens; i++) {
       SecretManagerSection.PersistToken token =
           SecretManagerSection.PersistToken.parseDelimitedFrom(is);
-      out.print("<token>");
+      out.print("<" + SECRET_MANAGER_SECTION_TOKEN + ">");
       if (token.hasVersion()) {
-        o("version", token.getVersion());
+        o(SECRET_MANAGER_SECTION_VERSION, token.getVersion());
       }
       if (token.hasOwner()) {
-        o("owner", token.getOwner());
+        o(SECRET_MANAGER_SECTION_OWNER, token.getOwner());
       }
       if (token.hasRenewer()) {
-        o("renewer", token.getRenewer());
+        o(SECRET_MANAGER_SECTION_RENEWER, token.getRenewer());
       }
       if (token.hasRealUser()) {
-        o("realUser", token.getRealUser());
+        o(SECRET_MANAGER_SECTION_REAL_USER, token.getRealUser());
       }
       if (token.hasIssueDate()) {
-        dumpDate("issueDate", token.getIssueDate());
+        dumpDate(SECRET_MANAGER_SECTION_ISSUE_DATE, token.getIssueDate());
       }
       if (token.hasMaxDate()) {
-        dumpDate("maxDate", token.getMaxDate());
+        dumpDate(SECRET_MANAGER_SECTION_MAX_DATE, token.getMaxDate());
       }
       if (token.hasSequenceNumber()) {
-        o("sequenceNumber", token.getSequenceNumber());
+        o(SECRET_MANAGER_SECTION_SEQUENCE_NUMBER,
+            token.getSequenceNumber());
       }
       if (token.hasMasterKeyId()) {
-        o("masterKeyId", token.getMasterKeyId());
+        o(SECRET_MANAGER_SECTION_MASTER_KEY_ID, token.getMasterKeyId());
       }
       if (token.hasExpiryDate()) {
-        dumpDate("expiryDate", token.getExpiryDate());
+        dumpDate(SECRET_MANAGER_SECTION_EXPIRY_DATE, token.getExpiryDate());
       }
-      out.print("</token>");
+      out.print("</" + SECRET_MANAGER_SECTION_TOKEN + ">");
     }
-    out.print("</SecretManagerSection>");
+    out.print("</" + SECRET_MANAGER_SECTION_NAME + ">");
   }

   private void dumpDate(String tag, long date) {
@@ -469,7 +636,7 @@ private void dumpDate(String tag, long date) {
   }

   private void dumpSnapshotDiffSection(InputStream in) throws IOException {
-    out.print("<SnapshotDiffSection>");
+    out.print("<" + SNAPSHOT_DIFF_SECTION_NAME + ">");
     while (true) {
       SnapshotDiffSection.DiffEntry e = SnapshotDiffSection.DiffEntry
           .parseDelimitedFrom(in);
@@ -478,52 +645,54 @@ private void dumpSnapshotDiffSection(InputStream in) throws IOException {
       }
       switch (e.getType()) {
       case FILEDIFF:
-        out.print("<fileDiffEntry>");
+        out.print("<" + SNAPSHOT_DIFF_SECTION_FILE_DIFF_ENTRY + ">");
         break;
       case DIRECTORYDIFF:
-        out.print("<dirDiffEntry>");
+        out.print("<" + SNAPSHOT_DIFF_SECTION_DIR_DIFF_ENTRY + ">");
         break;
       default:
         throw new IOException("unknown DiffEntry type " + e.getType());
       }
-      o("inodeId", e.getInodeId());
+      o(SNAPSHOT_DIFF_SECTION_INODE_ID, e.getInodeId());
-      o("count", e.getNumOfDiff());
+      o(SNAPSHOT_DIFF_SECTION_COUNT, e.getNumOfDiff());
       switch (e.getType()) {
       case FILEDIFF: {
         for (int i = 0; i < e.getNumOfDiff(); ++i) {
-          out.print("<fileDiff>");
+          out.print("<" + SNAPSHOT_DIFF_SECTION_FILE_DIFF + ">");
           SnapshotDiffSection.FileDiff f = SnapshotDiffSection.FileDiff
               .parseDelimitedFrom(in);
-          o("snapshotId", f.getSnapshotId()).o("size", f.getFileSize()).o(
-              "name", f.getName().toStringUtf8());
+          o(SNAPSHOT_DIFF_SECTION_SNAPSHOT_ID, f.getSnapshotId())
+              .o(SNAPSHOT_DIFF_SECTION_SIZE, f.getFileSize())
+              .o(SECTION_NAME, f.getName().toStringUtf8());
-          out.print("</fileDiff>\n");
+          out.print("</" + SNAPSHOT_DIFF_SECTION_FILE_DIFF + ">\n");
         }
       }
         break;
       case DIRECTORYDIFF: {
         for (int i = 0; i < e.getNumOfDiff(); ++i) {
-          out.print("<dirDiff>");
+          out.print("<" + SNAPSHOT_DIFF_SECTION_DIR_DIFF + ">");
           SnapshotDiffSection.DirectoryDiff d = SnapshotDiffSection.DirectoryDiff
               .parseDelimitedFrom(in);
-          o("snapshotId", d.getSnapshotId())
-              .o("childrenSize", d.getChildrenSize())
-              .o("isSnapshotRoot", d.getIsSnapshotRoot())
-              .o("name", d.getName().toStringUtf8())
-              .o("createdListSize", d.getCreatedListSize());
+          o(SNAPSHOT_DIFF_SECTION_SNAPSHOT_ID, d.getSnapshotId())
+              .o(SNAPSHOT_DIFF_SECTION_CHILDREN_SIZE, d.getChildrenSize())
+              .o(SNAPSHOT_DIFF_SECTION_IS_SNAPSHOT_ROOT, d.getIsSnapshotRoot())
+              .o(SECTION_NAME, d.getName().toStringUtf8())
+              .o(SNAPSHOT_DIFF_SECTION_CREATED_LIST_SIZE,
+                  d.getCreatedListSize());
           for (long did : d.getDeletedINodeList()) {
-            o("deletedInode", did);
+            o(SNAPSHOT_DIFF_SECTION_DELETED_INODE, did);
           }
           for (int dRefid : d.getDeletedINodeRefList()) {
-            o("deletedInoderef", dRefid);
+            o(SNAPSHOT_DIFF_SECTION_DELETED_INODE_REF, dRefid);
           }
           for (int j = 0; j < d.getCreatedListSize(); ++j) {
             SnapshotDiffSection.CreatedListEntry ce = SnapshotDiffSection.CreatedListEntry
                 .parseDelimitedFrom(in);
-            out.print("<created>");
+            out.print("<" + SNAPSHOT_DIFF_SECTION_CREATED + ">");
-            o("name", ce.getName().toStringUtf8());
+            o(SECTION_NAME, ce.getName().toStringUtf8());
-            out.print("</created>\n");
+            out.print("</" + SNAPSHOT_DIFF_SECTION_CREATED + ">\n");
           }
-          out.print("</dirDiff>\n");
+          out.print("</" + SNAPSHOT_DIFF_SECTION_DIR_DIFF + ">\n");
         }
         break;
       }
@@ -532,41 +701,41 @@ private void dumpSnapshotDiffSection(InputStream in) throws IOException {
       }
       switch (e.getType()) {
       case FILEDIFF:
-        out.print("</fileDiffEntry>");
+        out.print("</" + SNAPSHOT_DIFF_SECTION_FILE_DIFF_ENTRY + ">");
         break;
       case DIRECTORYDIFF:
-        out.print("</dirDiffEntry>");
+        out.print("</" + SNAPSHOT_DIFF_SECTION_DIR_DIFF_ENTRY + ">");
         break;
       default:
         throw new IOException("unknown DiffEntry type " + e.getType());
       }
     }
-    out.print("</SnapshotDiffSection>\n");
+    out.print("</" + SNAPSHOT_DIFF_SECTION_NAME + ">\n");
   }

   private void dumpSnapshotSection(InputStream in) throws IOException {
-    out.print("<SnapshotSection>");
+    out.print("<" + SNAPSHOT_SECTION_NAME + ">");
     SnapshotSection s = SnapshotSection.parseDelimitedFrom(in);
-    o("snapshotCounter", s.getSnapshotCounter());
+    o(SNAPSHOT_SECTION_SNAPSHOT_COUNTER, s.getSnapshotCounter());
-    o("numSnapshots", s.getNumSnapshots());
+    o(SNAPSHOT_SECTION_NUM_SNAPSHOTS, s.getNumSnapshots());
     if (s.getSnapshottableDirCount() > 0) {
-      out.print("<snapshottableDir>");
+      out.print("<" + SNAPSHOT_SECTION_SNAPSHOT_TABLE_DIR + ">");
       for (long id : s.getSnapshottableDirList()) {
-        o("dir", id);
+        o(SNAPSHOT_SECTION_DIR, id);
       }
-      out.print("</snapshottableDir>\n");
+      out.print("</" + SNAPSHOT_SECTION_SNAPSHOT_TABLE_DIR + ">\n");
     }
     for (int i = 0; i < s.getNumSnapshots(); ++i) {
       SnapshotSection.Snapshot pbs = SnapshotSection.Snapshot
           .parseDelimitedFrom(in);
-      out.print("<snapshot>");
+      out.print("<" + SNAPSHOT_SECTION_SNAPSHOT + ">");
-      o("id", pbs.getSnapshotId());
+      o(SECTION_ID, pbs.getSnapshotId());
-      out.print("<root>");
+      out.print("<" + SNAPSHOT_SECTION_ROOT + ">");
       dumpINodeFields(pbs.getRoot());
-      out.print("</root>");
+      out.print("</" + SNAPSHOT_SECTION_ROOT + ">");
-      out.print("</snapshot>");
+      out.print("</" + SNAPSHOT_SECTION_SNAPSHOT + ">");
     }
-    out.print("</SnapshotSection>\n");
+    out.print("</" + SNAPSHOT_SECTION_NAME + ">\n");
   }

   private void loadStringTable(InputStream in) throws IOException {
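The section writers above are normally driven by the offline image viewer. As a rough usage sketch (not part of this patch), the XML writer can be pointed at an fsimage file along these lines; the constructor and `visit` signature are inferred from the `conf`/`out` fields above and may differ from the real class.

```java
// Hypothetical driver for the XML writer shown above; the output path and
// construction details are assumptions, not taken from this patch.
import java.io.PrintStream;
import java.io.RandomAccessFile;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter;

public class OivXmlExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (PrintStream out = new PrintStream("fsimage.xml");
         RandomAccessFile image = new RandomAccessFile(args[0], "r")) {
      // Walk the fsimage sections and emit the XML elements named by the
      // constants in the hunks above.
      new PBImageXmlWriter(conf, out).visit(image);
    }
  }
}
```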
@@ -2606,6 +2606,24 @@
   </description>
 </property>

+<property>
+  <name>dfs.namenode.edekcacheloader.interval.ms</name>
+  <value>1000</value>
+  <description>When KeyProvider is configured, the interval at which the edek
+    cache is warmed up after the NN starts up or becomes active. All edeks
+    will be loaded from the KMS into the provider cache. The edek cache loader
+    keeps retrying until it succeeds or the NN leaves the active state.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.edekcacheloader.initial.delay.ms</name>
+  <value>3000</value>
+  <description>When KeyProvider is configured, the delay before the first
+    attempt to warm up the edek cache after the NN starts up or becomes active.
+  </description>
+</property>
+
 <property>
   <name>dfs.namenode.inotify.max.events.per.rpc</name>
   <value>1000</value>
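If the defaults above need tuning, both keys can be overridden like any other HDFS setting; a small sketch, with the key names taken from the entries above and the values chosen arbitrarily:

```java
// Sketch: overriding the edek cache loader settings programmatically. The key
// strings come from the hdfs-default.xml entries above; the values are
// arbitrary examples.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class EdekCacheTuning {
  public static Configuration tunedConf() {
    Configuration conf = new HdfsConfiguration();
    conf.setInt("dfs.namenode.edekcacheloader.interval.ms", 2000);      // retry every 2s
    conf.setInt("dfs.namenode.edekcacheloader.initial.delay.ms", 5000); // first attempt after 5s
    return conf;
  }
}
```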
@@ -3004,4 +3022,18 @@
     retries or failovers for WebHDFS client.
   </description>
 </property>

+<property>
+  <name>dfs.namenode.hosts.provider.classname</name>
+  <value>org.apache.hadoop.hdfs.server.blockmanagement.HostFileManager</value>
+  <description>
+    The class that provides access to the host files.
+    org.apache.hadoop.hdfs.server.blockmanagement.HostFileManager is used by
+    default, and it loads the files specified by dfs.hosts and dfs.hosts.exclude.
+    If org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager is
+    used, it loads the JSON file defined in dfs.hosts.
+    Changing the class name requires a NameNode restart; "dfsadmin -refreshNodes"
+    only refreshes the configuration files used by the class.
+  </description>
+</property>
 </configuration>
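The `CombinedHostFileManager` path described above is exercised by the `TestDatanodeReport` change later in this patch; a condensed sketch of that setup follows (the host name, upgrade domain, and temp path are placeholders):

```java
// Sketch of switching the NameNode to the JSON-based host provider, modeled
// on the TestDatanodeReport changes later in this patch.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
import org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager;
import org.apache.hadoop.hdfs.server.blockmanagement.HostConfigManager;
import org.apache.hadoop.hdfs.util.HostsFileWriter;

public class CombinedHostFileExample {
  public static Configuration build() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
        CombinedHostFileManager.class, HostConfigManager.class);

    // HostsFileWriter writes the JSON include file referenced by dfs.hosts.
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/hostsExample");

    DatanodeAdminProperties dn = new DatanodeAdminProperties();
    dn.setHostName("dn1.example.com");   // placeholder datanode host
    dn.setUpgradeDomain("ud1");          // placeholder upgrade domain
    hostsFileWriter.initIncludeHosts(new DatanodeAdminProperties[] {dn});
    return conf;
  }
}
```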
@@ -106,7 +106,8 @@
 </div>
 {/if}

-{@if cond="{NumberOfMissingBlocks} > 0"}
+{@eq key=nnstat.State value="active"}
+  {@if cond="{NumberOfMissingBlocks} > 0"}
 <div class="alert alert-dismissable alert-warning">
   <button type="button" class="close" data-dismiss="alert" aria-hidden="true">×</button>

@@ -119,7 +120,8 @@
 </div>
 <p>Please check the logs or run fsck in order to identify the missing blocks.{@if cond="{NumberOfSnapshottableDirs} > 0"} Please run fsck with -includeSnapshots in order to see detailed reports about snapshots.{/if} See the Hadoop FAQ for common causes and potential solutions.</p>
 </div>
 {/if}
+{/eq}
 {/nn}

 <div class="page-header"><h1>Overview {#nnstat}<small>'{HostAndPort}' ({State})</small>{/nnstat}</h1></div>
@@ -173,10 +175,13 @@
 <tr><th><a href="#tab-datanode">Dead Nodes</a></th><td>{NumDeadDataNodes} (Decommissioned: {NumDecomDeadDataNodes})</td></tr>
 <tr><th><a href="#tab-datanode">Decommissioning Nodes</a></th><td>{NumDecommissioningDataNodes}</td></tr>
 <tr><th><a href="#tab-datanode-volume-failures">Total Datanode Volume Failures</a></th><td>{VolumeFailuresTotal} ({EstimatedCapacityLostTotal|fmt_bytes})</td></tr>
-<tr><th title="Excludes missing blocks.">Number of Under-Replicated Blocks</th><td>{UnderReplicatedBlocks}</td></tr>
-<tr><th>Number of Blocks Pending Deletion</th><td>{PendingDeletionBlocks}</td></tr>
+{@eq key=nnstat.State value="active"}
+<tr><th title="Excludes missing blocks.">Number of Under-Replicated Blocks</th><td>{UnderReplicatedBlocks}</td></tr>
+<tr><th>Number of Blocks Pending Deletion</th><td>{PendingDeletionBlocks}</td></tr>
+{/eq}
 <tr><th>Block Deletion Start Time</th><td>{BlockDeletionStartTime|date_tostring}</td></tr>
 {/fs}

 </table>

 <div class="page-header"><h1>NameNode Journal Status</h1></div>
@@ -142,12 +142,16 @@ The `bin/hdfs dfsadmin` command supports a few HDFS administration related opera
     during last upgrade.

 *   `-refreshNodes`: Updates the namenode with the set of datanodes
-    allowed to connect to the namenode. Namenodes re-read datanode
+    allowed to connect to the namenode. By default, namenodes re-read datanode
     hostnames in the files defined by `dfs.hosts` and `dfs.hosts.exclude`.
     Hosts defined in `dfs.hosts` are the datanodes that are part of the
     cluster. If there are entries in `dfs.hosts`, only the hosts in it
     are allowed to register with the namenode. Entries in
     `dfs.hosts.exclude` are datanodes that need to be decommissioned.
+    Alternatively if `dfs.namenode.hosts.provider.classname` is set to
+    `org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager`,
+    all include and exclude hosts are specified in the JSON file defined by
+    `dfs.hosts`.
     Datanodes complete decommissioning when all the replicas from them
     are replicated to other datanodes. Decommissioned nodes are not
     automatically shutdown and are not chosen for writing for new
@@ -65,7 +65,6 @@ WebHDFS REST API
     * [Rename Snapshot](#Rename_Snapshot)
 * [Delegation Token Operations](#Delegation_Token_Operations)
     * [Get Delegation Token](#Get_Delegation_Token)
-    * [Get Delegation Tokens](#Get_Delegation_Tokens)
     * [Renew Delegation Token](#Renew_Delegation_Token)
     * [Cancel Delegation Token](#Cancel_Delegation_Token)
 * [Error Responses](#Error_Responses)
@@ -89,7 +88,6 @@ WebHDFS REST API
     * [RemoteException JSON Schema](#RemoteException_JSON_Schema)
 * [Token JSON Schema](#Token_JSON_Schema)
     * [Token Properties](#Token_Properties)
-* [Tokens JSON Schema](#Tokens_JSON_Schema)
 * [HTTP Query Parameter Dictionary](#HTTP_Query_Parameter_Dictionary)
     * [ACL Spec](#ACL_Spec)
     * [XAttr Name](#XAttr_Name)
@@ -148,7 +146,6 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
     * [`GETFILECHECKSUM`](#Get_File_Checksum) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileChecksum)
     * [`GETHOMEDIRECTORY`](#Get_Home_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getHomeDirectory)
     * [`GETDELEGATIONTOKEN`](#Get_Delegation_Token) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getDelegationToken)
-    * [`GETDELEGATIONTOKENS`](#Get_Delegation_Tokens) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getDelegationTokens)
     * [`GETXATTRS`](#Get_an_XAttr) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getXAttr)
     * [`GETXATTRS`](#Get_multiple_XAttrs) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getXAttrs)
     * [`GETXATTRS`](#Get_all_XAttrs) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getXAttrs)
@@ -1033,32 +1030,6 @@ Delegation Token Operations

 See also: [`renewer`](#Renewer), [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getDelegationToken, [`kind`](#Token_Kind), [`service`](#Token_Service)

-### Get Delegation Tokens
-
-* Submit a HTTP GET request.
-
-        curl -i "http://<HOST>:<PORT>/webhdfs/v1/?op=GETDELEGATIONTOKENS&renewer=<USER>"
-
-    The client receives a response with a [`Tokens` JSON object](#Tokens_JSON_Schema):
-
-        HTTP/1.1 200 OK
-        Content-Type: application/json
-        Transfer-Encoding: chunked
-
-        {
-          "Tokens":
-          {
-            "Token":
-            [
-              {
-                "urlString":"KAAKSm9i ..."
-              }
-            ]
-          }
-        }
-
-See also: [`renewer`](#Renewer), [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getDelegationTokens
-
 ### Renew Delegation Token

 * Submit a HTTP PUT request.
@@ -1655,7 +1626,7 @@ See also: [`Token` Properties](#Token_Properties), [`GETDELEGATIONTOKEN`](#Get_D

 #### Token Properties

-JavaScript syntax is used to define `tokenProperties` so that it can be referred in both `Token` and `Tokens` JSON schemas.
+JavaScript syntax is used to define `tokenProperties` so that it can be referred in `Token` JSON schema.

 ```json
 var tokenProperties =
@@ -1673,33 +1644,7 @@ var tokenProperties =
 }
 ```

-### Tokens JSON Schema
-
-A `Tokens` JSON object represents an array of `Token` JSON objects.
-
-```json
-{
-  "name" : "Tokens",
-  "properties":
-  {
-    "Tokens":
-    {
-      "type" : "object",
-      "properties":
-      {
-        "Token":
-        {
-          "description": "An array of Token",
-          "type" : "array",
-          "items" : "Token": tokenProperties //See Token Properties
-        }
-      }
-    }
-  }
-}
-```
-
-See also: [`Token` Properties](#Token_Properties), [`GETDELEGATIONTOKENS`](#Get_Delegation_Tokens), the note in [Delegation](#Delegation).
+See also: [`Token` Properties](#Token_Properties), the note in [Delegation](#Delegation).

 HTTP Query Parameter Dictionary
 -------------------------------
@@ -2013,7 +1958,7 @@ See also: [`RENAME`](#Rename_a_FileDirectory)
 | Valid Values | Any valid username. |
 | Syntax | Any string. |

-See also: [`GETDELEGATIONTOKEN`](#Get_Delegation_Token), [`GETDELEGATIONTOKENS`](#Get_Delegation_Tokens)
+See also: [`GETDELEGATIONTOKEN`](#Get_Delegation_Token)

 ### Replication

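For the single-token operation that remains, the backing call is `FileSystem.getDelegationToken(renewer)`; a brief sketch, with the WebHDFS URI and renewer name as placeholders:

```java
// Sketch: fetching a single delegation token through the FileSystem API that
// backs the GETDELEGATIONTOKEN operation retained above. The URI and renewer
// are placeholders, not values from this patch.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.token.Token;

public class DelegationTokenExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(new URI("webhdfs://namenode:9870"), conf);
    Token<?> token = fs.getDelegationToken("renewerUser");
    System.out.println("token kind=" + token.getKind()
        + " service=" + token.getService());
  }
}
```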
@@ -358,7 +358,7 @@ public void testZeroCopyMmapCache() throws Exception {
     fsIn.close();
     fsIn = fs.open(TEST_PATH);
     final ShortCircuitCache cache = ClientContext.get(
-        CONTEXT, new DfsClientConf(conf)). getShortCircuitCache();
+        CONTEXT, conf).getShortCircuitCache();
     cache.accept(new CountingVisitor(0, 5, 5, 0));
     results[0] = fsIn.read(null, BLOCK_SIZE,
         EnumSet.of(ReadOption.SKIP_CHECKSUMS));
@@ -661,7 +661,7 @@ public void testZeroCopyReadOfCachedData() throws Exception {
     final ExtendedBlock firstBlock =
         DFSTestUtil.getFirstBlock(fs, TEST_PATH);
     final ShortCircuitCache cache = ClientContext.get(
-        CONTEXT, new DfsClientConf(conf)). getShortCircuitCache();
+        CONTEXT, conf).getShortCircuitCache();
     waitForReplicaAnchorStatus(cache, firstBlock, true, true, 1);
     // Uncache the replica
     fs.removeCacheDirective(directiveId);
@@ -43,6 +43,7 @@
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
 import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.util.Time;
@@ -736,7 +737,8 @@ private void testStatistics(boolean isShortCircuit) throws Exception {
     byte original[] = new byte[BlockReaderLocalTest.TEST_LENGTH];
     FileSystem fs = null;
     try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster = new MiniDFSCluster.Builder(conf).
+          hosts(new String[] {NetUtils.getLocalHostname()}).build();
       cluster.waitActive();
       fs = cluster.getFileSystem();
       DFSTestUtil.createFile(fs, TEST_PATH,
|
|||||||
DFSClient client = new DFSClient(
|
DFSClient client = new DFSClient(
|
||||||
new InetSocketAddress("localhost",
|
new InetSocketAddress("localhost",
|
||||||
util.getCluster().getNameNodePort()), util.getConf());
|
util.getCluster().getNameNodePort()), util.getConf());
|
||||||
ClientContext cacheContext =
|
|
||||||
ClientContext.get(contextName, client.getConf());
|
|
||||||
DFSInputStream in = client.open(testFile.toString());
|
DFSInputStream in = client.open(testFile.toString());
|
||||||
LOG.info("opened " + testFile.toString());
|
LOG.info("opened " + testFile.toString());
|
||||||
byte[] dataBuf = new byte[BLOCK_SIZE];
|
byte[] dataBuf = new byte[BLOCK_SIZE];
|
||||||
|
@@ -29,11 +29,16 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.HostConfigManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.util.HostsFileWriter;
 import org.junit.Test;

 /**
@ -43,7 +48,57 @@ public class TestDatanodeReport {
|
|||||||
static final Log LOG = LogFactory.getLog(TestDatanodeReport.class);
|
static final Log LOG = LogFactory.getLog(TestDatanodeReport.class);
|
||||||
final static private Configuration conf = new HdfsConfiguration();
|
final static private Configuration conf = new HdfsConfiguration();
|
||||||
final static private int NUM_OF_DATANODES = 4;
|
final static private int NUM_OF_DATANODES = 4;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This test verifies upgrade domain is set according to the JSON host file.
|
||||||
|
*/
|
||||||
|
@Test
|
||||||
|
public void testDatanodeReportWithUpgradeDomain() throws Exception {
|
||||||
|
conf.setInt(
|
||||||
|
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); // 0.5s
|
||||||
|
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
|
||||||
|
conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
|
||||||
|
CombinedHostFileManager.class, HostConfigManager.class);
|
||||||
|
HostsFileWriter hostsFileWriter = new HostsFileWriter();
|
||||||
|
hostsFileWriter.initialize(conf, "temp/datanodeReport");
|
||||||
|
|
||||||
|
MiniDFSCluster cluster =
|
||||||
|
new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
|
||||||
|
final DFSClient client = cluster.getFileSystem().dfs;
|
||||||
|
final String ud1 = "ud1";
|
||||||
|
final String ud2 = "ud2";
|
||||||
|
|
||||||
|
try {
|
||||||
|
//wait until the cluster is up
|
||||||
|
cluster.waitActive();
|
||||||
|
|
||||||
|
DatanodeAdminProperties datanode = new DatanodeAdminProperties();
|
||||||
|
datanode.setHostName(cluster.getDataNodes().get(0).getDatanodeId().getHostName());
|
||||||
|
datanode.setUpgradeDomain(ud1);
|
||||||
|
hostsFileWriter.initIncludeHosts(
|
||||||
|
new DatanodeAdminProperties[]{datanode});
|
||||||
|
client.refreshNodes();
|
||||||
|
DatanodeInfo[] all = client.datanodeReport(DatanodeReportType.ALL);
|
||||||
|
assertEquals(all[0].getUpgradeDomain(), ud1);
|
||||||
|
|
||||||
|
datanode.setUpgradeDomain(null);
|
||||||
|
hostsFileWriter.initIncludeHosts(
|
||||||
|
new DatanodeAdminProperties[]{datanode});
|
||||||
|
client.refreshNodes();
|
||||||
|
all = client.datanodeReport(DatanodeReportType.ALL);
|
||||||
|
assertEquals(all[0].getUpgradeDomain(), null);
|
||||||
|
|
||||||
|
datanode.setUpgradeDomain(ud2);
|
||||||
|
hostsFileWriter.initIncludeHosts(
|
||||||
|
new DatanodeAdminProperties[]{datanode});
|
||||||
|
client.refreshNodes();
|
||||||
|
all = client.datanodeReport(DatanodeReportType.ALL);
|
||||||
|
assertEquals(all[0].getUpgradeDomain(), ud2);
|
||||||
|
} finally {
|
||||||
|
cluster.shutdown();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
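Once the JSON-backed include file is in effect, the assigned upgrade domains are visible through the regular datanode report, which is what the test above checks for a single node. A minimal sketch of reading them back for every node, reusing only the client, LOG, and types already used in this test class:

    // Assumes a DFSClient `client` obtained from cluster.getFileSystem().dfs, as above.
    for (DatanodeInfo dn : client.datanodeReport(DatanodeReportType.ALL)) {
      // getUpgradeDomain() returns null for nodes with no assigned upgrade domain.
      LOG.info(dn.getHostName() + " -> upgrade domain: " + dn.getUpgradeDomain());
    }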
/**
|
/**
|
||||||
* This test attempts to get different types of datanode reports.
|
* This test attempts to get different types of datanode reports.
|
||||||
*/
|
*/
|
||||||
|
@ -70,6 +70,7 @@
|
|||||||
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
|
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
|
||||||
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
|
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
|
||||||
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
|
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
|
||||||
|
import org.apache.hadoop.net.NetUtils;
|
||||||
import org.apache.hadoop.security.UserGroupInformation;
|
import org.apache.hadoop.security.UserGroupInformation;
|
||||||
import org.apache.hadoop.test.GenericTestUtils;
|
import org.apache.hadoop.test.GenericTestUtils;
|
||||||
import org.apache.hadoop.util.DataChecksum;
|
import org.apache.hadoop.util.DataChecksum;
|
||||||
@ -651,6 +652,67 @@ private void checkStatistics(FileSystem fs, int readOps, int writeOps,
|
|||||||
assertEquals(largeReadOps, DFSTestUtil.getStatistics(fs).getLargeReadOps());
|
assertEquals(largeReadOps, DFSTestUtil.getStatistics(fs).getLargeReadOps());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/** Checks read statistics. */
|
||||||
|
private void checkReadStatistics(FileSystem fs, int distance, long expectedReadBytes) {
|
||||||
|
long bytesRead = DFSTestUtil.getStatistics(fs).
|
||||||
|
getBytesReadByDistance(distance);
|
||||||
|
assertEquals(expectedReadBytes, bytesRead);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testLocalHostReadStatistics() throws Exception {
|
||||||
|
testReadFileSystemStatistics(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testLocalRackReadStatistics() throws Exception {
|
||||||
|
testReadFileSystemStatistics(2);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testRemoteRackOfFirstDegreeReadStatistics() throws Exception {
|
||||||
|
testReadFileSystemStatistics(4);
|
||||||
|
}
|
||||||
|
|
||||||
|
/** expectedDistance is the expected distance between client and dn.
|
||||||
|
* 0 means local host.
|
||||||
|
* 2 means same rack.
|
||||||
|
* 4 means remote rack of first degree.
|
||||||
|
*/
|
||||||
|
private void testReadFileSystemStatistics(int expectedDistance)
|
||||||
|
throws IOException {
|
||||||
|
MiniDFSCluster cluster = null;
|
||||||
|
final Configuration conf = getTestConfiguration();
|
||||||
|
|
||||||
|
// create a cluster with a dn with the expected distance.
|
||||||
|
if (expectedDistance == 0) {
|
||||||
|
cluster = new MiniDFSCluster.Builder(conf).
|
||||||
|
hosts(new String[] {NetUtils.getLocalHostname()}).build();
|
||||||
|
} else if (expectedDistance == 2) {
|
||||||
|
cluster = new MiniDFSCluster.Builder(conf).
|
||||||
|
hosts(new String[] {"hostFoo"}).build();
|
||||||
|
} else if (expectedDistance == 4) {
|
||||||
|
cluster = new MiniDFSCluster.Builder(conf).
|
||||||
|
racks(new String[] {"/rackFoo"}).build();
|
||||||
|
}
|
||||||
|
|
||||||
|
// create a file, read the file and verify the metrics
|
||||||
|
try {
|
||||||
|
final FileSystem fs = cluster.getFileSystem();
|
||||||
|
DFSTestUtil.getStatistics(fs).reset();
|
||||||
|
Path dir = new Path("/test");
|
||||||
|
Path file = new Path(dir, "file");
|
||||||
|
String input = "hello world";
|
||||||
|
DFSTestUtil.writeFile(fs, file, input);
|
||||||
|
FSDataInputStream stm = fs.open(file);
|
||||||
|
byte[] actual = new byte[4096];
|
||||||
|
stm.read(actual);
|
||||||
|
checkReadStatistics(fs, expectedDistance, input.length());
|
||||||
|
} finally {
|
||||||
|
if (cluster != null) cluster.shutdown();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
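The distance buckets asserted above are per-FileSystem read statistics: bytes read are attributed to the network distance between the client and the datanode that served them (0 local host, 2 same rack, 4 first-degree remote rack, matching the comment above). A minimal sketch of querying those counters directly, using only the calls already present in checkReadStatistics:

    // After reading from `fs`, inspect how the read bytes were served.
    long localBytes    = DFSTestUtil.getStatistics(fs).getBytesReadByDistance(0);
    long sameRackBytes = DFSTestUtil.getStatistics(fs).getBytesReadByDistance(2);
    long remoteBytes   = DFSTestUtil.getStatistics(fs).getBytesReadByDistance(4);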
@Test
|
@Test
|
||||||
public void testFileChecksum() throws Exception {
|
public void testFileChecksum() throws Exception {
|
||||||
final long seed = RAN.nextLong();
|
final long seed = RAN.nextLong();
|
||||||
|
@ -117,10 +117,10 @@
|
|||||||
|
|
||||||
public class TestEncryptionZones {
|
public class TestEncryptionZones {
|
||||||
|
|
||||||
private Configuration conf;
|
protected Configuration conf;
|
||||||
private FileSystemTestHelper fsHelper;
|
private FileSystemTestHelper fsHelper;
|
||||||
|
|
||||||
private MiniDFSCluster cluster;
|
protected MiniDFSCluster cluster;
|
||||||
protected HdfsAdmin dfsAdmin;
|
protected HdfsAdmin dfsAdmin;
|
||||||
protected DistributedFileSystem fs;
|
protected DistributedFileSystem fs;
|
||||||
private File testRootDir;
|
private File testRootDir;
|
||||||
|
@ -19,6 +19,7 @@
|
|||||||
|
|
||||||
import static org.junit.Assert.assertTrue;
|
import static org.junit.Assert.assertTrue;
|
||||||
|
|
||||||
|
import com.google.common.base.Supplier;
|
||||||
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
|
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
|
||||||
import org.apache.hadoop.crypto.key.kms.server.MiniKMS;
|
import org.apache.hadoop.crypto.key.kms.server.MiniKMS;
|
||||||
import org.apache.hadoop.security.Credentials;
|
import org.apache.hadoop.security.Credentials;
|
||||||
@ -26,10 +27,12 @@
|
|||||||
import org.apache.hadoop.security.token.Token;
|
import org.apache.hadoop.security.token.Token;
|
||||||
import org.apache.hadoop.fs.Path;
|
import org.apache.hadoop.fs.Path;
|
||||||
import org.apache.hadoop.fs.permission.FsPermission;
|
import org.apache.hadoop.fs.permission.FsPermission;
|
||||||
|
import org.apache.hadoop.test.GenericTestUtils;
|
||||||
import org.junit.After;
|
import org.junit.After;
|
||||||
import org.junit.Assert;
|
import org.junit.Assert;
|
||||||
import org.junit.Before;
|
import org.junit.Before;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
|
import org.mockito.internal.util.reflection.Whitebox;
|
||||||
|
|
||||||
import java.io.File;
|
import java.io.File;
|
||||||
import java.util.Arrays;
|
import java.util.Arrays;
|
||||||
@ -71,8 +74,10 @@ public void testCreateEZPopulatesEDEKCache() throws Exception {
|
|||||||
final Path zonePath = new Path("/TestEncryptionZone");
|
final Path zonePath = new Path("/TestEncryptionZone");
|
||||||
fsWrapper.mkdir(zonePath, FsPermission.getDirDefault(), false);
|
fsWrapper.mkdir(zonePath, FsPermission.getDirDefault(), false);
|
||||||
dfsAdmin.createEncryptionZone(zonePath, TEST_KEY);
|
dfsAdmin.createEncryptionZone(zonePath, TEST_KEY);
|
||||||
assertTrue(((KMSClientProvider)fs.getClient().getKeyProvider()).
|
@SuppressWarnings("unchecked")
|
||||||
getEncKeyQueueSize(TEST_KEY) > 0);
|
KMSClientProvider kcp = (KMSClientProvider) Whitebox
|
||||||
|
.getInternalState(cluster.getNamesystem().getProvider(), "extension");
|
||||||
|
assertTrue(kcp.getEncKeyQueueSize(TEST_KEY) > 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test(timeout = 120000)
|
@Test(timeout = 120000)
|
||||||
@ -92,4 +97,31 @@ public void testDelegationToken() throws Exception {
|
|||||||
Assert.assertEquals(0, tokens.length);
|
Assert.assertEquals(0, tokens.length);
|
||||||
Assert.assertEquals(2, creds.numberOfTokens());
|
Assert.assertEquals(2, creds.numberOfTokens());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Test(timeout = 120000)
|
||||||
|
public void testWarmupEDEKCacheOnStartup() throws Exception {
|
||||||
|
final Path zonePath = new Path("/TestEncryptionZone");
|
||||||
|
fsWrapper.mkdir(zonePath, FsPermission.getDirDefault(), false);
|
||||||
|
dfsAdmin.createEncryptionZone(zonePath, TEST_KEY);
|
||||||
|
|
||||||
|
@SuppressWarnings("unchecked")
|
||||||
|
KMSClientProvider spy = (KMSClientProvider) Whitebox
|
||||||
|
.getInternalState(cluster.getNamesystem().getProvider(), "extension");
|
||||||
|
assertTrue("key queue is empty after creating encryption zone",
|
||||||
|
spy.getEncKeyQueueSize(TEST_KEY) > 0);
|
||||||
|
|
||||||
|
conf.setInt(
|
||||||
|
DFSConfigKeys.DFS_NAMENODE_EDEKCACHELOADER_INITIAL_DELAY_MS_KEY, 0);
|
||||||
|
cluster.restartNameNode(true);
|
||||||
|
|
||||||
|
GenericTestUtils.waitFor(new Supplier<Boolean>() {
|
||||||
|
@Override
|
||||||
|
public Boolean get() {
|
||||||
|
final KMSClientProvider kspy = (KMSClientProvider) Whitebox
|
||||||
|
.getInternalState(cluster.getNamesystem().getProvider(),
|
||||||
|
"extension");
|
||||||
|
return kspy.getEncKeyQueueSize(TEST_KEY) > 0;
|
||||||
|
}
|
||||||
|
}, 1000, 60000);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@ -28,6 +28,7 @@
|
|||||||
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
|
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
|
||||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||||
import org.apache.hadoop.io.IOUtils;
|
import org.apache.hadoop.io.IOUtils;
|
||||||
|
import org.apache.hadoop.net.NetUtils;
|
||||||
import org.junit.Assert;
|
import org.junit.Assert;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
|
|
||||||
@ -246,6 +247,11 @@ public boolean isShortCircuit() {
|
|||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public int getNetworkDistance() {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
synchronized String getError() {
|
synchronized String getError() {
|
||||||
return error;
|
return error;
|
||||||
}
|
}
|
||||||
@ -271,7 +277,7 @@ public void testExternalBlockReader() throws Exception {
|
|||||||
String uuid = UUID.randomUUID().toString();
|
String uuid = UUID.randomUUID().toString();
|
||||||
conf.set(SYNTHETIC_BLOCK_READER_TEST_UUID_KEY, uuid);
|
conf.set(SYNTHETIC_BLOCK_READER_TEST_UUID_KEY, uuid);
|
||||||
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
|
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
|
||||||
.numDataNodes(1)
|
.hosts(new String[] {NetUtils.getLocalHostname()})
|
||||||
.build();
|
.build();
|
||||||
final int TEST_LENGTH = 2047;
|
final int TEST_LENGTH = 2047;
|
||||||
DistributedFileSystem dfs = cluster.getFileSystem();
|
DistributedFileSystem dfs = cluster.getFileSystem();
|
||||||
|
@ -0,0 +1,247 @@
|
|||||||
|
/**
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.hdfs;
|
||||||
|
|
||||||
|
import org.apache.commons.logging.Log;
|
||||||
|
import org.apache.commons.logging.LogFactory;
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.FileChecksum;
|
||||||
|
import org.apache.hadoop.fs.Path;
|
||||||
|
import org.apache.hadoop.fs.permission.FsPermission;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
|
||||||
|
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
||||||
|
import org.junit.After;
|
||||||
|
import org.junit.Assert;
|
||||||
|
import org.junit.Before;
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This test serves as a prototype to demo the idea proposed so far. It creates two
|
||||||
|
* files using the same data: one in replicated mode, the other in striped
|
||||||
|
* layout. For simplicity, it assumes both files have 6 data blocks and that the
|
||||||
|
* block sizes are the same.
|
||||||
|
*/
|
||||||
|
public class TestFileChecksum {
|
||||||
|
public static final Log LOG = LogFactory.getLog(TestFileChecksum.class);
|
||||||
|
|
||||||
|
private int dataBlocks = StripedFileTestUtil.NUM_DATA_BLOCKS;
|
||||||
|
private int parityBlocks = StripedFileTestUtil.NUM_PARITY_BLOCKS;
|
||||||
|
|
||||||
|
private MiniDFSCluster cluster;
|
||||||
|
private DistributedFileSystem fs;
|
||||||
|
private Configuration conf;
|
||||||
|
private DFSClient client;
|
||||||
|
|
||||||
|
private int cellSize = StripedFileTestUtil.BLOCK_STRIPED_CELL_SIZE;
|
||||||
|
private int stripesPerBlock = 6;
|
||||||
|
private int blockSize = cellSize * stripesPerBlock;
|
||||||
|
private int numBlockGroups = 10;
|
||||||
|
private int stripSize = cellSize * dataBlocks;
|
||||||
|
private int blockGroupSize = stripesPerBlock * stripSize;
|
||||||
|
private int fileSize = numBlockGroups * blockGroupSize;
|
||||||
|
|
||||||
|
private String ecDir = "/striped";
|
||||||
|
private String stripedFile1 = ecDir + "/stripedFileChecksum1";
|
||||||
|
private String stripedFile2 = ecDir + "/stripedFileChecksum2";
|
||||||
|
private String replicatedFile = "/replicatedFileChecksum";
|
||||||
|
|
||||||
|
@Before
|
||||||
|
public void setup() throws IOException {
|
||||||
|
int numDNs = dataBlocks + parityBlocks + 2;
|
||||||
|
conf = new Configuration();
|
||||||
|
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
|
||||||
|
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
|
||||||
|
false);
|
||||||
|
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
|
||||||
|
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();
|
||||||
|
Path ecPath = new Path(ecDir);
|
||||||
|
cluster.getFileSystem().mkdir(ecPath, FsPermission.getDirDefault());
|
||||||
|
cluster.getFileSystem().getClient().setErasureCodingPolicy(ecDir, null);
|
||||||
|
fs = cluster.getFileSystem();
|
||||||
|
client = fs.getClient();
|
||||||
|
|
||||||
|
prepareTestFiles();
|
||||||
|
|
||||||
|
getDataNodeToKill(stripedFile1);
|
||||||
|
getDataNodeToKill(replicatedFile);
|
||||||
|
}
|
||||||
|
|
||||||
|
@After
|
||||||
|
public void tearDown() {
|
||||||
|
if (cluster != null) {
|
||||||
|
cluster.shutdown();
|
||||||
|
cluster = null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testStripedFileChecksum1() throws Exception {
|
||||||
|
int length = 0;
|
||||||
|
testStripedFileChecksum(length, length + 10);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testStripedFileChecksum2() throws Exception {
|
||||||
|
int length = stripSize - 1;
|
||||||
|
testStripedFileChecksum(length, length - 10);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testStripedFileChecksum3() throws Exception {
|
||||||
|
int length = stripSize;
|
||||||
|
testStripedFileChecksum(length, length - 10);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testStripedFileChecksum4() throws Exception {
|
||||||
|
int length = stripSize + cellSize * 2;
|
||||||
|
testStripedFileChecksum(length, length - 10);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testStripedFileChecksum5() throws Exception {
|
||||||
|
int length = blockGroupSize;
|
||||||
|
testStripedFileChecksum(length, length - 10);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testStripedFileChecksum6() throws Exception {
|
||||||
|
int length = blockGroupSize + blockSize;
|
||||||
|
testStripedFileChecksum(length, length - 10);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testStripedFileChecksum7() throws Exception {
|
||||||
|
int length = -1; // whole file
|
||||||
|
testStripedFileChecksum(length, fileSize);
|
||||||
|
}
|
||||||
|
|
||||||
|
void testStripedFileChecksum(int range1, int range2) throws Exception {
|
||||||
|
FileChecksum stripedFileChecksum1 = getFileChecksum(stripedFile1,
|
||||||
|
range1, false);
|
||||||
|
FileChecksum stripedFileChecksum2 = getFileChecksum(stripedFile2,
|
||||||
|
range1, false);
|
||||||
|
FileChecksum stripedFileChecksum3 = getFileChecksum(stripedFile2,
|
||||||
|
range2, false);
|
||||||
|
|
||||||
|
LOG.info("stripedFileChecksum1:" + stripedFileChecksum1);
|
||||||
|
LOG.info("stripedFileChecksum2:" + stripedFileChecksum2);
|
||||||
|
LOG.info("stripedFileChecksum3:" + stripedFileChecksum3);
|
||||||
|
|
||||||
|
Assert.assertTrue(stripedFileChecksum1.equals(stripedFileChecksum2));
|
||||||
|
if (range1 >=0 && range1 != range2) {
|
||||||
|
Assert.assertFalse(stripedFileChecksum1.equals(stripedFileChecksum3));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
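The helper above relies on two properties of getFileChecksum: the checksum may be computed over a leading range instead of the whole file, and two files written from the same bytes agree on any identical range. A minimal sketch of that comparison, restricted to calls that already appear in this class (stripedFile1 and stripedFile2 are written from the same data in prepareTestFiles):

    FileChecksum c1 = fs.getFileChecksum(new Path(stripedFile1), stripSize);
    FileChecksum c2 = fs.getFileChecksum(new Path(stripedFile2), stripSize);
    // Same bytes, same leading range: the checksums are expected to match.
    Assert.assertTrue(c1.equals(c2));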
|
@Test
|
||||||
|
public void testStripedAndReplicatedFileChecksum() throws Exception {
|
||||||
|
FileChecksum stripedFileChecksum1 = getFileChecksum(stripedFile1,
|
||||||
|
10, false);
|
||||||
|
FileChecksum replicatedFileChecksum = getFileChecksum(replicatedFile,
|
||||||
|
10, false);
|
||||||
|
|
||||||
|
Assert.assertFalse(stripedFileChecksum1.equals(replicatedFileChecksum));
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
// TODO: allow datanode failure, HDFS-9833
|
||||||
|
@Test
|
||||||
|
public void testStripedAndReplicatedWithFailure() throws Exception {
|
||||||
|
FileChecksum stripedFileChecksum1 = getFileChecksum(stripedFile1,
|
||||||
|
10, true);
|
||||||
|
FileChecksum replicatedFileChecksum = getFileChecksum(replicatedFile,
|
||||||
|
10, true);
|
||||||
|
|
||||||
|
Assert.assertFalse(stripedFileChecksum1.equals(replicatedFileChecksum));
|
||||||
|
}*/
|
||||||
|
|
||||||
|
private FileChecksum getFileChecksum(String filePath, int range,
|
||||||
|
boolean killDn) throws Exception {
|
||||||
|
int dnIdxToDie = -1;
|
||||||
|
if (killDn) {
|
||||||
|
dnIdxToDie = getDataNodeToKill(filePath);
|
||||||
|
DataNode dnToDie = cluster.getDataNodes().get(dnIdxToDie);
|
||||||
|
shutdownDataNode(dnToDie);
|
||||||
|
}
|
||||||
|
|
||||||
|
Path testPath = new Path(filePath);
|
||||||
|
FileChecksum fc;
|
||||||
|
|
||||||
|
if (range >= 0) {
|
||||||
|
fc = fs.getFileChecksum(testPath, range);
|
||||||
|
} else {
|
||||||
|
fc = fs.getFileChecksum(testPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (dnIdxToDie != -1) {
|
||||||
|
cluster.restartDataNode(dnIdxToDie, true);
|
||||||
|
}
|
||||||
|
|
||||||
|
return fc;
|
||||||
|
}
|
||||||
|
|
||||||
|
void prepareTestFiles() throws IOException {
|
||||||
|
byte[] fileData = StripedFileTestUtil.generateBytes(fileSize);
|
||||||
|
|
||||||
|
String[] filePaths = new String[] {
|
||||||
|
stripedFile1, stripedFile2, replicatedFile
|
||||||
|
};
|
||||||
|
|
||||||
|
for (String filePath : filePaths) {
|
||||||
|
Path testPath = new Path(filePath);
|
||||||
|
DFSTestUtil.writeFile(fs, testPath, fileData);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void shutdownDataNode(DataNode dataNode) throws IOException {
|
||||||
|
/*
|
||||||
|
* Kill the datanode which contains one replica.
|
||||||
|
* We need to make sure it is dead in the namenode: clear its update time and
|
||||||
|
* trigger NN to check heartbeat.
|
||||||
|
*/
|
||||||
|
dataNode.shutdown();
|
||||||
|
cluster.setDataNodeDead(dataNode.getDatanodeId());
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Determine the datanode that hosts the first block of the file. For simplicity,
|
||||||
|
* this just returns the first datanode, as it is the first one tried.
|
||||||
|
*/
|
||||||
|
int getDataNodeToKill(String filePath) throws IOException {
|
||||||
|
LocatedBlocks locatedBlocks = client.getLocatedBlocks(filePath, 0);
|
||||||
|
|
||||||
|
LocatedBlock locatedBlock = locatedBlocks.get(0);
|
||||||
|
DatanodeInfo[] datanodes = locatedBlock.getLocations();
|
||||||
|
DatanodeInfo chosenDn = datanodes[0];
|
||||||
|
|
||||||
|
int idx = 0;
|
||||||
|
for (DataNode dn : cluster.getDataNodes()) {
|
||||||
|
if (dn.getInfoPort() == chosenDn.getInfoPort()) {
|
||||||
|
return idx;
|
||||||
|
}
|
||||||
|
idx++;
|
||||||
|
}
|
||||||
|
|
||||||
|
return -1;
|
||||||
|
}
|
||||||
|
}
|
@ -19,7 +19,6 @@
|
|||||||
package org.apache.hadoop.hdfs.server.blockmanagement;
|
package org.apache.hadoop.hdfs.server.blockmanagement;
|
||||||
|
|
||||||
import static org.junit.Assert.assertEquals;
|
import static org.junit.Assert.assertEquals;
|
||||||
import static org.junit.Assert.assertTrue;
|
|
||||||
import static org.junit.Assert.assertArrayEquals;
|
import static org.junit.Assert.assertArrayEquals;
|
||||||
|
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
@ -39,6 +38,7 @@
|
|||||||
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
||||||
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
|
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
|
||||||
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
|
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
|
||||||
|
import org.apache.hadoop.hdfs.util.HostsFileWriter;
|
||||||
import org.apache.hadoop.test.GenericTestUtils;
|
import org.apache.hadoop.test.GenericTestUtils;
|
||||||
import org.apache.log4j.Level;
|
import org.apache.log4j.Level;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
@ -385,17 +385,8 @@ public void testNodeDecomissionRespectsRackPolicy() throws Exception {
|
|||||||
short REPLICATION_FACTOR = 2;
|
short REPLICATION_FACTOR = 2;
|
||||||
final Path filePath = new Path("/testFile");
|
final Path filePath = new Path("/testFile");
|
||||||
|
|
||||||
// Configure an excludes file
|
HostsFileWriter hostsFileWriter = new HostsFileWriter();
|
||||||
FileSystem localFileSys = FileSystem.getLocal(conf);
|
hostsFileWriter.initialize(conf, "temp/decommission");
|
||||||
Path workingDir = new Path(MiniDFSCluster.getBaseDirectory());
|
|
||||||
Path dir = new Path(workingDir, "temp/decommission");
|
|
||||||
Path excludeFile = new Path(dir, "exclude");
|
|
||||||
Path includeFile = new Path(dir, "include");
|
|
||||||
assertTrue(localFileSys.mkdirs(dir));
|
|
||||||
DFSTestUtil.writeFile(localFileSys, excludeFile, "");
|
|
||||||
DFSTestUtil.writeFile(localFileSys, includeFile, "");
|
|
||||||
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
|
|
||||||
conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
|
|
||||||
|
|
||||||
// Two blocks and four racks
|
// Two blocks and four racks
|
||||||
String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
|
String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
|
||||||
@ -416,7 +407,7 @@ public void testNodeDecomissionRespectsRackPolicy() throws Exception {
|
|||||||
BlockLocation locs[] = fs.getFileBlockLocations(
|
BlockLocation locs[] = fs.getFileBlockLocations(
|
||||||
fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
|
fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
|
||||||
String name = locs[0].getNames()[0];
|
String name = locs[0].getNames()[0];
|
||||||
DFSTestUtil.writeFile(localFileSys, excludeFile, name);
|
hostsFileWriter.initExcludeHost(name);
|
||||||
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
|
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
|
||||||
DFSTestUtil.waitForDecommission(fs, name);
|
DFSTestUtil.waitForDecommission(fs, name);
|
||||||
|
|
||||||
@ -424,6 +415,7 @@ public void testNodeDecomissionRespectsRackPolicy() throws Exception {
|
|||||||
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
|
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
|
||||||
} finally {
|
} finally {
|
||||||
cluster.shutdown();
|
cluster.shutdown();
|
||||||
|
hostsFileWriter.cleanup();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
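This hunk and the following ones migrate the tests from hand-built include/exclude files to the new HostsFileWriter utility, which creates the temp directory, points DFS_HOSTS and DFS_HOSTS_EXCLUDE (or the combined JSON file) at it in the Configuration, and removes everything on cleanup. A condensed sketch of the lifecycle the migrated tests follow, using only method names that appear in this diff:

    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/decommission");       // wires the files into conf
    // ... start the MiniDFSCluster and pick a datanode name to decommission ...
    hostsFileWriter.initExcludeHost(name);                        // rewrite the exclude list
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
    DFSTestUtil.waitForDecommission(fs, name);
    // ... assertions ...
    hostsFileWriter.cleanup();                                    // in the finally block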
@ -438,17 +430,8 @@ public void testNodeDecomissionWithOverreplicationRespectsRackPolicy()
|
|||||||
short REPLICATION_FACTOR = 5;
|
short REPLICATION_FACTOR = 5;
|
||||||
final Path filePath = new Path("/testFile");
|
final Path filePath = new Path("/testFile");
|
||||||
|
|
||||||
// Configure an excludes file
|
HostsFileWriter hostsFileWriter = new HostsFileWriter();
|
||||||
FileSystem localFileSys = FileSystem.getLocal(conf);
|
hostsFileWriter.initialize(conf, "temp/decommission");
|
||||||
Path workingDir = new Path(MiniDFSCluster.getBaseDirectory());
|
|
||||||
Path dir = new Path(workingDir, "temp/decommission");
|
|
||||||
Path excludeFile = new Path(dir, "exclude");
|
|
||||||
Path includeFile = new Path(dir, "include");
|
|
||||||
assertTrue(localFileSys.mkdirs(dir));
|
|
||||||
DFSTestUtil.writeFile(localFileSys, excludeFile, "");
|
|
||||||
DFSTestUtil.writeFile(localFileSys, includeFile, "");
|
|
||||||
conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
|
|
||||||
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
|
|
||||||
|
|
||||||
// All hosts are on two racks, only one host on /rack2
|
// All hosts are on two racks, only one host on /rack2
|
||||||
String racks[] = {"/rack1", "/rack2", "/rack1", "/rack1", "/rack1"};
|
String racks[] = {"/rack1", "/rack2", "/rack1", "/rack1", "/rack1"};
|
||||||
@ -474,7 +457,7 @@ public void testNodeDecomissionWithOverreplicationRespectsRackPolicy()
|
|||||||
for (String top : locs[0].getTopologyPaths()) {
|
for (String top : locs[0].getTopologyPaths()) {
|
||||||
if (!top.startsWith("/rack2")) {
|
if (!top.startsWith("/rack2")) {
|
||||||
String name = top.substring("/rack1".length()+1);
|
String name = top.substring("/rack1".length()+1);
|
||||||
DFSTestUtil.writeFile(localFileSys, excludeFile, name);
|
hostsFileWriter.initExcludeHost(name);
|
||||||
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
|
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
|
||||||
DFSTestUtil.waitForDecommission(fs, name);
|
DFSTestUtil.waitForDecommission(fs, name);
|
||||||
break;
|
break;
|
||||||
@ -486,6 +469,7 @@ public void testNodeDecomissionWithOverreplicationRespectsRackPolicy()
|
|||||||
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
|
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
|
||||||
} finally {
|
} finally {
|
||||||
cluster.shutdown();
|
cluster.shutdown();
|
||||||
|
hostsFileWriter.cleanup();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -383,9 +383,9 @@ public void testRemoveIncludedNode() throws IOException {
|
|||||||
|
|
||||||
DatanodeManager dm = mockDatanodeManager(fsn, new Configuration());
|
DatanodeManager dm = mockDatanodeManager(fsn, new Configuration());
|
||||||
HostFileManager hm = new HostFileManager();
|
HostFileManager hm = new HostFileManager();
|
||||||
HostFileManager.HostSet noNodes = new HostFileManager.HostSet();
|
HostSet noNodes = new HostSet();
|
||||||
HostFileManager.HostSet oneNode = new HostFileManager.HostSet();
|
HostSet oneNode = new HostSet();
|
||||||
HostFileManager.HostSet twoNodes = new HostFileManager.HostSet();
|
HostSet twoNodes = new HostSet();
|
||||||
DatanodeRegistration dr1 = new DatanodeRegistration(
|
DatanodeRegistration dr1 = new DatanodeRegistration(
|
||||||
new DatanodeID("127.0.0.1", "127.0.0.1", "someStorageID-123",
|
new DatanodeID("127.0.0.1", "127.0.0.1", "someStorageID-123",
|
||||||
12345, 12345, 12345, 12345),
|
12345, 12345, 12345, 12345),
|
||||||
@ -402,7 +402,7 @@ public void testRemoveIncludedNode() throws IOException {
|
|||||||
oneNode.add(entry("127.0.0.1:23456"));
|
oneNode.add(entry("127.0.0.1:23456"));
|
||||||
|
|
||||||
hm.refresh(twoNodes, noNodes);
|
hm.refresh(twoNodes, noNodes);
|
||||||
Whitebox.setInternalState(dm, "hostFileManager", hm);
|
Whitebox.setInternalState(dm, "hostConfigManager", hm);
|
||||||
|
|
||||||
// Register two data nodes to simulate them coming up.
|
// Register two data nodes to simulate them coming up.
|
||||||
// We need to add two nodes, because if we have only one node, removing it
|
// We need to add two nodes, because if we have only one node, removing it
|
||||||
|
@ -40,7 +40,7 @@ private static InetSocketAddress entry(String e) {
|
|||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testDeduplication() {
|
public void testDeduplication() {
|
||||||
HostFileManager.HostSet s = new HostFileManager.HostSet();
|
HostSet s = new HostSet();
|
||||||
// These entries will be de-duped, since they refer to the same IP
|
// These entries will be de-duped, since they refer to the same IP
|
||||||
// address + port combo.
|
// address + port combo.
|
||||||
s.add(entry("127.0.0.1:12345"));
|
s.add(entry("127.0.0.1:12345"));
|
||||||
@ -60,7 +60,7 @@ public void testDeduplication() {
|
|||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testRelation() {
|
public void testRelation() {
|
||||||
HostFileManager.HostSet s = new HostFileManager.HostSet();
|
HostSet s = new HostSet();
|
||||||
s.add(entry("127.0.0.1:123"));
|
s.add(entry("127.0.0.1:123"));
|
||||||
Assert.assertTrue(s.match(entry("127.0.0.1:123")));
|
Assert.assertTrue(s.match(entry("127.0.0.1:123")));
|
||||||
Assert.assertFalse(s.match(entry("127.0.0.1:12")));
|
Assert.assertFalse(s.match(entry("127.0.0.1:12")));
|
||||||
@ -105,8 +105,8 @@ public void testIncludeExcludeLists() throws IOException {
|
|||||||
FSNamesystem fsn = mock(FSNamesystem.class);
|
FSNamesystem fsn = mock(FSNamesystem.class);
|
||||||
Configuration conf = new Configuration();
|
Configuration conf = new Configuration();
|
||||||
HostFileManager hm = new HostFileManager();
|
HostFileManager hm = new HostFileManager();
|
||||||
HostFileManager.HostSet includedNodes = new HostFileManager.HostSet();
|
HostSet includedNodes = new HostSet();
|
||||||
HostFileManager.HostSet excludedNodes = new HostFileManager.HostSet();
|
HostSet excludedNodes = new HostSet();
|
||||||
|
|
||||||
includedNodes.add(entry("127.0.0.1:12345"));
|
includedNodes.add(entry("127.0.0.1:12345"));
|
||||||
includedNodes.add(entry("localhost:12345"));
|
includedNodes.add(entry("localhost:12345"));
|
||||||
@ -122,7 +122,7 @@ public void testIncludeExcludeLists() throws IOException {
|
|||||||
hm.refresh(includedNodes, excludedNodes);
|
hm.refresh(includedNodes, excludedNodes);
|
||||||
|
|
||||||
DatanodeManager dm = new DatanodeManager(bm, fsn, conf);
|
DatanodeManager dm = new DatanodeManager(bm, fsn, conf);
|
||||||
Whitebox.setInternalState(dm, "hostFileManager", hm);
|
Whitebox.setInternalState(dm, "hostConfigManager", hm);
|
||||||
Map<String, DatanodeDescriptor> dnMap = (Map<String,
|
Map<String, DatanodeDescriptor> dnMap = (Map<String,
|
||||||
DatanodeDescriptor>) Whitebox.getInternalState(dm, "datanodeMap");
|
DatanodeDescriptor>) Whitebox.getInternalState(dm, "datanodeMap");
|
||||||
|
|
||||||
|
@ -20,11 +20,10 @@
|
|||||||
import static org.junit.Assert.assertTrue;
|
import static org.junit.Assert.assertTrue;
|
||||||
|
|
||||||
import java.lang.management.ManagementFactory;
|
import java.lang.management.ManagementFactory;
|
||||||
import java.io.File;
|
import java.util.Arrays;
|
||||||
|
|
||||||
import org.apache.commons.logging.Log;
|
import org.apache.commons.logging.Log;
|
||||||
import org.apache.commons.logging.LogFactory;
|
import org.apache.commons.logging.LogFactory;
|
||||||
import org.apache.commons.io.FileUtils;
|
|
||||||
import org.apache.hadoop.conf.Configuration;
|
import org.apache.hadoop.conf.Configuration;
|
||||||
import org.apache.hadoop.fs.BlockLocation;
|
import org.apache.hadoop.fs.BlockLocation;
|
||||||
import org.apache.hadoop.fs.FileSystem;
|
import org.apache.hadoop.fs.FileSystem;
|
||||||
@ -34,7 +33,13 @@
|
|||||||
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||||
|
import org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager;
|
||||||
|
import org.apache.hadoop.hdfs.server.blockmanagement.HostConfigManager;
|
||||||
|
import org.apache.hadoop.hdfs.server.blockmanagement.HostFileManager;
|
||||||
|
import org.apache.hadoop.hdfs.util.HostsFileWriter;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
|
import org.junit.runner.RunWith;
|
||||||
|
import org.junit.runners.Parameterized;
|
||||||
|
|
||||||
import javax.management.MBeanServer;
|
import javax.management.MBeanServer;
|
||||||
import javax.management.ObjectName;
|
import javax.management.ObjectName;
|
||||||
@ -43,9 +48,21 @@
|
|||||||
* DFS_HOSTS and DFS_HOSTS_EXCLUDE tests
|
* DFS_HOSTS and DFS_HOSTS_EXCLUDE tests
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
|
@RunWith(Parameterized.class)
|
||||||
public class TestHostsFiles {
|
public class TestHostsFiles {
|
||||||
private static final Log LOG =
|
private static final Log LOG =
|
||||||
LogFactory.getLog(TestHostsFiles.class.getName());
|
LogFactory.getLog(TestHostsFiles.class.getName());
|
||||||
|
private Class hostFileMgrClass;
|
||||||
|
|
||||||
|
public TestHostsFiles(Class hostFileMgrClass) {
|
||||||
|
this.hostFileMgrClass = hostFileMgrClass;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Parameterized.Parameters
|
||||||
|
public static Iterable<Object[]> data() {
|
||||||
|
return Arrays.asList(new Object[][]{
|
||||||
|
{HostFileManager.class}, {CombinedHostFileManager.class}});
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Return a configuration object with low timeouts for testing and
|
* Return a configuration object with low timeouts for testing and
|
||||||
@ -72,6 +89,10 @@ private Configuration getConf() {
|
|||||||
|
|
||||||
// Indicates we have multiple racks
|
// Indicates we have multiple racks
|
||||||
conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, "xyz");
|
conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, "xyz");
|
||||||
|
|
||||||
|
// Host file manager
|
||||||
|
conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
|
||||||
|
hostFileMgrClass, HostConfigManager.class);
|
||||||
return conf;
|
return conf;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -80,18 +101,8 @@ public void testHostsExcludeInUI() throws Exception {
|
|||||||
Configuration conf = getConf();
|
Configuration conf = getConf();
|
||||||
short REPLICATION_FACTOR = 2;
|
short REPLICATION_FACTOR = 2;
|
||||||
final Path filePath = new Path("/testFile");
|
final Path filePath = new Path("/testFile");
|
||||||
|
HostsFileWriter hostsFileWriter = new HostsFileWriter();
|
||||||
// Configure an excludes file
|
hostsFileWriter.initialize(conf, "temp/decommission");
|
||||||
FileSystem localFileSys = FileSystem.getLocal(conf);
|
|
||||||
Path workingDir = new Path(MiniDFSCluster.getBaseDirectory());
|
|
||||||
Path dir = new Path(workingDir, "temp/decommission");
|
|
||||||
Path excludeFile = new Path(dir, "exclude");
|
|
||||||
Path includeFile = new Path(dir, "include");
|
|
||||||
assertTrue(localFileSys.mkdirs(dir));
|
|
||||||
DFSTestUtil.writeFile(localFileSys, excludeFile, "");
|
|
||||||
DFSTestUtil.writeFile(localFileSys, includeFile, "");
|
|
||||||
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
|
|
||||||
conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
|
|
||||||
|
|
||||||
// Two blocks and four racks
|
// Two blocks and four racks
|
||||||
String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
|
String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
|
||||||
@ -112,9 +123,8 @@ public void testHostsExcludeInUI() throws Exception {
|
|||||||
BlockLocation locs[] = fs.getFileBlockLocations(
|
BlockLocation locs[] = fs.getFileBlockLocations(
|
||||||
fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
|
fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
|
||||||
String name = locs[0].getNames()[0];
|
String name = locs[0].getNames()[0];
|
||||||
String names = name + "\n" + "localhost:42\n";
|
LOG.info("adding '" + name + "' to decommission");
|
||||||
LOG.info("adding '" + names + "' to exclude file " + excludeFile.toUri().getPath());
|
hostsFileWriter.initExcludeHost(name);
|
||||||
DFSTestUtil.writeFile(localFileSys, excludeFile, name);
|
|
||||||
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
|
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
|
||||||
DFSTestUtil.waitForDecommission(fs, name);
|
DFSTestUtil.waitForDecommission(fs, name);
|
||||||
|
|
||||||
@ -131,9 +141,7 @@ public void testHostsExcludeInUI() throws Exception {
|
|||||||
if (cluster != null) {
|
if (cluster != null) {
|
||||||
cluster.shutdown();
|
cluster.shutdown();
|
||||||
}
|
}
|
||||||
if (localFileSys.exists(dir)) {
|
hostsFileWriter.cleanup();
|
||||||
FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -141,20 +149,10 @@ public void testHostsExcludeInUI() throws Exception {
|
|||||||
public void testHostsIncludeForDeadCount() throws Exception {
|
public void testHostsIncludeForDeadCount() throws Exception {
|
||||||
Configuration conf = getConf();
|
Configuration conf = getConf();
|
||||||
|
|
||||||
// Configure an excludes file
|
HostsFileWriter hostsFileWriter = new HostsFileWriter();
|
||||||
FileSystem localFileSys = FileSystem.getLocal(conf);
|
hostsFileWriter.initialize(conf, "temp/decommission");
|
||||||
Path workingDir = new Path(MiniDFSCluster.getBaseDirectory());
|
hostsFileWriter.initIncludeHosts(new String[]
|
||||||
Path dir = new Path(workingDir, "temp/decommission");
|
{"localhost:52","127.0.0.1:7777"});
|
||||||
Path excludeFile = new Path(dir, "exclude");
|
|
||||||
Path includeFile = new Path(dir, "include");
|
|
||||||
assertTrue(localFileSys.mkdirs(dir));
|
|
||||||
StringBuilder includeHosts = new StringBuilder();
|
|
||||||
includeHosts.append("localhost:52").append("\n").append("127.0.0.1:7777")
|
|
||||||
.append("\n");
|
|
||||||
DFSTestUtil.writeFile(localFileSys, excludeFile, "");
|
|
||||||
DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
|
|
||||||
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
|
|
||||||
conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
|
|
||||||
|
|
||||||
MiniDFSCluster cluster = null;
|
MiniDFSCluster cluster = null;
|
||||||
try {
|
try {
|
||||||
@ -174,9 +172,7 @@ public void testHostsIncludeForDeadCount() throws Exception {
|
|||||||
if (cluster != null) {
|
if (cluster != null) {
|
||||||
cluster.shutdown();
|
cluster.shutdown();
|
||||||
}
|
}
|
||||||
if (localFileSys.exists(dir)) {
|
hostsFileWriter.cleanup();
|
||||||
FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -33,6 +33,7 @@
|
|||||||
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
||||||
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
|
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
|
||||||
import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
|
import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
|
||||||
|
import org.apache.hadoop.hdfs.util.HostsFileWriter;
|
||||||
import org.apache.hadoop.io.nativeio.NativeIO;
|
import org.apache.hadoop.io.nativeio.NativeIO;
|
||||||
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
|
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
|
||||||
import org.apache.hadoop.net.ServerSocketUtil;
|
import org.apache.hadoop.net.ServerSocketUtil;
|
||||||
@ -44,9 +45,9 @@
|
|||||||
import javax.management.MBeanServer;
|
import javax.management.MBeanServer;
|
||||||
import javax.management.ObjectName;
|
import javax.management.ObjectName;
|
||||||
import java.io.File;
|
import java.io.File;
|
||||||
import java.io.IOException;
|
|
||||||
import java.lang.management.ManagementFactory;
|
import java.lang.management.ManagementFactory;
|
||||||
import java.net.URI;
|
import java.net.URI;
|
||||||
|
import java.util.ArrayList;
|
||||||
import java.util.Collection;
|
import java.util.Collection;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
@ -236,8 +237,8 @@ public void testLastContactTime() throws Exception {
|
|||||||
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
|
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
|
||||||
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
|
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
|
||||||
MiniDFSCluster cluster = null;
|
MiniDFSCluster cluster = null;
|
||||||
FileSystem localFileSys = null;
|
HostsFileWriter hostsFileWriter = new HostsFileWriter();
|
||||||
Path dir = null;
|
hostsFileWriter.initialize(conf, "temp/TestNameNodeMXBean");
|
||||||
|
|
||||||
try {
|
try {
|
||||||
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
|
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
|
||||||
@ -249,18 +250,12 @@ public void testLastContactTime() throws Exception {
|
|||||||
ObjectName mxbeanName = new ObjectName(
|
ObjectName mxbeanName = new ObjectName(
|
||||||
"Hadoop:service=NameNode,name=NameNodeInfo");
|
"Hadoop:service=NameNode,name=NameNodeInfo");
|
||||||
|
|
||||||
// Define include file to generate deadNodes metrics
|
List<String> hosts = new ArrayList<>();
|
||||||
localFileSys = FileSystem.getLocal(conf);
|
|
||||||
Path workingDir = localFileSys.getWorkingDirectory();
|
|
||||||
dir = new Path(workingDir,"build/test/data/temp/TestNameNodeMXBean");
|
|
||||||
Path includeFile = new Path(dir, "include");
|
|
||||||
assertTrue(localFileSys.mkdirs(dir));
|
|
||||||
StringBuilder includeHosts = new StringBuilder();
|
|
||||||
for(DataNode dn : cluster.getDataNodes()) {
|
for(DataNode dn : cluster.getDataNodes()) {
|
||||||
includeHosts.append(dn.getDisplayName()).append("\n");
|
hosts.add(dn.getDisplayName());
|
||||||
}
|
}
|
||||||
DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
|
hostsFileWriter.initIncludeHosts(hosts.toArray(
|
||||||
conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
|
new String[hosts.size()]));
|
||||||
fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
|
fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
|
||||||
|
|
||||||
cluster.stopDataNode(0);
|
cluster.stopDataNode(0);
|
||||||
@ -282,12 +277,10 @@ public void testLastContactTime() throws Exception {
|
|||||||
assertTrue(deadNode.containsKey("xferaddr"));
|
assertTrue(deadNode.containsKey("xferaddr"));
|
||||||
}
|
}
|
||||||
} finally {
|
} finally {
|
||||||
if ((localFileSys != null) && localFileSys.exists(dir)) {
|
|
||||||
FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
|
|
||||||
}
|
|
||||||
if (cluster != null) {
|
if (cluster != null) {
|
||||||
cluster.shutdown();
|
cluster.shutdown();
|
||||||
}
|
}
|
||||||
|
hostsFileWriter.cleanup();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -29,14 +29,12 @@
|
|||||||
import java.lang.management.ManagementFactory;
|
import java.lang.management.ManagementFactory;
|
||||||
import java.net.InetAddress;
|
import java.net.InetAddress;
|
||||||
import java.net.URI;
|
import java.net.URI;
|
||||||
import java.util.ArrayList;
|
|
||||||
import java.util.Iterator;
|
import java.util.Iterator;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
|
||||||
import org.apache.commons.logging.Log;
|
import org.apache.commons.logging.Log;
|
||||||
import org.apache.commons.logging.LogFactory;
|
import org.apache.commons.logging.LogFactory;
|
||||||
import org.apache.hadoop.conf.Configuration;
|
import org.apache.hadoop.conf.Configuration;
|
||||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
|
||||||
import org.apache.hadoop.fs.FileSystem;
|
import org.apache.hadoop.fs.FileSystem;
|
||||||
import org.apache.hadoop.fs.FileUtil;
|
import org.apache.hadoop.fs.FileUtil;
|
||||||
import org.apache.hadoop.fs.Path;
|
import org.apache.hadoop.fs.Path;
|
||||||
@ -56,6 +54,7 @@
|
|||||||
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
|
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
|
||||||
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
|
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
|
||||||
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
|
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
|
||||||
|
import org.apache.hadoop.hdfs.util.HostsFileWriter;
|
||||||
import org.apache.hadoop.hdfs.util.MD5FileUtils;
|
import org.apache.hadoop.hdfs.util.MD5FileUtils;
|
||||||
import org.apache.hadoop.io.MD5Hash;
|
import org.apache.hadoop.io.MD5Hash;
|
||||||
import org.apache.hadoop.test.GenericTestUtils;
|
import org.apache.hadoop.test.GenericTestUtils;
|
||||||
@ -568,27 +567,15 @@ public void testCorruptImageFallback() throws IOException {
|
|||||||
@Test
|
@Test
|
||||||
public void testNNRestart() throws IOException, InterruptedException {
|
public void testNNRestart() throws IOException, InterruptedException {
|
||||||
MiniDFSCluster cluster = null;
|
MiniDFSCluster cluster = null;
|
||||||
FileSystem localFileSys;
|
|
||||||
Path hostsFile;
|
|
||||||
Path excludeFile;
|
|
||||||
int HEARTBEAT_INTERVAL = 1; // heartbeat interval in seconds
|
int HEARTBEAT_INTERVAL = 1; // heartbeat interval in seconds
|
||||||
// Set up the hosts/exclude files.
|
|
||||||
localFileSys = FileSystem.getLocal(config);
|
|
||||||
Path workingDir = localFileSys.getWorkingDirectory();
|
|
||||||
Path dir = new Path(workingDir, "build/test/data/work-dir/restartnn");
|
|
||||||
hostsFile = new Path(dir, "hosts");
|
|
||||||
excludeFile = new Path(dir, "exclude");
|
|
||||||
|
|
||||||
// Setup conf
|
HostsFileWriter hostsFileWriter = new HostsFileWriter();
|
||||||
config.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
|
hostsFileWriter.initialize(config, "work-dir/restartnn");
|
||||||
writeConfigFile(localFileSys, excludeFile, null);
|
|
||||||
config.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
|
|
||||||
// write into hosts file
|
|
||||||
ArrayList<String>list = new ArrayList<String>();
|
|
||||||
byte b[] = {127, 0, 0, 1};
|
byte b[] = {127, 0, 0, 1};
|
||||||
InetAddress inetAddress = InetAddress.getByAddress(b);
|
InetAddress inetAddress = InetAddress.getByAddress(b);
|
||||||
list.add(inetAddress.getHostName());
|
hostsFileWriter.initIncludeHosts(new String[] {inetAddress.getHostName()});
|
||||||
writeConfigFile(localFileSys, hostsFile, list);
|
|
||||||
int numDatanodes = 1;
|
int numDatanodes = 1;
|
||||||
|
|
||||||
try {
|
try {
|
||||||
@ -613,37 +600,12 @@ public void testNNRestart() throws IOException, InterruptedException {
|
|||||||
fail(StringUtils.stringifyException(e));
|
fail(StringUtils.stringifyException(e));
|
||||||
throw e;
|
throw e;
|
||||||
} finally {
|
} finally {
|
||||||
cleanupFile(localFileSys, excludeFile.getParent());
|
|
||||||
if (cluster != null) {
|
if (cluster != null) {
|
||||||
cluster.shutdown();
|
cluster.shutdown();
|
||||||
}
|
}
|
||||||
|
hostsFileWriter.cleanup();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private void writeConfigFile(FileSystem localFileSys, Path name,
|
|
||||||
ArrayList<String> nodes) throws IOException {
|
|
||||||
// delete if it already exists
|
|
||||||
if (localFileSys.exists(name)) {
|
|
||||||
localFileSys.delete(name, true);
|
|
||||||
}
|
|
||||||
|
|
||||||
FSDataOutputStream stm = localFileSys.create(name);
|
|
||||||
if (nodes != null) {
|
|
||||||
for (Iterator<String> it = nodes.iterator(); it.hasNext();) {
|
|
||||||
String node = it.next();
|
|
||||||
stm.writeBytes(node);
|
|
||||||
stm.writeBytes("\n");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
stm.close();
|
|
||||||
}
|
|
||||||
|
|
||||||
private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
|
|
||||||
assertTrue(fileSys.exists(name));
|
|
||||||
fileSys.delete(name, true);
|
|
||||||
assertTrue(!fileSys.exists(name));
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
@Test(timeout = 120000)
|
@Test(timeout = 120000)
|
||||||
public void testXattrConfiguration() throws Exception {
|
public void testXattrConfiguration() throws Exception {
|
||||||
|
@ -0,0 +1,169 @@
|
|||||||
|
/**
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.hdfs.server.namenode;
|
||||||
|
|
||||||
|
import static org.junit.Assert.assertEquals;
|
||||||
|
import static org.junit.Assert.assertTrue;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
import java.util.Arrays;
|
||||||
|
import java.util.EnumSet;
|
||||||
|
import java.util.HashSet;
|
||||||
|
import java.util.Set;
|
||||||
|
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.fs.CreateFlag;
|
||||||
|
import org.apache.hadoop.fs.permission.FsPermission;
|
||||||
|
import org.apache.hadoop.fs.permission.PermissionStatus;
|
||||||
|
import org.apache.hadoop.hdfs.DFSConfigKeys;
|
||||||
|
import org.apache.hadoop.hdfs.HdfsConfiguration;
|
||||||
|
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||||
|
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
|
||||||
|
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithUpgradeDomain;
|
||||||
|
import org.apache.hadoop.hdfs.server.blockmanagement.CombinedHostFileManager;
|
||||||
|
import org.apache.hadoop.hdfs.server.blockmanagement.HostConfigManager;
|
||||||
|
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
|
||||||
|
import org.apache.hadoop.hdfs.util.HostsFileWriter;
|
||||||
|
import org.apache.hadoop.net.StaticMapping;
|
||||||
|
import org.junit.After;
|
||||||
|
import org.junit.Before;
|
||||||
|
import org.junit.Test;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* End-to-end test case for upgrade domain
|
||||||
|
* The test configures upgrade domains for nodes via the admin JSON
|
||||||
|
* config file and puts some nodes into decommission state.
|
||||||
|
* The test then verifies replicas are placed on the nodes that
|
||||||
|
* satisfy the upgrade domain policy.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
public class TestUpgradeDomainBlockPlacementPolicy {
|
||||||
|
|
||||||
|
private static final short REPLICATION_FACTOR = (short) 3;
|
||||||
|
private static final int DEFAULT_BLOCK_SIZE = 1024;
|
||||||
|
static final String[] racks =
|
||||||
|
{ "/RACK1", "/RACK1", "/RACK1", "/RACK2", "/RACK2", "/RACK2" };
|
||||||
|
/**
|
||||||
|
* Use host names that can be resolved (
|
||||||
|
* InetSocketAddress#isUnresolved == false). Otherwise,
|
||||||
|
* CombinedHostFileManager won't allow those hosts.
|
||||||
|
*/
|
||||||
|
static final String[] hosts =
|
||||||
|
{ "127.0.0.1", "127.0.0.1", "127.0.0.1", "127.0.0.1",
|
||||||
|
"127.0.0.1", "127.0.0.1" };
|
||||||
|
static final String[] upgradeDomains =
|
||||||
|
{ "ud1", "ud2", "ud3", "ud1", "ud2", "ud3" };
|
||||||
|
static final Set<DatanodeID> expectedDatanodeIDs = new HashSet<>();
|
||||||
|
private MiniDFSCluster cluster = null;
|
||||||
|
private NamenodeProtocols nameNodeRpc = null;
|
||||||
|
private FSNamesystem namesystem = null;
|
||||||
|
private PermissionStatus perm = null;
|
||||||
|
|
||||||
|
@Before
|
||||||
|
public void setup() throws IOException {
|
||||||
|
StaticMapping.resetMap();
|
||||||
|
Configuration conf = new HdfsConfiguration();
|
||||||
|
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
|
||||||
|
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
|
||||||
|
conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
|
||||||
|
BlockPlacementPolicyWithUpgradeDomain.class,
|
||||||
|
BlockPlacementPolicy.class);
|
||||||
|
conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
|
||||||
|
CombinedHostFileManager.class, HostConfigManager.class);
|
||||||
|
HostsFileWriter hostsFileWriter = new HostsFileWriter();
|
||||||
|
hostsFileWriter.initialize(conf, "temp/upgradedomainpolicy");
|
||||||
|
|
||||||
|
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(6).racks(racks)
|
||||||
|
.hosts(hosts).build();
|
||||||
|
cluster.waitActive();
|
||||||
|
nameNodeRpc = cluster.getNameNodeRpc();
|
||||||
|
namesystem = cluster.getNamesystem();
|
||||||
|
perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null,
|
||||||
|
FsPermission.getDefault());
|
||||||
|
refreshDatanodeAdminProperties(hostsFileWriter);
|
||||||
|
hostsFileWriter.cleanup();
|
||||||
|
}
|
||||||
|
|
||||||
|
@After
|
||||||
|
public void teardown() {
|
||||||
|
if (cluster != null) {
|
||||||
|
cluster.shutdown();
|
||||||
|
cluster = null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Define admin properties for these datanodes as follows.
|
||||||
|
* dn0 and dn3 have upgrade domain ud1.
|
||||||
|
* dn1 and dn4 have upgrade domain ud2.
|
||||||
|
* dn2 and dn5 have upgrade domain ud3.
|
||||||
|
* dn0 and dn5 are decommissioned.
|
||||||
|
* Given dn0, dn1 and dn2 are on rack1 and dn3, dn4 and dn5 are on
|
||||||
|
* rack2. Then any block's replicas should be on either
|
||||||
|
* {dn1, dn2, dn3} or {dn2, dn3, dn4}.
|
||||||
|
*/
|
||||||
|
private void refreshDatanodeAdminProperties(HostsFileWriter hostsFileWriter)
|
||||||
|
throws IOException {
|
||||||
|
DatanodeAdminProperties[] datanodes = new DatanodeAdminProperties[
|
||||||
|
hosts.length];
|
||||||
|
for (int i = 0; i < hosts.length; i++) {
|
||||||
|
datanodes[i] = new DatanodeAdminProperties();
|
||||||
|
DatanodeID datanodeID = cluster.getDataNodes().get(i).getDatanodeId();
|
||||||
|
datanodes[i].setHostName(datanodeID.getHostName());
|
||||||
|
datanodes[i].setPort(datanodeID.getXferPort());
|
||||||
|
datanodes[i].setUpgradeDomain(upgradeDomains[i]);
|
||||||
|
}
|
||||||
|
datanodes[0].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
|
||||||
|
datanodes[5].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
|
||||||
|
hostsFileWriter.initIncludeHosts(datanodes);
|
||||||
|
cluster.getFileSystem().refreshNodes();
|
||||||
|
|
||||||
|
expectedDatanodeIDs.add(cluster.getDataNodes().get(2).getDatanodeId());
|
||||||
|
expectedDatanodeIDs.add(cluster.getDataNodes().get(3).getDatanodeId());
|
||||||
|
}
|
||||||
|
|
||||||
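Given the admin properties defined above, the placement constraint can also be checked in terms of upgrade domains rather than specific datanode IDs: the replicas of any block should span three distinct domains. A hedged sketch, assuming the upgrade domain is propagated to the DatanodeInfo instances returned in the block locations (not verified here), with locatedBlock obtained as in testPlacement() below:

    Set<String> domains = new HashSet<>();
    for (DatanodeInfo dn : locatedBlock.getLocations()) {
      domains.add(dn.getUpgradeDomain());
    }
    // Three replicas, three different upgrade domains.
    assertEquals(REPLICATION_FACTOR, domains.size());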
|
  @Test
  public void testPlacement() throws Exception {
    String clientMachine = "127.0.0.1";
    for (int i = 0; i < 5; i++) {
      String src = "/test-" + i;
      // Create the file with client machine
      HdfsFileStatus fileStatus = namesystem.startFile(src, perm,
          clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true,
          REPLICATION_FACTOR, DEFAULT_BLOCK_SIZE, null, false);
      LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,
          null, null, fileStatus.getFileId(), null);

      assertEquals("Block should be allocated sufficient locations",
          REPLICATION_FACTOR, locatedBlock.getLocations().length);
      Set<DatanodeInfo> locs = new HashSet<>(Arrays.asList(
          locatedBlock.getLocations()));
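      // dn2 and dn3 are the only datanodes common to both valid placements
      // ({dn1, dn2, dn3} and {dn2, dn3, dn4}), so every block must have a
      // replica on each of them.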
      for (DatanodeID datanodeID : expectedDatanodeIDs) {
        // The bare contains() call discarded its result; assert it so a
        // misplaced replica actually fails the test (assumes assertTrue is
        // statically imported from org.junit.Assert alongside assertEquals).
        assertTrue(locs.contains(datanodeID));
      }

      nameNodeRpc.abandonBlock(locatedBlock.getBlock(), fileStatus.getFileId(),
          src, clientMachine);
    }
  }
}
@ -0,0 +1,122 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.util;

import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;

import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.HostConfigManager;
import org.apache.hadoop.hdfs.server.blockmanagement.HostFileManager;

import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;

import static org.junit.Assert.assertTrue;

public class HostsFileWriter {
  private FileSystem localFileSys;
  private Path fullDir;
  private Path excludeFile;
  private Path includeFile;
  private Path combinedFile;
  private boolean isLegacyHostsFile = false;

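  /**
   * Prepares host list files for a MiniDFSCluster. Depending on the
   * configured DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY, this writes either
   * the legacy plain-text include/exclude files (HostFileManager) or a single
   * combined JSON file of DatanodeAdminProperties (CombinedHostFileManager).
   */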
  public void initialize(Configuration conf, String dir) throws IOException {
    localFileSys = FileSystem.getLocal(conf);
    Path workingDir = new Path(MiniDFSCluster.getBaseDirectory());
    this.fullDir = new Path(workingDir, dir);
    assertTrue(localFileSys.mkdirs(this.fullDir));

    if (conf.getClass(
        DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY,
        HostFileManager.class, HostConfigManager.class).equals(
        HostFileManager.class)) {
      isLegacyHostsFile = true;
    }
    if (isLegacyHostsFile) {
      excludeFile = new Path(fullDir, "exclude");
      includeFile = new Path(fullDir, "include");
      DFSTestUtil.writeFile(localFileSys, excludeFile, "");
      DFSTestUtil.writeFile(localFileSys, includeFile, "");
      conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
      conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
    } else {
      combinedFile = new Path(fullDir, "all");
      conf.set(DFSConfigKeys.DFS_HOSTS, combinedFile.toString());
    }
  }

  public void initExcludeHost(String hostNameAndPort) throws IOException {
    if (isLegacyHostsFile) {
      DFSTestUtil.writeFile(localFileSys, excludeFile, hostNameAndPort);
    } else {
      DatanodeAdminProperties dn = new DatanodeAdminProperties();
      String[] hostAndPort = hostNameAndPort.split(":");
      dn.setHostName(hostAndPort[0]);
      dn.setPort(Integer.parseInt(hostAndPort[1]));
      dn.setAdminState(AdminStates.DECOMMISSIONED);
      HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
      allDNs.add(dn);
      CombinedHostsFileWriter.writeFile(combinedFile.toString(), allDNs);
    }
  }

  public void initIncludeHosts(String[] hostNameAndPorts) throws IOException {
    StringBuilder includeHosts = new StringBuilder();
    if (isLegacyHostsFile) {
      for (String hostNameAndPort : hostNameAndPorts) {
        includeHosts.append(hostNameAndPort).append("\n");
      }
      DFSTestUtil.writeFile(localFileSys, includeFile,
          includeHosts.toString());
    } else {
      HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
      for (String hostNameAndPort : hostNameAndPorts) {
        String[] hostAndPort = hostNameAndPort.split(":");
        DatanodeAdminProperties dn = new DatanodeAdminProperties();
        dn.setHostName(hostAndPort[0]);
        dn.setPort(Integer.parseInt(hostAndPort[1]));
        allDNs.add(dn);
      }
      CombinedHostsFileWriter.writeFile(combinedFile.toString(), allDNs);
    }
  }

  public void initIncludeHosts(DatanodeAdminProperties[] datanodes)
      throws IOException {
    CombinedHostsFileWriter.writeFile(combinedFile.toString(),
        new HashSet<>(Arrays.asList(datanodes)));
  }

  public void cleanup() throws IOException {
    if (localFileSys.exists(fullDir)) {
      FileUtils.deleteQuietly(new File(fullDir.toUri().getPath()));
    }
  }
}
@ -0,0 +1,79 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.util;

import java.io.File;
import java.io.FileWriter;

import java.util.Set;

import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
import org.junit.Before;
import org.junit.After;
import org.junit.Test;

import static org.junit.Assert.assertEquals;

/*
 * Test for the JSON-based combined hosts file reader
 * (CombinedHostsFileReader).
 */
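/*
 * For reference, dfs.hosts.json entries are JSON renderings of
 * DatanodeAdminProperties. An illustrative entry (field names follow the
 * setters used in HostsFileWriter; the exact on-disk layout is defined by
 * CombinedHostsFileReader/Writer) might look like:
 *   {"hostName": "host1", "port": 50010, "upgradeDomain": "ud1",
 *    "adminState": "DECOMMISSIONED"}
 */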
public class TestCombinedHostsFileReader {

  // Using /test/build/data/tmp directory to store temporary files
  static final String HOSTS_TEST_DIR = new File(System.getProperty(
      "test.build.data", "/tmp")).getAbsolutePath();
  File NEW_FILE = new File(HOSTS_TEST_DIR, "dfs.hosts.new.json");

  static final String TEST_CACHE_DATA_DIR =
      System.getProperty("test.cache.data", "build/test/cache");
  File EXISTING_FILE = new File(TEST_CACHE_DATA_DIR, "dfs.hosts.json");

  @Before
  public void setUp() throws Exception {
  }

  @After
  public void tearDown() throws Exception {
    // Delete test file after running tests
    NEW_FILE.delete();
  }

  /*
   * Load the existing test json file
   */
  @Test
  public void testLoadExistingJsonFile() throws Exception {
    Set<DatanodeAdminProperties> all =
        CombinedHostsFileReader.readFile(EXISTING_FILE.getAbsolutePath());
    assertEquals(5, all.size());
  }

  /*
   * Test empty json config file
   */
  @Test
  public void testEmptyCombinedHostsFileReader() throws Exception {
    FileWriter hosts = new FileWriter(NEW_FILE);
    hosts.write("");
    hosts.close();
    Set<DatanodeAdminProperties> all =
        CombinedHostsFileReader.readFile(NEW_FILE.getAbsolutePath());
    assertEquals(0, all.size());
  }
}
Some files were not shown because too many files have changed in this diff.