Merge trunk into HDFS-7240

commit 3dcbbdc1d1

LICENSE.txt (269 lines changed)

@@ -379,3 +379,272 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
The binary distribution of this product bundles binaries of leveldbjni
|
||||
(https://github.com/fusesource/leveldbjni), which is available under the
|
||||
following license:
|
||||
|
||||
Copyright (c) 2011 FuseSource Corp. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of FuseSource Corp. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
The binary distribution of this product bundles binaries of leveldb
|
||||
(http://code.google.com/p/leveldb/), which is available under the following
|
||||
license:
|
||||
|
||||
Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
The binary distribution of this product bundles binaries of snappy
|
||||
(http://code.google.com/p/snappy/), which is available under the following
|
||||
license:
|
||||
|
||||
Copyright 2011, Google Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
For:
|
||||
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dataTables.bootstrap.js
|
||||
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dataTables.bootstrap.css
|
||||
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery.dataTables.min.js
|
||||
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/
|
||||
--------------------------------------------------------------------------------
|
||||
Copyright (C) 2008-2016, SpryMedia Ltd.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
For:
|
||||
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dust-full-2.0.0.min.js
|
||||
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dust-helpers-1.1.1.min.js
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
Copyright (c) 2010 Aleksander Williams
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
||||
For:
|
||||
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/moment.min.js
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
Copyright (c) 2011-2016 Tim Wood, Iskren Chernev, Moment.js contributors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person
|
||||
obtaining a copy of this software and associated documentation
|
||||
files (the "Software"), to deal in the Software without
|
||||
restriction, including without limitation the rights to use,
|
||||
copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the
|
||||
Software is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
||||
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
||||
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
For:
|
||||
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/bootstrap-3.0.2
|
||||
hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/bootstrap.min.js
|
||||
hadoop-tools/hadoop-sls/src/main/html/css/bootstrap.min.css
|
||||
hadoop-tools/hadoop-sls/src/main/html/css/bootstrap-responsive.min.css
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2011-2016 Twitter, Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
|
||||
For:
|
||||
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/jquery-1.10.2.min.js
|
||||
hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js
|
||||
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
Copyright jQuery Foundation and other contributors, https://jquery.org/
|
||||
|
||||
This software consists of voluntary contributions made by many
|
||||
individuals. For exact contribution history, see the revision history
|
||||
available at https://github.com/jquery/jquery
|
||||
|
||||
The following license applies to all parts of this software except as
|
||||
documented below:
|
||||
|
||||
====
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
====
|
||||
|
||||
All files located in the node_modules and external directories are
|
||||
externally maintained libraries used by this software which have their
|
||||
own licenses; we recommend you read them, as their terms may differ from
|
||||
the terms above.
|
||||
|
||||
For:
|
||||
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jt/jquery.jstree.js.gz
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
Copyright (c) 2014 Ivan Bozhanov
|
||||
|
||||
Permission is hereby granted, free of charge, to any person
|
||||
obtaining a copy of this software and associated documentation
|
||||
files (the "Software"), to deal in the Software without
|
||||
restriction, including without limitation the rights to use,
|
||||
copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the
|
||||
Software is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
||||
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
||||
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
For:
|
||||
hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/d3.v3.js
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
D3 is available under a 3-clause BSD license. For details, see:
|
||||
hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/d3-LICENSE
|
||||

NOTICE.txt (15 lines changed)

@@ -1,2 +1,17 @@
This product includes software developed by The Apache Software
Foundation (http://www.apache.org/).

The binary distribution of this product bundles binaries of
org.iq80.leveldb:leveldb-api (https://github.com/dain/leveldb), which has the
following notices:
* Copyright 2011 Dain Sundstrom <dain@iq80.com>
* Copyright 2011 FuseSource Corp. http://fusesource.com

The binary distribution of this product bundles binaries of
org.fusesource.hawtjni:hawtjni-runtime (https://github.com/fusesource/hawtjni),
which has the following notices:
* This product includes software developed by FuseSource Corp.
  http://fusesource.com
* This product includes software developed at
  Progress Software Corporation and/or its subsidiaries or affiliates.
* This product includes software developed by IBM Corporation and others.

@@ -0,0 +1,623 @@
#!/usr/bin/env bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
if [[ -z "${BASH_VERSINFO[0]}" ]] \
|
||||
|| [[ "${BASH_VERSINFO[0]}" -lt 3 ]] \
|
||||
|| [[ "${BASH_VERSINFO[0]}" -eq 3 && "${BASH_VERSINFO[1]}" -lt 2 ]]; then
|
||||
echo "bash v3.2+ is required. Sorry."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
function centered_text
|
||||
{
|
||||
local text="$*"
|
||||
local spacing=$(( (75+${#text}) /2 ))
|
||||
printf "%*s\n" ${spacing} "${text}"
|
||||
}
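# Worked example of the padding arithmetic above (illustrative): for a
# 25-character title, spacing = (75 + 25) / 2 = 50, so printf right-aligns
# the text in a 50-column field, leaving it roughly centred under the
# banner line printed by big_console_header.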
|
||||
|
||||
function big_console_header
|
||||
{
|
||||
printf "\n\n"
|
||||
echo "****************************************************************************"
|
||||
centered_text "${@}"
|
||||
echo "****************************************************************************"
|
||||
printf "\n\n"
|
||||
}
|
||||
|
||||
## @description Given a filename or dir, return the absolute version of it
|
||||
## @audience public
|
||||
## @stability stable
|
||||
## @param directory
|
||||
## @replaceable no
|
||||
## @return 0 success
|
||||
## @return 1 failure
|
||||
## @return stdout abspath
|
||||
function hadoop_abs
|
||||
{
|
||||
declare obj=$1
|
||||
declare dir
|
||||
declare fn
|
||||
|
||||
if [[ ! -e ${obj} ]]; then
|
||||
return 1
|
||||
elif [[ -d ${obj} ]]; then
|
||||
dir=${obj}
|
||||
else
|
||||
dir=$(dirname -- "${obj}")
|
||||
fn=$(basename -- "${obj}")
|
||||
fn="/${fn}"
|
||||
fi
|
||||
|
||||
dir=$(cd -P -- "${dir}" >/dev/null 2>/dev/null && pwd -P)
|
||||
if [[ $? = 0 ]]; then
|
||||
echo "${dir}${fn}"
|
||||
return 0
|
||||
fi
|
||||
return 1
|
||||
}
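# Usage sketch (illustrative; the relative path below is hypothetical):
#   LOGDIR=$(hadoop_abs "./patchprocess")   # directory -> absolute directory
#   BIN=$(hadoop_abs "${BASH_SOURCE:-$0}")  # file -> absolute path, as done
#                                           # at the bottom of this script
# A non-existent argument makes the function return 1 and print nothing.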
|
||||
|
||||
## @description Print a message to stderr
|
||||
## @audience public
|
||||
## @stability stable
|
||||
## @replaceable no
|
||||
## @param string
|
||||
function hadoop_error
|
||||
{
|
||||
echo "$*" 1>&2
|
||||
}
|
||||
|
||||
|
||||
function run_and_redirect
|
||||
{
|
||||
declare logfile=$1
|
||||
shift
|
||||
declare res
|
||||
|
||||
echo "\$ ${*} > ${logfile} 2>&1"
|
||||
# to the log
|
||||
{
|
||||
date
|
||||
echo "cd $(pwd)"
|
||||
echo "${*}"
|
||||
} > "${logfile}"
|
||||
# run the actual command
|
||||
"${@}" >> "${logfile}" 2>&1
|
||||
res=$?
|
||||
if [[ ${res} != 0 ]]; then
|
||||
echo
|
||||
echo "Failed!"
|
||||
echo
|
||||
exit "${res}"
|
||||
fi
|
||||
}
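# Illustrative call, mirroring how the release steps below use it:
#   run_and_redirect "${LOGDIR}/mvn_clean.log" "${MVN}" "${MVN_ARGS[@]}" clean
# The command line is echoed to the console, the command (plus a timestamp
# and the working directory) is written to the log, and a non-zero exit
# code aborts the run.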
|
||||
|
||||
function hadoop_native_flags
|
||||
{
|
||||
|
||||
# modified version of the Yetus personality
|
||||
|
||||
if [[ ${NATIVE} != true ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
# Based upon HADOOP-11937
|
||||
#
|
||||
# Some notes:
|
||||
#
|
||||
# - getting fuse to compile on anything but Linux
|
||||
# is always tricky.
|
||||
# - Darwin assumes homebrew is in use.
|
||||
# - HADOOP-12027 required for bzip2 on OS X.
|
||||
# - bzip2 is broken in lots of places.
|
||||
# e.g., HADOOP-12027 for OS X, so no -Drequire.bzip2
|
||||
#
|
||||
|
||||
case "${OSNAME}" in
|
||||
Linux)
|
||||
# shellcheck disable=SC2086
|
||||
echo -Pnative -Drequire.snappy -Drequire.openssl -Drequire.fuse
|
||||
;;
|
||||
Darwin)
|
||||
echo \
|
||||
-Pnative -Drequire.snappy \
|
||||
-Drequire.openssl \
|
||||
-Dopenssl.prefix=/usr/local/opt/openssl/ \
|
||||
-Dopenssl.include=/usr/local/opt/openssl/include \
|
||||
-Dopenssl.lib=/usr/local/opt/openssl/lib
|
||||
;;
|
||||
*)
|
||||
# shellcheck disable=SC2086
|
||||
echo \
|
||||
-Pnative \
|
||||
-Drequire.snappy -Drequire.openssl \
|
||||
-Drequire.test.libhadoop
|
||||
;;
|
||||
esac
|
||||
}
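# On Linux the function above emits, for example:
#   -Pnative -Drequire.snappy -Drequire.openssl -Drequire.fuse
# and makearelease word-splits that output into the "mvn install" command
# via the unquoted $(hadoop_native_flags) substitution.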
|
||||
|
||||
# Function to probe the exit code of the script commands,
|
||||
# and stop in the case of failure with a contextual error
|
||||
# message.
|
||||
function run()
|
||||
{
|
||||
declare res
|
||||
declare logfile
|
||||
|
||||
echo "\$ ${*}"
|
||||
"${@}"
|
||||
res=$?
|
||||
if [[ ${res} != 0 ]]; then
|
||||
echo
|
||||
echo "Failed!"
|
||||
echo
|
||||
exit "${res}"
|
||||
fi
|
||||
}
|
||||
|
||||
function domd5()
|
||||
{
|
||||
run "${MD5SUM}" "${1}" > "${1}.md5"
|
||||
}
|
||||
|
||||
function header()
|
||||
{
|
||||
echo
|
||||
printf "\n\n"
|
||||
echo "============================================================================"
|
||||
echo "============================================================================"
|
||||
centered_text "Hadoop Release Creator"
|
||||
echo "============================================================================"
|
||||
echo "============================================================================"
|
||||
printf "\n\n"
|
||||
echo "Version to create : ${HADOOP_VERSION}"
|
||||
echo "Release Candidate Label: ${RC_LABEL##-}"
|
||||
echo "Source Version : ${DEFAULT_HADOOP_VERSION}"
|
||||
printf "\n\n"
|
||||
}
|
||||
|
||||
function set_defaults
|
||||
{
|
||||
BINDIR=$(dirname "${BIN}")
|
||||
BASEDIR=$(hadoop_abs "${BINDIR}/../..")
|
||||
|
||||
ARTIFACTS_DIR="${BASEDIR}/target/artifacts"
|
||||
|
||||
# Extract Hadoop version from ${BASEDIR}/pom.xml
|
||||
DEFAULT_HADOOP_VERSION=$(grep "<version>" "${BASEDIR}/pom.xml" \
|
||||
| head -1 \
|
||||
| sed -e 's|^ *<version>||' -e 's|</version>.*$||')
|
||||
|
||||
DOCKER=false
|
||||
DOCKERCACHE=false
|
||||
DOCKERFILE="${BASEDIR}/dev-support/docker/Dockerfile"
|
||||
DOCKERRAN=false
|
||||
|
||||
# Extract Java version from ${BASEDIR}/pom.xml
|
||||
# doing this outside of maven means we can do this before
|
||||
# the docker container comes up...
|
||||
JVM_VERSION=$(grep "<javac.version>" "${BASEDIR}/hadoop-project/pom.xml" \
|
||||
| head -1 \
|
||||
| sed -e 's|^ *<javac.version>||' -e 's|</javac.version>.*$||' -e 's|..||')
|
||||
|
||||
GIT=$(command -v git)
|
||||
|
||||
GPG=$(command -v gpg)
|
||||
GPGAGENT=$(command -v gpg-agent)
|
||||
|
||||
HADOOP_VERSION="${DEFAULT_HADOOP_VERSION}"
|
||||
|
||||
INDOCKER=false
|
||||
|
||||
LOGDIR="${BASEDIR}/patchprocess"
|
||||
|
||||
if [[ -z "${MVN}" ]]; then
|
||||
if [[ -n "${MAVEN_HOME}" ]]; then
|
||||
MVN=${MAVEN_HOME}/bin/mvn
|
||||
else
|
||||
MVN=$(command -v mvn)
|
||||
fi
|
||||
fi
|
||||
|
||||
MD5SUM=$(command -v md5sum)
|
||||
if [[ -z "${MD5SUM}" ]]; then
|
||||
MD5SUM=$(command -v md5)
|
||||
fi
|
||||
|
||||
NATIVE=false
|
||||
OSNAME=$(uname -s)
|
||||
|
||||
PUBKEYFILE="https://dist.apache.org/repos/dist/release/hadoop/common/KEYS"
|
||||
}
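# MVN can be pre-set in the environment; set_defaults only fills it in
# when it is empty. A hypothetical example (the maven path is made up):
#   MVN=/opt/apache-maven/bin/mvn dev-support/bin/create-release --docker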
|
||||
|
||||
function startgpgagent
|
||||
{
|
||||
if [[ "${SIGN}" = true ]]; then
|
||||
if [[ -n "${GPGAGENT}" && -z "${GPG_AGENT_INFO}" ]]; then
|
||||
echo "starting gpg agent"
|
||||
touch "${LOGDIR}/gpgagent.conf"
|
||||
eval $("${GPGAGENT}" --daemon \
|
||||
--options "${LOGDIR}/gpgagent.conf" \
|
||||
--log-file=${LOGDIR}/create-release-gpgagent.log)
|
||||
GPGAGENTPID=$(echo ${GPG_AGENT_INFO} | cut -f 2 -d:)
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function stopgpgagent
|
||||
{
|
||||
if [[ -n "${GPGAGENTPID}" ]]; then
|
||||
kill ${GPGAGENTPID}
|
||||
fi
|
||||
}
|
||||
|
||||
function usage
|
||||
{
|
||||
echo "--artifactsdir=[path] Path to use to store release bits"
|
||||
echo "--asfrelease Make an ASF release"
|
||||
echo "--docker Use Hadoop's Dockerfile for guaranteed environment"
|
||||
echo "--dockercache Use a Docker-private maven cache"
|
||||
echo "--logdir=[path] Path to store logs"
|
||||
echo "--mvncache=[path] Path to the maven cache to use"
|
||||
echo "--native Also build the native components"
|
||||
echo "--rc-label=[label] Add this label to the builds"
|
||||
echo "--sign Use .gnupg dir to sign the jars"
|
||||
echo "--version=[version] Use an alternative version string"
|
||||
}
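# Example invocations (illustrative):
#   dev-support/bin/create-release --docker --dockercache --sign --rc-label=RC0
#   dev-support/bin/create-release --asfrelease --docker --dockercache
# option_parse below enforces that, on Linux, --asfrelease is only usable
# together with --docker and --dockercache.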
|
||||
|
||||
function option_parse
|
||||
{
|
||||
declare i
|
||||
|
||||
for i in "$@"; do
|
||||
case ${i} in
|
||||
--asfrelease)
|
||||
ASFRELEASE=true
|
||||
NATIVE=true
|
||||
SIGN=true
|
||||
;;
|
||||
--artifactsdir=*)
|
||||
ARTIFACTS_DIR=${i#*=}
|
||||
;;
|
||||
--docker)
|
||||
DOCKER=true
|
||||
;;
|
||||
--dockercache)
|
||||
DOCKERCACHE=true
|
||||
;;
|
||||
--help)
|
||||
usage
|
||||
exit
|
||||
;;
|
||||
--indocker)
|
||||
INDOCKER=true
|
||||
;;
|
||||
--logdir=*)
|
||||
LOGDIR=${i#*=}
|
||||
;;
|
||||
--mvncache=*)
|
||||
MVNCACHE=${i#*=}
|
||||
;;
|
||||
--native)
|
||||
NATIVE=true
|
||||
;;
|
||||
--rc-label=*)
|
||||
RC_LABEL=${i#*=}
|
||||
;;
|
||||
--sign)
|
||||
SIGN=true
|
||||
;;
|
||||
--version=*)
|
||||
HADOOP_VERSION=${i#*=}
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ ! -d "${HOME}/.gnupg" ]]; then
|
||||
hadoop_error "ERROR: No .gnupg dir. Disabling signing capability."
|
||||
SIGN=false
|
||||
fi
|
||||
|
||||
DOCKERCMD=$(command -v docker)
|
||||
if [[ "${DOCKER}" = true && -z "${DOCKERCMD}" ]]; then
|
||||
hadoop_error "ERROR: docker binary not found. Disabling docker mode."
|
||||
DOCKER=false
|
||||
fi
|
||||
|
||||
if [[ "${DOCKERCACHE}" = true && "${DOCKER}" = false ]]; then
|
||||
if [[ "${INDOCKER}" = false ]]; then
|
||||
hadoop_error "ERROR: docker mode not enabled. Disabling dockercache."
|
||||
fi
|
||||
DOCKERCACHE=false
|
||||
fi
|
||||
|
||||
if [[ "${DOCKERCACHE}" = true && -n "${MVNCACHE}" ]]; then
|
||||
hadoop_error "ERROR: Cannot set --mvncache and --dockercache simultaneously."
|
||||
exit 1
|
||||
else
|
||||
MVNCACHE=${MVNCACHE:-"${HOME}/.m2"}
|
||||
fi
|
||||
|
||||
if [[ "${ASFRELEASE}" = true ]]; then
|
||||
if [[ "${SIGN}" = false ]]; then
|
||||
hadoop_error "ERROR: --asfrelease requires --sign. Exiting."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${OSNAME}" = Linux ]]; then
|
||||
if [[ "${DOCKER}" = false && "${INDOCKER}" = false ]]; then
|
||||
hadoop_error "ERROR: --asfrelease requires --docker on Linux. Exiting."
|
||||
exit 1
|
||||
elif [[ "${DOCKERCACHE}" = false && "${INDOCKER}" = false ]]; then
|
||||
hadoop_error "ERROR: --asfrelease on Linux requires --dockercache. Exiting."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ -n "${MVNCACHE}" ]]; then
|
||||
mkdir -p "${MVNCACHE}"
|
||||
if [[ -d "${MVNCACHE}" ]]; then
|
||||
MVN_ARGS=("-Dmaven.repo.local=${MVNCACHE}")
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function dockermode
|
||||
{
|
||||
declare lines
|
||||
declare -a modp
|
||||
declare imgname
|
||||
declare -a extrad
|
||||
declare user_name
|
||||
declare group_id
|
||||
|
||||
if [[ "${DOCKER}" != true ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
user_name=${SUDO_USER:=$USER}
|
||||
user_id=$(id -u "${user_name}")
|
||||
group_id=$(id -g "${user_name}")
|
||||
|
||||
imgname="hadoop/createrelease:${HADOOP_VERSION}_${RANDOM}"
|
||||
|
||||
if [[ -d "${HOME}/.gnupg" ]]; then
|
||||
extrad+=("-v" "${HOME}/.gnupg:/home/${user_name}/.gnupg")
|
||||
fi
|
||||
|
||||
if [[ -n "${LOGDIR}" ]]; then
|
||||
if [[ ! -d "${LOGDIR}" ]]; then
|
||||
mkdir -p "${LOGDIR}"
|
||||
fi
|
||||
lines=$(hadoop_abs "${LOGDIR}")
|
||||
extrad+=("-v" "${lines}:${lines}")
|
||||
fi
|
||||
|
||||
if [[ -n "${ARTIFACTS_DIR}" ]]; then
|
||||
if [[ ! -d "${ARTIFACTS_DIR}" ]]; then
|
||||
mkdir -p "${ARTIFACTS_DIR}"
|
||||
fi
|
||||
lines=$(hadoop_abs "${ARTIFACTS_DIR}")
|
||||
extrad+=("-v" "${lines}:${lines}")
|
||||
fi
|
||||
|
||||
if [[ "${DOCKERCACHE}" = true ]]; then
|
||||
modp+=("--mvncache=/maven")
|
||||
else
|
||||
lines=$(hadoop_abs "${MVNCACHE}")
|
||||
extrad+=("-v" "${lines}:${lines}")
|
||||
fi
|
||||
|
||||
for lines in "${PARAMS[@]}"; do
|
||||
if [[ "${lines}" != "--docker" ]]; then
|
||||
modp+=("$lines")
|
||||
fi
|
||||
done
|
||||
|
||||
modp+=("--indocker")
|
||||
|
||||
(
|
||||
lines=$(grep -n 'YETUS CUT HERE' "${DOCKERFILE}" | cut -f1 -d:)
|
||||
if [[ -z "${lines}" ]]; then
|
||||
cat "${DOCKERFILE}"
|
||||
else
|
||||
head -n "${lines}" "${DOCKERFILE}"
|
||||
fi
|
||||
# make sure we put some space between, just in case last
|
||||
# line isn't an empty line or whatever
|
||||
printf "\n\n"
|
||||
echo "RUN groupadd --non-unique -g ${group_id} ${user_name}"
|
||||
echo "RUN useradd -g ${group_id} -u ${user_id} -m ${user_name}"
|
||||
echo "RUN chown -R ${user_name} /home/${user_name}"
|
||||
echo "ENV HOME /home/${user_name}"
|
||||
echo "RUN mkdir -p /maven"
|
||||
echo "RUN chown -R ${user_name} /maven"
|
||||
|
||||
# we always force build with the Oracle JDK
|
||||
# but with the correct version
|
||||
echo "ENV JAVA_HOME /usr/lib/jvm/java-${JVM_VERSION}-oracle"
|
||||
echo "USER ${user_name}"
|
||||
printf "\n\n"
|
||||
) | docker build -t "${imgname}" -
|
||||
|
||||
run docker run -i -t \
|
||||
--privileged \
|
||||
"${extrad[@]}" \
|
||||
-v "${BASEDIR}:/build/source" \
|
||||
-u "${user_name}" \
|
||||
-w "/build/source" \
|
||||
"${imgname}" \
|
||||
"/build/source/dev-support/bin/create-release" "${modp[@]}"
|
||||
|
||||
DOCKERRAN=true
|
||||
}
|
||||
|
||||
function makearelease
|
||||
{
|
||||
# let's start at the root
|
||||
run cd "${BASEDIR}"
|
||||
|
||||
big_console_header "Cleaning the Source Tree"
|
||||
|
||||
# git clean to clear any remnants from previous build
|
||||
run "${GIT}" clean -xdf
|
||||
|
||||
mkdir -p "${LOGDIR}"
|
||||
|
||||
# mvn clean for sanity
|
||||
run_and_redirect "${LOGDIR}/mvn_clean.log" "${MVN}" "${MVN_ARGS[@]}" clean
|
||||
|
||||
# Create staging dir for release artifacts
|
||||
run mkdir -p "${ARTIFACTS_DIR}"
|
||||
|
||||
big_console_header "Apache RAT Check"
|
||||
|
||||
# Create RAT report
|
||||
run_and_redirect "${LOGDIR}/mvn_apache_rat.log" "${MVN}" "${MVN_ARGS[@]}" apache-rat:check
|
||||
|
||||
big_console_header "Maven Build and Install"
|
||||
|
||||
# Create SRC and BIN tarballs for release,
|
||||
# Using 'install' goal instead of 'package' so artifacts are available
|
||||
# in the Maven local cache for the site generation
|
||||
#
|
||||
# shellcheck disable=SC2046
|
||||
run_and_redirect "${LOGDIR}/mvn_install.log" \
|
||||
"${MVN}" "${MVN_ARGS[@]}" install -Pdist,src \
|
||||
-DskipTests -Dtar $(hadoop_native_flags)
|
||||
|
||||
big_console_header "Maven Site"
|
||||
|
||||
# Create site for release
|
||||
run_and_redirect "${LOGDIR}/mvn_site.log" "${MVN}" "${MVN_ARGS[@]}" site site:stage -Pdist,src,releasedocs
|
||||
|
||||
big_console_header "Staging the release"
|
||||
|
||||
run mv "${BASEDIR}/target/staging/hadoop-project" "${BASEDIR}/target/r${HADOOP_VERSION}/"
|
||||
run cd "${BASEDIR}/target/"
|
||||
run tar czpf "hadoop-site-${HADOOP_VERSION}.tar.gz" "r${HADOOP_VERSION}"/*
|
||||
run cd "${BASEDIR}"
|
||||
|
||||
# Stage RAT report
|
||||
#shellcheck disable=SC2038
|
||||
find . -name rat.txt | xargs -I% cat % > "${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}-rat.txt"
|
||||
|
||||
# Stage CHANGES and RELEASENOTES files
|
||||
for i in CHANGES RELEASENOTES; do
|
||||
run cp -p \
|
||||
"${BASEDIR}/hadoop-common-project/hadoop-common/src/site/markdown/release/${HADOOP_VERSION}"/${i}*.md \
|
||||
"${ARTIFACTS_DIR}/${i}.md"
|
||||
done
|
||||
|
||||
# Prepare and stage BIN tarball
|
||||
run cd "${BASEDIR}/hadoop-dist/target/"
|
||||
run tar -xzpf "hadoop-${HADOOP_VERSION}.tar.gz"
|
||||
run cp -r "${BASEDIR}/target/r${HADOOP_VERSION}"/* "hadoop-${HADOOP_VERSION}/share/doc/hadoop/"
|
||||
run tar -czpf "hadoop-${HADOOP_VERSION}.tar.gz" "hadoop-${HADOOP_VERSION}"
|
||||
run cd "${BASEDIR}"
|
||||
run mv \
|
||||
"${BASEDIR}/hadoop-dist/target/hadoop-${HADOOP_VERSION}.tar.gz" \
|
||||
"${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}.tar.gz"
|
||||
|
||||
# Stage SRC tarball
|
||||
run mv \
|
||||
"${BASEDIR}/hadoop-dist/target/hadoop-${HADOOP_VERSION}-src.tar.gz" \
|
||||
"${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}-src.tar.gz"
|
||||
|
||||
# Stage SITE tarball
|
||||
run mv \
|
||||
"${BASEDIR}/target/hadoop-site-${HADOOP_VERSION}.tar.gz" \
|
||||
"${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}-site.tar.gz"
|
||||
}
|
||||
|
||||
function signartifacts
|
||||
{
|
||||
declare i
|
||||
|
||||
if [[ "${SIGN}" = false ]]; then
|
||||
for i in ${ARTIFACTS_DIR}/*; do
|
||||
domd5 "${i}"
|
||||
done
|
||||
echo ""
|
||||
echo "Remember to sign the artifacts before staging them on the open"
|
||||
echo ""
|
||||
return
|
||||
fi
|
||||
|
||||
big_console_header "Signing the release"
|
||||
|
||||
for i in ${ARTIFACTS_DIR}/*; do
|
||||
gpg --use-agent --armor --output "${i}.asc" --detach-sig "${i}"
|
||||
gpg --print-mds "${i}" > "${i}.mds"
|
||||
domd5 "${i}"
|
||||
done
|
||||
|
||||
if [[ "${ASFRELEASE}" = true ]]; then
|
||||
echo "Fetching the Apache Hadoop KEYS file..."
|
||||
curl -L "${PUBKEYFILE}" -o "${BASEDIR}/target/KEYS"
|
||||
gpg --import --trustdb "${BASEDIR}/target/testkeysdb" "${BASEDIR}/target/KEYS"
|
||||
gpg --verify --trustdb "${BASEDIR}/target/testkeysdb" \
|
||||
"${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}.tar.gz.asc" \
|
||||
"${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}.tar.gz"
|
||||
if [[ $? != 0 ]]; then
|
||||
hadoop_error "ERROR: GPG key is not present in ${PUBKEYFILE}."
|
||||
hadoop_error "ERROR: This MUST be fixed. Exiting."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# find root of the source tree
|
||||
BIN=$(hadoop_abs "${BASH_SOURCE:-$0}")
|
||||
PARAMS=("$@")
|
||||
|
||||
set_defaults
|
||||
|
||||
option_parse "${PARAMS[@]}"
|
||||
|
||||
dockermode
|
||||
|
||||
header
|
||||
|
||||
if [[ -n ${RC_LABEL} ]]; then
|
||||
RC_LABEL="-${RC_LABEL}"
|
||||
fi
|
||||
|
||||
if [[ "${INDOCKER}" = true || "${DOCKERRAN}" = false ]]; then
|
||||
|
||||
startgpgagent
|
||||
|
||||
makearelease
|
||||
|
||||
signartifacts
|
||||
|
||||
stopgpgagent
|
||||
fi
|
||||
|
||||
if [[ "${INDOCKER}" = true ]]; then
|
||||
exit $?
|
||||
fi
|
||||
|
||||
if [[ $? == 0 ]]; then
|
||||
echo
|
||||
echo "Congratulations, you have successfully built the release"
|
||||
echo "artifacts for Apache Hadoop ${HADOOP_VERSION}${RC_LABEL}"
|
||||
echo
|
||||
echo "The artifacts for this run are available at ${ARTIFACTS_DIR}:"
|
||||
run ls -1 "${ARTIFACTS_DIR}"
|
||||
|
||||
echo
|
||||
fi

@@ -0,0 +1,164 @@
#!/usr/bin/env bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -o pipefail
|
||||
|
||||
# Bundle a native library if requested. Exit 1 in case error happens.
|
||||
# Usage: bundle_native_lib bundleoption liboption libpattern libdir
|
||||
function bundle_native_lib()
|
||||
{
|
||||
declare bundleoption="$1"
|
||||
declare liboption="$2"
|
||||
declare libpattern="$3"
|
||||
declare libdir="$4"
|
||||
|
||||
|
||||
echo "Checking to bundle with:"
|
||||
echo "bundleoption=${bundleoption}, liboption=${liboption}, pattern=${libpattern} libdir=${libdir}"
|
||||
|
||||
if [[ "${bundleoption}" != "true" ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
if [[ -z "${libdir}" ]] || [[ ! -d "${libdir}" ]]; then
|
||||
echo "The required option ${liboption} isn't given or invalid. Bundling the lib failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd "${libdir}" || exit 1
|
||||
${TAR} ./*"${libpattern}"* | (cd "${TARGET_DIR}"/ || exit 1; ${UNTAR})
|
||||
if [[ $? -ne 0 ]]; then
|
||||
echo "Bundling library with ${liboption} failed "
|
||||
exit 1
|
||||
fi
|
||||
}
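# Illustrative call, mirroring the snappy invocation further down: when
# --snappylibbundle=true was passed, every file matching *snappy* under
# ${SNAPPYLIB} is copied into ${TARGET_DIR} via the tar pipe above:
#   bundle_native_lib "${SNAPPYLIBBUNDLE}" "snappy.lib" "snappy" "${SNAPPYLIB}"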
|
||||
|
||||
function bundle_native_bin
|
||||
{
|
||||
declare bundleoption="$1"
|
||||
declare libbundle="$2"
|
||||
declare binoption="$3"
|
||||
declare binpattern="$4"
|
||||
declare libdir="$5"
|
||||
|
||||
echo "Checking to bundle with:"
|
||||
echo "bundleoption=${bundleoption}, libbundle=${libbundle}, binoption=${binoption}, libdir=${libdir}, binpattern=${binpattern}"
|
||||
|
||||
|
||||
if [[ "${bundleoption}" != "true" ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
if [[ "${libbundle}" != "true" ]]; then
|
||||
return
|
||||
fi
|
||||
|
||||
if [[ -z "${libdir}" ]] || [[ ! -d "${libdir}" ]]; then
|
||||
echo "The required option ${liboption} isn't given or invalid. Bundling the lib failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd "${libdir}" || exit 1
|
||||
${TAR} ./*"${libpattern}"* | (cd "${TARGET_BIN_DIR}"/ || exit 1 ; ${UNTAR})
|
||||
if [[ $? -ne 0 ]]; then
|
||||
echo "Bundling bin files for ${binoption} failed"
|
||||
exit 1
|
||||
fi
|
||||
}
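# Illustrative call, mirroring the Windows section further down; binaries
# are only bundled when both the bin and lib bundle options are true:
#   bundle_native_bin "${SNAPPYBINBUNDLE}" "${SNAPPYLIBBUNDLE}" "snappy.lib" "snappy" "${SNAPPYLIB}"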
|
||||
|
||||
for i in "$@"; do
|
||||
case "${i}" in
|
||||
--version=*)
|
||||
VERSION=${i#*=}
|
||||
;;
|
||||
--artifactid=*)
|
||||
ARTIFACTID=${i#*=}
|
||||
;;
|
||||
--builddir=*)
|
||||
BUILD_DIR=${i#*=}
|
||||
;;
|
||||
--isallib=*)
|
||||
ISALLIB=${i#*=}
|
||||
;;
|
||||
--isalbundle=*)
|
||||
ISALBUNDLE=${i#*=}
|
||||
;;
|
||||
--opensslbinbundle=*)
|
||||
OPENSSLBINBUNDLE=${i#*=}
|
||||
;;
|
||||
--openssllib=*)
|
||||
OPENSSLLIB=${i#*=}
|
||||
;;
|
||||
--openssllibbundle=*)
|
||||
OPENSSLLIBBUNDLE=${i#*=}
|
||||
;;
|
||||
--snappybinbundle=*)
|
||||
SNAPPYBINBUNDLE=${i#*=}
|
||||
;;
|
||||
--snappylib=*)
|
||||
SNAPPYLIB=${i#*=}
|
||||
;;
|
||||
--snappylibbundle=*)
|
||||
SNAPPYLIBBUNDLE=${i#*=}
|
||||
;;
|
||||
|
||||
esac
|
||||
done
|
||||
|
||||
TAR='tar cf -'
|
||||
UNTAR='tar xfBp -'
|
||||
LIB_DIR="${BUILD_DIR}/native/target/usr/local/lib"
|
||||
BIN_DIR="${BUILD_DIR}/bin"
|
||||
TARGET_DIR="${BUILD_DIR}/${ARTIFACTID}-${VERSION}/lib/native"
|
||||
TARGET_BIN_DIR="${BUILD_DIR}/${ARTIFACTID}-${VERSION}/bin"
|
||||
|
||||
|
||||
# Most systems
|
||||
|
||||
if [[ -d "${LIB_DIR}" ]]; then
|
||||
mkdir -p "${TARGET_DIR}"
|
||||
cd "${LIB_DIR}" || exit 1
|
||||
${TAR} lib* | (cd "${TARGET_DIR}"/ || exit 1; ${UNTAR})
|
||||
if [[ $? -ne 0 ]]; then
|
||||
echo "Bundling lib files failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
bundle_native_lib "${SNAPPYLIBBUNDLE}" "snappy.lib" "snappy" "${SNAPPYLIB}"
|
||||
|
||||
bundle_native_lib "${OPENSSLLIBBUNDLE}" "openssl.lib" "crypto" "${OPENSSLLIB}"
|
||||
|
||||
bundle_native_lib "${ISALBUNDLE}" "isal.lib" "isa" "${ISALLIB}"
|
||||
fi
|
||||
|
||||
# Windows
|
||||
|
||||
# Windows doesn't have a LIB_DIR, everything goes into bin
|
||||
|
||||
if [[ -d "${BIN_DIR}" ]] ; then
|
||||
mkdir -p "${TARGET_BIN_DIR}"
|
||||
cd "${BIN_DIR}" || exit 1
|
||||
${TAR} ./* | (cd "${TARGET_BIN_DIR}"/ || exit 1; ${UNTAR})
|
||||
if [[ $? -ne 0 ]]; then
|
||||
echo "Bundling bin files failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
bundle_native_bin "${SNAPPYBINBUNDLE}" "${SNAPPYLIBBUNDLE}" "snappy.lib" "snappy" "${SNAPPYLIB}"
|
||||
|
||||
bundle_native_bin "${OPENSSLBINBUNDLE}" "${OPENSSLLIBBUNDLE}" "openssl.lib" "crypto" "${OPENSSLLIB}"
|
||||
|
||||
fi

@@ -127,6 +127,8 @@ run copy "${ROOT}/hadoop-common-project/hadoop-common/target/hadoop-common-${VER
run copy "${ROOT}/hadoop-common-project/hadoop-nfs/target/hadoop-nfs-${VERSION}" .
|
||||
run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${VERSION}" .
|
||||
run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs-nfs/target/hadoop-hdfs-nfs-${VERSION}" .
|
||||
run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs-client/target/hadoop-hdfs-client-${VERSION}" .
|
||||
run copy "${ROOT}/hadoop-hdfs-project/hadoop-hdfs-native-client/target/hadoop-hdfs-native-client-${VERSION}" .
|
||||
run copy "${ROOT}/hadoop-yarn-project/target/hadoop-yarn-project-${VERSION}" .
|
||||
run copy "${ROOT}/hadoop-mapreduce-project/target/hadoop-mapreduce-${VERSION}" .
|
||||
run copy "${ROOT}/hadoop-tools/hadoop-tools-dist/target/hadoop-tools-dist-${VERSION}" .
|
||||

@@ -1,144 +0,0 @@
#!/bin/bash
|
||||
# Licensed to the Apache Software Foundation (ASF) under one or more
|
||||
# contributor license agreements. See the NOTICE file distributed with
|
||||
# this work for additional information regarding copyright ownership.
|
||||
# The ASF licenses this file to You under the Apache License, Version 2.0
|
||||
# (the "License"); you may not use this file except in compliance with
|
||||
# the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
# Function to probe the exit code of the script commands,
|
||||
# and stop in the case of failure with a contextual error
|
||||
# message.
|
||||
run() {
|
||||
echo "\$ ${@}"
|
||||
"${@}"
|
||||
exitCode=$?
|
||||
if [[ $exitCode != 0 ]]; then
|
||||
echo
|
||||
echo "Failed! running ${@} in `pwd`"
|
||||
echo
|
||||
exit $exitCode
|
||||
fi
|
||||
}
|
||||
|
||||
doMD5() {
|
||||
MD5CMD="md5sum"
|
||||
which $MD5CMD
|
||||
if [[ $? != 0 ]]; then
|
||||
MD5CMD="md5"
|
||||
fi
|
||||
run $MD5CMD ${1} > ${1}.md5
|
||||
}
|
||||
|
||||
# If provided, the created release artifacts will be tagged with it
|
||||
# (use RC#, i.e: RC0). Do not use a label to create the final release
|
||||
# artifact.
|
||||
RC_LABEL=$1
|
||||
|
||||
# Extract Hadoop version from POM
|
||||
HADOOP_VERSION=`cat pom.xml | grep "<version>" | head -1 | sed 's|^ *<version>||' | sed 's|</version>.*$||'`
|
||||
|
||||
# Setup git
|
||||
GIT=${GIT:-git}
|
||||
|
||||
echo
|
||||
echo "*****************************************************************"
|
||||
echo
|
||||
echo "Hadoop version to create release artifacts: ${HADOOP_VERSION}"
|
||||
echo
|
||||
echo "Release Candidate Label: ${RC_LABEL}"
|
||||
echo
|
||||
echo "*****************************************************************"
|
||||
echo
|
||||
|
||||
if [[ ! -z ${RC_LABEL} ]]; then
|
||||
RC_LABEL="-${RC_LABEL}"
|
||||
fi
|
||||
|
||||
# Get Maven command
|
||||
if [ -z "$MAVEN_HOME" ]; then
|
||||
MVN=mvn
|
||||
else
|
||||
MVN=$MAVEN_HOME/bin/mvn
|
||||
fi
|
||||
|
||||
ARTIFACTS_DIR="target/artifacts"
|
||||
|
||||
# git clean to clear any remnants from previous build
|
||||
run ${GIT} clean -xdf
|
||||
|
||||
# mvn clean for sanity
|
||||
run ${MVN} clean
|
||||
|
||||
# Create staging dir for release artifacts
|
||||
run mkdir -p ${ARTIFACTS_DIR}
|
||||
|
||||
# Create RAT report
|
||||
run ${MVN} apache-rat:check
|
||||
|
||||
# Create SRC and BIN tarballs for release,
|
||||
# Using 'install' goal instead of 'package' so artifacts are available
|
||||
# in the Maven local cache for the site generation
|
||||
run ${MVN} install -Pdist,src,native -DskipTests -Dtar
|
||||
|
||||
# Create site for release
|
||||
run ${MVN} site site:stage -Pdist -Psrc
|
||||
run mkdir -p target/staging/hadoop-project/hadoop-project-dist/hadoop-yarn
|
||||
run mkdir -p target/staging/hadoop-project/hadoop-project-dist/hadoop-mapreduce
|
||||
run cp ./hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html target/staging/hadoop-project/hadoop-project-dist/hadoop-common/
|
||||
run cp ./hadoop-common-project/hadoop-common/CHANGES.txt target/staging/hadoop-project/hadoop-project-dist/hadoop-common/
|
||||
run cp ./hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt target/staging/hadoop-project/hadoop-project-dist/hadoop-hdfs/
|
||||
run cp ./hadoop-yarn-project/CHANGES.txt target/staging/hadoop-project/hadoop-project-dist/hadoop-yarn/
|
||||
run cp ./hadoop-mapreduce-project/CHANGES.txt target/staging/hadoop-project/hadoop-project-dist/hadoop-mapreduce/
|
||||
run mv target/staging/hadoop-project target/r${HADOOP_VERSION}/
|
||||
run cd target/
|
||||
run tar czf hadoop-site-${HADOOP_VERSION}.tar.gz r${HADOOP_VERSION}/*
|
||||
run cd ..
|
||||
|
||||
# Stage RAT report
|
||||
find . -name rat.txt | xargs -I% cat % > ${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}-rat.txt
|
||||
|
||||
# Stage CHANGES.txt files
|
||||
run cp ./hadoop-common-project/hadoop-common/CHANGES.txt ${ARTIFACTS_DIR}/CHANGES-COMMON-${HADOOP_VERSION}${RC_LABEL}.txt
|
||||
run cp ./hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt ${ARTIFACTS_DIR}/CHANGES-HDFS-${HADOOP_VERSION}${RC_LABEL}.txt
|
||||
run cp ./hadoop-mapreduce-project/CHANGES.txt ${ARTIFACTS_DIR}/CHANGES-MAPREDUCE-${HADOOP_VERSION}${RC_LABEL}.txt
|
||||
run cp ./hadoop-yarn-project/CHANGES.txt ${ARTIFACTS_DIR}/CHANGES-YARN-${HADOOP_VERSION}${RC_LABEL}.txt
|
||||
|
||||
# Prepare and stage BIN tarball
|
||||
run cd hadoop-dist/target/
|
||||
run tar -xzf hadoop-${HADOOP_VERSION}.tar.gz
|
||||
run cp -r ../../target/r${HADOOP_VERSION}/* hadoop-${HADOOP_VERSION}/share/doc/hadoop/
|
||||
run tar -czf hadoop-${HADOOP_VERSION}.tar.gz hadoop-${HADOOP_VERSION}
|
||||
run cd ../..
|
||||
run mv hadoop-dist/target/hadoop-${HADOOP_VERSION}.tar.gz ${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}.tar.gz
|
||||
|
||||
# Stage SRC tarball
|
||||
run mv hadoop-dist/target/hadoop-${HADOOP_VERSION}-src.tar.gz ${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}-src.tar.gz
|
||||
|
||||
# Stage SITE tarball
|
||||
run mv target/hadoop-site-${HADOOP_VERSION}.tar.gz ${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}-site.tar.gz
|
||||
|
||||
# MD5 SRC and BIN tarballs
|
||||
doMD5 ${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}.tar.gz
|
||||
doMD5 ${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}-src.tar.gz
|
||||
|
||||
run cd ${ARTIFACTS_DIR}
|
||||
ARTIFACTS_DIR=`pwd`
|
||||
echo
|
||||
echo "Congratulations, you have successfully built the release"
|
||||
echo "artifacts for Apache Hadoop ${HADOOP_VERSION}${RC_LABEL}"
|
||||
echo
|
||||
echo "The artifacts for this run are available at ${ARTIFACTS_DIR}:"
|
||||
run ls -1 ${ARTIFACTS_DIR}
|
||||
echo
|
||||
echo "Remember to sign them before staging them on the open"
|
||||
echo

@@ -23,23 +23,45 @@ FROM ubuntu:trusty
|
||||
WORKDIR /root
|
||||
|
||||
ENV DEBIAN_FRONTEND noninteractive
|
||||
ENV DEBCONF_TERSE true
|
||||
|
||||
######
|
||||
# Install common dependencies from packages
|
||||
######
|
||||
RUN apt-get update && apt-get install --no-install-recommends -y \
|
||||
git curl ant make maven \
|
||||
cmake gcc g++ \
|
||||
protobuf-compiler libprotoc-dev \
|
||||
protobuf-c-compiler libprotobuf-dev \
|
||||
build-essential libtool \
|
||||
zlib1g-dev pkg-config libssl-dev \
|
||||
snappy libsnappy-dev \
|
||||
bzip2 libbz2-dev \
|
||||
libjansson-dev \
|
||||
fuse libfuse-dev \
|
||||
RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
|
||||
ant \
|
||||
build-essential \
|
||||
bzip2 \
|
||||
cmake \
|
||||
curl \
|
||||
doxygen \
|
||||
fuse \
|
||||
g++ \
|
||||
gcc \
|
||||
git \
|
||||
gnupg-agent \
|
||||
make \
|
||||
maven \
|
||||
libbz2-dev \
|
||||
libcurl4-openssl-dev \
|
||||
python python2.7 pylint \
|
||||
openjdk-7-jdk doxygen
|
||||
libfuse-dev \
|
||||
libjansson-dev \
|
||||
libprotobuf-dev \
|
||||
libprotoc-dev \
|
||||
libsnappy-dev \
|
||||
libssl-dev \
|
||||
libtool \
|
||||
openjdk-7-jdk \
|
||||
pinentry-curses \
|
||||
pkg-config \
|
||||
protobuf-compiler \
|
||||
protobuf-c-compiler \
|
||||
python \
|
||||
python2.7 \
|
||||
python-pip \
|
||||
snappy \
|
||||
zlib1g-dev
|
||||
|
||||
# Fixing the Apache commons / Maven dependency problem under Ubuntu:
|
||||
# See http://wiki.apache.org/commons/VfsProblems
|
||||

@@ -48,32 +70,36 @@ RUN cd /usr/share/maven/lib && ln -s ../../java/commons-lang.jar .
######
|
||||
# Install ISA-L library
|
||||
######
|
||||
RUN curl -L http://http.us.debian.org/debian/pool/main/libi/libisal/libisal2_2.15.0-2_amd64.deb \
|
||||
RUN curl -s -S -L \
|
||||
http://http.us.debian.org/debian/pool/main/libi/libisal/libisal2_2.15.0-2_amd64.deb \
|
||||
-o /opt/libisal2_2.15.0-2_amd64.deb && \
|
||||
dpkg -i /opt/libisal2_2.15.0-2_amd64.deb
|
||||
|
||||
|
||||
#######
|
||||
# Oracle Java
|
||||
#######
|
||||
|
||||
RUN apt-get install -y software-properties-common
|
||||
RUN echo "dot_style = mega" > "/root/.wgetrc"
|
||||
RUN echo "quiet = on" >> "/root/.wgetrc"
|
||||
|
||||
RUN apt-get -q install --no-install-recommends -y software-properties-common
|
||||
RUN add-apt-repository -y ppa:webupd8team/java
|
||||
RUN apt-get update
|
||||
RUN apt-get -q update
|
||||
|
||||
# Auto-accept the Oracle JDK license
|
||||
RUN echo oracle-java7-installer shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections
|
||||
RUN apt-get install -y oracle-java7-installer
|
||||
RUN apt-get -q install --no-install-recommends -y oracle-java7-installer
|
||||
|
||||
# Auto-accept the Oracle JDK license
|
||||
RUN echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections
|
||||
RUN apt-get install -y oracle-java8-installer
|
||||
RUN apt-get -q install --no-install-recommends -y oracle-java8-installer
|
||||
|
||||
######
|
||||
# Install findbugs
|
||||
######
|
||||
RUN mkdir -p /opt/findbugs && \
|
||||
curl -L https://sourceforge.net/projects/findbugs/files/findbugs/3.0.1/findbugs-noUpdateChecks-3.0.1.tar.gz/download \
|
||||
curl -L -s -S \
|
||||
https://sourceforge.net/projects/findbugs/files/findbugs/3.0.1/findbugs-noUpdateChecks-3.0.1.tar.gz/download \
|
||||
-o /opt/findbugs.tar.gz && \
|
||||
tar xzf /opt/findbugs.tar.gz --strip-components 1 -C /opt/findbugs
|
||||
ENV FINDBUGS_HOME /opt/findbugs
|
||||

@@ -81,15 +107,25 @@ ENV FINDBUGS_HOME /opt/findbugs
####
|
||||
# Install shellcheck
|
||||
####
|
||||
RUN apt-get install -y cabal-install
|
||||
RUN cabal update && cabal install shellcheck --global
|
||||
RUN apt-get -q install -y cabal-install
|
||||
RUN mkdir /root/.cabal
|
||||
RUN echo "remote-repo: hackage.fpcomplete.com:http://hackage.fpcomplete.com/" >> /root/.cabal/config
|
||||
#RUN echo "remote-repo: hackage.haskell.org:http://hackage.haskell.org/" > /root/.cabal/config
|
||||
RUN echo "remote-repo-cache: /root/.cabal/packages" >> /root/.cabal/config
|
||||
RUN cabal update
|
||||
RUN cabal install shellcheck --global
|
||||
|
||||
####
|
||||
# Install bats
|
||||
####
|
||||
RUN add-apt-repository -y ppa:duggan/bats
|
||||
RUN apt-get update
|
||||
RUN apt-get install -y bats
|
||||
RUN apt-get -q update
|
||||
RUN apt-get -q install --no-install-recommends -y bats
|
||||
|
||||
####
|
||||
# Install pylint
|
||||
####
|
||||
RUN pip install pylint
|
||||
|
||||
###
|
||||
# Avoid out of memory errors in builds
|
||||

@@ -106,7 +142,7 @@ ENV MAVEN_OPTS -Xms256m -Xmx512m
# Install Forrest (for Apache Hadoop website)
|
||||
###
|
||||
RUN mkdir -p /usr/local/apache-forrest ; \
|
||||
curl -O http://archive.apache.org/dist/forrest/0.8/apache-forrest-0.8.tar.gz ; \
|
||||
curl -s -S -O http://archive.apache.org/dist/forrest/0.8/apache-forrest-0.8.tar.gz ; \
|
||||
tar xzf *forrest* --strip-components 1 -C /usr/local/apache-forrest ; \
|
||||
echo 'forrest.home=/usr/local/apache-forrest' > build.properties
|
||||
|
||||

@@ -139,7 +139,7 @@
<outputDirectory>/share/doc/hadoop/${hadoop.component}</outputDirectory>
|
||||
</fileSet>
|
||||
<fileSet>
|
||||
<directory>${basedir}/src/main/native/libhdfs</directory>
|
||||
<directory>${basedir}/src/main/native/libhdfs/include/hdfs</directory>
|
||||
<includes>
|
||||
<include>hdfs.h</include>
|
||||
</includes>
|
||||

@@ -1236,6 +1236,17 @@ public abstract class AbstractFileSystem {
+ " doesn't support setStoragePolicy");
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Unset the storage policy set for a given file or directory.
|
||||
* @param src file or directory path.
|
||||
* @throws IOException
|
||||
*/
|
||||
public void unsetStoragePolicy(final Path src) throws IOException {
|
||||
throw new UnsupportedOperationException(getClass().getSimpleName()
|
||||
+ " doesn't support unsetStoragePolicy");
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve the storage policy for a given file or directory.
|
||||
*
|
||||

@@ -24,11 +24,13 @@ import java.io.IOException;
import java.io.InputStream;
|
||||
import java.nio.channels.ClosedChannelException;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.permission.AclEntry;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.util.DataChecksum;
|
||||
import org.apache.hadoop.util.Progressable;
|
||||

@@ -155,11 +157,14 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
throw new IOException("Not a checksum file: "+sumFile);
|
||||
this.bytesPerSum = sums.readInt();
|
||||
set(fs.verifyChecksum, DataChecksum.newCrc32(), bytesPerSum, 4);
|
||||
} catch (FileNotFoundException e) { // quietly ignore
|
||||
set(fs.verifyChecksum, null, 1, 0);
|
||||
} catch (IOException e) { // loudly ignore
|
||||
LOG.warn("Problem opening checksum file: "+ file +
|
||||
". Ignoring exception: " , e);
|
||||
} catch (IOException e) {
|
||||
// mincing the message is terrible, but java throws permission
|
||||
// exceptions as FNF because that's all the method signatures allow!
|
||||
if (!(e instanceof FileNotFoundException) ||
|
||||
e.getMessage().endsWith(" (Permission denied)")) {
|
||||
LOG.warn("Problem opening checksum file: "+ file +
|
||||
". Ignoring exception: " , e);
|
||||
}
|
||||
set(fs.verifyChecksum, null, 1, 0);
|
||||
}
|
||||
}
|
||||

@@ -478,6 +483,103 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
blockSize, progress);
|
||||
}
|
||||
|
||||
abstract class FsOperation {
|
||||
boolean run(Path p) throws IOException {
|
||||
boolean status = apply(p);
|
||||
if (status) {
|
||||
Path checkFile = getChecksumFile(p);
|
||||
if (fs.exists(checkFile)) {
|
||||
apply(checkFile);
|
||||
}
|
||||
}
|
||||
return status;
|
||||
}
|
||||
abstract boolean apply(Path p) throws IOException;
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void setPermission(Path src, final FsPermission permission)
|
||||
throws IOException {
|
||||
new FsOperation(){
|
||||
@Override
|
||||
boolean apply(Path p) throws IOException {
|
||||
fs.setPermission(p, permission);
|
||||
return true;
|
||||
}
|
||||
}.run(src);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setOwner(Path src, final String username, final String groupname)
|
||||
throws IOException {
|
||||
new FsOperation(){
|
||||
@Override
|
||||
boolean apply(Path p) throws IOException {
|
||||
fs.setOwner(p, username, groupname);
|
||||
return true;
|
||||
}
|
||||
}.run(src);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setAcl(Path src, final List<AclEntry> aclSpec)
|
||||
throws IOException {
|
||||
new FsOperation(){
|
||||
@Override
|
||||
boolean apply(Path p) throws IOException {
|
||||
fs.setAcl(p, aclSpec);
|
||||
return true;
|
||||
}
|
||||
}.run(src);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void modifyAclEntries(Path src, final List<AclEntry> aclSpec)
|
||||
throws IOException {
|
||||
new FsOperation(){
|
||||
@Override
|
||||
boolean apply(Path p) throws IOException {
|
||||
fs.modifyAclEntries(p, aclSpec);
|
||||
return true;
|
||||
}
|
||||
}.run(src);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void removeAcl(Path src) throws IOException {
|
||||
new FsOperation(){
|
||||
@Override
|
||||
boolean apply(Path p) throws IOException {
|
||||
fs.removeAcl(p);
|
||||
return true;
|
||||
}
|
||||
}.run(src);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void removeAclEntries(Path src, final List<AclEntry> aclSpec)
|
||||
throws IOException {
|
||||
new FsOperation(){
|
||||
@Override
|
||||
boolean apply(Path p) throws IOException {
|
||||
fs.removeAclEntries(p, aclSpec);
|
||||
return true;
|
||||
}
|
||||
}.run(src);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void removeDefaultAcl(Path src) throws IOException {
|
||||
new FsOperation(){
|
||||
@Override
|
||||
boolean apply(Path p) throws IOException {
|
||||
fs.removeDefaultAcl(p);
|
||||
return true;
|
||||
}
|
||||
}.run(src);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set replication for an existing file.
|
||||
* Implement the abstract <tt>setReplication</tt> of <tt>FileSystem</tt>
|
||||
|
@ -488,16 +590,14 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
|
|||
* false if file does not exist or is a directory
|
||||
*/
|
||||
@Override
|
||||
public boolean setReplication(Path src, short replication) throws IOException {
|
||||
boolean value = fs.setReplication(src, replication);
|
||||
if (!value)
|
||||
return false;
|
||||
|
||||
Path checkFile = getChecksumFile(src);
|
||||
if (exists(checkFile))
|
||||
fs.setReplication(checkFile, replication);
|
||||
|
||||
return true;
|
||||
public boolean setReplication(Path src, final short replication)
|
||||
throws IOException {
|
||||
return new FsOperation(){
|
||||
@Override
|
||||
boolean apply(Path p) throws IOException {
|
||||
return fs.setReplication(p, replication);
|
||||
}
|
||||
}.run(src);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -21,6 +21,9 @@ package org.apache.hadoop.fs;
|
|||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.http.lib.StaticUserWebFilter;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactoryLegacy;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.XORRawErasureCoderFactory;
|
||||
|
||||
/**
|
||||
* This class contains constants for configuration keys used
|
||||
|
@ -90,7 +93,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
|
|||
/**
|
||||
* CallQueue related settings. These are not used directly, but rather
|
||||
* combined with a namespace and port. For instance:
|
||||
* IPC_NAMESPACE + ".8020." + IPC_CALLQUEUE_IMPL_KEY
|
||||
* IPC_NAMESPACE + ".9820." + IPC_CALLQUEUE_IMPL_KEY
|
||||
*/
|
||||
public static final String IPC_NAMESPACE = "ipc";
|
||||
public static final String IPC_CALLQUEUE_IMPL_KEY = "callqueue.impl";
|
||||
|
@ -156,13 +159,23 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
  /** Supported erasure codec classes */
  public static final String IO_ERASURECODE_CODECS_KEY = "io.erasurecode.codecs";

  /** Raw coder factory for the RS codec. */
  public static final String IO_ERASURECODE_CODEC_RS_RAWCODER_KEY =
      "io.erasurecode.codec.rs.rawcoder";
  /** Raw coder factory for the RS default codec. */
  public static final String IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY =
      "io.erasurecode.codec.rs-default.rawcoder";
  public static final String IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_DEFAULT =
      RSRawErasureCoderFactory.class.getCanonicalName();

  /** Raw coder factory for the RS legacy codec. */
  public static final String IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODER_KEY =
      "io.erasurecode.codec.rs-legacy.rawcoder";
  public static final String IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODER_DEFAULT =
      RSRawErasureCoderFactoryLegacy.class.getCanonicalName();

  /** Raw coder factory for the XOR codec. */
  public static final String IO_ERASURECODE_CODEC_XOR_RAWCODER_KEY =
      "io.erasurecode.codec.xor.rawcoder";
  public static final String IO_ERASURECODE_CODEC_XOR_RAWCODER_DEFAULT =
      XORRawErasureCoderFactory.class.getCanonicalName();

  /**
   * Service Authorization
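These keys let the raw coder implementation be swapped per codec. A hedged sketch of overriding the rs-default factory programmatically; the value shown is the shipped default that also appears in the core-default.xml hunk later in this patch.

```java
// Sketch: selecting the raw coder factory for the rs-default codec in code.
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
    "org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory");
```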
@ -103,7 +103,14 @@ public enum CreateFlag {
   * Append data to a new block instead of the end of the last partial block.
   * This is only useful for APPEND.
   */
  NEW_BLOCK((short) 0x20);
  NEW_BLOCK((short) 0x20),

  /**
   * Advise that a block replica NOT be written to the local DataNode where
   * 'local' means the same host as the client is being run on.
   */
  @InterfaceAudience.LimitedPrivate({"HBase"})
  NO_LOCAL_WRITE((short) 0x40);

  private final short mode;
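A hedged sketch of how a caller (for example HBase, per the LimitedPrivate annotation) might request a non-local first replica. The `fileContext` variable and path are hypothetical, and the flag is only advisory.

```java
// Sketch: combining CREATE with the new NO_LOCAL_WRITE hint.
FSDataOutputStream out = fileContext.create(new Path("/tmp/example"),
    EnumSet.of(CreateFlag.CREATE, CreateFlag.NO_LOCAL_WRITE));
out.close();
```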
@ -2695,6 +2695,23 @@ public class FileContext {
      }.resolve(this, absF);
  }

  /**
   * Unset the storage policy set for a given file or directory.
   * @param src file or directory path.
   * @throws IOException
   */
  public void unsetStoragePolicy(final Path src) throws IOException {
    final Path absF = fixRelativePart(src);
    new FSLinkResolver<Void>() {
      @Override
      public Void next(final AbstractFileSystem fs, final Path p)
          throws IOException {
        fs.unsetStoragePolicy(src);
        return null;
      }
    }.resolve(this, absF);
  }

  /**
   * Query the effective storage policy ID for the given file or directory.
   *

@ -685,8 +685,8 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
return new BlockLocation[0];
|
||||
|
||||
}
|
||||
String[] name = { "localhost:50010" };
|
||||
String[] host = { "localhost" };
|
||||
String[] name = {"localhost:9866"};
|
||||
String[] host = {"localhost"};
|
||||
return new BlockLocation[] {
|
||||
new BlockLocation(name, host, 0, file.getLen()) };
|
||||
}
|
||||
|
@ -1244,7 +1244,6 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
/**
|
||||
* Renames Path src to Path dst
|
||||
* <ul>
|
||||
* <li
|
||||
* <li>Fails if src is a file and dst is a directory.
|
||||
* <li>Fails if src is a directory and dst is a file.
|
||||
* <li>Fails if the parent of dst does not exist or is a file.
|
||||
|
@ -2663,7 +2662,7 @@ public abstract class FileSystem extends Configured implements Closeable {
|
|||
* @param src file or directory path.
|
||||
* @throws IOException
|
||||
*/
|
||||
public void unsetStoragePolicy(Path src) throws IOException {
|
||||
public void unsetStoragePolicy(final Path src) throws IOException {
|
||||
throw new UnsupportedOperationException(getClass().getSimpleName()
|
||||
+ " doesn't support unsetStoragePolicy");
|
||||
}
|
||||
|
|
|
@ -405,6 +405,12 @@ public abstract class FilterFs extends AbstractFileSystem {
|
|||
myFs.setStoragePolicy(path, policyName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void unsetStoragePolicy(final Path src)
|
||||
throws IOException {
|
||||
myFs.unsetStoragePolicy(src);
|
||||
}
|
||||
|
||||
@Override
|
||||
public BlockStoragePolicySpi getStoragePolicy(final Path src)
|
||||
throws IOException {
|
||||
|
|
|
@ -385,6 +385,12 @@ class ChRootedFs extends AbstractFileSystem {
|
|||
myFs.setStoragePolicy(fullPath(path), policyName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void unsetStoragePolicy(final Path src)
|
||||
throws IOException {
|
||||
myFs.unsetStoragePolicy(fullPath(src));
|
||||
}
|
||||
|
||||
@Override
|
||||
public BlockStoragePolicySpi getStoragePolicy(final Path src)
|
||||
throws IOException {
|
||||
|
|
|
@ -749,6 +749,14 @@ public class ViewFs extends AbstractFileSystem {
|
|||
res.targetFileSystem.setStoragePolicy(res.remainingPath, policyName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void unsetStoragePolicy(final Path src)
|
||||
throws IOException {
|
||||
InodeTree.ResolveResult<AbstractFileSystem> res =
|
||||
fsState.resolve(getUriPath(src), true);
|
||||
res.targetFileSystem.unsetStoragePolicy(res.remainingPath);
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieve the storage policy for a given file or directory.
|
||||
*
|
||||
|
|
|
@ -17,17 +17,14 @@
|
|||
*/
|
||||
package org.apache.hadoop.io.erasurecode;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.CommonConfigurationKeys;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawDecoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawEncoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.XORRawDecoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.XORRawEncoder;
|
||||
|
||||
/**
|
||||
* A codec & coder utility to help create raw coders conveniently.
|
||||
|
@ -42,39 +39,55 @@ public final class CodecUtil {
|
|||
* @param conf configuration possibly with some items to configure the coder
|
||||
* @param numDataUnits number of data units in a coding group
|
||||
* @param numParityUnits number of parity units in a coding group
|
||||
* @param codec the codec to use. If null, will use the default codec
|
||||
* @return raw encoder
|
||||
*/
|
||||
public static RawErasureEncoder createRSRawEncoder(
|
||||
Configuration conf, int numDataUnits, int numParityUnits) {
|
||||
RawErasureCoder rawCoder = createRawCoder(conf,
|
||||
CommonConfigurationKeys.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
|
||||
true, numDataUnits, numParityUnits);
|
||||
if (rawCoder == null) {
|
||||
rawCoder = new RSRawEncoder(numDataUnits, numParityUnits);
|
||||
Configuration conf, int numDataUnits, int numParityUnits, String codec) {
|
||||
Preconditions.checkNotNull(conf);
|
||||
if (codec == null) {
|
||||
codec = ErasureCodeConstants.RS_DEFAULT_CODEC_NAME;
|
||||
}
|
||||
|
||||
RawErasureCoder rawCoder = createRawCoder(conf,
|
||||
getFactNameFromCodec(conf, codec), true, numDataUnits, numParityUnits);
|
||||
return (RawErasureEncoder) rawCoder;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create RS raw encoder using the default codec.
|
||||
*/
|
||||
public static RawErasureEncoder createRSRawEncoder(
|
||||
Configuration conf, int numDataUnits, int numParityUnits) {
|
||||
return createRSRawEncoder(conf, numDataUnits, numParityUnits, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create RS raw decoder according to configuration.
|
||||
* @param conf configuration possibly with some items to configure the coder
|
||||
* @param numDataUnits number of data units in a coding group
|
||||
* @param numParityUnits number of parity units in a coding group
|
||||
* @param codec the codec to use. If null, will use the default codec
|
||||
* @return raw decoder
|
||||
*/
|
||||
public static RawErasureDecoder createRSRawDecoder(
|
||||
Configuration conf, int numDataUnits, int numParityUnits) {
|
||||
RawErasureCoder rawCoder = createRawCoder(conf,
|
||||
CommonConfigurationKeys.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
|
||||
false, numDataUnits, numParityUnits);
|
||||
if (rawCoder == null) {
|
||||
rawCoder = new RSRawDecoder(numDataUnits, numParityUnits);
|
||||
Configuration conf, int numDataUnits, int numParityUnits, String codec) {
|
||||
Preconditions.checkNotNull(conf);
|
||||
if (codec == null) {
|
||||
codec = ErasureCodeConstants.RS_DEFAULT_CODEC_NAME;
|
||||
}
|
||||
|
||||
RawErasureCoder rawCoder = createRawCoder(conf,
|
||||
getFactNameFromCodec(conf, codec), false, numDataUnits, numParityUnits);
|
||||
return (RawErasureDecoder) rawCoder;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create RS raw decoder using the default codec.
|
||||
*/
|
||||
public static RawErasureDecoder createRSRawDecoder(
|
||||
Configuration conf, int numDataUnits, int numParityUnits) {
|
||||
return createRSRawDecoder(conf, numDataUnits, numParityUnits, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create XOR raw encoder according to configuration.
|
||||
* @param conf configuration possibly with some items to configure the coder
|
||||
|
@ -84,13 +97,10 @@ public final class CodecUtil {
|
|||
*/
|
||||
public static RawErasureEncoder createXORRawEncoder(
|
||||
Configuration conf, int numDataUnits, int numParityUnits) {
|
||||
Preconditions.checkNotNull(conf);
|
||||
RawErasureCoder rawCoder = createRawCoder(conf,
|
||||
CommonConfigurationKeys.IO_ERASURECODE_CODEC_XOR_RAWCODER_KEY,
|
||||
getFactNameFromCodec(conf, ErasureCodeConstants.XOR_CODEC_NAME),
|
||||
true, numDataUnits, numParityUnits);
|
||||
if (rawCoder == null) {
|
||||
rawCoder = new XORRawEncoder(numDataUnits, numParityUnits);
|
||||
}
|
||||
|
||||
return (RawErasureEncoder) rawCoder;
|
||||
}
|
||||
|
||||
|
@ -103,51 +113,65 @@ public final class CodecUtil {
|
|||
*/
|
||||
public static RawErasureDecoder createXORRawDecoder(
|
||||
Configuration conf, int numDataUnits, int numParityUnits) {
|
||||
Preconditions.checkNotNull(conf);
|
||||
RawErasureCoder rawCoder = createRawCoder(conf,
|
||||
CommonConfigurationKeys.IO_ERASURECODE_CODEC_XOR_RAWCODER_KEY,
|
||||
getFactNameFromCodec(conf, ErasureCodeConstants.XOR_CODEC_NAME),
|
||||
false, numDataUnits, numParityUnits);
|
||||
if (rawCoder == null) {
|
||||
rawCoder = new XORRawDecoder(numDataUnits, numParityUnits);
|
||||
}
|
||||
|
||||
return (RawErasureDecoder) rawCoder;
|
||||
}
|
||||
|
||||
/**
|
||||
* Create raw coder using specified conf and raw coder factory key.
|
||||
* @param conf configuration possibly with some items to configure the coder
|
||||
* @param rawCoderFactoryKey configuration key to find the raw coder factory
|
||||
* @param rawCoderFactory name of the raw coder factory
|
||||
* @param isEncoder is encoder or not we're going to create
|
||||
* @param numDataUnits number of data units in a coding group
|
||||
* @param numParityUnits number of parity units in a coding group
|
||||
* @return raw coder
|
||||
*/
|
||||
public static RawErasureCoder createRawCoder(Configuration conf,
|
||||
String rawCoderFactoryKey, boolean isEncoder, int numDataUnits,
|
||||
String rawCoderFactory, boolean isEncoder, int numDataUnits,
|
||||
int numParityUnits) {
|
||||
|
||||
if (conf == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
Class<? extends RawErasureCoderFactory> factClass = null;
|
||||
factClass = conf.getClass(rawCoderFactoryKey,
|
||||
factClass, RawErasureCoderFactory.class);
|
||||
|
||||
if (factClass == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
RawErasureCoderFactory fact;
|
||||
try {
|
||||
Class<? extends RawErasureCoderFactory> factClass = conf.getClassByName(
|
||||
rawCoderFactory).asSubclass(RawErasureCoderFactory.class);
|
||||
fact = factClass.newInstance();
|
||||
} catch (InstantiationException e) {
|
||||
throw new RuntimeException("Failed to create raw coder", e);
|
||||
} catch (IllegalAccessException e) {
|
||||
} catch (ClassNotFoundException | InstantiationException |
|
||||
IllegalAccessException e) {
|
||||
throw new RuntimeException("Failed to create raw coder", e);
|
||||
}
|
||||
|
||||
return isEncoder ? fact.createEncoder(numDataUnits, numParityUnits) :
|
||||
fact.createDecoder(numDataUnits, numParityUnits);
|
||||
}
|
||||
|
||||
private static String getFactNameFromCodec(Configuration conf, String codec) {
|
||||
switch (codec) {
|
||||
case ErasureCodeConstants.RS_DEFAULT_CODEC_NAME:
|
||||
return conf.get(
|
||||
CommonConfigurationKeys.IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
|
||||
CommonConfigurationKeys.
|
||||
IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_DEFAULT);
|
||||
case ErasureCodeConstants.RS_LEGACY_CODEC_NAME:
|
||||
return conf.get(
|
||||
CommonConfigurationKeys.IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODER_KEY,
|
||||
CommonConfigurationKeys.
|
||||
IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODER_DEFAULT);
|
||||
case ErasureCodeConstants.XOR_CODEC_NAME:
|
||||
return conf.get(
|
||||
CommonConfigurationKeys.IO_ERASURECODE_CODEC_XOR_RAWCODER_KEY,
|
||||
CommonConfigurationKeys.IO_ERASURECODE_CODEC_XOR_RAWCODER_DEFAULT);
|
||||
default:
|
||||
// For custom codec, we throw exception if the factory is not configured
|
||||
String rawCoderKey = "io.erasurecode.codec." + codec + ".rawcoder";
|
||||
String factName = conf.get(rawCoderKey);
|
||||
if (factName == null) {
|
||||
throw new IllegalArgumentException("Raw coder factory not configured " +
|
||||
"for custom codec " + codec);
|
||||
}
|
||||
return factName;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
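As read from this hunk, the RS coder factories now take an explicit codec name, with `null` falling back to rs-default. A hedged usage sketch; 6+3 matches the RS layout used elsewhere in this patch.

```java
// Sketch: creating RS coders through the reworked CodecUtil API.
Configuration conf = new Configuration();
RawErasureEncoder encoder = CodecUtil.createRSRawEncoder(
    conf, 6, 3, ErasureCodeConstants.RS_LEGACY_CODEC_NAME);
RawErasureDecoder decoder = CodecUtil.createRSRawDecoder(
    conf, 6, 3, null);   // null codec -> rs-default
```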
@ -0,0 +1,40 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.io.erasurecode;
|
||||
|
||||
/**
|
||||
* Constants related to the erasure code feature.
|
||||
*/
|
||||
public final class ErasureCodeConstants {
|
||||
|
||||
private ErasureCodeConstants(){
|
||||
}
|
||||
|
||||
public static final String RS_DEFAULT_CODEC_NAME = "rs-default";
|
||||
public static final String RS_LEGACY_CODEC_NAME = "rs-legacy";
|
||||
public static final String XOR_CODEC_NAME = "xor";
|
||||
|
||||
public static final ECSchema RS_6_3_SCHEMA = new ECSchema(
|
||||
RS_DEFAULT_CODEC_NAME, 6, 3);
|
||||
|
||||
public static final ECSchema RS_3_2_SCHEMA = new ECSchema(
|
||||
RS_DEFAULT_CODEC_NAME, 3, 2);
|
||||
|
||||
public static final ECSchema RS_6_3_LEGACY_SCHEMA = new ECSchema(
|
||||
RS_LEGACY_CODEC_NAME, 6, 3);
|
||||
}
|
|
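The new constants pair a codec name with a data/parity layout. A small illustration, assuming the usual accessor names on `ECSchema` (they are not shown in this hunk):

```java
// Illustration only: the predefined rs-default 6+3 schema.
ECSchema schema = ErasureCodeConstants.RS_6_3_SCHEMA;
System.out.println(schema.getCodecName()        // "rs-default" (assumed accessor)
    + " " + schema.getNumDataUnits()            // 6
    + "+" + schema.getNumParityUnits());        // 3
```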
@ -65,7 +65,7 @@ public class HHXORErasureEncoder extends AbstractErasureEncoder {
|
|||
private RawErasureEncoder checkCreateRSRawEncoder() {
|
||||
if (rsRawEncoder == null) {
|
||||
rsRawEncoder = CodecUtil.createRSRawEncoder(getConf(),
|
||||
getNumDataUnits(), getNumParityUnits());
|
||||
getNumDataUnits(), getNumParityUnits());
|
||||
}
|
||||
return rsRawEncoder;
|
||||
}
|
||||
|
|
|
@ -54,6 +54,7 @@ public class RSErasureDecoder extends AbstractErasureDecoder {
|
|||
|
||||
private RawErasureDecoder checkCreateRSRawDecoder() {
|
||||
if (rsRawDecoder == null) {
|
||||
// TODO: we should create the raw coder according to codec.
|
||||
rsRawDecoder = CodecUtil.createRSRawDecoder(getConf(),
|
||||
getNumDataUnits(), getNumParityUnits());
|
||||
}
|
||||
|
|
|
@ -54,8 +54,9 @@ public class RSErasureEncoder extends AbstractErasureEncoder {
|
|||
|
||||
private RawErasureEncoder checkCreateRSRawEncoder() {
|
||||
if (rawEncoder == null) {
|
||||
// TODO: we should create the raw coder according to codec.
|
||||
rawEncoder = CodecUtil.createRSRawEncoder(getConf(),
|
||||
getNumDataUnits(), getNumParityUnits());
|
||||
getNumDataUnits(), getNumParityUnits());
|
||||
}
|
||||
return rawEncoder;
|
||||
}
|
||||
|
|
|
@ -82,9 +82,9 @@ final class Compression {
|
|||
public synchronized boolean isSupported() {
|
||||
if (!checked) {
|
||||
checked = true;
|
||||
String extClazz =
|
||||
(conf.get(CONF_LZO_CLASS) == null ? System
|
||||
.getProperty(CONF_LZO_CLASS) : null);
|
||||
String extClazzConf = conf.get(CONF_LZO_CLASS);
|
||||
String extClazz = (extClazzConf != null) ?
|
||||
extClazzConf : System.getProperty(CONF_LZO_CLASS);
|
||||
String clazz = (extClazz != null) ? extClazz : defaultClazz;
|
||||
try {
|
||||
LOG.info("Trying to load Lzo codec class: " + clazz);
|
||||
|
|
|
@ -51,7 +51,7 @@ public class CallQueueManager<E> {
|
|||
return (Class<? extends RpcScheduler>)schedulerClass;
|
||||
}
|
||||
|
||||
private final boolean clientBackOffEnabled;
|
||||
private volatile boolean clientBackOffEnabled;
|
||||
|
||||
// Atomic refs point to active callQueue
|
||||
// We have two so we can better control swapping
|
||||
|
@ -185,6 +185,10 @@ public class CallQueueManager<E> {
|
|||
return scheduler.getPriorityLevel(e);
|
||||
}
|
||||
|
||||
void setClientBackoffEnabled(boolean value) {
|
||||
clientBackOffEnabled = value;
|
||||
}
|
||||
|
||||
/**
|
||||
* Insert e into the backing queue or block until we can.
|
||||
* If we block and the queue changes on us, we will insert while the
|
||||
|
|
|
@ -119,7 +119,8 @@ public class Client implements AutoCloseable {
|
|||
|
||||
private static final ThreadLocal<Integer> callId = new ThreadLocal<Integer>();
|
||||
private static final ThreadLocal<Integer> retryCount = new ThreadLocal<Integer>();
|
||||
private static final ThreadLocal<Future<?>> returnValue = new ThreadLocal<>();
|
||||
private static final ThreadLocal<Future<?>>
|
||||
RETURN_RPC_RESPONSE = new ThreadLocal<>();
|
||||
private static final ThreadLocal<Boolean> asynchronousMode =
|
||||
new ThreadLocal<Boolean>() {
|
||||
@Override
|
||||
|
@ -130,8 +131,8 @@ public class Client implements AutoCloseable {
|
|||
|
||||
@SuppressWarnings("unchecked")
|
||||
@Unstable
|
||||
public static <T> Future<T> getReturnValue() {
|
||||
return (Future<T>) returnValue.get();
|
||||
public static <T> Future<T> getReturnRpcResponse() {
|
||||
return (Future<T>) RETURN_RPC_RESPONSE.get();
|
||||
}
|
||||
|
||||
/** Set call id and retry count for the next call. */
|
||||
|
@ -1396,7 +1397,7 @@ public class Client implements AutoCloseable {
|
|||
}
|
||||
};
|
||||
|
||||
returnValue.set(returnFuture);
|
||||
RETURN_RPC_RESPONSE.set(returnFuture);
|
||||
return null;
|
||||
} else {
|
||||
return getRpcResponse(call, connection);
|
||||
|
@ -1410,7 +1411,7 @@ public class Client implements AutoCloseable {
|
|||
* synchronous mode.
|
||||
*/
|
||||
@Unstable
|
||||
static boolean isAsynchronousMode() {
|
||||
public static boolean isAsynchronousMode() {
|
||||
return asynchronousMode.get();
|
||||
}
|
||||
|
||||
|
|
|
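The renamed thread-local (`RETURN_RPC_RESPONSE`) and the now-public `isAsynchronousMode()` support an internal asynchronous call path. A hedged sketch of the pattern the engine code relies on, mirroring the ProtobufRpcEngine hunk further below; this is internal plumbing, not a supported public API.

```java
// Sketch of the internal async pattern these hunks wire up.
if (Client.isAsynchronousMode()) {
  final Future<RpcResponseWrapper> frrw = Client.getReturnRpcResponse();
  // block on frrw.get() only when the response is actually needed
}
```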
@ -32,11 +32,19 @@ import java.util.concurrent.atomic.AtomicLong;
|
|||
import java.util.concurrent.atomic.AtomicLongArray;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.google.common.util.concurrent.AtomicDoubleArray;
|
||||
import org.apache.commons.lang.exception.ExceptionUtils;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.CommonConfigurationKeys;
|
||||
import org.apache.hadoop.metrics2.MetricsCollector;
|
||||
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
|
||||
import org.apache.hadoop.metrics2.MetricsSource;
|
||||
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
|
||||
import org.apache.hadoop.metrics2.lib.Interns;
|
||||
import org.apache.hadoop.metrics2.util.MBeans;
|
||||
import org.apache.hadoop.metrics2.util.Metrics2Util.NameValuePair;
|
||||
import org.apache.hadoop.metrics2.util.Metrics2Util.TopN;
|
||||
|
||||
import org.codehaus.jackson.map.ObjectMapper;
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
|
@ -49,7 +57,8 @@ import org.slf4j.LoggerFactory;
|
|||
* for large periods (on the order of seconds), as it offloads work to the
|
||||
* decay sweep.
|
||||
*/
|
||||
public class DecayRpcScheduler implements RpcScheduler, DecayRpcSchedulerMXBean {
|
||||
public class DecayRpcScheduler implements RpcScheduler,
|
||||
DecayRpcSchedulerMXBean, MetricsSource {
|
||||
/**
|
||||
* Period controls how many milliseconds between each decay sweep.
|
||||
*/
|
||||
|
@ -107,6 +116,12 @@ public class DecayRpcScheduler implements RpcScheduler, DecayRpcSchedulerMXBean
|
|||
IPC_DECAYSCHEDULER_BACKOFF_RESPONSETIME_THRESHOLDS_KEY =
|
||||
"decay-scheduler.backoff.responsetime.thresholds";
|
||||
|
||||
// Specifies the top N user's call count and scheduler decision
|
||||
// Metrics2 Source
|
||||
public static final String DECAYSCHEDULER_METRICS_TOP_USER_COUNT =
|
||||
"decay-scheduler.metrics.top.user.count";
|
||||
public static final int DECAYSCHEDULER_METRICS_TOP_USER_COUNT_DEFAULT = 10;
|
||||
|
||||
public static final Logger LOG =
|
||||
LoggerFactory.getLogger(DecayRpcScheduler.class);
|
||||
|
||||
|
@ -138,6 +153,8 @@ public class DecayRpcScheduler implements RpcScheduler, DecayRpcSchedulerMXBean
|
|||
private final IdentityProvider identityProvider;
|
||||
private final boolean backOffByResponseTimeEnabled;
|
||||
private final long[] backOffResponseTimeThresholds;
|
||||
private final String namespace;
|
||||
private final int topUsersCount; // e.g., report top 10 users' metrics
|
||||
|
||||
/**
|
||||
* This TimerTask will call decayCurrentCounts until
|
||||
|
@ -179,6 +196,7 @@ public class DecayRpcScheduler implements RpcScheduler, DecayRpcSchedulerMXBean
|
|||
"at least 1");
|
||||
}
|
||||
this.numLevels = numLevels;
|
||||
this.namespace = ns;
|
||||
this.decayFactor = parseDecayFactor(ns, conf);
|
||||
this.decayPeriodMillis = parseDecayPeriodMillis(ns, conf);
|
||||
this.identityProvider = this.parseIdentityProvider(ns, conf);
|
||||
|
@ -199,8 +217,15 @@ public class DecayRpcScheduler implements RpcScheduler, DecayRpcSchedulerMXBean
|
|||
responseTimeAvgInLastWindow = new AtomicDoubleArray(numLevels);
|
||||
responseTimeCountInLastWindow = new AtomicLongArray(numLevels);
|
||||
|
||||
topUsersCount =
|
||||
conf.getInt(DECAYSCHEDULER_METRICS_TOP_USER_COUNT,
|
||||
DECAYSCHEDULER_METRICS_TOP_USER_COUNT_DEFAULT);
|
||||
Preconditions.checkArgument(topUsersCount > 0,
|
||||
"the number of top users for scheduler metrics must be at least 1");
|
||||
|
||||
MetricsProxy prox = MetricsProxy.getInstance(ns, numLevels);
|
||||
prox.setDelegate(this);
|
||||
prox.registerMetrics2Source(ns);
|
||||
}
|
||||
|
||||
// Load configs
|
||||
|
@ -615,7 +640,8 @@ public class DecayRpcScheduler implements RpcScheduler, DecayRpcSchedulerMXBean
|
|||
* MetricsProxy is a singleton because we may init multiple schedulers and we
|
||||
* want to clean up resources when a new scheduler replaces the old one.
|
||||
*/
|
||||
private static final class MetricsProxy implements DecayRpcSchedulerMXBean {
|
||||
public static final class MetricsProxy implements DecayRpcSchedulerMXBean,
|
||||
MetricsSource {
|
||||
// One singleton per namespace
|
||||
private static final HashMap<String, MetricsProxy> INSTANCES =
|
||||
new HashMap<String, MetricsProxy>();
|
||||
|
@ -646,6 +672,11 @@ public class DecayRpcScheduler implements RpcScheduler, DecayRpcSchedulerMXBean
|
|||
this.delegate = new WeakReference<DecayRpcScheduler>(obj);
|
||||
}
|
||||
|
||||
void registerMetrics2Source(String namespace) {
|
||||
final String name = "DecayRpcSchedulerMetrics2." + namespace;
|
||||
DefaultMetricsSystem.instance().register(name, name, this);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String getSchedulingDecisionSummary() {
|
||||
DecayRpcScheduler scheduler = delegate.get();
|
||||
|
@ -704,6 +735,14 @@ public class DecayRpcScheduler implements RpcScheduler, DecayRpcSchedulerMXBean
|
|||
return scheduler.getResponseTimeCountInLastWindow();
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void getMetrics(MetricsCollector collector, boolean all) {
|
||||
DecayRpcScheduler scheduler = delegate.get();
|
||||
if (scheduler != null) {
|
||||
scheduler.getMetrics(collector, all);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public int getUniqueIdentityCount() {
|
||||
|
@ -731,6 +770,89 @@ public class DecayRpcScheduler implements RpcScheduler, DecayRpcSchedulerMXBean
|
|||
return ret;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void getMetrics(MetricsCollector collector, boolean all) {
|
||||
// Metrics2 interface to act as a Metric source
|
||||
try {
|
||||
MetricsRecordBuilder rb = collector.addRecord(getClass().getName())
|
||||
.setContext(namespace);
|
||||
addTotalCallVolume(rb);
|
||||
addUniqueIdentityCount(rb);
|
||||
addTopNCallerSummary(rb);
|
||||
addAvgResponseTimePerPriority(rb);
|
||||
addCallVolumePerPriority(rb);
|
||||
} catch (Exception e) {
|
||||
LOG.warn("Exception thrown while metric collection. Exception : "
|
||||
+ e.getMessage());
|
||||
}
|
||||
}
|
||||
|
||||
// Key: UniqueCallers
|
||||
private void addUniqueIdentityCount(MetricsRecordBuilder rb) {
|
||||
rb.addCounter(Interns.info("UniqueCallers", "Total unique callers"),
|
||||
getUniqueIdentityCount());
|
||||
}
|
||||
|
||||
// Key: CallVolume
|
||||
private void addTotalCallVolume(MetricsRecordBuilder rb) {
|
||||
rb.addCounter(Interns.info("CallVolume", "Total Call Volume"),
|
||||
getTotalCallVolume());
|
||||
}
|
||||
|
||||
// Key: Priority.0.CallVolume
|
||||
private void addCallVolumePerPriority(MetricsRecordBuilder rb) {
|
||||
for (int i = 0; i < responseTimeCountInLastWindow.length(); i++) {
|
||||
rb.addGauge(Interns.info("Priority." + i + ".CallVolume", "Call volume " +
|
||||
"of priority "+ i), responseTimeCountInLastWindow.get(i));
|
||||
}
|
||||
}
|
||||
|
||||
// Key: Priority.0.AvgResponseTime
|
||||
private void addAvgResponseTimePerPriority(MetricsRecordBuilder rb) {
|
||||
for (int i = 0; i < responseTimeAvgInLastWindow.length(); i++) {
|
||||
rb.addGauge(Interns.info("Priority." + i + ".AvgResponseTime", "Average" +
|
||||
" response time of priority " + i),
|
||||
responseTimeAvgInLastWindow.get(i));
|
||||
}
|
||||
}
|
||||
|
||||
// Key: Top.0.Caller(xyz).Volume and Top.0.Caller(xyz).Priority
|
||||
private void addTopNCallerSummary(MetricsRecordBuilder rb) {
|
||||
final int topCallerCount = 10;
|
||||
TopN topNCallers = getTopCallers(topCallerCount);
|
||||
Map<Object, Integer> decisions = scheduleCacheRef.get();
|
||||
for (int i=0; i < topNCallers.size(); i++) {
|
||||
NameValuePair entry = topNCallers.poll();
|
||||
String topCaller = "Top." + (topCallerCount - i) + "." +
|
||||
"Caller(" + entry.getName() + ")";
|
||||
String topCallerVolume = topCaller + ".Volume";
|
||||
String topCallerPriority = topCaller + ".Priority";
|
||||
rb.addCounter(Interns.info(topCallerVolume, topCallerVolume),
|
||||
entry.getValue());
|
||||
Integer priority = decisions.get(entry.getName());
|
||||
if (priority != null) {
|
||||
rb.addCounter(Interns.info(topCallerPriority, topCallerPriority),
|
||||
priority);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Get the top N callers' call count and scheduler decision
|
||||
private TopN getTopCallers(int n) {
|
||||
TopN topNCallers = new TopN(n);
|
||||
Iterator<Map.Entry<Object, AtomicLong>> it =
|
||||
callCounts.entrySet().iterator();
|
||||
while (it.hasNext()) {
|
||||
Map.Entry<Object, AtomicLong> entry = it.next();
|
||||
String caller = entry.getKey().toString();
|
||||
Long count = entry.getValue().get();
|
||||
if (count > 0) {
|
||||
topNCallers.offer(new NameValuePair(caller, count));
|
||||
}
|
||||
}
|
||||
return topNCallers;
|
||||
}
|
||||
|
||||
public String getSchedulingDecisionSummary() {
|
||||
Map<Object, Integer> decisions = scheduleCacheRef.get();
|
||||
if (decisions == null) {
|
||||
|
|
|
@ -26,7 +26,9 @@ import java.lang.reflect.Method;
|
|||
import java.lang.reflect.Proxy;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
|
||||
import javax.net.SocketFactory;
|
||||
|
@ -35,6 +37,7 @@ import org.apache.commons.logging.Log;
|
|||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.classification.InterfaceStability.Unstable;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.io.DataOutputOutputStream;
|
||||
import org.apache.hadoop.io.Writable;
|
||||
|
@ -67,7 +70,9 @@ import com.google.protobuf.TextFormat;
|
|||
@InterfaceStability.Evolving
|
||||
public class ProtobufRpcEngine implements RpcEngine {
|
||||
public static final Log LOG = LogFactory.getLog(ProtobufRpcEngine.class);
|
||||
|
||||
private static final ThreadLocal<Callable<?>>
|
||||
RETURN_MESSAGE_CALLBACK = new ThreadLocal<>();
|
||||
|
||||
static { // Register the rpcRequest deserializer for WritableRpcEngine
|
||||
org.apache.hadoop.ipc.Server.registerProtocolEngine(
|
||||
RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcRequestWrapper.class,
|
||||
|
@ -76,6 +81,12 @@ public class ProtobufRpcEngine implements RpcEngine {
|
|||
|
||||
private static final ClientCache CLIENTS = new ClientCache();
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
@Unstable
|
||||
public static <T> Callable<T> getReturnMessageCallback() {
|
||||
return (Callable<T>) RETURN_MESSAGE_CALLBACK.get();
|
||||
}
|
||||
|
||||
public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
|
||||
InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
|
||||
SocketFactory factory, int rpcTimeout) throws IOException {
|
||||
|
@ -189,7 +200,7 @@ public class ProtobufRpcEngine implements RpcEngine {
|
|||
* the server.
|
||||
*/
|
||||
@Override
|
||||
public Object invoke(Object proxy, Method method, Object[] args)
|
||||
public Object invoke(Object proxy, final Method method, Object[] args)
|
||||
throws ServiceException {
|
||||
long startTime = 0;
|
||||
if (LOG.isDebugEnabled()) {
|
||||
|
@ -251,6 +262,23 @@ public class ProtobufRpcEngine implements RpcEngine {
|
|||
LOG.debug("Call: " + method.getName() + " took " + callTime + "ms");
|
||||
}
|
||||
|
||||
if (Client.isAsynchronousMode()) {
|
||||
final Future<RpcResponseWrapper> frrw = Client.getReturnRpcResponse();
|
||||
Callable<Message> callback = new Callable<Message>() {
|
||||
@Override
|
||||
public Message call() throws Exception {
|
||||
return getReturnMessage(method, frrw.get());
|
||||
}
|
||||
};
|
||||
RETURN_MESSAGE_CALLBACK.set(callback);
|
||||
return null;
|
||||
} else {
|
||||
return getReturnMessage(method, val);
|
||||
}
|
||||
}
|
||||
|
||||
private Message getReturnMessage(final Method method,
|
||||
final RpcResponseWrapper rrw) throws ServiceException {
|
||||
Message prototype = null;
|
||||
try {
|
||||
prototype = getReturnProtoType(method);
|
||||
|
@ -260,7 +288,7 @@ public class ProtobufRpcEngine implements RpcEngine {
|
|||
Message returnMessage;
|
||||
try {
|
||||
returnMessage = prototype.newBuilderForType()
|
||||
.mergeFrom(val.theResponseRead).build();
|
||||
.mergeFrom(rrw.theResponseRead).build();
|
||||
|
||||
if (LOG.isTraceEnabled()) {
|
||||
LOG.trace(Thread.currentThread().getId() + ": Response <- " +
|
||||
|
|
|
@ -2921,7 +2921,15 @@ public abstract class Server {
|
|||
public int getCallQueueLen() {
|
||||
return callQueue.size();
|
||||
}
|
||||
|
||||
|
||||
public boolean isClientBackoffEnabled() {
|
||||
return callQueue.isClientBackoffEnabled();
|
||||
}
|
||||
|
||||
public void setClientBackoffEnabled(boolean value) {
|
||||
callQueue.setClientBackoffEnabled(value);
|
||||
}
|
||||
|
||||
/**
|
||||
* The maximum size of the rpc call queue of this server.
|
||||
* @return The maximum size of the rpc call queue.
|
||||
|
|
|
@ -101,5 +101,4 @@ public class MetricsUtil {
|
|||
}
|
||||
return hostName;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -29,7 +29,7 @@ import java.util.LinkedHashMap;
|
|||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Helpers to create interned metrics info
|
||||
* Helpers to create interned metrics info.
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Evolving
|
||||
|
@ -109,9 +109,9 @@ public class Interns {
|
|||
}
|
||||
|
||||
/**
|
||||
* Get a metric info object
|
||||
* @param name
|
||||
* @param description
|
||||
* Get a metric info object.
|
||||
* @param name Name of metric info object
|
||||
* @param description Description of metric info object
|
||||
* @return an interned metric info object
|
||||
*/
|
||||
public static MetricsInfo info(String name, String description) {
|
||||
|
@ -143,7 +143,7 @@ public class Interns {
|
|||
}
|
||||
|
||||
/**
|
||||
* Get a metrics tag
|
||||
* Get a metrics tag.
|
||||
* @param info of the tag
|
||||
* @param value of the tag
|
||||
* @return an interned metrics tag
|
||||
|
@ -153,7 +153,7 @@ public class Interns {
|
|||
}
|
||||
|
||||
/**
|
||||
* Get a metrics tag
|
||||
* Get a metrics tag.
|
||||
* @param name of the tag
|
||||
* @param description of the tag
|
||||
* @param value of the tag
|
||||
|
|
|
@ -86,6 +86,10 @@ public class JvmMetrics implements MetricsSource {
|
|||
new JvmMetrics(processName, sessionId));
|
||||
}
|
||||
|
||||
public static void reattach(MetricsSystem ms, JvmMetrics jvmMetrics) {
|
||||
ms.register(JvmMetrics.name(), JvmMetrics.description(), jvmMetrics);
|
||||
}
|
||||
|
||||
public static JvmMetrics initSingleton(String processName, String sessionId) {
|
||||
return Singleton.INSTANCE.init(processName, sessionId);
|
||||
}
|
||||
|
|
|
@ -0,0 +1,105 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.metrics2.util;
|
||||
|
||||
import java.util.PriorityQueue;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
|
||||
/**
|
||||
* Utility class to simplify creation of hadoop metrics2 source/sink.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class Metrics2Util {
|
||||
/**
|
||||
* A pair of a name and its corresponding value. Defines a custom
|
||||
* comparator so the TopN PriorityQueue sorts based on the count.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public static class NameValuePair implements Comparable<NameValuePair> {
|
||||
private String name;
|
||||
private long value;
|
||||
|
||||
public NameValuePair(String metricName, long value) {
|
||||
this.name = metricName;
|
||||
this.value = value;
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public long getValue() {
|
||||
return value;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int compareTo(NameValuePair other) {
|
||||
return (int) (value - other.value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean equals(Object other) {
|
||||
if (other instanceof NameValuePair) {
|
||||
return compareTo((NameValuePair)other) == 0;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
@Override
|
||||
public int hashCode() {
|
||||
return Long.valueOf(value).hashCode();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* A fixed-size priority queue, used to retrieve top-n of offered entries.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public static class TopN extends PriorityQueue<NameValuePair> {
|
||||
private static final long serialVersionUID = 5134028249611535803L;
|
||||
private int n; // > 0
|
||||
private long total = 0;
|
||||
|
||||
public TopN(int n) {
|
||||
super(n);
|
||||
this.n = n;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean offer(NameValuePair entry) {
|
||||
updateTotal(entry.value);
|
||||
if (size() == n) {
|
||||
NameValuePair smallest = peek();
|
||||
if (smallest.value >= entry.value) {
|
||||
return false;
|
||||
}
|
||||
poll(); // remove smallest
|
||||
}
|
||||
return super.offer(entry);
|
||||
}
|
||||
|
||||
private void updateTotal(long value) {
|
||||
total += value;
|
||||
}
|
||||
|
||||
public long getTotal() {
|
||||
return total;
|
||||
}
|
||||
}
|
||||
}
|
|
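A small illustration of the new `TopN` queue, which keeps only the n largest entries it is offered while tracking the running total of everything offered (the callers and counts are made up):

```java
// Illustration: retaining the top-2 callers by count.
TopN top = new TopN(2);
top.offer(new NameValuePair("alice", 42));
top.offer(new NameValuePair("bob", 7));
top.offer(new NameValuePair("carol", 99));   // evicts the smallest ("bob")
// top now holds alice(42) and carol(99); top.getTotal() == 148
```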
@ -126,6 +126,10 @@ public class UserGroupInformation {
|
|||
return DefaultMetricsSystem.instance().register(new UgiMetrics());
|
||||
}
|
||||
|
||||
static void reattach() {
|
||||
metrics = UgiMetrics.create();
|
||||
}
|
||||
|
||||
void addGetGroups(long latency) {
|
||||
getGroups.add(latency);
|
||||
if (getGroupsQuantiles != null) {
|
||||
|
@ -238,6 +242,13 @@ public class UserGroupInformation {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Reattach the class's metrics to a new metric system.
|
||||
*/
|
||||
public static void reattachMetrics() {
|
||||
UgiMetrics.reattach();
|
||||
}
|
||||
|
||||
/** Metrics to track UGI activity */
|
||||
static UgiMetrics metrics = UgiMetrics.create();
|
||||
/** The auth method to use */
|
||||
|
|
|
@ -82,11 +82,11 @@ import org.apache.hadoop.security.UserGroupInformation;
|
|||
*
|
||||
* <p>Examples:</p>
|
||||
* <p><blockquote><pre>
|
||||
* $ bin/hadoop dfs -fs darwin:8020 -ls /data
|
||||
* list /data directory in dfs with namenode darwin:8020
|
||||
* $ bin/hadoop dfs -fs darwin:9820 -ls /data
|
||||
* list /data directory in dfs with namenode darwin:9820
|
||||
*
|
||||
* $ bin/hadoop dfs -D fs.default.name=darwin:8020 -ls /data
|
||||
* list /data directory in dfs with namenode darwin:8020
|
||||
* $ bin/hadoop dfs -D fs.default.name=darwin:9820 -ls /data
|
||||
* list /data directory in dfs with namenode darwin:9820
|
||||
*
|
||||
* $ bin/hadoop dfs -conf core-site.xml -conf hdfs-site.xml -ls /data
|
||||
* list /data directory in dfs with multiple conf files specified.
|
||||
|
|
|
@ -43,12 +43,16 @@ import org.apache.hadoop.classification.InterfaceStability;
|
|||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileUtil;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
/** Run a Hadoop job jar. */
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Unstable
|
||||
public class RunJar {
|
||||
|
||||
private static final Logger LOG = LoggerFactory.getLogger(RunJar.class);
|
||||
|
||||
/** Pattern that matches any string */
|
||||
public static final Pattern MATCH_ANY = Pattern.compile(".*");
|
||||
|
||||
|
@ -93,6 +97,7 @@ public class RunJar {
|
|||
throws IOException {
|
||||
JarFile jar = new JarFile(jarFile);
|
||||
try {
|
||||
int numOfFailedLastModifiedSet = 0;
|
||||
Enumeration<JarEntry> entries = jar.entries();
|
||||
while (entries.hasMoreElements()) {
|
||||
final JarEntry entry = entries.nextElement();
|
||||
|
@ -108,11 +113,18 @@ public class RunJar {
|
|||
} finally {
|
||||
out.close();
|
||||
}
|
||||
if (!file.setLastModified(entry.getTime())) {
|
||||
numOfFailedLastModifiedSet++;
|
||||
}
|
||||
} finally {
|
||||
in.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
if (numOfFailedLastModifiedSet > 0) {
|
||||
LOG.warn("Could not set last modfied time for {} file(s)",
|
||||
numOfFailedLastModifiedSet);
|
||||
}
|
||||
} finally {
|
||||
jar.close();
|
||||
}
|
||||
|
|
|
@ -34,6 +34,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
|
|||
import com.google.common.annotations.VisibleForTesting;
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.security.alias.AbstractJavaKeyStoreProvider;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
|
@ -362,6 +363,9 @@ public abstract class Shell {
|
|||
/** If or not script timed out*/
|
||||
private final AtomicBoolean timedOut = new AtomicBoolean(false);
|
||||
|
||||
/** Indicates if the parent env vars should be inherited or not*/
|
||||
protected boolean inheritParentEnv = true;
|
||||
|
||||
/**
|
||||
* Centralized logic to discover and validate the sanity of the Hadoop
|
||||
* home directory.
|
||||
|
@ -854,9 +858,16 @@ public abstract class Shell {
|
|||
timedOut.set(false);
|
||||
completed.set(false);
|
||||
|
||||
// Remove all env vars from the Builder to prevent leaking of env vars from
|
||||
// the parent process.
|
||||
if (!inheritParentEnv) {
|
||||
builder.environment().clear();
|
||||
}
|
||||
|
||||
if (environment != null) {
|
||||
builder.environment().putAll(this.environment);
|
||||
}
|
||||
|
||||
if (dir != null) {
|
||||
builder.directory(this.dir);
|
||||
}
|
||||
|
@ -1084,6 +1095,11 @@ public abstract class Shell {
|
|||
this(execString, dir, env , 0L);
|
||||
}
|
||||
|
||||
public ShellCommandExecutor(String[] execString, File dir,
|
||||
Map<String, String> env, long timeout) {
|
||||
this(execString, dir, env , timeout, true);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new instance of the ShellCommandExecutor to execute a command.
|
||||
*
|
||||
|
@ -1096,10 +1112,12 @@ public abstract class Shell {
|
|||
* environment is not modified.
|
||||
* @param timeout Specifies the time in milliseconds, after which the
|
||||
* command will be killed and the status marked as timed-out.
|
||||
* If 0, the command will not be timed out.
|
||||
* If 0, the command will not be timed out.
|
||||
* @param inheritParentEnv Indicates if the process should inherit the env
|
||||
* vars from the parent process or not.
|
||||
*/
|
||||
public ShellCommandExecutor(String[] execString, File dir,
|
||||
Map<String, String> env, long timeout) {
|
||||
Map<String, String> env, long timeout, boolean inheritParentEnv) {
|
||||
command = execString.clone();
|
||||
if (dir != null) {
|
||||
setWorkingDirectory(dir);
|
||||
|
@ -1108,6 +1126,7 @@ public abstract class Shell {
|
|||
setEnvironment(env);
|
||||
}
|
||||
timeOutInterval = timeout;
|
||||
this.inheritParentEnv = inheritParentEnv;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
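A hedged sketch of the new constructor overload added above: running a command with the parent environment suppressed. The command and variable are illustrative.

```java
// Sketch: run `env` with a clean environment plus one explicit variable.
Map<String, String> env = new HashMap<>();
env.put("ONLY_VAR", "value");                      // hypothetical variable
Shell.ShellCommandExecutor exec = new Shell.ShellCommandExecutor(
    new String[] {"env"}, null, env, 0L, false);   // timeout 0 = no timeout
exec.execute();
System.out.println(exec.getOutput());
```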
@ -506,6 +506,14 @@
  </description>
</property>

<property>
  <name>io.erasurecode.codec.rs-default.rawcoder</name>
  <value>org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory</value>
  <description>
    Raw coder implementation for the rs-default codec.
  </description>
</property>

<!-- file system properties -->

<property>

@ -859,7 +867,10 @@
<property>
  <name>fs.s3a.multipart.threshold</name>
  <value>2147483647</value>
  <description>Threshold before uploads or copies use parallel multipart operations.</description>
  <description>How big (in bytes) to split upload or copy operations up into.
    This also controls the partition size in renamed files, as rename() involves
    copying the source file(s)
  </description>
</property>

<property>

@ -1101,6 +1112,17 @@
  </description>
</property>

<property>
  <name>ipc.maximum.data.length</name>
  <value>67108864</value>
  <description>This indicates the maximum IPC message length (bytes) that can be
    accepted by the server. Messages larger than this value are rejected by
    server immediately. This setting should rarely need to be changed. It merits
    investigating whether the cause of long RPC messages can be fixed instead,
    e.g. by splitting into smaller messages.
  </description>
</property>

<!-- Proxy Configuration -->

<property>
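The new `ipc.maximum.data.length` entry documents the server-side cap on IPC message size. If it ever does need raising, a hedged sketch of overriding it programmatically (128 MB here is an arbitrary illustration; editing core-site.xml is the usual route):

```java
// Sketch: raising the IPC message size cap to 128 MB.
Configuration conf = new Configuration();
conf.setInt("ipc.maximum.data.length", 128 * 1024 * 1024);
```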
@ -318,7 +318,7 @@ Once the Hadoop cluster is up and running check the web-ui of the components as

| Daemon | Web Interface | Notes |
|:---- |:---- |:---- |
| NameNode | http://nn_host:port/ | Default HTTP port is 50070. |
| NameNode | http://nn_host:port/ | Default HTTP port is 9870. |
| ResourceManager | http://rm_host:port/ | Default HTTP port is 8088. |
| MapReduce JobHistory Server | http://jhs_host:port/ | Default HTTP port is 19888. |

@ -212,7 +212,7 @@ Usage:

Get/Set the log level for a Log identified by a qualified class name in the daemon.

Example: $ bin/hadoop daemonlog -setlevel 127.0.0.1:50070 org.apache.hadoop.hdfs.server.namenode.NameNode DEBUG
Example: $ bin/hadoop daemonlog -setlevel 127.0.0.1:9870 org.apache.hadoop.hdfs.server.namenode.NameNode DEBUG

Files
-----
@ -122,3 +122,38 @@ Utilizing the credential command will often be for provisioning a password or se

Example: `hadoop credential create ssl.server.keystore.password jceks://file/tmp/test.jceks`

In order to indicate a particular provider type and location, the user must provide the `hadoop.security.credential.provider.path` configuration element in core-site.xml or use the command line option `-provider` on each of the credential management commands. This provider path is a comma-separated list of URLs that indicates the type and location of a list of providers that should be consulted. For example, the following path: `user:///,jceks://file/tmp/test.jceks,jceks://hdfs@nn1.example.com/my/path/test.jceks` indicates that the current user's credentials file should be consulted through the User Provider, that the local file located at `/tmp/test.jceks` is a Java Keystore Provider and that the file located within HDFS at `nn1.example.com/my/path/test.jceks` is also a store for a Java Keystore Provider.
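A hedged sketch of wiring the provider path from code and resolving a credential through it; the keystore paths are the illustrative ones from the paragraph above, and the alias matches the earlier `hadoop credential create` example.

```java
// Sketch: resolving a credential via the configured provider path.
Configuration conf = new Configuration();
conf.set("hadoop.security.credential.provider.path",
    "jceks://file/tmp/test.jceks,jceks://hdfs@nn1.example.com/my/path/test.jceks");
char[] password = conf.getPassword("ssl.server.keystore.password");
```

`Configuration.getPassword` consults the configured providers first and falls back to a clear-text value in the configuration if no provider holds the alias.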
#### Provider Types

1. The `UserProvider`, which is represented by the provider URI `user:///`, is used to retrieve credentials from a user's Credentials file. This file is used to store various tokens, secrets and passwords that are needed by executing jobs and applications.
2. The `JavaKeyStoreProvider`, which is represented by the provider URI `jceks://file|hdfs/path-to-keystore`, is used to retrieve credentials from a Java keystore. The underlying use of the Hadoop filesystem abstraction allows credentials to be stored on the local filesystem or within HDFS.
3. The `LocalJavaKeyStoreProvider`, which is represented by the provider URI `localjceks://file/path-to-keystore`, is used to access credentials from a Java keystore that must be stored on the local filesystem. This is needed for credentials that would result in a recursive dependency on accessing HDFS: any time a credential is required in order to gain access to HDFS, we cannot depend on retrieving that credential out of HDFS itself.

#### Keystore Passwords

Keystores in Java are generally protected by passwords. The primary method of protection of the keystore-based credential providers is OS-level file permissions, together with any other policy-based access protection that may exist for the target filesystem. While the password is not a primary source of protection, it is very important to understand the mechanics required and options available for managing these passwords. It is also very important to understand all the parties that will need access to the password used to protect the keystores in order to consume them at runtime.

##### Options
|
||||
| Option | Description | Notes |
|
||||
|:---- |:---- |:---|
|
||||
|Default password |This is a hardcoded password of "none". |This is a hardcoded password in an open source project and as such has obvious disadvantages. However, the mechanics section will show that it is simpler and consequently nearly as secure as the other more complex options.|
|
||||
|Environment variable|`HADOOP_CREDSTORE_PASSWORD`|This option uses an environment variable to communicate the password that should be used when interrogating all of the keystores that are configured in the `hadoop.security.credential.provider.path` configuration property. All of the keystore based providers in the path will need to be protected by the same password.|
|
||||
|Password-file|`hadoop.security.credstore.java-keystore-provider.password-file`|This option uses a "side file" that has its location configured in the `hadoop.security.credstore.java-keystore-provider.password-file` configuration property to communicate the password that should be used when interrogating all of the keystores that are configured in the `hadoop.security.credential.provider.path` configuration property.|
|
||||
|
||||
##### Mechanics
|
||||
It is extremely important to consider that *all* of the runtime consumers of the credential being protected (mapreduce jobs/applications) will need to have access to the password used to protect the keystore providers. Communicating this password can be done in a number of ways, and those ways are described in the Options section above.
|
||||
|
||||
|Keystore Password| Description|Sync Required|Clear Text|File Permissions|
|
||||
|:---- |:---- |:---|:---|:---|
|
||||
|Default Password|Hardcoded password is the default. Essentially, when using the default password for all keystore-based credential stores, we are leveraging the file permissions to protect the credential store and the keystore password is just a formality of persisting the keystore.|No|Yes|No (documented)|
|
||||
|Environment Variable|`HADOOP_CREDSTORE_PASSWORD` Environment variable must be set to the custom password for all keystores that may be configured in the provider path of any process that needs to access credentials from a keystore-based credential provider. There is only one env variable for the entire path of comma separated providers. It is difficult to know the passwords required for each keystore and it is suggested that the same be used for all keystore-based credential providers to avoid this issue. Setting the environment variable will likely require it to be set from a script or some other clear text storage mechanism. Environment variables for running processes are available from various unix commands.|Yes|Yes|No|
|
||||
|Password File|`hadoop.security.credstore.java-keystore-provider.password-file` configuration property must be set to the location of the "side file" that contains the custom password for all keystores that may be configured in the provider path. Any process that needs to access credentials from a keystore-based credential provider will need to have this configuration property set to the appropriate file location. There is only one password-file for the entire path of comma separated providers. It is difficult to know the passwords required for each keystore and it is therefore suggested that the same be used for all keystore-based credential providers to avoid this issue. Password-files are additional files that need to be managed, store the password in clear text and need file permissions to be set such that only those that need access to them have it. If file permissions are set inappropriately the password to access the keystores is available in clear text.|Yes|Yes|Yes|
|
||||
|
||||
The use of the default password means that no additional communication/synchronization to runtime consumers needs to be done. The default password is known but file permissions are the primary protection of the keystore.
|
||||
|
||||
When file permissions are thwarted, unlike "side files", there are no standard tools that can expose the protected credentials - even with the password known. Keytool requires a password that is six characters or more and doesn't know how to retrieve general secrets from a keystore. It is also limited to PKI keypairs. Editors will not review the secrets stored within the keystore, nor will `cat`, `more` or any other standard tools. This is why the keystore providers are better than "side file" storage of credentials.
|
||||
|
||||
That said, it is trivial for someone to write code to access the credentials stored within a keystore-based credential provider using the API. Again, when using the default password, the password is merely a formality of persisting the keystore. The *only* protection is file permissions and OS level access policy.
|
||||
|
||||
Users may decide to use a password "side file" to store the password for the keystores themselves and this is supported. It is just really important to be aware of the mechanics required for this level of correctness.
|
||||
|
||||
|
|
|
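Illustration (not part of this change): a minimal sketch of how a runtime consumer resolves a protected credential through `Configuration.getPassword()`, which consults the providers on `hadoop.security.credential.provider.path` before falling back to the configuration itself. The class name, provider path and alias below are assumptions for the example only.

    import java.io.IOException;
    import java.util.Arrays;
    import org.apache.hadoop.conf.Configuration;

    public class CredentialLookupExample {          // hypothetical class name
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Point at a keystore-based provider; the path and alias are assumptions.
        conf.set("hadoop.security.credential.provider.path",
            "jceks://file/etc/hadoop/conf/example.jceks");

        // getPassword() consults the configured providers first and falls back
        // to the configuration. The keystore password itself comes from the
        // default password, HADOOP_CREDSTORE_PASSWORD, or the password file.
        char[] secret = conf.getPassword("ssl.server.keystore.password");
        if (secret != null) {
          try {
            System.out.println("resolved " + secret.length + " characters");
          } finally {
            Arrays.fill(secret, '\0');  // clear the in-memory copy when done
          }
        }
      }
    }
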
@ -45,7 +45,8 @@ The following table lists the configuration property names that are deprecated i

| dfs.replication.considerLoad | dfs.namenode.replication.considerLoad |
| dfs.replication.interval | dfs.namenode.replication.interval |
| dfs.replication.min | dfs.namenode.replication.min |
| dfs.replication.pending.timeout.sec | dfs.namenode.replication.pending.timeout-sec |
| dfs.replication.pending.timeout.sec | dfs.namenode.reconstruction.pending.timeout-sec |
| dfs.namenode.replication.pending.timeout-sec | dfs.namenode.reconstruction.pending.timeout-sec |
| dfs.safemode.extension | dfs.namenode.safemode.extension |
| dfs.safemode.threshold.pct | dfs.namenode.safemode.threshold-pct |
| dfs.secondary.http.address | dfs.namenode.secondary.http-address |

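For readers unfamiliar with the deprecation mechanism: the old names continue to resolve because `HdfsConfiguration` registers deprecation deltas for them (see the `HdfsConfiguration.java` hunk later in this change). A minimal sketch, with an illustrative value and a hypothetical class name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class DeprecatedKeyExample {               // hypothetical class name
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration(); // registers the deltas
        // Setting the deprecated key ...
        conf.set("dfs.replication.pending.timeout.sec", "600"); // value is illustrative
        // ... should be visible under the replacement key, and Configuration
        // logs a deprecation warning for the old name.
        System.out.println(
            conf.get("dfs.namenode.reconstruction.pending.timeout-sec"));
      }
    }
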
@ -256,15 +256,15 @@ The following settings allow configuring SSL access to the NameNode web UI (opti

| Parameter | Value | Notes |
|:----------|:------|:------|
| `dfs.http.policy` | `HTTP_ONLY` or `HTTPS_ONLY` or `HTTP_AND_HTTPS` | `HTTPS_ONLY` turns off http access. This option takes precedence over the deprecated configuration dfs.https.enable and hadoop.ssl.enabled. If using SASL to authenticate data transfer protocol instead of running DataNode as root and using privileged ports, then this property must be set to `HTTPS_ONLY` to guarantee authentication of HTTP servers. (See `dfs.data.transfer.protection`.) |
| `dfs.namenode.https-address` | `0.0.0.0:50470` | This parameter is used in non-HA mode and without federation. See [HDFS High Availability](../hadoop-hdfs/HDFSHighAvailabilityWithNFS.html#Deployment) and [HDFS Federation](../hadoop-hdfs/Federation.html#Federation_Configuration) for details. |
| `dfs.namenode.https-address` | `0.0.0.0:9871` | This parameter is used in non-HA mode and without federation. See [HDFS High Availability](../hadoop-hdfs/HDFSHighAvailabilityWithNFS.html#Deployment) and [HDFS Federation](../hadoop-hdfs/Federation.html#Federation_Configuration) for details. |
| `dfs.https.enable` | `true` | This value is deprecated. Use `dfs.http.policy`. |

### Secondary NameNode

| Parameter | Value | Notes |
|:----------|:------|:------|
| `dfs.namenode.secondary.http-address` | `0.0.0.0:50090` | HTTP web UI address for the Secondary NameNode. |
| `dfs.namenode.secondary.https-address` | `0.0.0.0:50091` | HTTPS web UI address for the Secondary NameNode. |
| `dfs.namenode.secondary.http-address` | `0.0.0.0:9868` | HTTP web UI address for the Secondary NameNode. |
| `dfs.namenode.secondary.https-address` | `0.0.0.0:9869` | HTTPS web UI address for the Secondary NameNode. |
| `dfs.secondary.namenode.keytab.file` | `/etc/security/keytab/sn.service.keytab` | Kerberos keytab file for the Secondary NameNode. |
| `dfs.secondary.namenode.kerberos.principal` | `sn/_HOST@REALM.TLD` | Kerberos principal name for the Secondary NameNode. |
| `dfs.secondary.namenode.kerberos.internal.spnego.principal` | `HTTP/_HOST@REALM.TLD` | The server principal used by the Secondary NameNode for web UI SPNEGO authentication. The SPNEGO server principal begins with the prefix `HTTP/` by convention. If the value is `'*'`, the web server will attempt to login with every principal specified in the keytab file `dfs.web.authentication.kerberos.keytab`. For most deployments this can be set to `${dfs.web.authentication.kerberos.principal}`, i.e. use the value of `dfs.web.authentication.kerberos.principal`. |

@ -286,7 +286,7 @@ The following settings allow configuring SSL access to the NameNode web UI (opti

| `dfs.datanode.data.dir.perm` | `700` | |
| `dfs.datanode.address` | `0.0.0.0:1004` | Secure DataNode must use a privileged port in order to assure that the server was started securely. This means that the server must be started via jsvc. Alternatively, this must be set to a non-privileged port if using SASL to authenticate data transfer protocol. (See `dfs.data.transfer.protection`.) |
| `dfs.datanode.http.address` | `0.0.0.0:1006` | Secure DataNode must use a privileged port in order to assure that the server was started securely. This means that the server must be started via jsvc. |
| `dfs.datanode.https.address` | `0.0.0.0:50475` | HTTPS web UI address for the DataNode. |
| `dfs.datanode.https.address` | `0.0.0.0:9865` | HTTPS web UI address for the DataNode. |
| `dfs.datanode.kerberos.principal` | `dn/_HOST@REALM.TLD` | Kerberos principal name for the DataNode. |
| `dfs.datanode.keytab.file` | `/etc/security/keytab/dn.service.keytab` | Kerberos keytab file for the DataNode. |
| `dfs.encrypt.data.transfer` | `false` | Set to `true` when using data encryption. |

@ -144,7 +144,7 @@ The following instructions are to run a MapReduce job locally. If you want to ex

3.  Browse the web interface for the NameNode; by default it is available at:

    * NameNode - `http://localhost:50070/`
    * NameNode - `http://localhost:9870/`

4.  Make the HDFS directories required to execute MapReduce jobs:

@ -63,7 +63,7 @@ You need to run the command against all servers if you want to update the config

      ID  CLASS
      1   org.apache.htrace.core.LocalFileSpanReceiver

      $ hadoop trace -list -host 192.168.56.2:50020
      $ hadoop trace -list -host 192.168.56.2:9867
      ID  CLASS
      1   org.apache.htrace.core.LocalFileSpanReceiver

@ -278,7 +278,7 @@ of `getFileBlockLocations()` on a directory is []

If the filesystem is not location aware, it SHOULD return

    [
      BlockLocation(["localhost:50010"] ,
      BlockLocation(["localhost:9866"] ,
        ["localhost"],
        ["/default/localhost"]
        0, F.getLen())

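As an illustration of the contract (not part of this change), a caller retrieves these locations through `FileSystem.getFileBlockLocations()`; the class name and path below are assumptions for the example only:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class BlockLocationExample {               // hypothetical class name
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        FileStatus status = fs.getFileStatus(new Path("/tmp/example.txt")); // illustrative path

        // Ask for the locations covering the whole file.
        BlockLocation[] locations =
            fs.getFileBlockLocations(status, 0, status.getLen());

        // On a location-unaware filesystem this yields a single BlockLocation
        // for [0, len) with "localhost"-style host, name and topology entries.
        for (BlockLocation loc : locations) {
          System.out.println(loc);  // e.g. offset,length,localhost
        }
      }
    }
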
@ -0,0 +1,176 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.conf;
|
||||
|
||||
import java.util.HashSet;
|
||||
|
||||
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
|
||||
import org.apache.hadoop.fs.AbstractFileSystem;
|
||||
import org.apache.hadoop.fs.CommonConfigurationKeys;
|
||||
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
|
||||
import org.apache.hadoop.fs.ftp.FtpConfigKeys;
|
||||
import org.apache.hadoop.fs.local.LocalConfigKeys;
|
||||
import org.apache.hadoop.ha.SshFenceByTcpPort;
|
||||
import org.apache.hadoop.ha.ZKFailoverController;
|
||||
import org.apache.hadoop.http.HttpServer2;
|
||||
import org.apache.hadoop.io.nativeio.NativeIO;
|
||||
import org.apache.hadoop.security.CompositeGroupsMapping;
|
||||
import org.apache.hadoop.security.HttpCrossOriginFilterInitializer;
|
||||
import org.apache.hadoop.security.LdapGroupsMapping;
|
||||
import org.apache.hadoop.security.http.CrossOriginFilter;
|
||||
import org.apache.hadoop.security.ssl.SSLFactory;
|
||||
|
||||
/**
|
||||
* Unit test class to compare the following Hadoop Configuration classes:
|
||||
* <p></p>
|
||||
* {@link org.apache.hadoop.fs.AbstractFileSystem}
|
||||
* {@link org.apache.hadoop.fs.CommonConfigurationKeys}
|
||||
* {@link org.apache.hadoop.fs.CommonConfigurationKeysPublic}
|
||||
* {@link org.apache.hadoop.fs.ftp.FtpConfigKeys}
|
||||
* {@link org.apache.hadoop.fs.local.LocalConfigKeys}
|
||||
* {@link org.apache.hadoop.ha.SshFenceByTcpPort}
|
||||
* {@link org.apache.hadoop.http.HttpServer2}
|
||||
* {@link org.apache.hadoop.security.LdapGroupsMapping}
|
||||
* {@link org.apache.hadoop.security.http.CrossOriginFilter}
|
||||
* {@link org.apache.hadoop.security.ssl.SSLFactory}
|
||||
* <p></p>
|
||||
* against core-site.xml for missing properties. Currently only
|
||||
* throws an error if the class is missing a property.
|
||||
* <p></p>
|
||||
* Refer to {@link org.apache.hadoop.conf.TestConfigurationFieldsBase}
|
||||
* for how this class works.
|
||||
*/
|
||||
public class TestCommonConfigurationFields extends TestConfigurationFieldsBase {
|
||||
|
||||
@SuppressWarnings("deprecation")
|
||||
@Override
|
||||
public void initializeMemberVariables() {
|
||||
xmlFilename = new String("core-default.xml");
|
||||
configurationClasses = new Class[] {
|
||||
CommonConfigurationKeys.class,
|
||||
CommonConfigurationKeysPublic.class,
|
||||
LocalConfigKeys.class,
|
||||
FtpConfigKeys.class,
|
||||
SshFenceByTcpPort.class,
|
||||
LdapGroupsMapping.class,
|
||||
ZKFailoverController.class,
|
||||
SSLFactory.class,
|
||||
CompositeGroupsMapping.class
|
||||
};
|
||||
|
||||
// Initialize used variables
|
||||
xmlPropsToSkipCompare = new HashSet<String>();
|
||||
xmlPrefixToSkipCompare = new HashSet<String>();
|
||||
configurationPropsToSkipCompare = new HashSet<String>();
|
||||
|
||||
// Set error modes
|
||||
errorIfMissingConfigProps = true;
|
||||
errorIfMissingXmlProps = false;
|
||||
|
||||
// Lots of properties not in the above classes
|
||||
xmlPropsToSkipCompare.add("fs.ftp.password.localhost");
|
||||
xmlPropsToSkipCompare.add("fs.ftp.user.localhost");
|
||||
xmlPropsToSkipCompare.add("fs.s3.block.size");
|
||||
xmlPropsToSkipCompare.add("hadoop.tmp.dir");
|
||||
xmlPropsToSkipCompare.add("nfs3.mountd.port");
|
||||
xmlPropsToSkipCompare.add("nfs3.server.port");
|
||||
xmlPropsToSkipCompare.add("test.fs.s3.name");
|
||||
xmlPropsToSkipCompare.add("test.fs.s3n.name");
|
||||
|
||||
// S3/S3A properties are in a different subtree.
|
||||
// - org.apache.hadoop.fs.s3.S3FileSystemConfigKeys
|
||||
// - org.apache.hadoop.fs.s3native.S3NativeFileSystemConfigKeys
|
||||
xmlPrefixToSkipCompare.add("fs.s3.");
|
||||
xmlPrefixToSkipCompare.add("fs.s3a.");
|
||||
xmlPrefixToSkipCompare.add("fs.s3n.");
|
||||
xmlPrefixToSkipCompare.add("s3.");
|
||||
xmlPrefixToSkipCompare.add("s3native.");
|
||||
|
||||
// Deprecated properties. These should eventually be removed from the
|
||||
// class.
|
||||
configurationPropsToSkipCompare
|
||||
.add(CommonConfigurationKeysPublic.IO_SORT_MB_KEY);
|
||||
configurationPropsToSkipCompare
|
||||
.add(CommonConfigurationKeysPublic.IO_SORT_FACTOR_KEY);
|
||||
|
||||
// Irrelevant property
|
||||
configurationPropsToSkipCompare.add("dr.who");
|
||||
|
||||
// XML deprecated properties.
|
||||
xmlPropsToSkipCompare.add("io.seqfile.lazydecompress");
|
||||
xmlPropsToSkipCompare.add("io.seqfile.sorter.recordlimit");
|
||||
// - org.apache.hadoop.hdfs.client.HdfsClientConfigKeys
|
||||
xmlPropsToSkipCompare
|
||||
.add("io.bytes.per.checksum");
|
||||
|
||||
// Properties in other classes that aren't easily determined
|
||||
// (not following naming convention, in a different project, not public,
|
||||
// etc.)
|
||||
// - org.apache.hadoop.http.HttpServer2.FILTER_INITIALIZER_PROPERTY
|
||||
xmlPropsToSkipCompare.add("hadoop.http.filter.initializers");
|
||||
// - org.apache.hadoop.security.HttpCrossOriginFilterInitializer
|
||||
xmlPrefixToSkipCompare.add(HttpCrossOriginFilterInitializer.PREFIX);
|
||||
xmlPrefixToSkipCompare.add("fs.AbstractFileSystem.");
|
||||
// - org.apache.hadoop.ha.SshFenceByTcpPort
|
||||
xmlPrefixToSkipCompare.add("dfs.ha.fencing.ssh.");
|
||||
// - org.apache.hadoop.classification.RegistryConstants
|
||||
xmlPrefixToSkipCompare.add("hadoop.registry.");
|
||||
// - org.apache.hadoop.security.AuthenticationFilterInitializer
|
||||
xmlPrefixToSkipCompare.add("hadoop.http.authentication.");
|
||||
// - org.apache.hadoop.crypto.key.kms.KMSClientProvider;
|
||||
xmlPropsToSkipCompare.add(KMSClientProvider.AUTH_RETRY);
|
||||
// - org.apache.hadoop.io.nativeio.NativeIO
|
||||
xmlPropsToSkipCompare.add("hadoop.workaround.non.threadsafe.getpwuid");
|
||||
// - org.apache.hadoop.hdfs.DFSConfigKeys
|
||||
xmlPropsToSkipCompare.add("dfs.ha.fencing.methods");
|
||||
// - org.apache.hadoop.fs.CommonConfigurationKeysPublic
|
||||
xmlPrefixToSkipCompare
|
||||
.add(CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX);
|
||||
// - org.apache.hadoop.hdfs.server.datanode.DataNode
|
||||
xmlPropsToSkipCompare.add("hadoop.common.configuration.version");
|
||||
// - org.apache.hadoop.fs.FileSystem
|
||||
xmlPropsToSkipCompare.add("fs.har.impl.disable.cache");
|
||||
// - org.apache.hadoop.fs.FileSystem#getFileSystemClass()
|
||||
xmlPropsToSkipCompare.add("fs.swift.impl");
|
||||
// - package org.apache.hadoop.tracing.TraceUtils ?
|
||||
xmlPropsToSkipCompare.add("hadoop.htrace.span.receiver.classes");
|
||||
// Private keys
|
||||
// - org.apache.hadoop.ha.ZKFailoverController;
|
||||
xmlPropsToSkipCompare.add("ha.zookeeper.parent-znode");
|
||||
xmlPropsToSkipCompare.add("ha.zookeeper.session-timeout.ms");
|
||||
// - Where is this used?
|
||||
xmlPrefixToSkipCompare
|
||||
.add(CommonConfigurationKeys.FS_CLIENT_HTRACE_PREFIX);
|
||||
// - org.apache.hadoop.security.UserGroupInformation
|
||||
xmlPropsToSkipCompare.add("hadoop.kerberos.kinit.command");
|
||||
// - org.apache.hadoop.net.NetUtils
|
||||
xmlPropsToSkipCompare
|
||||
.add("hadoop.rpc.socket.factory.class.ClientProtocol");
|
||||
// - Where is this used?
|
||||
xmlPropsToSkipCompare.add("hadoop.ssl.enabled");
|
||||
|
||||
// Keys with no corresponding variable
|
||||
// - org.apache.hadoop.io.compress.bzip2.Bzip2Factory
|
||||
xmlPropsToSkipCompare.add("io.compression.codec.bzip2.library");
|
||||
// - org.apache.hadoop.io.SequenceFile
|
||||
xmlPropsToSkipCompare.add("io.seqfile.local.dir");
|
||||
|
||||
|
||||
}
|
||||
}
|
|
@ -18,8 +18,11 @@
|
|||
|
||||
package org.apache.hadoop.fs;
|
||||
|
||||
import java.util.Arrays;
|
||||
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import static org.apache.hadoop.fs.FileSystemTestHelper.*;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
|
@ -281,4 +284,20 @@ public class TestChecksumFileSystem {
|
|||
assertTrue(localFs.rename(srcPath, dstPath));
|
||||
assertTrue(localFs.exists(localFs.getChecksumFile(realDstPath)));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSetPermissionCrc() throws Exception {
|
||||
FileSystem rawFs = localFs.getRawFileSystem();
|
||||
Path p = new Path(TEST_ROOT_DIR, "testCrcPermissions");
|
||||
localFs.createNewFile(p);
|
||||
Path crc = localFs.getChecksumFile(p);
|
||||
assert(rawFs.exists(crc));
|
||||
|
||||
for (short mode : Arrays.asList((short)0666, (short)0660, (short)0600)) {
|
||||
FsPermission perm = new FsPermission(mode);
|
||||
localFs.setPermission(p, perm);
|
||||
assertEquals(perm, localFs.getFileStatus(p).getPermission());
|
||||
assertEquals(perm, rawFs.getFileStatus(crc).getPermission());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -47,6 +47,6 @@ public class TestDelegateToFileSystem {

  @Test
  public void testDefaultURIwithPort() throws Exception {
    testDefaultUriInternal("hdfs://dummyhost:8020");
    testDefaultUriInternal("hdfs://dummyhost:9820");
  }
}

@ -38,7 +38,7 @@ public class TestSshFenceByTcpPort {

  private static String TEST_FENCING_HOST = System.getProperty(
      "test.TestSshFenceByTcpPort.host", "localhost");
  private static final String TEST_FENCING_PORT = System.getProperty(
      "test.TestSshFenceByTcpPort.port", "8020");
      "test.TestSshFenceByTcpPort.port", "9820");
  private static final String TEST_KEYFILE = System.getProperty(
      "test.TestSshFenceByTcpPort.key");

@ -0,0 +1,85 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
* <p>
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
* <p>
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.io.erasurecode;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.CommonConfigurationKeys;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawDecoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawDecoderLegacy;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawEncoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RSRawEncoderLegacy;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
|
||||
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.Test;
|
||||
|
||||
/**
|
||||
* Test the codec to raw coder mapping.
|
||||
*/
|
||||
public class TestCodecRawCoderMapping {
|
||||
|
||||
private static Configuration conf;
|
||||
private static final int numDataUnit = 6;
|
||||
private static final int numParityUnit = 3;
|
||||
|
||||
@Before
|
||||
public void setup() {
|
||||
conf = new Configuration();
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testRSDefaultRawCoder() {
|
||||
// should return default raw coder of rs-default codec
|
||||
RawErasureEncoder encoder = CodecUtil.createRSRawEncoder(
|
||||
conf, numDataUnit, numParityUnit);
|
||||
Assert.assertTrue(encoder instanceof RSRawEncoder);
|
||||
RawErasureDecoder decoder = CodecUtil.createRSRawDecoder(
|
||||
conf, numDataUnit, numParityUnit);
|
||||
Assert.assertTrue(decoder instanceof RSRawDecoder);
|
||||
|
||||
// should return default raw coder of rs-legacy codec
|
||||
encoder = CodecUtil.createRSRawEncoder(conf, numDataUnit, numParityUnit,
|
||||
ErasureCodeConstants.RS_LEGACY_CODEC_NAME);
|
||||
Assert.assertTrue(encoder instanceof RSRawEncoderLegacy);
|
||||
decoder = CodecUtil.createRSRawDecoder(conf, numDataUnit, numParityUnit,
|
||||
ErasureCodeConstants.RS_LEGACY_CODEC_NAME);
|
||||
Assert.assertTrue(decoder instanceof RSRawDecoderLegacy);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testDedicatedRawCoderKey() {
|
||||
String dummyFactName = "DummyNoneExistingFactory";
|
||||
// set the dummy factory to rs-legacy and create a raw coder
|
||||
// with rs-default, which is OK as the raw coder key is not used
|
||||
conf.set(CommonConfigurationKeys.
|
||||
IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODER_KEY, dummyFactName);
|
||||
RawErasureEncoder encoder = CodecUtil.createRSRawEncoder(conf, numDataUnit,
|
||||
numParityUnit, ErasureCodeConstants.RS_DEFAULT_CODEC_NAME);
|
||||
Assert.assertTrue(encoder instanceof RSRawEncoder);
|
||||
// now create the raw coder with rs-legacy, which should throw exception
|
||||
try {
|
||||
CodecUtil.createRSRawEncoder(conf, numDataUnit, numParityUnit,
|
||||
ErasureCodeConstants.RS_LEGACY_CODEC_NAME);
|
||||
Assert.fail();
|
||||
} catch (Exception e) {
|
||||
GenericTestUtils.assertExceptionContains("Failed to create raw coder", e);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -112,7 +112,7 @@ public abstract class TestCoderBase {
|
|||
protected void prepare(Configuration conf, int numDataUnits,
|
||||
int numParityUnits, int[] erasedDataIndexes,
|
||||
int[] erasedParityIndexes, boolean usingFixedData) {
|
||||
this.conf = conf;
|
||||
this.conf = conf != null ? conf : new Configuration();
|
||||
this.numDataUnits = numDataUnits;
|
||||
this.numParityUnits = numParityUnits;
|
||||
this.erasedDataIndexes = erasedDataIndexes != null ?
|
||||
|
|
|
@ -50,7 +50,7 @@ public class TestHHXORErasureCoder extends TestHHErasureCoderBase {
|
|||
* This tests if the configuration items work or not.
|
||||
*/
|
||||
Configuration conf = new Configuration();
|
||||
conf.set(CommonConfigurationKeys.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
|
||||
conf.set(CommonConfigurationKeys.IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
|
||||
RSRawErasureCoderFactory.class.getCanonicalName());
|
||||
prepare(conf, 10, 4, new int[]{0}, new int[0]);
|
||||
|
||||
|
|
|
@ -57,7 +57,7 @@ public class TestRSErasureCoder extends TestErasureCoderBase {
|
|||
* This tests if the configuration items work or not.
|
||||
*/
|
||||
Configuration conf = new Configuration();
|
||||
conf.set(CommonConfigurationKeys.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
|
||||
conf.set(CommonConfigurationKeys.IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
|
||||
RSRawErasureCoderFactory.class.getCanonicalName());
|
||||
prepare(conf, 10, 4, new int[]{0}, new int[0]);
|
||||
|
||||
|
|
|
@ -0,0 +1,41 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.io.file.tfile;
|
||||
|
||||
import org.junit.Test;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
|
||||
public class TestCompression {
|
||||
|
||||
/**
|
||||
* Regression test for HADOOP-11418.
|
||||
* Verify we can set a LZO codec different from default LZO codec.
|
||||
*/
|
||||
@Test
|
||||
public void testConfigureLZOCodec() throws IOException {
|
||||
// Dummy codec
|
||||
String defaultCodec = "org.apache.hadoop.io.compress.DefaultCodec";
|
||||
Compression.Algorithm.conf.set(
|
||||
Compression.Algorithm.CONF_LZO_CLASS, defaultCodec);
|
||||
assertEquals(defaultCodec,
|
||||
Compression.Algorithm.LZO.getCodec().getClass().getName());
|
||||
}
|
||||
}
|
|
@ -84,7 +84,7 @@ public class TestAsyncIPC {
|
|||
try {
|
||||
final long param = TestIPC.RANDOM.nextLong();
|
||||
TestIPC.call(client, param, server, conf);
|
||||
Future<LongWritable> returnFuture = Client.getReturnValue();
|
||||
Future<LongWritable> returnFuture = Client.getReturnRpcResponse();
|
||||
returnFutures.put(i, returnFuture);
|
||||
expectedValues.put(i, param);
|
||||
} catch (Exception e) {
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
|
||||
package org.apache.hadoop.ipc;
|
||||
|
||||
import com.google.common.base.Supplier;
|
||||
import com.google.protobuf.ServiceException;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
|
@ -1025,7 +1026,6 @@ public class TestRPC extends TestRpcBase {
|
|||
*/
|
||||
@Test (timeout=30000)
|
||||
public void testClientBackOffByResponseTime() throws Exception {
|
||||
Server server;
|
||||
final TestRpcService proxy;
|
||||
boolean succeeded = false;
|
||||
final int numClients = 1;
|
||||
|
@ -1038,28 +1038,9 @@ public class TestRPC extends TestRpcBase {
|
|||
final ExecutorService executorService =
|
||||
Executors.newFixedThreadPool(numClients);
|
||||
conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
|
||||
final String ns = CommonConfigurationKeys.IPC_NAMESPACE + ".0.";
|
||||
conf.setBoolean(ns + CommonConfigurationKeys.IPC_BACKOFF_ENABLE, true);
|
||||
conf.setStrings(ns + CommonConfigurationKeys.IPC_CALLQUEUE_IMPL_KEY,
|
||||
"org.apache.hadoop.ipc.FairCallQueue");
|
||||
conf.setStrings(ns + CommonConfigurationKeys.IPC_SCHEDULER_IMPL_KEY,
|
||||
"org.apache.hadoop.ipc.DecayRpcScheduler");
|
||||
conf.setInt(ns + CommonConfigurationKeys.IPC_SCHEDULER_PRIORITY_LEVELS_KEY,
|
||||
2);
|
||||
conf.setBoolean(ns +
|
||||
DecayRpcScheduler.IPC_DECAYSCHEDULER_BACKOFF_RESPONSETIME_ENABLE_KEY,
|
||||
true);
|
||||
// set a small thresholds 2s and 4s for level 0 and level 1 for testing
|
||||
conf.set(ns +
|
||||
DecayRpcScheduler.IPC_DECAYSCHEDULER_BACKOFF_RESPONSETIME_THRESHOLDS_KEY
|
||||
, "2s, 4s");
|
||||
|
||||
// Set max queue size to 3 so that 2 calls from the test won't trigger
|
||||
// back off because the queue is full.
|
||||
RPC.Builder builder = newServerBuilder(conf)
|
||||
.setQueueSizePerHandler(queueSizePerHandler).setNumHandlers(1)
|
||||
.setVerbose(true);
|
||||
server = setupTestServer(builder);
|
||||
final String ns = CommonConfigurationKeys.IPC_NAMESPACE + ".0";
|
||||
Server server = setupDecayRpcSchedulerandTestServer(ns + ".");
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
CallQueueManager<Call> spy = spy((CallQueueManager<Call>) Whitebox
|
||||
|
@ -1068,6 +1049,13 @@ public class TestRPC extends TestRpcBase {
|
|||
|
||||
Exception lastException = null;
|
||||
proxy = getClient(addr, conf);
|
||||
|
||||
MetricsRecordBuilder rb1 =
|
||||
getMetrics("DecayRpcSchedulerMetrics2." + ns);
|
||||
final long beginCallVolume = MetricsAsserts.getLongCounter("CallVolume", rb1);
|
||||
final int beginUniqueCaller = MetricsAsserts.getIntCounter("UniqueCallers",
|
||||
rb1);
|
||||
|
||||
try {
|
||||
// start a sleep RPC call that sleeps 3s.
|
||||
for (int i = 0; i < numClients; i++) {
|
||||
|
@ -1095,6 +1083,36 @@ public class TestRPC extends TestRpcBase {
|
|||
} else {
|
||||
lastException = unwrapExeption;
|
||||
}
|
||||
|
||||
// Lets Metric system update latest metrics
|
||||
GenericTestUtils.waitFor(new Supplier<Boolean>() {
|
||||
@Override
|
||||
public Boolean get() {
|
||||
MetricsRecordBuilder rb2 =
|
||||
getMetrics("DecayRpcSchedulerMetrics2." + ns);
|
||||
long callVolume1 = MetricsAsserts.getLongCounter("CallVolume", rb2);
|
||||
int uniqueCaller1 = MetricsAsserts.getIntCounter("UniqueCallers",
|
||||
rb2);
|
||||
long callVolumePriority0 = MetricsAsserts.getLongGauge(
|
||||
"Priority.0.CallVolume", rb2);
|
||||
long callVolumePriority1 = MetricsAsserts.getLongGauge(
|
||||
"Priority.1.CallVolume", rb2);
|
||||
double avgRespTimePriority0 = MetricsAsserts.getDoubleGauge(
|
||||
"Priority.0.AvgResponseTime", rb2);
|
||||
double avgRespTimePriority1 = MetricsAsserts.getDoubleGauge(
|
||||
"Priority.1.AvgResponseTime", rb2);
|
||||
|
||||
LOG.info("CallVolume1: " + callVolume1);
|
||||
LOG.info("UniqueCaller: " + uniqueCaller1);
|
||||
LOG.info("Priority.0.CallVolume: " + callVolumePriority0);
|
||||
LOG.info("Priority.1.CallVolume: " + callVolumePriority1);
|
||||
LOG.info("Priority.0.AvgResponseTime: " + avgRespTimePriority0);
|
||||
LOG.info("Priority.1.AvgResponseTime: " + avgRespTimePriority1);
|
||||
|
||||
return callVolume1 > beginCallVolume
|
||||
&& uniqueCaller1 > beginUniqueCaller;
|
||||
}
|
||||
}, 30, 60000);
|
||||
}
|
||||
} finally {
|
||||
executorService.shutdown();
|
||||
|
@ -1106,6 +1124,34 @@ public class TestRPC extends TestRpcBase {
|
|||
assertTrue("RetriableException not received", succeeded);
|
||||
}
|
||||
|
||||
private Server setupDecayRpcSchedulerandTestServer(String ns)
|
||||
throws Exception {
|
||||
final int queueSizePerHandler = 3;
|
||||
|
||||
conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
|
||||
conf.setBoolean(ns + CommonConfigurationKeys.IPC_BACKOFF_ENABLE, true);
|
||||
conf.setStrings(ns + CommonConfigurationKeys.IPC_CALLQUEUE_IMPL_KEY,
|
||||
"org.apache.hadoop.ipc.FairCallQueue");
|
||||
conf.setStrings(ns + CommonConfigurationKeys.IPC_SCHEDULER_IMPL_KEY,
|
||||
"org.apache.hadoop.ipc.DecayRpcScheduler");
|
||||
conf.setInt(ns + CommonConfigurationKeys.IPC_SCHEDULER_PRIORITY_LEVELS_KEY,
|
||||
2);
|
||||
conf.setBoolean(ns +
|
||||
DecayRpcScheduler.IPC_DECAYSCHEDULER_BACKOFF_RESPONSETIME_ENABLE_KEY,
|
||||
true);
|
||||
// set a small thresholds 2s and 4s for level 0 and level 1 for testing
|
||||
conf.set(ns +
|
||||
DecayRpcScheduler.IPC_DECAYSCHEDULER_BACKOFF_RESPONSETIME_THRESHOLDS_KEY
|
||||
, "2s, 4s");
|
||||
|
||||
// Set max queue size to 3 so that 2 calls from the test won't trigger
|
||||
// back off because the queue is full.
|
||||
RPC.Builder builder = newServerBuilder(conf)
|
||||
.setQueueSizePerHandler(queueSizePerHandler).setNumHandlers(1)
|
||||
.setVerbose(true);
|
||||
return setupTestServer(builder);
|
||||
}
|
||||
|
||||
/**
|
||||
* Test RPC timeout.
|
||||
*/
|
||||
|
|
|
@ -42,6 +42,8 @@ public class TestRunJar extends TestCase {
|
|||
|
||||
private static final String TEST_JAR_NAME="test-runjar.jar";
|
||||
private static final String TEST_JAR_2_NAME = "test-runjar2.jar";
|
||||
private static final long MOCKED_NOW = 1_460_389_972_000L;
|
||||
private static final long MOCKED_NOW_PLUS_TWO_SEC = MOCKED_NOW + 2_000;
|
||||
|
||||
@Override
|
||||
@Before
|
||||
|
@ -68,9 +70,13 @@ public class TestRunJar extends TestCase {
|
|||
File jarFile = new File(TEST_ROOT_DIR, TEST_JAR_NAME);
|
||||
JarOutputStream jstream =
|
||||
new JarOutputStream(new FileOutputStream(jarFile));
|
||||
jstream.putNextEntry(new ZipEntry("foobar.txt"));
|
||||
ZipEntry zipEntry1 = new ZipEntry("foobar.txt");
|
||||
zipEntry1.setTime(MOCKED_NOW);
|
||||
jstream.putNextEntry(zipEntry1);
|
||||
jstream.closeEntry();
|
||||
jstream.putNextEntry(new ZipEntry("foobaz.txt"));
|
||||
ZipEntry zipEntry2 = new ZipEntry("foobaz.txt");
|
||||
zipEntry2.setTime(MOCKED_NOW_PLUS_TWO_SEC);
|
||||
jstream.putNextEntry(zipEntry2);
|
||||
jstream.closeEntry();
|
||||
jstream.close();
|
||||
}
|
||||
|
@ -113,6 +119,19 @@ public class TestRunJar extends TestCase {
|
|||
|
||||
}
|
||||
|
||||
public void testUnJarDoesNotLooseLastModify() throws Exception {
|
||||
File unjarDir = new File(TEST_ROOT_DIR, "unjar-lastmod");
|
||||
assertFalse("unjar dir shouldn't exist at test start",
|
||||
new File(unjarDir, "foobar.txt").exists());
|
||||
|
||||
// Unjar everything
|
||||
RunJar.unJar(new File(TEST_ROOT_DIR, TEST_JAR_NAME),
|
||||
unjarDir);
|
||||
|
||||
assertEquals("Last modify time was lost during unJar", MOCKED_NOW, new File(unjarDir, "foobar.txt").lastModified());
|
||||
assertEquals("Last modify time was lost during unJar", MOCKED_NOW_PLUS_TWO_SEC, new File(unjarDir, "foobaz.txt").lastModified());
|
||||
}
|
||||
|
||||
/**
|
||||
* Tests the client classloader to verify the main class and its dependent
|
||||
* class are loaded correctly by the application classloader, and others are
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
package org.apache.hadoop.util;
|
||||
|
||||
import org.apache.commons.io.FileUtils;
|
||||
import org.apache.hadoop.security.alias.AbstractJavaKeyStoreProvider;
|
||||
import org.junit.Assert;
|
||||
|
||||
import java.io.BufferedReader;
|
||||
|
@ -29,6 +30,8 @@ import java.io.PrintWriter;
|
|||
import java.lang.management.ManagementFactory;
|
||||
import java.lang.management.ThreadInfo;
|
||||
import java.lang.management.ThreadMXBean;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import org.apache.hadoop.fs.FileUtil;
|
||||
import org.apache.hadoop.test.GenericTestUtils;
|
||||
|
@ -145,6 +148,40 @@ public class TestShell extends Assert {
|
|||
shellFile.delete();
|
||||
assertTrue("Script did not timeout" , shexc.isTimedOut());
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testEnvVarsWithInheritance() throws Exception {
|
||||
Assume.assumeFalse(WINDOWS);
|
||||
testEnvHelper(true);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testEnvVarsWithoutInheritance() throws Exception {
|
||||
Assume.assumeFalse(WINDOWS);
|
||||
testEnvHelper(false);
|
||||
}
|
||||
|
||||
private void testEnvHelper(boolean inheritParentEnv) throws Exception {
|
||||
Map<String, String> customEnv = new HashMap<>();
|
||||
customEnv.put("AAA" + System.currentTimeMillis(), "AAA");
|
||||
customEnv.put("BBB" + System.currentTimeMillis(), "BBB");
|
||||
customEnv.put("CCC" + System.currentTimeMillis(), "CCC");
|
||||
Shell.ShellCommandExecutor command = new ShellCommandExecutor(
|
||||
new String[]{"env"}, null, customEnv, 0L, inheritParentEnv);
|
||||
command.execute();
|
||||
String[] varsArr = command.getOutput().split("\n");
|
||||
Map<String, String> vars = new HashMap<>();
|
||||
for (String var : varsArr) {
|
||||
int eqIndex = var.indexOf('=');
|
||||
vars.put(var.substring(0, eqIndex), var.substring(eqIndex + 1));
|
||||
}
|
||||
Map<String, String> expectedEnv = new HashMap<>();
|
||||
expectedEnv.putAll(customEnv);
|
||||
if (inheritParentEnv) {
|
||||
expectedEnv.putAll(System.getenv());
|
||||
}
|
||||
assertEquals(expectedEnv, vars);
|
||||
}
|
||||
|
||||
private static int countTimerThreads() {
|
||||
ThreadMXBean threadBean = ManagementFactory.getThreadMXBean();
|
||||
|
|
|
@ -29,6 +29,14 @@ function hadoop_usage
|
|||
hadoop_generate_usage "${MYNAME}" false
|
||||
}
|
||||
|
||||
function hadoop_escape() {
|
||||
# Escape special chars for the later sed which saves the text as xml attribute
|
||||
local ret
|
||||
ret=$(sed 's/[\/&]/\\&/g' <<< "$1" | sed 's/&/\&/g' | sed 's/"/\\\"/g' \
|
||||
| sed "s/'/\\\\\'/g" | sed 's/</\\\</g' | sed 's/>/\\\>/g')
|
||||
echo "$ret"
|
||||
}
|
||||
|
||||
# let's locate libexec...
|
||||
if [[ -n "${HADOOP_HOME}" ]]; then
|
||||
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
|
||||
|
@ -96,8 +104,10 @@ fi
|
|||
if [[ -f "${HADOOP_CATALINA_HOME}/conf/ssl-server.xml.conf" ]]; then
|
||||
if [[ -n "${KMS_SSL_KEYSTORE_PASS+x}" ]] || [[ -n "${KMS_SSL_TRUSTSTORE_PASS}" ]]; then
|
||||
export KMS_SSL_KEYSTORE_PASS=${KMS_SSL_KEYSTORE_PASS:-password}
|
||||
sed -e 's/_kms_ssl_keystore_pass_/'${KMS_SSL_KEYSTORE_PASS}'/g' \
|
||||
-e 's/_kms_ssl_truststore_pass_/'${KMS_SSL_TRUSTSTORE_PASS}'/g' \
|
||||
KMS_SSL_KEYSTORE_PASS_ESCAPED=$(hadoop_escape "$KMS_SSL_KEYSTORE_PASS")
|
||||
KMS_SSL_TRUSTSTORE_PASS_ESCAPED=$(hadoop_escape "$KMS_SSL_TRUSTSTORE_PASS")
|
||||
sed -e 's/"_kms_ssl_keystore_pass_"/'"\"${KMS_SSL_KEYSTORE_PASS_ESCAPED}\""'/g' \
|
||||
-e 's/"_kms_ssl_truststore_pass_"/'"\"${KMS_SSL_TRUSTSTORE_PASS_ESCAPED}\""'/g' \
|
||||
"${HADOOP_CATALINA_HOME}/conf/ssl-server.xml.conf" \
|
||||
> "${HADOOP_CATALINA_HOME}/conf/ssl-server.xml"
|
||||
chmod 700 "${HADOOP_CATALINA_HOME}/conf/ssl-server.xml" >/dev/null 2>&1
|
||||
|
|
|
@ -30,6 +30,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
|||
<name>Apache Hadoop HDFS Client</name>
|
||||
<packaging>jar</packaging>
|
||||
|
||||
<properties>
|
||||
<hadoop.component>hdfs</hadoop.component>
|
||||
</properties>
|
||||
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>com.squareup.okhttp</groupId>
|
||||
|
|
|
@ -473,6 +473,11 @@ public class Hdfs extends AbstractFileSystem {
|
|||
dfs.setStoragePolicy(getUriPath(path), policyName);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void unsetStoragePolicy(final Path src) throws IOException {
|
||||
dfs.unsetStoragePolicy(getUriPath(src));
|
||||
}
|
||||
|
||||
@Override
|
||||
public BlockStoragePolicySpi getStoragePolicy(Path src) throws IOException {
|
||||
return dfs.getStoragePolicy(getUriPath(src));
|
||||
|
|
|
@ -0,0 +1,59 @@

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.CreateFlag;

/**
 * AddBlockFlag provides hints for new block allocation and placement.
 * Users can use this flag to control <em>per DFSOutputStream</em>
 * {@see ClientProtocol#addBlock()} behavior.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
public enum AddBlockFlag {

  /**
   * Advise that a block replica NOT be written to the local DataNode where
   * 'local' means the same host as the client is being run on.
   *
   * @see CreateFlag#NO_LOCAL_WRITE
   */
  NO_LOCAL_WRITE((short) 0x01);

  private final short mode;

  AddBlockFlag(short mode) {
    this.mode = mode;
  }

  public static AddBlockFlag valueOf(short mode) {
    for (AddBlockFlag flag : AddBlockFlag.values()) {
      if (flag.getMode() == mode) {
        return flag;
      }
    }
    return null;
  }

  public short getMode() {
    return mode;
  }
}

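Usage note (not part of this change): the flag is derived from `CreateFlag.NO_LOCAL_WRITE` when the output stream is created, so client code expresses the hint roughly as in this sketch; the class name, path, buffer size, replication and block size are illustrative assumptions only.

    import java.io.IOException;
    import java.util.EnumSet;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class NoLocalWriteExample {                // hypothetical class name
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // CREATE plus NO_LOCAL_WRITE asks that no replica be placed on the
        // client's own DataNode; per this patch, DFSOutputStream translates
        // the CreateFlag into AddBlockFlag.NO_LOCAL_WRITE for addBlock().
        EnumSet<CreateFlag> flags =
            EnumSet.of(CreateFlag.CREATE, CreateFlag.NO_LOCAL_WRITE);

        try (FSDataOutputStream out = fs.create(new Path("/tmp/no-local-write.dat"),
            FsPermission.getFileDefault(), flags, 4096, (short) 3,
            128L * 1024 * 1024, null)) {
          out.writeBytes("hello");                    // illustrative payload
        }
      }
    }
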
@ -0,0 +1,110 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.hdfs;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.Future;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceStability.Unstable;
|
||||
import org.apache.hadoop.fs.Options;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
|
||||
import org.apache.hadoop.ipc.Client;
|
||||
|
||||
import com.google.common.util.concurrent.AbstractFuture;
|
||||
|
||||
/****************************************************************
|
||||
* Implementation of the asynchronous distributed file system.
|
||||
* This instance of this class is the way end-user code interacts
|
||||
* with a Hadoop DistributedFileSystem in an asynchronous manner.
|
||||
*
|
||||
*****************************************************************/
|
||||
@Unstable
|
||||
public class AsyncDistributedFileSystem {
|
||||
|
||||
private final DistributedFileSystem dfs;
|
||||
|
||||
AsyncDistributedFileSystem(final DistributedFileSystem dfs) {
|
||||
this.dfs = dfs;
|
||||
}
|
||||
|
||||
static <T> Future<T> getReturnValue() {
|
||||
final Callable<T> returnValueCallback = ClientNamenodeProtocolTranslatorPB
|
||||
.getReturnValueCallback();
|
||||
Future<T> returnFuture = new AbstractFuture<T>() {
|
||||
public T get() throws InterruptedException, ExecutionException {
|
||||
try {
|
||||
set(returnValueCallback.call());
|
||||
} catch (Exception e) {
|
||||
setException(e);
|
||||
}
|
||||
return super.get();
|
||||
}
|
||||
};
|
||||
return returnFuture;
|
||||
}
|
||||
|
||||
/**
|
||||
* Renames Path src to Path dst
|
||||
* <ul>
|
||||
* <li>Fails if src is a file and dst is a directory.
|
||||
* <li>Fails if src is a directory and dst is a file.
|
||||
* <li>Fails if the parent of dst does not exist or is a file.
|
||||
* </ul>
|
||||
* <p>
|
||||
* If OVERWRITE option is not passed as an argument, rename fails if the dst
|
||||
* already exists.
|
||||
* <p>
|
||||
* If OVERWRITE option is passed as an argument, rename overwrites the dst if
|
||||
* it is a file or an empty directory. Rename fails if dst is a non-empty
|
||||
* directory.
|
||||
* <p>
|
||||
* Note that atomicity of rename is dependent on the file system
|
||||
* implementation. Please refer to the file system documentation for details.
|
||||
* This default implementation is non atomic.
|
||||
*
|
||||
* @param src
|
||||
* path to be renamed
|
||||
* @param dst
|
||||
* new path after rename
|
||||
* @throws IOException
|
||||
* on failure
|
||||
* @return an instance of Future, #get of which is invoked to wait for
|
||||
* asynchronous call being finished.
|
||||
*/
|
||||
public Future<Void> rename(Path src, Path dst,
|
||||
final Options.Rename... options) throws IOException {
|
||||
dfs.getFsStatistics().incrementWriteOps(1);
|
||||
|
||||
final Path absSrc = dfs.fixRelativePart(src);
|
||||
final Path absDst = dfs.fixRelativePart(dst);
|
||||
|
||||
final boolean isAsync = Client.isAsynchronousMode();
|
||||
Client.setAsynchronousMode(true);
|
||||
try {
|
||||
dfs.getClient().rename(dfs.getPathName(absSrc), dfs.getPathName(absDst),
|
||||
options);
|
||||
return getReturnValue();
|
||||
} finally {
|
||||
Client.setAsynchronousMode(isAsync);
|
||||
}
|
||||
}
|
||||
}
|
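Usage note (not part of this change): a minimal sketch of how a caller might drive the asynchronous rename added here, assuming `fs.defaultFS` points at an HDFS cluster; the class name and paths are illustrative only.

    import java.util.concurrent.Future;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Options;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.AsyncDistributedFileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class AsyncRenameExample {                 // hypothetical class name
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(conf); // assumes HDFS defaultFS
        AsyncDistributedFileSystem adfs = dfs.getAsyncDistributedFileSystem();

        // The rename call returns immediately; the Future resolves when the
        // RPC response arrives.
        Future<Void> renamed = adfs.rename(
            new Path("/tmp/src"), new Path("/tmp/dst"), // illustrative paths
            Options.Rename.OVERWRITE);

        // ... other work can proceed here ...

        renamed.get();  // block only when the result is actually needed
      }
    }
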
|
@ -56,6 +56,7 @@ import org.apache.hadoop.fs.HasEnhancedByteBufferAccess;
|
|||
import org.apache.hadoop.fs.ReadOption;
|
||||
import org.apache.hadoop.fs.StorageType;
|
||||
import org.apache.hadoop.hdfs.DFSUtilClient.CorruptedBlocks;
|
||||
import org.apache.hadoop.hdfs.client.impl.BlockReaderFactory;
|
||||
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
|
||||
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||
|
|
|
@ -117,6 +117,7 @@ public class DFSOutputStream extends FSOutputSummer
|
|||
private long initialFileSize = 0; // at time of file open
|
||||
private final short blockReplication; // replication factor of file
|
||||
protected boolean shouldSyncBlock = false; // force blocks to disk upon close
|
||||
private final EnumSet<AddBlockFlag> addBlockFlags;
|
||||
protected final AtomicReference<CachingStrategy> cachingStrategy;
|
||||
private FileEncryptionInfo fileEncryptionInfo;
|
||||
|
||||
|
@ -179,6 +180,7 @@ public class DFSOutputStream extends FSOutputSummer
|
|||
}
|
||||
|
||||
private DFSOutputStream(DFSClient dfsClient, String src,
|
||||
EnumSet<CreateFlag> flag,
|
||||
Progressable progress, HdfsFileStatus stat, DataChecksum checksum) {
|
||||
super(getChecksum4Compute(checksum, stat));
|
||||
this.dfsClient = dfsClient;
|
||||
|
@ -189,6 +191,10 @@ public class DFSOutputStream extends FSOutputSummer
|
|||
this.fileEncryptionInfo = stat.getFileEncryptionInfo();
|
||||
this.cachingStrategy = new AtomicReference<>(
|
||||
dfsClient.getDefaultWriteCachingStrategy());
|
||||
this.addBlockFlags = EnumSet.noneOf(AddBlockFlag.class);
|
||||
if (flag.contains(CreateFlag.NO_LOCAL_WRITE)) {
|
||||
this.addBlockFlags.add(AddBlockFlag.NO_LOCAL_WRITE);
|
||||
}
|
||||
if (progress != null) {
|
||||
DFSClient.LOG.debug("Set non-null progress callback on DFSOutputStream "
|
||||
+"{}", src);
|
||||
|
@ -212,7 +218,7 @@ public class DFSOutputStream extends FSOutputSummer
|
|||
protected DFSOutputStream(DFSClient dfsClient, String src,
|
||||
HdfsFileStatus stat, EnumSet<CreateFlag> flag, Progressable progress,
|
||||
DataChecksum checksum, String[] favoredNodes, boolean createStreamer) {
|
||||
this(dfsClient, src, progress, stat, checksum);
|
||||
this(dfsClient, src, flag, progress, stat, checksum);
|
||||
this.shouldSyncBlock = flag.contains(CreateFlag.SYNC_BLOCK);
|
||||
|
||||
computePacketChunkSize(dfsClient.getConf().getWritePacketSize(),
|
||||
|
@ -220,7 +226,8 @@ public class DFSOutputStream extends FSOutputSummer
|
|||
|
||||
if (createStreamer) {
|
||||
streamer = new DataStreamer(stat, null, dfsClient, src, progress,
|
||||
checksum, cachingStrategy, byteArrayManager, favoredNodes);
|
||||
checksum, cachingStrategy, byteArrayManager, favoredNodes,
|
||||
addBlockFlags);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -289,7 +296,7 @@ public class DFSOutputStream extends FSOutputSummer
|
|||
EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
|
||||
HdfsFileStatus stat, DataChecksum checksum, String[] favoredNodes)
|
||||
throws IOException {
|
||||
this(dfsClient, src, progress, stat, checksum);
|
||||
this(dfsClient, src, flags, progress, stat, checksum);
|
||||
initialFileSize = stat.getLen(); // length of file when opened
|
||||
this.shouldSyncBlock = flags.contains(CreateFlag.SYNC_BLOCK);
|
||||
|
||||
|
@ -310,7 +317,8 @@ public class DFSOutputStream extends FSOutputSummer
|
|||
bytesPerChecksum);
|
||||
streamer = new DataStreamer(stat,
|
||||
lastBlock != null ? lastBlock.getBlock() : null, dfsClient, src,
|
||||
progress, checksum, cachingStrategy, byteArrayManager, favoredNodes);
|
||||
progress, checksum, cachingStrategy, byteArrayManager, favoredNodes,
|
||||
addBlockFlags);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -844,6 +852,10 @@ public class DFSOutputStream extends FSOutputSummer
|
|||
return initialFileSize;
|
||||
}
|
||||
|
||||
protected EnumSet<AddBlockFlag> getAddBlockFlags() {
|
||||
return addBlockFlags;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return the FileEncryptionInfo for this stream, or null if not encrypted.
|
||||
*/
|
||||
|
@ -916,7 +928,8 @@ public class DFSOutputStream extends FSOutputSummer
|
|||
|
||||
static LocatedBlock addBlock(DatanodeInfo[] excludedNodes,
|
||||
DFSClient dfsClient, String src, ExtendedBlock prevBlock, long fileId,
|
||||
String[] favoredNodes) throws IOException {
|
||||
String[] favoredNodes, EnumSet<AddBlockFlag> allocFlags)
|
||||
throws IOException {
|
||||
final DfsClientConf conf = dfsClient.getConf();
|
||||
int retries = conf.getNumBlockWriteLocateFollowingRetry();
|
||||
long sleeptime = conf.getBlockWriteLocateFollowingInitialDelayMs();
|
||||
|
@ -924,7 +937,7 @@ public class DFSOutputStream extends FSOutputSummer
|
|||
while (true) {
|
||||
try {
|
||||
return dfsClient.namenode.addBlock(src, dfsClient.clientName, prevBlock,
|
||||
excludedNodes, fileId, favoredNodes);
|
||||
excludedNodes, fileId, favoredNodes, allocFlags);
|
||||
} catch (RemoteException e) {
|
||||
IOException ue = e.unwrapRemoteException(FileNotFoundException.class,
|
||||
AccessControlException.class,
|
||||
|
|
|
@ -185,7 +185,7 @@ public class DFSStripedInputStream extends DFSInputStream {
|
|||
readingService =
|
||||
new ExecutorCompletionService<>(dfsClient.getStripedReadsThreadPool());
|
||||
decoder = CodecUtil.createRSRawDecoder(dfsClient.getConfiguration(),
|
||||
dataBlkNum, parityBlkNum);
|
||||
dataBlkNum, parityBlkNum, ecPolicy.getCodecName());
|
||||
if (DFSClient.LOG.isDebugEnabled()) {
|
||||
DFSClient.LOG.debug("Creating an striped input stream for file " + src);
|
||||
}
|
||||
|
|
|
@ -287,7 +287,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
|
|||
ExecutorCompletionService<>(flushAllExecutor);
|
||||
|
||||
encoder = CodecUtil.createRSRawEncoder(dfsClient.getConfiguration(),
|
||||
numDataBlocks, numParityBlocks);
|
||||
numDataBlocks, numParityBlocks, ecPolicy.getCodecName());
|
||||
|
||||
coordinator = new Coordinator(numAllBlocks);
|
||||
try {
|
||||
|
@ -301,7 +301,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
|
|||
for (short i = 0; i < numAllBlocks; i++) {
|
||||
StripedDataStreamer streamer = new StripedDataStreamer(stat,
|
||||
dfsClient, src, progress, checksum, cachingStrategy, byteArrayManager,
|
||||
favoredNodes, i, coordinator);
|
||||
favoredNodes, i, coordinator, getAddBlockFlags());
|
||||
streamers.add(streamer);
|
||||
}
|
||||
currentPackets = new DFSPacket[streamers.size()];
|
||||
|
@ -406,7 +406,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
|
|||
StripedDataStreamer streamer = new StripedDataStreamer(oldStreamer.stat,
|
||||
dfsClient, src, oldStreamer.progress,
|
||||
oldStreamer.checksum4WriteBlock, cachingStrategy, byteArrayManager,
|
||||
favoredNodes, i, coordinator);
|
||||
favoredNodes, i, coordinator, getAddBlockFlags());
|
||||
streamers.set(i, streamer);
|
||||
currentPackets[i] = null;
|
||||
if (i == currentIndex) {
|
||||
|
@ -458,7 +458,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
|
|||
LOG.debug("Allocating new block group. The previous block group: "
|
||||
+ currentBlockGroup);
|
||||
final LocatedBlock lb = addBlock(excludedNodes, dfsClient, src,
|
||||
currentBlockGroup, fileId, favoredNodes);
|
||||
currentBlockGroup, fileId, favoredNodes, getAddBlockFlags());
|
||||
assert lb.isStriped();
|
||||
if (lb.getLocations().length < numDataBlocks) {
|
||||
throw new IOException("Failed to get " + numDataBlocks
|
||||
|
|
|
@ -32,6 +32,7 @@ import java.net.Socket;
|
|||
import java.nio.channels.ClosedChannelException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.EnumSet;
|
||||
import java.util.HashSet;
|
||||
import java.util.LinkedList;
|
||||
import java.util.List;
|
||||
|
@ -412,13 +413,15 @@ class DataStreamer extends Daemon {
|
|||
|
||||
protected final LoadingCache<DatanodeInfo, DatanodeInfo> excludedNodes;
|
||||
private final String[] favoredNodes;
|
||||
private final EnumSet<AddBlockFlag> addBlockFlags;
|
||||
|
||||
private DataStreamer(HdfsFileStatus stat, ExtendedBlock block,
|
||||
DFSClient dfsClient, String src,
|
||||
Progressable progress, DataChecksum checksum,
|
||||
AtomicReference<CachingStrategy> cachingStrategy,
|
||||
ByteArrayManager byteArrayManage,
|
||||
boolean isAppend, String[] favoredNodes) {
|
||||
boolean isAppend, String[] favoredNodes,
|
||||
EnumSet<AddBlockFlag> flags) {
|
||||
this.block = block;
|
||||
this.dfsClient = dfsClient;
|
||||
this.src = src;
|
||||
|
@ -430,11 +433,11 @@ class DataStreamer extends Daemon {
|
|||
this.isLazyPersistFile = isLazyPersist(stat);
|
||||
this.isAppend = isAppend;
|
||||
this.favoredNodes = favoredNodes;
|
||||
|
||||
final DfsClientConf conf = dfsClient.getConf();
|
||||
this.dfsclientSlowLogThresholdMs = conf.getSlowIoWarningThresholdMs();
|
||||
this.excludedNodes = initExcludedNodes(conf.getExcludedNodesCacheExpiry());
|
||||
this.errorState = new ErrorState(conf.getDatanodeRestartTimeout());
|
||||
this.addBlockFlags = flags;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -443,9 +446,10 @@ class DataStreamer extends Daemon {
|
|||
DataStreamer(HdfsFileStatus stat, ExtendedBlock block, DFSClient dfsClient,
|
||||
String src, Progressable progress, DataChecksum checksum,
|
||||
AtomicReference<CachingStrategy> cachingStrategy,
|
||||
ByteArrayManager byteArrayManage, String[] favoredNodes) {
|
||||
ByteArrayManager byteArrayManage, String[] favoredNodes,
|
||||
EnumSet<AddBlockFlag> flags) {
|
||||
this(stat, block, dfsClient, src, progress, checksum, cachingStrategy,
|
||||
byteArrayManage, false, favoredNodes);
|
||||
byteArrayManage, false, favoredNodes, flags);
|
||||
stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
|
||||
}
|
||||
|
||||
|
@ -459,7 +463,7 @@ class DataStreamer extends Daemon {
|
|||
AtomicReference<CachingStrategy> cachingStrategy,
|
||||
ByteArrayManager byteArrayManage) {
|
||||
this(stat, lastBlock.getBlock(), dfsClient, src, progress, checksum, cachingStrategy,
|
||||
byteArrayManage, true, null);
|
||||
byteArrayManage, true, null, null);
|
||||
stage = BlockConstructionStage.PIPELINE_SETUP_APPEND;
|
||||
bytesSent = block.getNumBytes();
|
||||
accessToken = lastBlock.getBlockToken();
|
||||
|
@ -1679,7 +1683,7 @@ class DataStreamer extends Daemon {
|
|||
private LocatedBlock locateFollowingBlock(DatanodeInfo[] excludedNodes)
|
||||
throws IOException {
|
||||
return DFSOutputStream.addBlock(excludedNodes, dfsClient, src, block,
|
||||
stat.getFileId(), favoredNodes);
|
||||
stat.getFileId(), favoredNodes, addBlockFlags);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@@ -25,13 +25,13 @@ import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.BlockStoragePolicySpi;

@@ -191,7 +191,7 @@ public class DistributedFileSystem extends FileSystem {
   * @return path component of {file}
   * @throws IllegalArgumentException if URI does not belong to this DFS
   */
- private String getPathName(Path file) {
+ String getPathName(Path file) {
    checkPath(file);
    String result = file.toUri().getPath();
    if (!DFSUtilClient.isValidName(result)) {

@@ -2429,4 +2429,22 @@ public class DistributedFileSystem extends FileSystem {
    }
    return ret;
  }

  private final AsyncDistributedFileSystem adfs =
      new AsyncDistributedFileSystem(this);

  /** @return an {@link AsyncDistributedFileSystem} object. */
  @Unstable
  public AsyncDistributedFileSystem getAsyncDistributedFileSystem() {
    return adfs;
  }

  @Override
  protected Path fixRelativePart(Path p) {
    return super.fixRelativePart(p);
  }

  Statistics getFsStatistics() {
    return statistics;
  }
}
@@ -120,7 +120,9 @@ public class HdfsConfiguration extends Configuration {
      new DeprecationDelta("dfs.replication.min",
          DeprecatedKeys.DFS_NAMENODE_REPLICATION_MIN_KEY),
      new DeprecationDelta("dfs.replication.pending.timeout.sec",
-         DeprecatedKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY),
+         DeprecatedKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY),
+     new DeprecationDelta("dfs.namenode.replication.pending.timeout-sec",
+         DeprecatedKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY),
      new DeprecationDelta("dfs.max-repl-streams",
          DeprecatedKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY),
      new DeprecationDelta("dfs.permissions",
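Note on the deprecation deltas above: they keep the old replication-era key names working by redirecting them to the new reconstruction key. A minimal Java sketch of the effect, assuming only that HdfsConfiguration registers these deltas at class-load time (the key names come from the hunk above, the value is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class DeprecatedKeyCheck {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Setting the deprecated key...
        conf.set("dfs.replication.pending.timeout.sec", "300");
        // ...is expected to be readable through the replacement key.
        System.out.println(
            conf.get("dfs.namenode.reconstruction.pending.timeout-sec"));
      }
    }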
@@ -19,6 +19,7 @@
package org.apache.hadoop.hdfs;

import java.io.IOException;
import java.util.EnumSet;
import java.util.concurrent.atomic.AtomicReference;

import org.apache.hadoop.classification.InterfaceAudience;

@@ -52,9 +53,10 @@ public class StripedDataStreamer extends DataStreamer {
      Progressable progress, DataChecksum checksum,
      AtomicReference<CachingStrategy> cachingStrategy,
      ByteArrayManager byteArrayManage, String[] favoredNodes,
-     short index, Coordinator coordinator) {
+     short index, Coordinator coordinator,
+     final EnumSet<AddBlockFlag> flags) {
    super(stat, null, dfsClient, src, progress, checksum, cachingStrategy,
-       byteArrayManage, favoredNodes);
+       byteArrayManage, favoredNodes, flags);
    this.index = index;
    this.coordinator = coordinator;
  }
@@ -65,12 +65,12 @@ public interface HdfsClientConfigKeys {

  String PREFIX = "dfs.client.";
  String DFS_NAMESERVICES = "dfs.nameservices";
- int DFS_NAMENODE_HTTP_PORT_DEFAULT = 50070;
+ int DFS_NAMENODE_HTTP_PORT_DEFAULT = 9870;
  String DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
- int DFS_NAMENODE_HTTPS_PORT_DEFAULT = 50470;
+ int DFS_NAMENODE_HTTPS_PORT_DEFAULT = 9871;
  String DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
  String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
- int DFS_NAMENODE_RPC_PORT_DEFAULT = 8020;
+ int DFS_NAMENODE_RPC_PORT_DEFAULT = 9820;
  String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY =
      "dfs.namenode.kerberos.principal";
  String DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";

@@ -216,8 +216,8 @@ public interface HdfsClientConfigKeys {
    String DFS_NAMENODE_REPLICATION_INTERVAL_KEY =
        "dfs.namenode.replication.interval";
    String DFS_NAMENODE_REPLICATION_MIN_KEY = "dfs.namenode.replication.min";
-   String DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY =
-       "dfs.namenode.replication.pending.timeout-sec";
+   String DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY =
+       "dfs.namenode.reconstruction.pending.timeout-sec";
    String DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY =
        "dfs.namenode.replication.max-streams";
    String DFS_PERMISSIONS_ENABLED_KEY = "dfs.permissions.enabled";
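The hunk above moves the client-visible NameNode defaults off the legacy 50xxx ports (HTTP 9870, HTTPS 9871, RPC 9820). A hypothetical sketch of where the RPC default matters, assuming only the constant shown above; the helper logic itself is illustrative, not an existing API:

    import java.net.InetSocketAddress;
    import java.net.URI;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class DefaultRpcPort {
      public static void main(String[] args) {
        // Fall back to the new default RPC port (9820) when the filesystem
        // URI does not name one explicitly.
        URI uri = URI.create("hdfs://nn.example.com");
        int port = uri.getPort() == -1
            ? HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT
            : uri.getPort();
        System.out.println(new InetSocketAddress(uri.getHost(), port));
      }
    }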
@@ -15,7 +15,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
- package org.apache.hadoop.hdfs;
+ package org.apache.hadoop.hdfs.client.impl;

import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitFdResponse.USE_RECEIPT_VERIFICATION;

@@ -34,7 +34,15 @@ import org.apache.commons.lang.mutable.MutableBoolean;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.ClientContext;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.ExtendedBlockId;
import org.apache.hadoop.hdfs.RemotePeerFactory;
import org.apache.hadoop.hdfs.ReplicaAccessor;
import org.apache.hadoop.hdfs.ReplicaAccessorBuilder;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
import org.apache.hadoop.hdfs.net.DomainPeer;
import org.apache.hadoop.hdfs.net.Peer;

@@ -646,7 +654,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
  }

  /**
-  * Get a RemoteBlockReader that communicates over a UNIX domain socket.
+  * Get a BlockReaderRemote that communicates over a UNIX domain socket.
   *
   * @return The new BlockReader, or null if we failed to create the block
   * reader.

@@ -709,7 +717,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
  }

  /**
-  * Get a RemoteBlockReader that communicates over a TCP socket.
+  * Get a BlockReaderRemote that communicates over a TCP socket.
   *
   * @return The new BlockReader. We will not return null, but instead throw
   * an exception if this fails.

@@ -837,13 +845,13 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
  private BlockReader getRemoteBlockReader(Peer peer) throws IOException {
    int networkDistance = clientContext.getNetworkDistance(datanode);
    if (conf.getShortCircuitConf().isUseLegacyBlockReader()) {
-     return RemoteBlockReader.newBlockReader(fileName,
+     return BlockReaderRemote.newBlockReader(fileName,
          block, token, startOffset, length, conf.getIoBufferSize(),
          verifyChecksum, clientName, peer, datanode,
          clientContext.getPeerCache(), cachingStrategy, tracer,
          networkDistance);
    } else {
-     return RemoteBlockReader2.newBlockReader(
+     return BlockReaderRemote2.newBlockReader(
          fileName, block, token, startOffset, length,
          verifyChecksum, clientName, peer, datanode,
          clientContext.getPeerCache(), cachingStrategy, tracer,
@@ -15,7 +15,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
- package org.apache.hadoop.hdfs;
+ package org.apache.hadoop.hdfs.client.impl;

import java.io.IOException;
import java.nio.ByteBuffer;

@@ -25,6 +25,7 @@ import java.util.EnumSet;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -15,7 +15,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
- package org.apache.hadoop.hdfs;
+ package org.apache.hadoop.hdfs.client.impl;

import java.io.DataInputStream;
import java.io.File;

@@ -33,8 +33,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
@@ -15,7 +15,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
- package org.apache.hadoop.hdfs;
+ package org.apache.hadoop.hdfs.client.impl;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;

@@ -29,6 +29,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FSInputChecker;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.PeerCache;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;

@@ -54,13 +56,13 @@ import org.slf4j.LoggerFactory;

/**
 * @deprecated this is an old implementation that is being left around
- * in case any issues spring up with the new {@link RemoteBlockReader2}
+ * in case any issues spring up with the new {@link BlockReaderRemote2}
 * implementation.
 * It will be removed in the next release.
 */
@InterfaceAudience.Private
@Deprecated
- public class RemoteBlockReader extends FSInputChecker implements BlockReader {
+ public class BlockReaderRemote extends FSInputChecker implements BlockReader {
  static final Logger LOG = LoggerFactory.getLogger(FSInputChecker.class);

  private final Peer peer;

@@ -209,7 +211,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
      int len, byte[] checksumBuf)
      throws IOException {
    try (TraceScope ignored = tracer.newScope(
-       "RemoteBlockReader#readChunk(" + blockId + ")")) {
+       "BlockReaderRemote#readChunk(" + blockId + ")")) {
      return readChunkImpl(pos, buf, offset, len, checksumBuf);
    }
  }

@@ -335,7 +337,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
    return bytesToRead;
  }

- private RemoteBlockReader(String file, String bpid, long blockId,
+ private BlockReaderRemote(String file, String bpid, long blockId,
      DataInputStream in, DataChecksum checksum, boolean verifyChecksum,
      long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
      DatanodeID datanodeID, PeerCache peerCache, Tracer tracer,

@@ -386,7 +388,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
   * @param clientName Client name
   * @return New BlockReader instance, or null on error.
   */
- public static RemoteBlockReader newBlockReader(String file,
+ public static BlockReaderRemote newBlockReader(String file,
      ExtendedBlock block,
      Token<BlockTokenIdentifier> blockToken,
      long startOffset, long len,

@@ -412,7 +414,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {

    BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
        PBHelperClient.vintPrefixed(in));
-   RemoteBlockReader2.checkSuccess(status, peer, block, file);
+   BlockReaderRemote2.checkSuccess(status, peer, block, file);
    ReadOpChecksumInfoProto checksumInfo =
        status.getReadOpChecksumInfo();
    DataChecksum checksum = DataTransferProtoUtil.fromProto(

@@ -429,7 +431,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
        startOffset + " for file " + file);
    }

-   return new RemoteBlockReader(file, block.getBlockPoolId(), block.getBlockId(),
+   return new BlockReaderRemote(file, block.getBlockPoolId(), block.getBlockId(),
        in, checksum, verifyChecksum, startOffset, firstChunkOffset, len,
        peer, datanodeID, peerCache, tracer, networkDistance);
  }

@@ -467,7 +469,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
  void sendReadResult(Peer peer, Status statusCode) {
    assert !sentStatusCode : "already sent status code to " + peer;
    try {
-     RemoteBlockReader2.writeReadResult(peer.getOutputStream(), statusCode);
+     BlockReaderRemote2.writeReadResult(peer.getOutputStream(), statusCode);
      sentStatusCode = true;
    } catch (IOException e) {
      // It's ok not to be able to send this. But something is probably wrong.

@@ -478,14 +480,14 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {

  @Override
  public int read(ByteBuffer buf) throws IOException {
-   throw new UnsupportedOperationException("readDirect unsupported in RemoteBlockReader");
+   throw new UnsupportedOperationException("readDirect unsupported in BlockReaderRemote");
  }

  @Override
  public int available() {
    // An optimistic estimate of how much data is available
    // to us without doing network I/O.
-   return RemoteBlockReader2.TCP_WINDOW_SIZE;
+   return BlockReaderRemote2.TCP_WINDOW_SIZE;
  }

  @Override
@@ -15,7 +15,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
- package org.apache.hadoop.hdfs;
+ package org.apache.hadoop.hdfs.client.impl;

import java.io.BufferedOutputStream;
import java.io.DataInputStream;

@@ -30,6 +30,8 @@ import java.util.UUID;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.PeerCache;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

@@ -78,13 +80,13 @@ import org.slf4j.LoggerFactory;
 *
 * This is a new implementation introduced in Hadoop 0.23 which
 * is more efficient and simpler than the older BlockReader
- * implementation. It should be renamed to RemoteBlockReader
+ * implementation. It should be renamed to BlockReaderRemote
 * once we are confident in it.
 */
@InterfaceAudience.Private
- public class RemoteBlockReader2 implements BlockReader {
+ public class BlockReaderRemote2 implements BlockReader {

- static final Logger LOG = LoggerFactory.getLogger(RemoteBlockReader2.class);
+ static final Logger LOG = LoggerFactory.getLogger(BlockReaderRemote2.class);
  static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB;

  final private Peer peer;

@@ -138,7 +140,7 @@ public class RemoteBlockReader2 implements BlockReader {
    if (curDataSlice == null ||
        curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) {
      try (TraceScope ignored = tracer.newScope(
-         "RemoteBlockReader2#readNextPacket(" + blockId + ")")) {
+         "BlockReaderRemote2#readNextPacket(" + blockId + ")")) {
        readNextPacket();
      }
    }

@@ -162,7 +164,7 @@ public class RemoteBlockReader2 implements BlockReader {
    if (curDataSlice == null ||
        (curDataSlice.remaining() == 0 && bytesNeededToFinish > 0)) {
      try (TraceScope ignored = tracer.newScope(
-         "RemoteBlockReader2#readNextPacket(" + blockId + ")")) {
+         "BlockReaderRemote2#readNextPacket(" + blockId + ")")) {
        readNextPacket();
      }
    }

@@ -273,7 +275,7 @@ public class RemoteBlockReader2 implements BlockReader {
    }
  }

- protected RemoteBlockReader2(String file, long blockId,
+ protected BlockReaderRemote2(String file, long blockId,
      DataChecksum checksum, boolean verifyChecksum,
      long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
      DatanodeID datanodeID, PeerCache peerCache, Tracer tracer,

@@ -425,7 +427,7 @@ public class RemoteBlockReader2 implements BlockReader {
        startOffset + " for file " + file);
    }

-   return new RemoteBlockReader2(file, block.getBlockId(), checksum,
+   return new BlockReaderRemote2(file, block.getBlockId(), checksum,
        verifyChecksum, startOffset, firstChunkOffset, len, peer, datanodeID,
        peerCache, tracer, networkDistance);
  }
@@ -15,9 +15,10 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
- package org.apache.hadoop.hdfs;
+ package org.apache.hadoop.hdfs.client.impl;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.BlockReader;

import java.io.IOException;
@@ -15,7 +15,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
- package org.apache.hadoop.hdfs;
+ package org.apache.hadoop.hdfs.client.impl;

import java.io.IOException;
import java.nio.ByteBuffer;

@@ -23,6 +23,8 @@ import java.util.EnumSet;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.ReplicaAccessor;
import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
import org.apache.hadoop.util.DataChecksum;
@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.hdfs.AddBlockFlag;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;

@@ -390,6 +391,8 @@ public interface ClientProtocol {
   * @param fileId the id uniquely identifying a file
   * @param favoredNodes the list of nodes where the client wants the blocks.
   *          Nodes are identified by either host name or address.
   * @param addBlockFlags flags to advise the behavior of allocating and placing
   *          a new block.
   *
   * @return LocatedBlock allocated block information.
   *

@@ -408,7 +411,7 @@ public interface ClientProtocol {
  @Idempotent
  LocatedBlock addBlock(String src, String clientName,
      ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId,
-     String[] favoredNodes)
+     String[] favoredNodes, EnumSet<AddBlockFlag> addBlockFlags)
      throws IOException;

  /**
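For orientation, a hedged sketch of a caller using the extended addBlock signature above. The only assumption beyond this diff is that the Java AddBlockFlag enum mirrors the NO_LOCAL_WRITE value defined in the proto change further down; "namenode" stands in for an already-constructed ClientProtocol proxy.

    import java.io.IOException;
    import java.util.EnumSet;
    import org.apache.hadoop.hdfs.AddBlockFlag;
    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;

    class AddBlockWithFlags {
      // Sketch only: the surrounding arguments are whatever the writer was
      // already passing; the new piece is the advisory flag set.
      static LocatedBlock allocate(ClientProtocol namenode, String src,
          String clientName, ExtendedBlock previous, DatanodeInfo[] excluded,
          long fileId, String[] favoredNodes) throws IOException {
        EnumSet<AddBlockFlag> flags = EnumSet.of(AddBlockFlag.NO_LOCAL_WRITE);
        return namenode.addBlock(src, clientName, previous, excluded, fileId,
            favoredNodes, flags);
      }
    }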
@@ -72,6 +72,10 @@ public final class ErasureCodingPolicy {
    return schema.getNumParityUnits();
  }

  public String getCodecName() {
    return schema.getCodecName();
  }

  public byte getId() {
    return id;
  }
@@ -22,7 +22,6 @@ import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.erasurecode.ECSchema;
import org.apache.hadoop.util.StringUtils;

@InterfaceAudience.Private

@@ -145,11 +144,9 @@ public final class HdfsConstants {
    ALL, LIVE, DEAD, DECOMMISSIONING
  }

  public static final ECSchema RS_6_3_SCHEMA = new ECSchema("rs", 6, 3);
  public static final byte RS_6_3_POLICY_ID = 0;

  public static final ECSchema RS_3_2_SCHEMA = new ECSchema("rs", 3, 2);
  public static final byte RS_3_2_POLICY_ID = 1;
  public static final byte RS_6_3_LEGACY_POLICY_ID = 2;

  /* Hidden constructor */
  protected HdfsConstants() {
@@ -24,11 +24,14 @@ import java.util.EnumSet;
import java.util.List;

import com.google.common.collect.Lists;
import java.util.concurrent.Callable;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;

@@ -42,6 +45,7 @@ import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.AddBlockFlag;
import org.apache.hadoop.hdfs.inotify.EventBatchList;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;

@@ -54,6 +58,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;

@@ -134,7 +139,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Recove
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;

@@ -152,13 +156,15 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetPer
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetQuotaRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetReplicationRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSafeModeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.TruncateRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UnsetStoragePolicyRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetStoragePolicyRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.*;
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos;
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.EncryptionZoneProto;
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.GetEZForPathRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPoliciesRequestProto;

@@ -176,8 +182,9 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.ProtocolMetaInterface;
import org.apache.hadoop.ipc.ProtocolTranslator;
import org.apache.hadoop.ipc.RPC;

@@ -189,12 +196,9 @@ import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenReque
import org.apache.hadoop.security.token.Token;

import com.google.protobuf.ByteString;
import com.google.protobuf.Message;
import com.google.protobuf.ServiceException;

import static org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
import static org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos
    .EncryptionZoneProto;

/**
 * This class forwards NN's ClientProtocol calls as RPC calls to the NN server
 * while translating from the parameter types used in ClientProtocol to the

@@ -205,6 +209,8 @@ import static org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos
public class ClientNamenodeProtocolTranslatorPB implements
    ProtocolMetaInterface, ClientProtocol, Closeable, ProtocolTranslator {
  final private ClientNamenodeProtocolPB rpcProxy;
  private static final ThreadLocal<Callable<?>>
      RETURN_VALUE_CALLBACK = new ThreadLocal<>();

  static final GetServerDefaultsRequestProto VOID_GET_SERVER_DEFAULT_REQUEST =
      GetServerDefaultsRequestProto.newBuilder().build();

@@ -238,6 +244,12 @@ public class ClientNamenodeProtocolTranslatorPB implements
    rpcProxy = proxy;
  }

  @SuppressWarnings("unchecked")
  @Unstable
  public static <T> Callable<T> getReturnValueCallback() {
    return (Callable<T>) RETURN_VALUE_CALLBACK.get();
  }

  @Override
  public void close() {
    RPC.stopProxy(rpcProxy);

@@ -393,7 +405,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
  @Override
  public LocatedBlock addBlock(String src, String clientName,
      ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId,
-     String[] favoredNodes) throws IOException {
+     String[] favoredNodes, EnumSet<AddBlockFlag> addBlockFlags)
+     throws IOException {
    AddBlockRequestProto.Builder req = AddBlockRequestProto.newBuilder()
        .setSrc(src).setClientName(clientName).setFileId(fileId);
    if (previous != null)

@@ -403,6 +416,10 @@ public class ClientNamenodeProtocolTranslatorPB implements
    if (favoredNodes != null) {
      req.addAllFavoredNodes(Arrays.asList(favoredNodes));
    }
    if (addBlockFlags != null) {
      req.addAllFlags(PBHelperClient.convertAddBlockFlags(
          addBlockFlags));
    }
    try {
      return PBHelperClient.convertLocatedBlockProto(
          rpcProxy.addBlock(null, req.build()).getBlock());

@@ -469,6 +486,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
    RenameRequestProto req = RenameRequestProto.newBuilder()
        .setSrc(src)
        .setDst(dst).build();

    try {
      return rpcProxy.rename(null, req).getResult();
    } catch (ServiceException e) {

@@ -493,7 +511,22 @@ public class ClientNamenodeProtocolTranslatorPB implements
        setDst(dst).setOverwriteDest(overwrite).
        build();
    try {
-     rpcProxy.rename2(null, req);
+     if (Client.isAsynchronousMode()) {
+       rpcProxy.rename2(null, req);
+
+       final Callable<Message> returnMessageCallback = ProtobufRpcEngine
+           .getReturnMessageCallback();
+       Callable<Void> callBack = new Callable<Void>() {
+         @Override
+         public Void call() throws Exception {
+           returnMessageCallback.call();
+           return null;
+         }
+       };
+       RETURN_VALUE_CALLBACK.set(callBack);
+     } else {
+       rpcProxy.rename2(null, req);
+     }
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
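The rename2 change above parks a per-thread Callable so that an asynchronous caller can fetch the RPC result after issuing the call. A simplified, standalone sketch of that hand-off pattern (this is an illustration of the idea, not the Hadoop classes themselves):

    import java.util.concurrent.Callable;

    class ReturnValueHandoff {
      // The callee records a callback for the current thread; the async caller
      // retrieves it immediately after the non-blocking call returns.
      private static final ThreadLocal<Callable<?>> RETURN_VALUE_CALLBACK =
          new ThreadLocal<>();

      static void recordCallback(Callable<?> callback) {
        RETURN_VALUE_CALLBACK.set(callback);
      }

      @SuppressWarnings("unchecked")
      static <T> Callable<T> takeCallback() {
        Callable<T> cb = (Callable<T>) RETURN_VALUE_CALLBACK.get();
        RETURN_VALUE_CALLBACK.remove(); // avoid leaking into the next call
        return cb;
      }
    }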
@@ -37,6 +37,7 @@ import com.google.protobuf.CodedInputStream;
import org.apache.hadoop.crypto.CipherOption;
import org.apache.hadoop.crypto.CipherSuite;
import org.apache.hadoop.crypto.CryptoProtocolVersion;
import org.apache.hadoop.hdfs.AddBlockFlag;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;

@@ -97,6 +98,7 @@ import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTyp
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockFlagProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;

@@ -2528,4 +2530,29 @@ public class PBHelperClient {
    }
    return builder.build();
  }

  public static EnumSet<AddBlockFlag> convertAddBlockFlags(
      List<AddBlockFlagProto> addBlockFlags) {
    EnumSet<AddBlockFlag> flags =
        EnumSet.noneOf(AddBlockFlag.class);
    for (AddBlockFlagProto af : addBlockFlags) {
      AddBlockFlag flag = AddBlockFlag.valueOf((short)af.getNumber());
      if (flag != null) {
        flags.add(flag);
      }
    }
    return flags;
  }

  public static List<AddBlockFlagProto> convertAddBlockFlags(
      EnumSet<AddBlockFlag> flags) {
    List<AddBlockFlagProto> ret = new ArrayList<>();
    for (AddBlockFlag flag : flags) {
      AddBlockFlagProto abfp = AddBlockFlagProto.valueOf(flag.getMode());
      if (abfp != null) {
        ret.add(abfp);
      }
    }
    return ret;
  }
}
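A small usage sketch of the two converters added above, round-tripping a flag set between the client-side enum and the repeated proto field. As before, AddBlockFlag.NO_LOCAL_WRITE is assumed to mirror the proto value defined later in this commit.

    import java.util.EnumSet;
    import java.util.List;
    import org.apache.hadoop.hdfs.AddBlockFlag;
    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockFlagProto;
    import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;

    class AddBlockFlagRoundTrip {
      public static void main(String[] args) {
        EnumSet<AddBlockFlag> in = EnumSet.of(AddBlockFlag.NO_LOCAL_WRITE);
        // Client side: enum set -> repeated proto field.
        List<AddBlockFlagProto> onTheWire = PBHelperClient.convertAddBlockFlags(in);
        // Server side: repeated proto field -> enum set.
        EnumSet<AddBlockFlag> out = PBHelperClient.convertAddBlockFlags(onTheWire);
        System.out.println(in.equals(out)); // expected: true
      }
    }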
@@ -730,8 +730,10 @@ public class WebHdfsFileSystem extends FileSystem
      node = url.getAuthority();
    }
    try {
-     ioe = ioe.getClass().getConstructor(String.class)
-         .newInstance(node + ": " + ioe.getMessage());
+     IOException newIoe = ioe.getClass().getConstructor(String.class)
+         .newInstance(node + ": " + ioe.getMessage());
+     newIoe.setStackTrace(ioe.getStackTrace());
+     ioe = newIoe;
    } catch (NoSuchMethodException | SecurityException
        | InstantiationException | IllegalAccessException
        | IllegalArgumentException | InvocationTargetException e) {
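The WebHDFS hunk above rebuilds the same exception type with the node name prefixed, but now keeps the original stack trace rather than pointing at the re-wrapping site. A standalone sketch of that pattern, written as a generic helper rather than the WebHdfsFileSystem code itself:

    import java.io.IOException;

    final class ExceptionPrefixer {
      static IOException prefixWithNode(IOException ioe, String node) {
        try {
          // Re-instantiate the same class with an augmented message...
          IOException newIoe = ioe.getClass().getConstructor(String.class)
              .newInstance(node + ": " + ioe.getMessage());
          // ...but preserve where the original failure actually happened.
          newIoe.setStackTrace(ioe.getStackTrace());
          return newIoe;
        } catch (ReflectiveOperationException e) {
          // No (String) constructor or not instantiable: keep the original.
          return ioe;
        }
      }
    }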
@@ -162,6 +162,10 @@ message AbandonBlockRequestProto {
message AbandonBlockResponseProto { // void response
}

enum AddBlockFlagProto {
  NO_LOCAL_WRITE = 1; // avoid writing to local node.
}

message AddBlockRequestProto {
  required string src = 1;
  required string clientName = 2;

@@ -169,6 +173,7 @@ message AddBlockRequestProto {
  repeated DatanodeInfoProto excludeNodes = 4;
  optional uint64 fileId = 5 [default = 0]; // default as a bogus id
  repeated string favoredNodes = 6; //the set of datanodes to use for the block
  repeated AddBlockFlagProto flags = 7; // default to empty.
}

message AddBlockResponseProto {
@@ -32,6 +32,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">

  <properties>
    <require.fuse>false</require.fuse>
    <hadoop.component>hdfs</hadoop.component>
  </properties>

  <dependencies>

@@ -222,7 +223,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<exec executable="ctest" failonerror="true" dir="${project.build.directory}/">
<env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
<!-- Make sure libhadoop.so is on LD_LIBRARY_PATH. -->
- <env key="LD_LIBRARY_PATH" value="${env.LD_LIBRARY_PATH}:${project.build.directory}/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib"/>
+ <env key="LD_LIBRARY_PATH" value="${env.LD_LIBRARY_PATH}:${project.build.directory}/native/target/usr/local/lib:${hadoop.common.build.dir}/native/target/usr/local/lib"/>
</exec>
</target>
</configuration>
@@ -58,11 +58,19 @@ if(WIN32)
    # Omit unneeded headers.
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -DWIN32_LEAN_AND_MEAN")
    set(OS_DIR ${CMAKE_SOURCE_DIR}/main/native/libhdfs/os/windows)
-   set(OUT_DIR target/bin)
+
+   # IMPORTANT: OUT_DIR MUST be relative to maven's
+   # project.build.directory (=target) and match dist-copynativelibs
+   # in order to be in a release
+   set(OUT_DIR bin)
else()
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden")
    set(OS_DIR ${CMAKE_SOURCE_DIR}/main/native/libhdfs/os/posix)
-   set(OUT_DIR target/usr/local/lib)
+
+   # IMPORTANT: OUT_DIR MUST be relative to maven's
+   # project.build.directory (=target) and match dist-copynativelibs
+   # in order to be in a release
+   set(OUT_DIR native/target/usr/local/lib)
endif()

# Configure JNI.
@@ -139,7 +139,7 @@

    <!-- Don't complain about LocalDatanodeInfo's anonymous class -->
    <Match>
-     <Class name="org.apache.hadoop.hdfs.BlockReaderLocal$LocalDatanodeInfo$1" />
+     <Class name="org.apache.hadoop.hdfs.client.impl.BlockReaderLocal$LocalDatanodeInfo$1" />
      <Bug pattern="SE_BAD_FIELD_INNER_CLASS" />
    </Match>
    <!-- Only one method increments numFailedVolumes and it is synchronized -->
@@ -87,9 +87,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final String DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY = "dfs.namenode.backup.dnrpc-address";
  public static final String DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY =
      HdfsClientConfigKeys.DeprecatedKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY;
- public static final long DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT = 1024*1024;
- public static final String DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY = "dfs.datanode.balance.max.concurrent.moves";
- public static final int DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT = 5;
+ public static final long DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_DEFAULT =
+     10 * 1024*1024;
+ public static final String DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY
+     = "dfs.datanode.balance.max.concurrent.moves";
+ public static final int
+     DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT = 50;
  @Deprecated
  public static final String DFS_DATANODE_READAHEAD_BYTES_KEY =
      HdfsClientConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY;

@@ -166,9 +169,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final int DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT = 0;
  public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY =
      HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
- public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50090";
+ public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT =
+     "0.0.0.0:9868";
  public static final String DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_KEY = "dfs.namenode.secondary.https-address";
- public static final String DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:50091";
+ public static final String DFS_NAMENODE_SECONDARY_HTTPS_ADDRESS_DEFAULT =
+     "0.0.0.0:9869";
  public static final String DFS_NAMENODE_CHECKPOINT_QUIET_MULTIPLIER_KEY = "dfs.namenode.checkpoint.check.quiet-multiplier";
  public static final double DFS_NAMENODE_CHECKPOINT_QUIET_MULTIPLIER_DEFAULT = 1.5;
  public static final String DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY = "dfs.namenode.checkpoint.check.period";

@@ -211,9 +216,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final int DFS_NAMENODE_STRIPE_MIN_DEFAULT = 1;
  public static final String DFS_NAMENODE_SAFEMODE_REPLICATION_MIN_KEY =
      "dfs.namenode.safemode.replication.min";
- public static final String DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY =
-     HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY;
- public static final int DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT = -1;

+ public static final String DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY =
+     "dfs.namenode.reconstruction.pending.timeout-sec";
+ public static final int DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT = -1;

  public static final String DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY =
      HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY;
  public static final int DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT = 2;

@@ -467,7 +474,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final int DFS_MOVER_RETRY_MAX_ATTEMPTS_DEFAULT = 10;

  public static final String DFS_DATANODE_ADDRESS_KEY = "dfs.datanode.address";
- public static final int DFS_DATANODE_DEFAULT_PORT = 50010;
+ public static final int DFS_DATANODE_DEFAULT_PORT = 9866;
  public static final String DFS_DATANODE_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_DEFAULT_PORT;
  public static final String DFS_DATANODE_DATA_DIR_PERMISSION_KEY = "dfs.datanode.data.dir.perm";
  public static final String DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT = "700";

@@ -499,7 +506,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final String DFS_DATANODE_HANDLER_COUNT_KEY = "dfs.datanode.handler.count";
  public static final int DFS_DATANODE_HANDLER_COUNT_DEFAULT = 10;
  public static final String DFS_DATANODE_HTTP_ADDRESS_KEY = "dfs.datanode.http.address";
- public static final int DFS_DATANODE_HTTP_DEFAULT_PORT = 50075;
+ public static final int DFS_DATANODE_HTTP_DEFAULT_PORT = 9864;
  public static final String DFS_DATANODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_HTTP_DEFAULT_PORT;
  public static final String DFS_DATANODE_MAX_RECEIVER_THREADS_KEY =
      HdfsClientConfigKeys.DeprecatedKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY;

@@ -538,10 +545,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final int DFS_DEFAULT_CHUNK_VIEW_SIZE_DEFAULT = 32*1024;
  public static final String DFS_DATANODE_HTTPS_ADDRESS_KEY = "dfs.datanode.https.address";
  public static final String DFS_DATANODE_HTTPS_PORT_KEY = "datanode.https.port";
- public static final int DFS_DATANODE_HTTPS_DEFAULT_PORT = 50475;
+ public static final int DFS_DATANODE_HTTPS_DEFAULT_PORT = 9865;
  public static final String DFS_DATANODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_HTTPS_DEFAULT_PORT;
  public static final String DFS_DATANODE_IPC_ADDRESS_KEY = "dfs.datanode.ipc.address";
- public static final int DFS_DATANODE_IPC_DEFAULT_PORT = 50020;
+ public static final int DFS_DATANODE_IPC_DEFAULT_PORT = 9867;
  public static final String DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_IPC_DEFAULT_PORT;
  public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY = "dfs.datanode.min.supported.namenode.version";
  public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "3.0.0-SNAPSHOT";
@@ -17,6 +17,8 @@
 */
package org.apache.hadoop.hdfs.protocol;

import static org.apache.hadoop.fs.CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH_DEFAULT;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

@@ -31,6 +33,7 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.Replica;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.protobuf.ByteString;
import com.google.protobuf.CodedInputStream;

@@ -63,26 +66,42 @@ public abstract class BlockListAsLongs implements Iterable<BlockReportReplica> {
  };

  /**
-  * Prepare an instance to in-place decode the given ByteString buffer
+  * Prepare an instance to in-place decode the given ByteString buffer.
   * @param numBlocks - blocks in the buffer
   * @param blocksBuf - ByteString encoded varints
   * @param maxDataLength - maximum allowable data size in protobuf message
   * @return BlockListAsLongs
   */
  public static BlockListAsLongs decodeBuffer(final int numBlocks,
-     final ByteString blocksBuf) {
-   return new BufferDecoder(numBlocks, blocksBuf);
+     final ByteString blocksBuf, final int maxDataLength) {
+   return new BufferDecoder(numBlocks, blocksBuf, maxDataLength);
  }

  /**
-  * Prepare an instance to in-place decode the given ByteString buffers
+  * Prepare an instance to in-place decode the given ByteString buffers.
   * @param numBlocks - blocks in the buffers
   * @param blocksBufs - list of ByteString encoded varints
   * @return BlockListAsLongs
   */
  @VisibleForTesting
  public static BlockListAsLongs decodeBuffers(final int numBlocks,
      final List<ByteString> blocksBufs) {
    return decodeBuffers(numBlocks, blocksBufs,
        IPC_MAXIMUM_DATA_LENGTH_DEFAULT);
  }

  /**
   * Prepare an instance to in-place decode the given ByteString buffers.
   * @param numBlocks - blocks in the buffers
   * @param blocksBufs - list of ByteString encoded varints
   * @param maxDataLength - maximum allowable data size in protobuf message
   * @return BlockListAsLongs
   */
  public static BlockListAsLongs decodeBuffers(final int numBlocks,
      final List<ByteString> blocksBufs, final int maxDataLength) {
    // this doesn't actually copy the data
-   return decodeBuffer(numBlocks, ByteString.copyFrom(blocksBufs));
+   return decodeBuffer(numBlocks, ByteString.copyFrom(blocksBufs),
+       maxDataLength);
  }

  /**

@@ -93,7 +112,21 @@ public abstract class BlockListAsLongs implements Iterable<BlockReportReplica> {
   * @return BlockListAsLongs
   */
  public static BlockListAsLongs decodeLongs(List<Long> blocksList) {
-   return blocksList.isEmpty() ? EMPTY : new LongsDecoder(blocksList);
+   return decodeLongs(blocksList, IPC_MAXIMUM_DATA_LENGTH_DEFAULT);
  }

  /**
   * Prepare an instance to in-place decode the given list of Longs.  Note
   * it's much more efficient to decode ByteString buffers and only exists
   * for compatibility.
   * @param blocksList - list of longs
   * @param maxDataLength - maximum allowable data size in protobuf message
   * @return BlockListAsLongs
   */
  public static BlockListAsLongs decodeLongs(List<Long> blocksList,
      int maxDataLength) {
    return blocksList.isEmpty() ? EMPTY :
        new LongsDecoder(blocksList, maxDataLength);
  }

  /**

@@ -102,17 +135,22 @@ public abstract class BlockListAsLongs implements Iterable<BlockReportReplica> {
   * @param replicas - replicas to encode
   * @return BlockListAsLongs
   */
  @VisibleForTesting
  public static BlockListAsLongs encode(
      final Collection<? extends Replica> replicas) {
-   BlockListAsLongs.Builder builder = builder();
+   BlockListAsLongs.Builder builder = builder(IPC_MAXIMUM_DATA_LENGTH_DEFAULT);
    for (Replica replica : replicas) {
      builder.add(replica);
    }
    return builder.build();
  }

- public static BlockListAsLongs readFrom(InputStream is) throws IOException {
+ public static BlockListAsLongs readFrom(InputStream is, int maxDataLength)
+     throws IOException {
    CodedInputStream cis = CodedInputStream.newInstance(is);
    if (maxDataLength != IPC_MAXIMUM_DATA_LENGTH_DEFAULT) {
      cis.setSizeLimit(maxDataLength);
    }
    int numBlocks = -1;
    ByteString blocksBuf = null;
    while (!cis.isAtEnd()) {

@@ -133,7 +171,7 @@ public abstract class BlockListAsLongs implements Iterable<BlockReportReplica> {
      }
    }
    if (numBlocks != -1 && blocksBuf != null) {
-     return decodeBuffer(numBlocks, blocksBuf);
+     return decodeBuffer(numBlocks, blocksBuf, maxDataLength);
    }
    return null;
  }

@@ -144,9 +182,14 @@ public abstract class BlockListAsLongs implements Iterable<BlockReportReplica> {
    cos.writeBytes(2, getBlocksBuffer());
    cos.flush();
  }

  @VisibleForTesting
  public static Builder builder() {
-   return new BlockListAsLongs.Builder();
+   return builder(IPC_MAXIMUM_DATA_LENGTH_DEFAULT);
  }

  public static Builder builder(int maxDataLength) {
    return new BlockListAsLongs.Builder(maxDataLength);
  }

  /**

@@ -221,10 +264,12 @@ public abstract class BlockListAsLongs implements Iterable<BlockReportReplica> {
    private final CodedOutputStream cos;
    private int numBlocks = 0;
    private int numFinalized = 0;
    private final int maxDataLength;

-   Builder() {
+   Builder(int maxDataLength) {
      out = ByteString.newOutput(64*1024);
      cos = CodedOutputStream.newInstance(out);
      this.maxDataLength = maxDataLength;
    }

    public void add(Replica replica) {

@@ -258,7 +303,8 @@ public abstract class BlockListAsLongs implements Iterable<BlockReportReplica> {
        // shouldn't happen, ByteString.Output doesn't throw IOE
        throw new IllegalStateException(ioe);
      }
-     return new BufferDecoder(numBlocks, numFinalized, out.toByteString());
+     return new BufferDecoder(numBlocks, numFinalized, out.toByteString(),
+         maxDataLength);
    }
  }

@@ -273,16 +319,19 @@ public abstract class BlockListAsLongs implements Iterable<BlockReportReplica> {
    private final ByteString buffer;
    private final int numBlocks;
    private int numFinalized;
    private final int maxDataLength;

-   BufferDecoder(final int numBlocks, final ByteString buf) {
-     this(numBlocks, -1, buf);
+   BufferDecoder(final int numBlocks, final ByteString buf,
+       final int maxDataLength) {
+     this(numBlocks, -1, buf, maxDataLength);
    }

    BufferDecoder(final int numBlocks, final int numFinalized,
-       final ByteString buf) {
+       final ByteString buf, final int maxDataLength) {
      this.numBlocks = numBlocks;
      this.numFinalized = numFinalized;
      this.buffer = buf;
      this.maxDataLength = maxDataLength;
    }

    @Override

@@ -349,6 +398,12 @@ public abstract class BlockListAsLongs implements Iterable<BlockReportReplica> {
        final CodedInputStream cis = buffer.newCodedInput();
        private int currentBlockIndex = 0;

        {
          if (maxDataLength != IPC_MAXIMUM_DATA_LENGTH_DEFAULT) {
            cis.setSizeLimit(maxDataLength);
          }
        }

        @Override
        public boolean hasNext() {
          return currentBlockIndex < numBlocks;

@@ -384,12 +439,14 @@ public abstract class BlockListAsLongs implements Iterable<BlockReportReplica> {
    private final List<Long> values;
    private final int finalizedBlocks;
    private final int numBlocks;
    private final int maxDataLength;

    // set the header
-   LongsDecoder(List<Long> values) {
+   LongsDecoder(List<Long> values, int maxDataLength) {
      this.values = values.subList(2, values.size());
      this.finalizedBlocks = values.get(0).intValue();
      this.numBlocks = finalizedBlocks + values.get(1).intValue();
      this.maxDataLength = maxDataLength;
    }

    @Override

@@ -399,7 +456,7 @@ public abstract class BlockListAsLongs implements Iterable<BlockReportReplica> {

    @Override
    public ByteString getBlocksBuffer() {
-     Builder builder = builder();
+     Builder builder = builder(maxDataLength);
      for (Replica replica : this) {
        builder.add(replica);
      }
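A minimal sketch of how the new maxDataLength parameter threads through an encode/decode round trip, using only methods visible in the hunks above. It assumes a Replica implementation "replica" is already available and that getNumberOfBlocks()/getBlocksBuffer() are the accessors this class exposes elsewhere.

    import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
    import org.apache.hadoop.hdfs.server.datanode.Replica;

    class BlockReportSizeLimit {
      static BlockListAsLongs roundTrip(Replica replica, int maxDataLength) {
        // Encode a (tiny) block report under an explicit protobuf size limit.
        BlockListAsLongs.Builder builder = BlockListAsLongs.builder(maxDataLength);
        builder.add(replica);
        BlockListAsLongs report = builder.build();
        // Decode it again, enforcing the same limit the RPC layer uses.
        return BlockListAsLongs.decodeBuffer(
            report.getNumberOfBlocks(), report.getBlocksBuffer(), maxDataLength);
      }
    }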
@@ -24,6 +24,7 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.hdfs.AddBlockFlag;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FsServerDefaults;

@@ -505,6 +506,8 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
    try {
      List<DatanodeInfoProto> excl = req.getExcludeNodesList();
      List<String> favor = req.getFavoredNodesList();
      EnumSet<AddBlockFlag> flags =
          PBHelperClient.convertAddBlockFlags(req.getFlagsList());
      LocatedBlock result = server.addBlock(
          req.getSrc(),
          req.getClientName(),

@@ -512,7 +515,8 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
          (excl == null || excl.size() == 0) ? null : PBHelperClient.convert(excl
              .toArray(new DatanodeInfoProto[excl.size()])), req.getFileId(),
          (favor == null || favor.size() == 0) ? null : favor
-             .toArray(new String[favor.size()]));
+             .toArray(new String[favor.size()]),
+         flags);
      return AddBlockResponseProto.newBuilder()
          .setBlock(PBHelperClient.convertLocatedBlock(result)).build();
    } catch (IOException e) {
@@ -68,6 +68,8 @@ public class DatanodeProtocolServerSideTranslatorPB implements
    DatanodeProtocolPB {

  private final DatanodeProtocol impl;
  private final int maxDataLength;

  private static final ErrorReportResponseProto
      VOID_ERROR_REPORT_RESPONSE_PROTO =
      ErrorReportResponseProto.newBuilder().build();

@@ -81,8 +83,10 @@ public class DatanodeProtocolServerSideTranslatorPB implements
      VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO =
      CommitBlockSynchronizationResponseProto.newBuilder().build();

- public DatanodeProtocolServerSideTranslatorPB(DatanodeProtocol impl) {
+ public DatanodeProtocolServerSideTranslatorPB(DatanodeProtocol impl,
+     int maxDataLength) {
    this.impl = impl;
    this.maxDataLength = maxDataLength;
  }

  @Override

@@ -162,9 +166,10 @@ public class DatanodeProtocolServerSideTranslatorPB implements
        int num = (int)s.getNumberOfBlocks();
        Preconditions.checkState(s.getBlocksCount() == 0,
            "cannot send both blocks list and buffers");
-       blocks = BlockListAsLongs.decodeBuffers(num, s.getBlocksBuffersList());
+       blocks = BlockListAsLongs.decodeBuffers(num, s.getBlocksBuffersList(),
+           maxDataLength);
      } else {
-       blocks = BlockListAsLongs.decodeLongs(s.getBlocksList());
+       blocks = BlockListAsLongs.decodeLongs(s.getBlocksList(), maxDataLength);
      }
      report[index++] = new StorageBlockReport(PBHelperClient.convert(s.getStorage()),
          blocks);
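A sketch of wiring the new constructor argument, assuming the caller reads the limit from the standard ipc.maximum.data.length setting (the CommonConfigurationKeys constants are the ones the BlockListAsLongs change above imports); "impl" is whatever DatanodeProtocol implementation the NameNode already hands in.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;
    import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolServerSideTranslatorPB;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

    class TranslatorWiring {
      static DatanodeProtocolServerSideTranslatorPB create(DatanodeProtocol impl,
          Configuration conf) {
        // Respect the RPC layer's protobuf size limit when decoding reports.
        int maxDataLength = conf.getInt(
            CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH,
            CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH_DEFAULT);
        return new DatanodeProtocolServerSideTranslatorPB(impl, maxDataLength);
      }
    }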
@@ -235,6 +235,15 @@ public class Balancer {
    return v;
  }

  static long getLongBytes(Configuration conf, String key, long defaultValue) {
    final long v = conf.getLongBytes(key, defaultValue);
    LOG.info(key + " = " + v + " (default=" + defaultValue + ")");
    if (v <= 0) {
      throw new HadoopIllegalArgumentException(key + " = " + v + " <= " + 0);
    }
    return v;
  }

  static int getInt(Configuration conf, String key, int defaultValue) {
    final int v = conf.getInt(key, defaultValue);
    LOG.info(key + " = " + v + " (default=" + defaultValue + ")");

@@ -266,10 +275,10 @@ public class Balancer {
        DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
        DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT);

-   final long getBlocksSize = getLong(conf,
+   final long getBlocksSize = getLongBytes(conf,
        DFSConfigKeys.DFS_BALANCER_GETBLOCKS_SIZE_KEY,
        DFSConfigKeys.DFS_BALANCER_GETBLOCKS_SIZE_DEFAULT);
-   final long getBlocksMinBlockSize = getLong(conf,
+   final long getBlocksMinBlockSize = getLongBytes(conf,
        DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY,
        DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_DEFAULT);

@@ -284,10 +293,10 @@ public class Balancer {
    this.sourceNodes = p.getSourceNodes();
    this.runDuringUpgrade = p.getRunDuringUpgrade();

-   this.maxSizeToMove = getLong(conf,
+   this.maxSizeToMove = getLongBytes(conf,
        DFSConfigKeys.DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY,
        DFSConfigKeys.DFS_BALANCER_MAX_SIZE_TO_MOVE_DEFAULT);
-   this.defaultBlockSize = getLong(conf,
+   this.defaultBlockSize = getLongBytes(conf,
        DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
        DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
  }

@@ -525,8 +534,7 @@ public class Balancer {
      final C c = candidates.next();
      if (!c.hasSpaceForScheduling()) {
        candidates.remove();
-     } else if (matcher.match(dispatcher.getCluster(),
-         g.getDatanodeInfo(), c.getDatanodeInfo())) {
+     } else if (matchStorageGroups(c, g, matcher)) {
        return c;
      }
    }

@@ -534,6 +542,13 @@ public class Balancer {
    return null;
  }

  private boolean matchStorageGroups(StorageGroup left, StorageGroup right,
      Matcher matcher) {
    return left.getStorageType() == right.getStorageType()
        && matcher.match(dispatcher.getCluster(),
            left.getDatanodeInfo(), right.getDatanodeInfo());
  }

  /* reset all fields in a balancer preparing for the next iteration */
  void resetData(Configuration conf) {
    this.overUtilized.clear();
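The switch from getLong to getLongBytes above lets the size-valued Balancer settings be written with binary suffixes. A short usage sketch (the key and default constants are the ones referenced in the hunk):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    class BalancerSizeKeys {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // getLongBytes (unlike getLong) accepts size suffixes, so operators
        // can write "10g" instead of 10737418240.
        conf.set(DFSConfigKeys.DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY, "10g");
        long maxSizeToMove = conf.getLongBytes(
            DFSConfigKeys.DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY,
            DFSConfigKeys.DFS_BALANCER_MAX_SIZE_TO_MOVE_DEFAULT);
        System.out.println(maxSizeToMove); // 10737418240
      }
    }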
@@ -48,6 +48,7 @@ import javax.management.ObjectName;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.AddBlockFlag;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSUtilClient;

@@ -109,6 +110,7 @@ import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.LightWeightGSet;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.VersionInfo;

@@ -147,7 +149,7 @@ public class BlockManager implements BlockStatsMXBean {
  private final PendingDataNodeMessages pendingDNMessages =
      new PendingDataNodeMessages();

- private volatile long pendingReplicationBlocksCount = 0L;
+ private volatile long pendingReconstructionBlocksCount = 0L;
  private volatile long corruptReplicaBlocksCount = 0L;
  private volatile long lowRedundancyBlocksCount = 0L;
  private volatile long scheduledReplicationBlocksCount = 0L;

@@ -161,8 +163,8 @@ public class BlockManager implements BlockStatsMXBean {
  private ObjectName mxBeanName;

  /** Used by metrics */
- public long getPendingReplicationBlocksCount() {
-   return pendingReplicationBlocksCount;
+ public long getPendingReconstructionBlocksCount() {
+   return pendingReconstructionBlocksCount;
  }
  /** Used by metrics */
  public long getUnderReplicatedBlocksCount() {

@@ -186,7 +188,7 @@ public class BlockManager implements BlockStatsMXBean {
  }
  /** Used by metrics */
  public long getExcessBlocksCount() {
-   return excessReplicas.size();
+   return excessRedundancyMap.size();
  }
  /** Used by metrics */
  public long getPostponedMisreplicatedBlocksCount() {

@@ -246,7 +248,8 @@ public class BlockManager implements BlockStatsMXBean {
   * Maps a StorageID to the set of blocks that are "extra" for this
   * DataNode. We'll eventually remove these extras.
   */
- private final ExcessReplicaMap excessReplicas = new ExcessReplicaMap();
+ private final ExcessRedundancyMap excessRedundancyMap =
+     new ExcessRedundancyMap();

  /**
   * Store set of Blocks that need to be replicated 1 or more times.

@@ -256,7 +259,7 @@ public class BlockManager implements BlockStatsMXBean {
      new LowRedundancyBlocks();

  @VisibleForTesting
- final PendingReplicationBlocks pendingReplications;
+ final PendingReconstructionBlocks pendingReconstruction;

  /** The maximum number of replicas allowed for a block */
  public final short maxReplication;

@@ -352,9 +355,10 @@ public class BlockManager implements BlockStatsMXBean {
        datanodeManager.getNetworkTopology(),
        datanodeManager.getHost2DatanodeMap());
    storagePolicySuite = BlockStoragePolicySuite.createDefaultSuite();
-   pendingReplications = new PendingReplicationBlocks(conf.getInt(
-       DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
-       DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT) * 1000L);
+   pendingReconstruction = new PendingReconstructionBlocks(conf.getInt(
+       DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY,
+       DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_DEFAULT)
+       * 1000L);

    blockTokenSecretManager = createBlockTokenSecretManager(conf);

@@ -542,7 +546,7 @@ public class BlockManager implements BlockStatsMXBean {
  }

  public void activate(Configuration conf, long blockTotal) {
-   pendingReplications.start();
+   pendingReconstruction.start();
    datanodeManager.activate(conf);
    this.replicationThread.setName("ReplicationMonitor");
    this.replicationThread.start();

@@ -565,7 +569,7 @@ public class BlockManager implements BlockStatsMXBean {
    } catch (InterruptedException ie) {
    }
    datanodeManager.close();
-   pendingReplications.stop();
+   pendingReconstruction.stop();
    blocksMap.close();
  }

@@ -604,12 +608,54 @@ public class BlockManager implements BlockStatsMXBean {
      dumpBlockMeta(block, out);
    }

-   // Dump blocks from pendingReplication
-   pendingReplications.metaSave(out);
+   // Dump blocks from pendingReconstruction
+   pendingReconstruction.metaSave(out);

    // Dump blocks that are waiting to be deleted
    invalidateBlocks.dump(out);
|
||||
|
||||
//Dump corrupt blocks and their storageIDs
|
||||
Set<Block> corruptBlocks = corruptReplicas.getCorruptBlocks();
|
||||
out.println("Corrupt Blocks:");
|
||||
for(Block block : corruptBlocks) {
|
||||
Collection<DatanodeDescriptor> corruptNodes =
|
||||
corruptReplicas.getNodes(block);
|
||||
if (corruptNodes == null) {
|
||||
LOG.warn(block.getBlockId() +
|
||||
" is corrupt but has no associated node.");
|
||||
continue;
|
||||
}
|
||||
int numNodesToFind = corruptNodes.size();
|
||||
for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
|
||||
DatanodeDescriptor node = storage.getDatanodeDescriptor();
|
||||
if (corruptNodes.contains(node)) {
|
||||
String storageId = storage.getStorageID();
|
||||
DatanodeStorageInfo storageInfo = node.getStorageInfo(storageId);
|
||||
State state = (storageInfo == null) ? null : storageInfo.getState();
|
||||
out.println("Block=" + block.getBlockId() + "\tNode=" + node.getName()
|
||||
+ "\tStorageID=" + storageId + "\tStorageState=" + state
|
||||
+ "\tTotalReplicas=" +
|
||||
blocksMap.numNodes(block)
|
||||
+ "\tReason=" + corruptReplicas.getCorruptReason(block, node));
|
||||
numNodesToFind--;
|
||||
if (numNodesToFind == 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (numNodesToFind > 0) {
|
||||
String[] corruptNodesList = new String[corruptNodes.size()];
|
||||
int i = 0;
|
||||
for (DatanodeDescriptor d : corruptNodes) {
|
||||
corruptNodesList[i] = d.getHostName();
|
||||
i++;
|
||||
}
|
||||
out.println(block.getBlockId() + " corrupt on " +
|
||||
StringUtils.join(",", corruptNodesList) + " but not all nodes are" +
|
||||
"found in its block locations");
|
||||
}
|
||||
}
|
||||
|
||||
// Dump all datanodes
|
||||
getDatanodeManager().datanodeDump(out);
|
||||
}
|
||||
|
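
With the corrupt-block dump added above, metaSave prints a "Corrupt Blocks:" header followed by one tab-separated line per corrupt replica, assembled exactly from the println shown in this hunk. A hypothetical example of such a line (every value below is invented for illustration, and the fields are tab-separated):

    Corrupt Blocks:
    Block=1073741825  Node=192.168.1.10:9866  StorageID=DS-f1e2d3c4  StorageState=NORMAL  TotalReplicas=3  Reason=GENSTAMP_MISMATCH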
@@ -765,7 +811,7 @@ public class BlockManager implements BlockStatsMXBean {

  /**
   * If IBR is not sent from expected locations yet, add the datanodes to
-  * pendingReplications in order to keep ReplicationMonitor from scheduling
+  * pendingReconstruction in order to keep ReplicationMonitor from scheduling
   * the block.
   */
  public void addExpectedReplicasToPending(BlockInfo blk) {

@@ -780,7 +826,7 @@ public class BlockManager implements BlockStatsMXBean {
        pendingNodes.add(dnd);
      }
    }
-   pendingReplications.increment(blk,
+   pendingReconstruction.increment(blk,
        pendingNodes.toArray(new DatanodeDescriptor[pendingNodes.size()]));
    }
  }

@@ -866,7 +912,7 @@ public class BlockManager implements BlockStatsMXBean {
    neededReconstruction.remove(lastBlock, replicas.liveReplicas(),
        replicas.readOnlyReplicas(),
        replicas.decommissionedAndDecommissioning(), getReplication(lastBlock));
-   pendingReplications.remove(lastBlock);
+   pendingReconstruction.remove(lastBlock);

    // remove this block from the list of pending blocks to be deleted.
    for (DatanodeStorageInfo storage : targets) {

@@ -992,9 +1038,9 @@ public class BlockManager implements BlockStatsMXBean {
    }

    final int numNodes = blocksMap.numNodes(blk);
-   final boolean isCorrupt = numCorruptNodes != 0 &&
-       numCorruptNodes == numNodes;
-   final int numMachines = isCorrupt ? numNodes: numNodes - numCorruptNodes;
+   final boolean isCorrupt = numCorruptReplicas != 0 &&
+       numCorruptReplicas == numNodes;
+   final int numMachines = isCorrupt ? numNodes: numNodes - numCorruptReplicas;
    final DatanodeStorageInfo[] machines = new DatanodeStorageInfo[numMachines];
    final byte[] blockIndices = blk.isStriped() ? new byte[numMachines] : null;
    int j = 0, i = 0;

@@ -1320,11 +1366,22 @@ public class BlockManager implements BlockStatsMXBean {
          + " as corrupt because datanode " + dn + " (" + dn.getDatanodeUuid()
          + ") does not exist");
    }

+   DatanodeStorageInfo storage = null;
+   if (storageID != null) {
+     storage = node.getStorageInfo(storageID);
+   }
+   if (storage == null) {
+     storage = storedBlock.findStorageInfo(node);
+   }
+
+   if (storage == null) {
+     blockLog.debug("BLOCK* findAndMarkBlockAsCorrupt: {} not found on {}",
+         blk, dn);
+     return;
+   }
    markBlockAsCorrupt(new BlockToMarkCorrupt(reportedBlock, storedBlock,
        blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED),
-       storageID == null ? null : node.getStorageInfo(storageID),
-       node);
+       storage, node);
  }

  /**

@@ -1435,7 +1492,7 @@ public class BlockManager implements BlockStatsMXBean {

  void updateState() {
-   pendingReplicationBlocksCount = pendingReplications.size();
+   pendingReconstructionBlocksCount = pendingReconstruction.size();
    lowRedundancyBlocksCount = neededReconstruction.size();
    corruptReplicaBlocksCount = corruptReplicas.size();
  }

@@ -1578,8 +1635,8 @@ public class BlockManager implements BlockStatsMXBean {
    }

    blockLog.debug(
-       "BLOCK* neededReconstruction = {} pendingReplications = {}",
-       neededReconstruction.size(), pendingReplications.size());
+       "BLOCK* neededReconstruction = {} pendingReconstruction = {}",
+       neededReconstruction.size(), pendingReconstruction.size());
  }

  return scheduledWork;

@@ -1622,7 +1679,7 @@ public class BlockManager implements BlockStatsMXBean {
    // not included in the numReplicas.liveReplicas() count
    assert liveReplicaNodes.size() >= numReplicas.liveReplicas();

-   int pendingNum = pendingReplications.getNumReplicas(block);
+   int pendingNum = pendingReconstruction.getNumReplicas(block);
    if (hasEnoughEffectiveReplicas(block, numReplicas, pendingNum,
        requiredReplication)) {
      neededReconstruction.remove(block, priority);

@@ -1690,7 +1747,7 @@ public class BlockManager implements BlockStatsMXBean {
    // do not schedule more if enough replicas is already pending
    final short requiredReplication = getExpectedReplicaNum(block);
    NumberReplicas numReplicas = countNodes(block);
-   final int pendingNum = pendingReplications.getNumReplicas(block);
+   final int pendingNum = pendingReconstruction.getNumReplicas(block);
    if (hasEnoughEffectiveReplicas(block, numReplicas, pendingNum,
        requiredReplication)) {
      neededReconstruction.remove(block, priority);

@@ -1718,8 +1775,8 @@ public class BlockManager implements BlockStatsMXBean {

    // Move the block-replication into a "pending" state.
    // The reason we use 'pending' is so we can retry
-   // replications that fail after an appropriate amount of time.
-   pendingReplications.increment(block,
+   // reconstructions that fail after an appropriate amount of time.
+   pendingReconstruction.increment(block,
        DatanodeStorageInfo.toDatanodeDescriptors(targets));
    blockLog.debug("BLOCK* block {} is moved from neededReplications to "
        + "pendingReplications", block);
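
The "pending" state referred to in the comment above is bookkeeping of scheduled reconstructions with a timeout: work is recorded when it is handed out, and anything not confirmed in time is reaped by processPendingReconstructions (further down in this diff) and re-queued. A generic sketch of that pattern, with invented names rather than the real PendingReconstructionBlocks internals:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Sketch of a pending-work map with timeouts: items are recorded when
    // scheduled, and anything not confirmed before the timeout is handed back
    // so it can be re-queued (mirroring increment()/getTimedOutBlocks()).
    class PendingWithTimeout<T> {
      private final Map<T, Long> scheduledAt = new HashMap<>();
      private final long timeoutMs;

      PendingWithTimeout(long timeoutMs) { this.timeoutMs = timeoutMs; }

      synchronized void increment(T item) {
        scheduledAt.putIfAbsent(item, System.currentTimeMillis());
      }

      synchronized void remove(T item) {
        scheduledAt.remove(item);  // confirmed; no longer pending
      }

      synchronized List<T> getTimedOutItems() {
        long now = System.currentTimeMillis();
        List<T> timedOut = new ArrayList<>();
        scheduledAt.entrySet().removeIf(e -> {
          boolean expired = now - e.getValue() > timeoutMs;
          if (expired) { timedOut.add(e.getKey()); }
          return expired;
        });
        return timedOut;
      }
    }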
@@ -1737,7 +1794,7 @@ public class BlockManager implements BlockStatsMXBean {
      DatanodeDescriptor clientnode, Set<Node> excludes, long blocksize) {
    return placementPolicies.getPolicy(false).chooseTarget(src, 1, clientnode,
        Collections.<DatanodeStorageInfo>emptyList(), false, excludes,
-       blocksize, storagePolicySuite.getDefaultPolicy());
+       blocksize, storagePolicySuite.getDefaultPolicy(), null);
  }

  /** Choose target for getting additional datanodes for an existing pipeline. */

@@ -1752,7 +1809,7 @@ public class BlockManager implements BlockStatsMXBean {
    final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(storagePolicyID);
    final BlockPlacementPolicy blockplacement = placementPolicies.getPolicy(isStriped);
    return blockplacement.chooseTarget(src, numAdditionalNodes, clientnode,
-       chosen, true, excludes, blocksize, storagePolicy);
+       chosen, true, excludes, blocksize, storagePolicy, null);
  }

  /**

@@ -1769,14 +1826,15 @@ public class BlockManager implements BlockStatsMXBean {
      final long blocksize,
      final List<String> favoredNodes,
      final byte storagePolicyID,
-     final boolean isStriped) throws IOException {
+     final boolean isStriped,
+     final EnumSet<AddBlockFlag> flags) throws IOException {
    List<DatanodeDescriptor> favoredDatanodeDescriptors =
        getDatanodeDescriptors(favoredNodes);
    final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(storagePolicyID);
    final BlockPlacementPolicy blockplacement = placementPolicies.getPolicy(isStriped);
    final DatanodeStorageInfo[] targets = blockplacement.chooseTarget(src,
        numOfReplicas, client, excludedNodes, blocksize,
-       favoredDatanodeDescriptors, storagePolicy);
+       favoredDatanodeDescriptors, storagePolicy, flags);
    if (targets.length < minReplication) {
      throw new IOException("File " + src + " could only be replicated to "
          + targets.length + " nodes instead of minReplication (="

@@ -1907,11 +1965,11 @@ public class BlockManager implements BlockStatsMXBean {
  }

  /**
-  * If there were any replication requests that timed out, reap them
-  * and put them back into the neededReplication queue
+  * If there were any reconstruction requests that timed out, reap them
+  * and put them back into the neededReconstruction queue
   */
- private void processPendingReplications() {
-   BlockInfo[] timedOutItems = pendingReplications.getTimedOutBlocks();
+ private void processPendingReconstructions() {
+   BlockInfo[] timedOutItems = pendingReconstruction.getTimedOutBlocks();
    if (timedOutItems != null) {
      namesystem.writeLock();
      try {

@@ -2890,7 +2948,7 @@ public class BlockManager implements BlockStatsMXBean {
    // Now check for completion of blocks and safe block count
    NumberReplicas num = countNodes(storedBlock);
    int numLiveReplicas = num.liveReplicas();
-   int pendingNum = pendingReplications.getNumReplicas(storedBlock);
+   int pendingNum = pendingReconstruction.getNumReplicas(storedBlock);
    int numCurrentReplica = numLiveReplicas + pendingNum;

    if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED &&

@@ -3203,8 +3261,8 @@ public class BlockManager implements BlockStatsMXBean {

  /**
   * Find how many of the containing nodes are "extra", if any.
-  * If there are any extras, call chooseExcessReplicates() to
-  * mark them in the excessReplicateMap.
+  * If there are any extras, call chooseExcessRedundancies() to
+  * mark them in the excessRedundancyMap.
   */
  private void processExtraRedundancyBlock(final BlockInfo block,
      final short replication, final DatanodeDescriptor addedNode,

@@ -3237,11 +3295,11 @@ public class BlockManager implements BlockStatsMXBean {
        }
      }
    }
-   chooseExcessReplicates(nonExcess, block, replication, addedNode,
+   chooseExcessRedundancies(nonExcess, block, replication, addedNode,
        delNodeHint);
  }

- private void chooseExcessReplicates(
+ private void chooseExcessRedundancies(
      final Collection<DatanodeStorageInfo> nonExcess,
      BlockInfo storedBlock, short replication,
      DatanodeDescriptor addedNode,

@@ -3250,19 +3308,19 @@ public class BlockManager implements BlockStatsMXBean {
    // first form a rack to datanodes map and
    BlockCollection bc = getBlockCollection(storedBlock);
    if (storedBlock.isStriped()) {
-     chooseExcessReplicasStriped(bc, nonExcess, storedBlock, delNodeHint);
+     chooseExcessRedundancyStriped(bc, nonExcess, storedBlock, delNodeHint);
    } else {
      final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(
          bc.getStoragePolicyID());
      final List<StorageType> excessTypes = storagePolicy.chooseExcess(
          replication, DatanodeStorageInfo.toStorageTypes(nonExcess));
-     chooseExcessReplicasContiguous(nonExcess, storedBlock, replication,
+     chooseExcessRedundancyContiguous(nonExcess, storedBlock, replication,
          addedNode, delNodeHint, excessTypes);
    }
  }

  /**
-  * We want "replication" replicates for the block, but we now have too many.
+  * We want sufficient redundancy for the block, but we now have too many.
   * In this method, copy enough nodes from 'srcNodes' into 'dstNodes' such that:
   *
   * srcNodes.size() - dstNodes.size() == replication

@@ -3275,7 +3333,7 @@ public class BlockManager implements BlockStatsMXBean {
   * If no such a node is available,
   * then pick a node with least free space
   */
- private void chooseExcessReplicasContiguous(
+ private void chooseExcessRedundancyContiguous(
      final Collection<DatanodeStorageInfo> nonExcess, BlockInfo storedBlock,
      short replication, DatanodeDescriptor addedNode,
      DatanodeDescriptor delNodeHint, List<StorageType> excessTypes) {

@@ -3284,7 +3342,7 @@ public class BlockManager implements BlockStatsMXBean {
        .chooseReplicasToDelete(nonExcess, nonExcess, replication, excessTypes,
            addedNode, delNodeHint);
    for (DatanodeStorageInfo choosenReplica : replicasToDelete) {
-     processChosenExcessReplica(nonExcess, choosenReplica, storedBlock);
+     processChosenExcessRedundancy(nonExcess, choosenReplica, storedBlock);
    }
  }

@@ -3297,7 +3355,7 @@ public class BlockManager implements BlockStatsMXBean {
   * The block placement policy will make sure that the left internal blocks are
   * spread across racks and also try hard to pick one with least free space.
   */
- private void chooseExcessReplicasStriped(BlockCollection bc,
+ private void chooseExcessRedundancyStriped(BlockCollection bc,
      final Collection<DatanodeStorageInfo> nonExcess,
      BlockInfo storedBlock,
      DatanodeDescriptor delNodeHint) {

@@ -3325,7 +3383,7 @@ public class BlockManager implements BlockStatsMXBean {
    if (delStorageHint != null) {
      Integer index = storage2index.get(delStorageHint);
      if (index != null && duplicated.get(index)) {
-       processChosenExcessReplica(nonExcess, delStorageHint, storedBlock);
+       processChosenExcessRedundancy(nonExcess, delStorageHint, storedBlock);
      }
    }

@@ -3357,7 +3415,7 @@ public class BlockManager implements BlockStatsMXBean {
        .chooseReplicasToDelete(nonExcess, candidates, (short) 1,
            excessTypes, null, null);
    for (DatanodeStorageInfo chosen : replicasToDelete) {
-     processChosenExcessReplica(nonExcess, chosen, storedBlock);
+     processChosenExcessRedundancy(nonExcess, chosen, storedBlock);
      candidates.remove(chosen);
    }
  }

@@ -3365,11 +3423,11 @@ public class BlockManager implements BlockStatsMXBean {
    }
  }

- private void processChosenExcessReplica(
+ private void processChosenExcessRedundancy(
      final Collection<DatanodeStorageInfo> nonExcess,
      final DatanodeStorageInfo chosen, BlockInfo storedBlock) {
    nonExcess.remove(chosen);
-   excessReplicas.add(chosen.getDatanodeDescriptor(), storedBlock);
+   excessRedundancyMap.add(chosen.getDatanodeDescriptor(), storedBlock);
    //
    // The 'excessblocks' tracks blocks until we get confirmation
    // that the datanode has deleted them; the only way we remove them

@@ -3381,7 +3439,7 @@ public class BlockManager implements BlockStatsMXBean {
    //
    final Block blockToInvalidate = getBlockOnStorage(storedBlock, chosen);
    addToInvalidates(blockToInvalidate, chosen.getDatanodeDescriptor());
-   blockLog.debug("BLOCK* chooseExcessReplicates: "
+   blockLog.debug("BLOCK* chooseExcessRedundancies: "
        + "({}, {}) is added to invalidated blocks set", chosen, storedBlock);
  }

@@ -3433,7 +3491,7 @@ public class BlockManager implements BlockStatsMXBean {
      updateNeededReconstructions(storedBlock, -1, 0);
    }

-   excessReplicas.remove(node, storedBlock);
+   excessRedundancyMap.remove(node, storedBlock);
    corruptReplicas.removeFromCorruptReplicasMap(storedBlock, node);
  }
  }

@@ -3504,7 +3562,7 @@ public class BlockManager implements BlockStatsMXBean {
    //
    BlockInfo storedBlock = getStoredBlock(block);
    if (storedBlock != null) {
-     pendingReplications.decrement(storedBlock, node);
+     pendingReconstruction.decrement(storedBlock, node);
    }
    processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED,
        delHintNode);

@@ -3749,11 +3807,11 @@ public class BlockManager implements BlockStatsMXBean {

  @VisibleForTesting
  int getExcessSize4Testing(String dnUuid) {
-   return excessReplicas.getSize4Testing(dnUuid);
+   return excessRedundancyMap.getSize4Testing(dnUuid);
  }

  public boolean isExcess(DatanodeDescriptor dn, BlockInfo blk) {
-   return excessReplicas.contains(dn, blk);
+   return excessRedundancyMap.contains(dn, blk);
  }

  /**

@@ -3813,7 +3871,7 @@ public class BlockManager implements BlockStatsMXBean {
  }

  updateState();
- if (pendingReplicationBlocksCount == 0 &&
+ if (pendingReconstructionBlocksCount == 0 &&
      lowRedundancyBlocksCount == 0) {
    LOG.info("Node {} is dead and there are no low redundancy" +
        " blocks or blocks pending reconstruction. Safe to decommission.",

@@ -3860,8 +3918,8 @@ public class BlockManager implements BlockStatsMXBean {
    block.setNumBytes(BlockCommand.NO_ACK);
    addToInvalidates(block);
    removeBlockFromMap(block);
-   // Remove the block from pendingReplications and neededReconstruction
-   pendingReplications.remove(block);
+   // Remove the block from pendingReconstruction and neededReconstruction
+   pendingReconstruction.remove(block);
    neededReconstruction.remove(block, LowRedundancyBlocks.LEVEL);
    if (postponedMisreplicatedBlocks.remove(block)) {
      postponedMisreplicatedBlocksCount.decrementAndGet();

@@ -3919,7 +3977,7 @@ public class BlockManager implements BlockStatsMXBean {
    for (BlockInfo block : bc.getBlocks()) {
      short expected = getExpectedReplicaNum(block);
      final NumberReplicas n = countNodes(block);
-     final int pending = pendingReplications.getNumReplicas(block);
+     final int pending = pendingReconstruction.getNumReplicas(block);
      if (!hasEnoughEffectiveReplicas(block, n, pending, expected)) {
        neededReconstruction.add(block, n.liveReplicas() + pending,
            n.readOnlyReplicas(),

@@ -4059,7 +4117,7 @@ public class BlockManager implements BlockStatsMXBean {

  public void removeBlockFromMap(BlockInfo block) {
    for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
-     excessReplicas.remove(info.getDatanodeDescriptor(), block);
+     excessRedundancyMap.remove(info.getDatanodeDescriptor(), block);
    }

    blocksMap.removeBlock(block);

@@ -4110,7 +4168,7 @@ public class BlockManager implements BlockStatsMXBean {
    // Process replication work only when active NN is out of safe mode.
    if (isPopulatingReplQueues()) {
      computeDatanodeWork();
-     processPendingReplications();
+     processPendingReconstructions();
      rescanPostponedMisreplicatedBlocks();
    }
    Thread.sleep(replicationRecheckInterval);

@@ -4258,8 +4316,8 @@ public class BlockManager implements BlockStatsMXBean {
   */
  public void clearQueues() {
    neededReconstruction.clear();
-   pendingReplications.clear();
-   excessReplicas.clear();
+   pendingReconstruction.clear();
+   excessRedundancyMap.clear();
    invalidateBlocks.clear();
    datanodeManager.clearPendingQueues();
    postponedMisreplicatedBlocks.clear();

@@ -623,7 +623,9 @@ class BlockManagerSafeMode {
   * the extension time has passed.
   */
  private boolean canLeave() {
-   if (timeToLeaveExtension() > 0) {
+   if (namesystem.inTransitionToActive()) {
+     return false;
+   } else if (timeToLeaveExtension() > 0) {
      reportStatus("STATE* Safe mode ON, in safe mode extension.", false);
      return false;
    } else if (!areThresholdsMet()) {

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;

import java.util.ArrayList;
import java.util.Collection;
+import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

@@ -26,6 +27,7 @@ import java.util.Set;
import com.google.common.base.Preconditions;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.AddBlockFlag;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

@@ -64,6 +66,7 @@ public abstract class BlockPlacementPolicy {
   * @param returnChosenNodes decide if the chosenNodes are returned.
   * @param excludedNodes datanodes that should not be considered as targets.
   * @param blocksize size of the data to be written.
+  * @param flags Block placement flags.
   * @return array of DatanodeDescriptor instances chosen as target
   * and sorted as a pipeline.
   */

@@ -74,7 +77,8 @@ public abstract class BlockPlacementPolicy {
      boolean returnChosenNodes,
      Set<Node> excludedNodes,
      long blocksize,
-     BlockStoragePolicy storagePolicy);
+     BlockStoragePolicy storagePolicy,
+     EnumSet<AddBlockFlag> flags);

  /**
   * @param favoredNodes datanodes that should be favored as targets. This

@@ -86,14 +90,15 @@ public abstract class BlockPlacementPolicy {
      Set<Node> excludedNodes,
      long blocksize,
      List<DatanodeDescriptor> favoredNodes,
-     BlockStoragePolicy storagePolicy) {
+     BlockStoragePolicy storagePolicy,
+     EnumSet<AddBlockFlag> flags) {
    // This class does not provide the functionality of placing
    // a block in favored datanodes. The implementations of this class
    // are expected to provide this functionality

    return chooseTarget(src, numOfReplicas, writer,
        new ArrayList<DatanodeStorageInfo>(numOfReplicas), false,
-       excludedNodes, blocksize, storagePolicy);
+       excludedNodes, blocksize, storagePolicy, flags);
  }

  /**
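
The new EnumSet<AddBlockFlag> parameter threads per-request placement hints from the client down to the placement policy. A hedged sketch of how a client could end up supplying such a hint through CreateFlag.NO_LOCAL_WRITE when creating a file; the path, block size and replication below are illustrative, and the mapping of the create flag onto AddBlockFlag.NO_LOCAL_WRITE is inferred from the NO_LOCAL_WRITE handling shown later in this diff:

    import java.util.EnumSet;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class NoLocalWriteExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path file = new Path("/tmp/no-local-write-example");  // illustrative path

        // CREATE plus NO_LOCAL_WRITE asks the NameNode to avoid placing the
        // first replica on the writer's own DataNode, if enough other nodes exist.
        EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE,
            CreateFlag.OVERWRITE, CreateFlag.NO_LOCAL_WRITE);
        try (FSDataOutputStream out = fs.create(file, FsPermission.getFileDefault(),
            flags, 4096, (short) 3, 128 * 1024 * 1024, null)) {
          out.writeUTF("hello");
        }
      }
    }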
@@ -23,6 +23,7 @@ import java.util.*;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.AddBlockFlag;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.DFSConfigKeys;

@@ -111,9 +112,10 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
      boolean returnChosenNodes,
      Set<Node> excludedNodes,
      long blocksize,
-     final BlockStoragePolicy storagePolicy) {
+     final BlockStoragePolicy storagePolicy,
+     EnumSet<AddBlockFlag> flags) {
    return chooseTarget(numOfReplicas, writer, chosenNodes, returnChosenNodes,
-       excludedNodes, blocksize, storagePolicy);
+       excludedNodes, blocksize, storagePolicy, flags);
  }

  @Override

@@ -123,13 +125,14 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
      Set<Node> excludedNodes,
      long blocksize,
      List<DatanodeDescriptor> favoredNodes,
-     BlockStoragePolicy storagePolicy) {
+     BlockStoragePolicy storagePolicy,
+     EnumSet<AddBlockFlag> flags) {
    try {
      if (favoredNodes == null || favoredNodes.size() == 0) {
        // Favored nodes not specified, fall back to regular block placement.
        return chooseTarget(src, numOfReplicas, writer,
            new ArrayList<DatanodeStorageInfo>(numOfReplicas), false,
-           excludedNodes, blocksize, storagePolicy);
+           excludedNodes, blocksize, storagePolicy, flags);
      }

      Set<Node> favoriteAndExcludedNodes = excludedNodes == null ?

@@ -164,7 +167,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
        DatanodeStorageInfo[] remainingTargets =
            chooseTarget(src, numOfReplicas, writer,
                new ArrayList<DatanodeStorageInfo>(numOfReplicas), false,
-               favoriteAndExcludedNodes, blocksize, storagePolicy);
+               favoriteAndExcludedNodes, blocksize, storagePolicy, flags);
        for (int i = 0; i < remainingTargets.length; i++) {
          results.add(remainingTargets[i]);
        }

@@ -179,7 +182,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
      // Fall back to regular block placement disregarding favored nodes hint
      return chooseTarget(src, numOfReplicas, writer,
          new ArrayList<DatanodeStorageInfo>(numOfReplicas), false,
-         excludedNodes, blocksize, storagePolicy);
+         excludedNodes, blocksize, storagePolicy, flags);
    }
  }

@@ -213,7 +216,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
      boolean returnChosenNodes,
      Set<Node> excludedNodes,
      long blocksize,
-     final BlockStoragePolicy storagePolicy) {
+     final BlockStoragePolicy storagePolicy,
+     EnumSet<AddBlockFlag> addBlockFlags) {
    if (numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) {
      return DatanodeStorageInfo.EMPTY_ARRAY;
    }

@@ -226,17 +230,42 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
    numOfReplicas = result[0];
    int maxNodesPerRack = result[1];

-   final List<DatanodeStorageInfo> results = new ArrayList<>(chosenStorage);
    for (DatanodeStorageInfo storage : chosenStorage) {
      // add localMachine and related nodes to excludedNodes
      addToExcludedNodes(storage.getDatanodeDescriptor(), excludedNodes);
    }

+   List<DatanodeStorageInfo> results = null;
+   Node localNode = null;
    boolean avoidStaleNodes = (stats != null
        && stats.isAvoidingStaleDataNodesForWrite());
-   final Node localNode = chooseTarget(numOfReplicas, writer, excludedNodes,
-       blocksize, maxNodesPerRack, results, avoidStaleNodes, storagePolicy,
-       EnumSet.noneOf(StorageType.class), results.isEmpty());
+   boolean avoidLocalNode = (addBlockFlags != null
+       && addBlockFlags.contains(AddBlockFlag.NO_LOCAL_WRITE)
+       && writer != null
+       && !excludedNodes.contains(writer));
+   // Attempt to exclude local node if the client suggests so. If not enough
+   // nodes can be obtained, it falls back to the default block placement
+   // policy.
+   if (avoidLocalNode) {
+     results = new ArrayList<>(chosenStorage);
+     Set<Node> excludedNodeCopy = new HashSet<>(excludedNodes);
+     excludedNodeCopy.add(writer);
+     localNode = chooseTarget(numOfReplicas, writer,
+         excludedNodeCopy, blocksize, maxNodesPerRack, results,
+         avoidStaleNodes, storagePolicy,
+         EnumSet.noneOf(StorageType.class), results.isEmpty());
+     if (results.size() < numOfReplicas) {
+       // not enough nodes; discard results and fall back
+       results = null;
+     }
+   }
+   if (results == null) {
+     results = new ArrayList<>(chosenStorage);
+     localNode = chooseTarget(numOfReplicas, writer, excludedNodes,
+         blocksize, maxNodesPerRack, results, avoidStaleNodes,
+         storagePolicy, EnumSet.noneOf(StorageType.class), results.isEmpty());
+   }

    if (!returnChosenNodes) {
      results.removeAll(chosenStorage);
    }
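
The comment in the hunk above describes a try-then-fall-back strategy: first attempt placement with the writer excluded, and if that cannot satisfy the requested number of replicas, redo placement with only the original exclusions. A minimal generic sketch of that pattern; the chooser function is a stand-in, not the actual chooseTarget overload:

    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;
    import java.util.function.BiFunction;

    class RestrictedThenFallback {
      // Try choosing 'wanted' targets while also excluding 'writer'; if the
      // restricted attempt comes up short, retry with the original exclusions.
      static <N> List<N> choose(int wanted, N writer, Set<N> excluded,
          BiFunction<Integer, Set<N>, List<N>> chooser) {
        Set<N> restricted = new HashSet<>(excluded);
        restricted.add(writer);
        List<N> results = chooser.apply(wanted, restricted);
        if (results.size() < wanted) {
          results = chooser.apply(wanted, excluded);  // fall back
        }
        return results;
      }
    }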

@@ -20,8 +20,10 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
+import java.util.Set;
import java.util.TreeMap;

import org.apache.hadoop.classification.InterfaceAudience;

@@ -231,6 +233,16 @@ public class CorruptReplicasMap{
    return ret;
  }

+ /**
+  * Get the set of corrupt blocks in corruptReplicasMap.
+  * @return Set of Block objects
+  */
+ Set<Block> getCorruptBlocks() {
+   Set<Block> corruptBlocks = new HashSet<Block>();
+   corruptBlocks.addAll(corruptReplicasMap.keySet());
+   return corruptBlocks;
+ }
+
  /**
   * return the reason about corrupted replica for a given block
   * on a given dn

@@ -541,7 +541,7 @@ public class DecommissionManager {
      // pending
      if (blockManager.isNeededReconstruction(block, liveReplicas)) {
        if (!blockManager.neededReconstruction.contains(block) &&
-           blockManager.pendingReplications.getNumReplicas(block) == 0 &&
+           blockManager.pendingReconstruction.getNumReplicas(block) == 0 &&
            blockManager.isPopulatingReplQueues()) {
          // Process these blocks only when active NN is out of safe mode.
          blockManager.neededReconstruction.add(block,
Some files were not shown because too many files have changed in this diff.