Compare commits


1 Commit

Author SHA1 Message Date
dependabot[bot] 7456e359a7
Bump snakeyaml from 1.33 to 2.0 in /hadoop-project
Bumps [snakeyaml](https://bitbucket.org/snakeyaml/snakeyaml) from 1.33 to 2.0.
- [Commits](https://bitbucket.org/snakeyaml/snakeyaml/branches/compare/snakeyaml-2.0..snakeyaml-1.33)

---
updated-dependencies:
- dependency-name: org.yaml:snakeyaml
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-03-08 15:31:40 +00:00
764 changed files with 8126 additions and 136069 deletions
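SnakeYAML 2.0 is a breaking upgrade, not just a version bump: the default loader no longer instantiates arbitrary application classes (the hardening for CVE-2022-1471), and SafeConstructor now requires explicit LoaderOptions. A minimal, illustrative sketch of loading a plain key/value document under the 2.x API — the file name is a placeholder and this code is not part of the patch:

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Map;

import org.yaml.snakeyaml.LoaderOptions;
import org.yaml.snakeyaml.Yaml;
import org.yaml.snakeyaml.constructor.SafeConstructor;

public class SafeYamlLoad {
  public static void main(String[] args) throws Exception {
    // SnakeYAML 2.x: SafeConstructor takes explicit LoaderOptions.
    Yaml yaml = new Yaml(new SafeConstructor(new LoaderOptions()));
    try (InputStream in = Files.newInputStream(Paths.get("config.yaml"))) {
      // Only standard YAML types (maps, lists, scalars) are constructed;
      // tags naming arbitrary Java classes are rejected.
      Map<String, Object> data = yaml.load(in);
      System.out.println(data);
    }
  }
}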

View File

@ -240,12 +240,12 @@ com.google.guava:guava:20.0
com.google.guava:guava:27.0-jre
com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
com.microsoft.azure:azure-storage:7.0.0
com.nimbusds:nimbus-jose-jwt:9.31
com.nimbusds:nimbus-jose-jwt:9.8.1
com.squareup.okhttp3:okhttp:4.10.0
com.squareup.okio:okio:3.2.0
com.zaxxer:HikariCP:4.0.3
commons-beanutils:commons-beanutils:1.9.4
commons-cli:commons-cli:1.5.0
commons-cli:commons-cli:1.2
commons-codec:commons-codec:1.11
commons-collections:commons-collections:3.2.2
commons-daemon:commons-daemon:1.0.13
@ -289,8 +289,12 @@ io.netty:netty-resolver-dns-classes-macos:4.1.77.Final
io.netty:netty-transport-native-epoll:4.1.77.Final
io.netty:netty-transport-native-kqueue:4.1.77.Final
io.netty:netty-resolver-dns-native-macos:4.1.77.Final
io.opencensus:opencensus-api:0.12.3
io.opencensus:opencensus-contrib-grpc-metrics:0.12.3
io.opencensus:opencensus-api:0.24.0
io.opencensus:opencensus-contrib-grpc-metrics:0.24.0
io.opentracing:opentracing-api:0.33.0
io.opentracing:opentracing-noop:0.33.0
io.opentracing:opentracing-util:0.33.0
io.perfmark:perfmark-api:0.19.0
io.reactivex:rxjava:1.3.8
io.reactivex:rxjava-string:1.1.1
io.reactivex:rxnetty:0.4.20
@ -299,6 +303,7 @@ javax.inject:javax.inject:1
log4j:log4j:1.2.17
net.java.dev.jna:jna:5.2.0
net.minidev:accessors-smart:1.2
net.minidev:json-smart:2.4.7
org.apache.avro:avro:1.9.2
org.apache.commons:commons-collections4:4.2
org.apache.commons:commons-compress:1.21
@ -340,21 +345,21 @@ org.apache.kerby:token-provider:2.0.3
org.apache.solr:solr-solrj:8.8.2
org.apache.yetus:audience-annotations:0.5.0
org.apache.zookeeper:zookeeper:3.6.3
org.codehaus.jettison:jettison:1.5.4
org.eclipse.jetty:jetty-annotations:9.4.51.v20230217
org.eclipse.jetty:jetty-http:9.4.51.v20230217
org.eclipse.jetty:jetty-io:9.4.51.v20230217
org.eclipse.jetty:jetty-jndi:9.4.51.v20230217
org.eclipse.jetty:jetty-plus:9.4.51.v20230217
org.eclipse.jetty:jetty-security:9.4.51.v20230217
org.eclipse.jetty:jetty-server:9.4.51.v20230217
org.eclipse.jetty:jetty-servlet:9.4.51.v20230217
org.eclipse.jetty:jetty-util:9.4.51.v20230217
org.eclipse.jetty:jetty-util-ajax:9.4.51.v20230217
org.eclipse.jetty:jetty-webapp:9.4.51.v20230217
org.eclipse.jetty:jetty-xml:9.4.51.v20230217
org.eclipse.jetty.websocket:javax-websocket-client-impl:9.4.51.v20230217
org.eclipse.jetty.websocket:javax-websocket-server-impl:9.4.51.v20230217
org.codehaus.jettison:jettison:1.5.3
org.eclipse.jetty:jetty-annotations:9.4.48.v20220622
org.eclipse.jetty:jetty-http:9.4.48.v20220622
org.eclipse.jetty:jetty-io:9.4.48.v20220622
org.eclipse.jetty:jetty-jndi:9.4.48.v20220622
org.eclipse.jetty:jetty-plus:9.4.48.v20220622
org.eclipse.jetty:jetty-security:9.4.48.v20220622
org.eclipse.jetty:jetty-server:9.4.48.v20220622
org.eclipse.jetty:jetty-servlet:9.4.48.v20220622
org.eclipse.jetty:jetty-util:9.4.48.v20220622
org.eclipse.jetty:jetty-util-ajax:9.4.48.v20220622
org.eclipse.jetty:jetty-webapp:9.4.48.v20220622
org.eclipse.jetty:jetty-xml:9.4.48.v20220622
org.eclipse.jetty.websocket:javax-websocket-client-impl:9.4.48.v20220622
org.eclipse.jetty.websocket:javax-websocket-server-impl:9.4.48.v20220622
org.ehcache:ehcache:3.3.1
org.ini4j:ini4j:0.5.4
org.jetbrains.kotlin:kotlin-stdlib:1.4.10
@ -362,7 +367,7 @@ org.jetbrains.kotlin:kotlin-stdlib-common:1.4.10
org.lz4:lz4-java:1.7.1
org.objenesis:objenesis:2.6
org.xerial.snappy:snappy-java:1.0.5
org.yaml:snakeyaml:2.0
org.yaml:snakeyaml:1.33
org.wildfly.openssl:wildfly-openssl:1.1.3.Final

View File

@ -20,20 +20,6 @@
# Override these to match Apache Hadoop's requirements
personality_plugins "all,-ant,-gradle,-scalac,-scaladoc"
# These flags are needed to run Yetus against Hadoop on Windows.
WINDOWS_FLAGS="-Pnative-win
-Dhttps.protocols=TLSv1.2
-Drequire.openssl
-Drequire.test.libhadoop
-Dshell-executable=${BASH_EXECUTABLE}
-Dopenssl.prefix=${VCPKG_INSTALLED_PACKAGES}
-Dcmake.prefix.path=${VCPKG_INSTALLED_PACKAGES}
-Dwindows.cmake.toolchain.file=${CMAKE_TOOLCHAIN_FILE}
-Dwindows.cmake.build.type=RelWithDebInfo
-Dwindows.build.hdfspp.dll=off
-Dwindows.no.sasl=on
-Duse.platformToolsetVersion=v142"
## @description Globals specific to this personality
## @audience private
## @stability evolving
@ -101,30 +87,17 @@ function hadoop_order
echo "${hadoopm}"
}
## @description Retrieves the Hadoop project version defined in the root pom.xml
## @audience private
## @stability evolving
## @returns 0 on success, 1 on failure
function load_hadoop_version
{
if [[ -f "${BASEDIR}/pom.xml" ]]; then
HADOOP_VERSION=$(grep '<version>' "${BASEDIR}/pom.xml" \
| head -1 \
| "${SED}" -e 's|^ *<version>||' -e 's|</version>.*$||' \
| cut -f1 -d- )
return 0
else
return 1
fi
}
## @description Determine if it is safe to run parallel tests
## @audience private
## @stability evolving
## @param ordering
function hadoop_test_parallel
{
if load_hadoop_version; then
if [[ -f "${BASEDIR}/pom.xml" ]]; then
HADOOP_VERSION=$(grep '<version>' "${BASEDIR}/pom.xml" \
| head -1 \
| "${SED}" -e 's|^ *<version>||' -e 's|</version>.*$||' \
| cut -f1 -d- )
export HADOOP_VERSION
else
return 1
@ -289,10 +262,7 @@ function hadoop_native_flags
Windows_NT|CYGWIN*|MINGW*|MSYS*)
echo \
"${args[@]}" \
-Drequire.snappy \
-Pdist \
-Dtar \
"${WINDOWS_FLAGS}"
-Drequire.snappy -Drequire.openssl -Pnative-win
;;
*)
echo \
@ -435,10 +405,7 @@ function personality_modules
extra="${extra} ${flags}"
fi
if [[ "$IS_WINDOWS" && "$IS_WINDOWS" == 1 ]]; then
extra="-Ptest-patch -Pdist -Dtar ${WINDOWS_FLAGS} ${extra}"
fi
extra="-Ptest-patch ${extra}"
for module in $(hadoop_order ${ordering}); do
# shellcheck disable=SC2086
personality_enqueue_module ${module} ${extra}
@ -581,28 +548,17 @@ function shadedclient_rebuild
big_console_header "Checking client artifacts on ${repostatus} with shaded clients"
extra="-Dtest=NoUnitTests -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true -Dspotbugs.skip=true"
if [[ "$IS_WINDOWS" && "$IS_WINDOWS" == 1 ]]; then
if load_hadoop_version; then
export HADOOP_HOME="${SOURCEDIR}/hadoop-dist/target/hadoop-${HADOOP_VERSION}-SNAPSHOT"
else
yetus_error "[WARNING] Unable to extract the Hadoop version and thus HADOOP_HOME is not set. Some tests may fail."
fi
extra="${WINDOWS_FLAGS} ${extra}"
fi
echo_and_redirect "${logfile}" \
"${MAVEN}" "${MAVEN_ARGS[@]}" verify -fae --batch-mode -am "${modules[@]}" "${extra}"
"${MAVEN}" "${MAVEN_ARGS[@]}" verify -fae --batch-mode -am \
"${modules[@]}" \
-Dtest=NoUnitTests -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true -Dspotbugs.skip=true
big_console_header "Checking client artifacts on ${repostatus} with non-shaded clients"
echo_and_redirect "${logfile}" \
"${MAVEN}" "${MAVEN_ARGS[@]}" verify -fae --batch-mode -am \
"${modules[@]}" \
-DskipShade -Dtest=NoUnitTests -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true \
-Dspotbugs.skip=true "${extra}"
-DskipShade -Dtest=NoUnitTests -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true -Dspotbugs.skip=true
count=$("${GREP}" -c '\[ERROR\]' "${logfile}")
if [[ ${count} -gt 0 ]]; then

View File

@ -171,17 +171,7 @@ if [[ -n "${GPGBIN}" && ! "${HADOOP_SKIP_YETUS_VERIFICATION}" = true ]]; then
fi
fi
if [[ "$IS_WINDOWS" && "$IS_WINDOWS" == 1 ]]; then
gunzip -c "${TARBALL}.gz" | tar xpf -
# One of the entries in the Yetus tarball unpacks to a symlink, qbt.sh.
# Symlink creation fails on Windows unless this CI is run as Admin or Developer mode is
# enabled.
# Thus, we create the qbt.sh symlink ourselves and move it to the target.
YETUS_PRECOMMIT_DIR="${YETUS_PREFIX}-${HADOOP_YETUS_VERSION}/lib/precommit"
ln -s "${YETUS_PRECOMMIT_DIR}/test-patch.sh" qbt.sh
mv qbt.sh "${YETUS_PRECOMMIT_DIR}"
elif ! (gunzip -c "${TARBALL}.gz" | tar xpf -); then
if ! (gunzip -c "${TARBALL}.gz" | tar xpf -); then
yetus_error "ERROR: ${TARBALL}.gz is corrupt. Investigate and then remove ${HADOOP_PATCHPROCESS} to try again."
exit 1
fi

View File

@ -74,7 +74,7 @@ ENV PATH "${PATH}:/opt/protobuf/bin"
###
# Avoid out of memory errors in builds
###
ENV MAVEN_OPTS -Xms256m -Xmx3072m
ENV MAVEN_OPTS -Xms256m -Xmx1536m
# Skip gpg verification when downloading Yetus via yetus-wrapper
ENV HADOOP_SKIP_YETUS_VERIFICATION true

View File

@ -64,7 +64,7 @@ ENV JAVA_HOME "C:\Java\zulu8.62.0.19-ca-jdk8.0.332-win_x64"
RUN setx PATH "%PATH%;%JAVA_HOME%\bin"
# Install Apache Maven.
RUN powershell Invoke-WebRequest -URI https://archive.apache.org/dist/maven/maven-3/3.8.6/binaries/apache-maven-3.8.6-bin.zip -OutFile $Env:TEMP\apache-maven-3.8.6-bin.zip
RUN powershell Invoke-WebRequest -URI https://dlcdn.apache.org/maven/maven-3/3.8.6/binaries/apache-maven-3.8.6-bin.zip -OutFile $Env:TEMP\apache-maven-3.8.6-bin.zip
RUN powershell Expand-Archive -Path $Env:TEMP\apache-maven-3.8.6-bin.zip -DestinationPath "C:\Maven"
RUN setx PATH "%PATH%;C:\Maven\apache-maven-3.8.6\bin"
ENV MAVEN_OPTS '-Xmx2048M -Xss128M'
@ -74,51 +74,8 @@ RUN powershell Invoke-WebRequest -URI https://cmake.org/files/v3.19/cmake-3.19.0
RUN powershell Expand-Archive -Path $Env:TEMP\cmake-3.19.0-win64-x64.zip -DestinationPath "C:\CMake"
RUN setx PATH "%PATH%;C:\CMake\cmake-3.19.0-win64-x64\bin"
# Install zstd 1.5.4.
RUN powershell Invoke-WebRequest -Uri https://github.com/facebook/zstd/releases/download/v1.5.4/zstd-v1.5.4-win64.zip -OutFile $Env:TEMP\zstd-v1.5.4-win64.zip
RUN powershell Expand-Archive -Path $Env:TEMP\zstd-v1.5.4-win64.zip -DestinationPath "C:\ZStd"
RUN setx PATH "%PATH%;C:\ZStd"
# Install libopenssl 3.1.0 needed for rsync 3.2.7.
RUN powershell Invoke-WebRequest -Uri https://repo.msys2.org/msys/x86_64/libopenssl-3.1.0-1-x86_64.pkg.tar.zst -OutFile $Env:TEMP\libopenssl-3.1.0-1-x86_64.pkg.tar.zst
RUN powershell zstd -d $Env:TEMP\libopenssl-3.1.0-1-x86_64.pkg.tar.zst -o $Env:TEMP\libopenssl-3.1.0-1-x86_64.pkg.tar
RUN powershell mkdir "C:\LibOpenSSL"
RUN powershell tar -xvf $Env:TEMP\libopenssl-3.1.0-1-x86_64.pkg.tar -C "C:\LibOpenSSL"
# Install libxxhash 0.8.1 needed for rsync 3.2.7.
RUN powershell Invoke-WebRequest -Uri https://repo.msys2.org/msys/x86_64/libxxhash-0.8.1-1-x86_64.pkg.tar.zst -OutFile $Env:TEMP\libxxhash-0.8.1-1-x86_64.pkg.tar.zst
RUN powershell zstd -d $Env:TEMP\libxxhash-0.8.1-1-x86_64.pkg.tar.zst -o $Env:TEMP\libxxhash-0.8.1-1-x86_64.pkg.tar
RUN powershell mkdir "C:\LibXXHash"
RUN powershell tar -xvf $Env:TEMP\libxxhash-0.8.1-1-x86_64.pkg.tar -C "C:\LibXXHash"
# Install libzstd 1.5.4 needed for rsync 3.2.7.
RUN powershell Invoke-WebRequest -Uri https://repo.msys2.org/msys/x86_64/libzstd-1.5.4-1-x86_64.pkg.tar.zst -OutFile $Env:TEMP\libzstd-1.5.4-1-x86_64.pkg.tar.zst
RUN powershell zstd -d $Env:TEMP\libzstd-1.5.4-1-x86_64.pkg.tar.zst -o $Env:TEMP\libzstd-1.5.4-1-x86_64.pkg.tar
RUN powershell mkdir "C:\LibZStd"
RUN powershell tar -xvf $Env:TEMP\libzstd-1.5.4-1-x86_64.pkg.tar -C "C:\LibZStd"
# Install rsync 3.2.7.
RUN powershell Invoke-WebRequest -Uri https://repo.msys2.org/msys/x86_64/rsync-3.2.7-2-x86_64.pkg.tar.zst -OutFile $Env:TEMP\rsync-3.2.7-2-x86_64.pkg.tar.zst
RUN powershell zstd -d $Env:TEMP\rsync-3.2.7-2-x86_64.pkg.tar.zst -o $Env:TEMP\rsync-3.2.7-2-x86_64.pkg.tar
RUN powershell mkdir "C:\RSync"
RUN powershell tar -xvf $Env:TEMP\rsync-3.2.7-2-x86_64.pkg.tar -C "C:\RSync"
# Copy the dependencies of rsync 3.2.7.
RUN powershell Copy-Item -Path "C:\LibOpenSSL\usr\bin\*.dll" -Destination "C:\Program` Files\Git\usr\bin"
RUN powershell Copy-Item -Path "C:\LibXXHash\usr\bin\*.dll" -Destination "C:\Program` Files\Git\usr\bin"
RUN powershell Copy-Item -Path "C:\LibZStd\usr\bin\*.dll" -Destination "C:\Program` Files\Git\usr\bin"
RUN powershell Copy-Item -Path "C:\RSync\usr\bin\*" -Destination "C:\Program` Files\Git\usr\bin"
# Install Python 3.10.11.
RUN powershell Invoke-WebRequest -Uri https://www.python.org/ftp/python/3.10.11/python-3.10.11-embed-amd64.zip -OutFile $Env:TEMP\python-3.10.11-embed-amd64.zip
RUN powershell Expand-Archive -Path $Env:TEMP\python-3.10.11-embed-amd64.zip -DestinationPath "C:\Python3"
RUN powershell New-Item -ItemType HardLink -Value "C:\Python3\python.exe" -Path "C:\Python3\python3.exe"
RUN setx path "%PATH%;C:\Python3"
# We get strange Javadoc errors without this.
RUN setx classpath ""
RUN git config --global core.longpaths true
RUN setx PATH "%PATH%;C:\Program Files\Git\usr\bin"
# Define the entry point for the docker container.
ENTRYPOINT ["C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\BuildTools\\VC\\Auxiliary\\Build\\vcvars64.bat", "&&", "cmd.exe"]

View File

@ -48,7 +48,7 @@ is_platform_change() {
declare in_path
in_path="${SOURCEDIR}"/"${1}"
for path in "${DOCKERFILE}" "${SOURCEDIR}"/dev-support/docker/pkg-resolver/*.json; do
for path in "${SOURCEDIR}"/dev-support/docker/Dockerfile* "${SOURCEDIR}"/dev-support/docker/pkg-resolver/*.json; do
if [ "${in_path}" == "${path}" ]; then
echo "Found C/C++ platform related changes in ${in_path}"
return 0
@ -114,47 +114,22 @@ function check_ci_run() {
function run_ci() {
TESTPATCHBIN="${WORKSPACE}/${YETUS}/precommit/src/main/shell/test-patch.sh"
if [[ "$IS_WINDOWS" && "$IS_WINDOWS" == 1 ]]; then
echo "Building in a Windows environment, skipping some Yetus related settings"
else
# run in docker mode and specifically point to our
# Dockerfile since we don't want to use the auto-pulled version.
YETUS_ARGS+=("--docker")
YETUS_ARGS+=("--dockerfile=${DOCKERFILE}")
YETUS_ARGS+=("--mvn-custom-repos")
YETUS_ARGS+=("--dockermemlimit=22g")
# test with Java 8 and 11
YETUS_ARGS+=("--java-home=/usr/lib/jvm/java-8-openjdk-amd64")
YETUS_ARGS+=("--multijdkdirs=/usr/lib/jvm/java-11-openjdk-amd64")
YETUS_ARGS+=("--multijdktests=compile")
# this must be clean for every run
if [[ -d "${PATCHDIR}" ]]; then
rm -rf "${PATCHDIR:?}"
fi
mkdir -p "${PATCHDIR}"
if [[ "$IS_NIGHTLY_BUILD" && "$IS_NIGHTLY_BUILD" == 1 ]]; then
YETUS_ARGS+=("--empty-patch")
YETUS_ARGS+=("--branch=${BRANCH_NAME}")
else
# this must be clean for every run
if [[ -d "${PATCHDIR}" ]]; then
rm -rf "${PATCHDIR:?}"
fi
mkdir -p "${PATCHDIR}"
# if given a JIRA issue, process it. If CHANGE_URL is set
# (e.g., Github Branch Source plugin), process it.
# otherwise exit, because we don't want Hadoop to do a
# full build. We wouldn't normally do this check for smaller
# projects. :)
if [[ -n "${JIRA_ISSUE_KEY}" ]]; then
YETUS_ARGS+=("${JIRA_ISSUE_KEY}")
elif [[ -z "${CHANGE_URL}" ]]; then
echo "Full build skipped" >"${PATCHDIR}/report.html"
exit 0
fi
# write Yetus report as GitHub comment (YETUS-1102)
YETUS_ARGS+=("--github-write-comment")
YETUS_ARGS+=("--github-use-emoji-vote")
# if given a JIRA issue, process it. If CHANGE_URL is set
# (e.g., Github Branch Source plugin), process it.
# otherwise exit, because we don't want Hadoop to do a
# full build. We wouldn't normally do this check for smaller
# projects. :)
if [[ -n "${JIRA_ISSUE_KEY}" ]]; then
YETUS_ARGS+=("${JIRA_ISSUE_KEY}")
elif [[ -z "${CHANGE_URL}" ]]; then
echo "Full build skipped" >"${PATCHDIR}/report.html"
exit 0
fi
YETUS_ARGS+=("--patch-dir=${PATCHDIR}")
@ -181,6 +156,7 @@ function run_ci() {
# changing these to higher values may cause problems
# with other jobs on systemd-enabled machines
YETUS_ARGS+=("--proclimit=5500")
YETUS_ARGS+=("--dockermemlimit=22g")
# -1 spotbugs issues that show up prior to the patch being applied
YETUS_ARGS+=("--spotbugs-strict-precheck")
@ -199,15 +175,30 @@ function run_ci() {
# much attention to them
YETUS_ARGS+=("--tests-filter=checkstyle")
# run in docker mode and specifically point to our
# Dockerfile since we don't want to use the auto-pulled version.
YETUS_ARGS+=("--docker")
YETUS_ARGS+=("--dockerfile=${DOCKERFILE}")
YETUS_ARGS+=("--mvn-custom-repos")
# effectively treat dev-support as a custom maven module
YETUS_ARGS+=("--skip-dirs=dev-support")
# help keep the ASF boxes clean
YETUS_ARGS+=("--sentinel")
# test with Java 8 and 11
YETUS_ARGS+=("--java-home=/usr/lib/jvm/java-8-openjdk-amd64")
YETUS_ARGS+=("--multijdkdirs=/usr/lib/jvm/java-11-openjdk-amd64")
YETUS_ARGS+=("--multijdktests=compile")
# custom javadoc goals
YETUS_ARGS+=("--mvn-javadoc-goals=process-sources,javadoc:javadoc-no-fork")
# write Yetus report as GitHub comment (YETUS-1102)
YETUS_ARGS+=("--github-write-comment")
YETUS_ARGS+=("--github-use-emoji-vote")
"${TESTPATCHBIN}" "${YETUS_ARGS[@]}"
}

View File

@ -69,10 +69,6 @@
<groupId>com.github.pjfanning</groupId>
<artifactId>jersey-json</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jettison</groupId>
<artifactId>jettison</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
@ -186,10 +182,6 @@
<groupId>com.github.pjfanning</groupId>
<artifactId>jersey-json</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jettison</groupId>
<artifactId>jettison</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
@ -241,10 +233,6 @@
<groupId>com.github.pjfanning</groupId>
<artifactId>jersey-json</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jettison</groupId>
<artifactId>jettison</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-servlet</artifactId>
@ -302,10 +290,6 @@
<groupId>com.github.pjfanning</groupId>
<artifactId>jersey-json</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jettison</groupId>
<artifactId>jettison</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>

View File

@ -110,8 +110,20 @@
<groupId>org.bouncycastle</groupId>
<artifactId>bcprov-jdk15on</artifactId>
</exclusion>
<!-- HACK. Transitive dependency for nimbus-jose-jwt. Needed for
packaging. Please re-check this version when updating
nimbus-jose-jwt. Please read HADOOP-14903 for more details.
-->
<exclusion>
<groupId>net.minidev</groupId>
<artifactId>json-smart</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>net.minidev</groupId>
<artifactId>json-smart</artifactId>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>

File diff suppressed because one or more lines are too long

View File

@ -175,14 +175,6 @@
</exclusion>
</exclusions>
</dependency>
<dependency>
<!--
adding jettison as a direct dependency (as jersey-json's jettison dependency is vulnerable with version 1.1),
so those who depend on hadoop-common externally will get the non-vulnerable jettison
-->
<groupId>org.codehaus.jettison</groupId>
<artifactId>jettison</artifactId>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
@ -340,14 +332,6 @@
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-handler</artifactId>
</dependency>
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty-transport-native-epoll</artifactId>
</dependency>
<dependency>
<groupId>io.dropwizard.metrics</groupId>
<artifactId>metrics-core</artifactId>
@ -660,10 +644,9 @@
<goal>exec</goal>
</goals>
<configuration>
<executable>${shell-executable}</executable>
<executable>${basedir}/../../dev-support/bin/shelldocs</executable>
<workingDirectory>src/site/markdown</workingDirectory>
<arguments>
<argument>${basedir}/../../dev-support/bin/shelldocs</argument>
<argument>--skipprnorep</argument>
<argument>--output</argument>
<argument>${basedir}/src/site/markdown/UnixShellAPI.md</argument>

View File

@ -26,9 +26,9 @@ MYNAME="${BASH_SOURCE-$0}"
function hadoop_usage
{
hadoop_add_option "buildpaths" "attempt to add class files from build tree"
hadoop_add_option "hostnames list[,of,host,names]" "hosts to use in worker mode"
hadoop_add_option "hostnames list[,of,host,names]" "hosts to use in slave mode"
hadoop_add_option "loglevel level" "set the log4j level for this command"
hadoop_add_option "hosts filename" "list of hosts to use in worker mode"
hadoop_add_option "hosts filename" "list of hosts to use in slave mode"
hadoop_add_option "workers" "turn on worker mode"
hadoop_add_subcommand "checknative" client "check native Hadoop and compression libraries availability"

View File

@ -16,7 +16,7 @@
# limitations under the License.
# Run a Hadoop command on all worker hosts.
# Run a Hadoop command on all slave hosts.
function hadoop_usage
{

View File

@ -53,10 +53,6 @@
# variable is REQUIRED on ALL platforms except OS X!
# export JAVA_HOME=
# The language environment in which Hadoop runs. Use the English
# environment to ensure that logs are printed as expected.
export LANG=en_US.UTF-8
# Location of Hadoop. By default, Hadoop will attempt to determine
# this location based upon its execution path.
# export HADOOP_HOME=

View File

@ -75,6 +75,14 @@ log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
#
# TaskLog Appender
#
log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
#
# HDFS block state change log from block manager
#

View File

@ -417,14 +417,6 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
/** How often to retry a ZooKeeper operation in milliseconds. */
public static final String ZK_RETRY_INTERVAL_MS =
ZK_PREFIX + "retry-interval-ms";
/** Keystore location for ZooKeeper client connection over SSL. */
public static final String ZK_SSL_KEYSTORE_LOCATION = ZK_PREFIX + "ssl.keystore.location";
/** Keystore password for ZooKeeper client connection over SSL. */
public static final String ZK_SSL_KEYSTORE_PASSWORD = ZK_PREFIX + "ssl.keystore.password";
/** Truststore location for ZooKeeper client connection over SSL. */
public static final String ZK_SSL_TRUSTSTORE_LOCATION = ZK_PREFIX + "ssl.truststore.location";
/** Truststore password for ZooKeeper client connection over SSL. */
public static final String ZK_SSL_TRUSTSTORE_PASSWORD = ZK_PREFIX + "ssl.truststore.password";
public static final int ZK_RETRY_INTERVAL_MS_DEFAULT = 1000;
/** Default domain name resolver for hadoop to use. */
public static final String HADOOP_DOMAINNAME_RESOLVER_IMPL =
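For reference, wiring the four SSL keys above into a client Configuration is straightforward; the paths and passwords below are placeholders:

Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.ZK_SSL_KEYSTORE_LOCATION, "/etc/security/zk-keystore.jks");
conf.set(CommonConfigurationKeys.ZK_SSL_KEYSTORE_PASSWORD, "keystore-password");     // placeholder
conf.set(CommonConfigurationKeys.ZK_SSL_TRUSTSTORE_LOCATION, "/etc/security/zk-truststore.jks");
conf.set(CommonConfigurationKeys.ZK_SSL_TRUSTSTORE_PASSWORD, "truststore-password"); // placeholder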

View File

@ -163,11 +163,5 @@ public final class CommonPathCapabilities {
public static final String ETAGS_PRESERVED_IN_RENAME =
"fs.capability.etags.preserved.in.rename";
/**
* Does this Filesystem support lease recovery operations such as
* {@link LeaseRecoverable#recoverLease(Path)} and {@link LeaseRecoverable#isFileClosed(Path)}?
* Value: {@value}.
*/
public static final String LEASE_RECOVERABLE = "fs.capability.lease.recoverable";
}

View File

@ -28,34 +28,6 @@ import org.apache.hadoop.classification.InterfaceStability;
* The base interface which various FileSystem/FileContext Builder
* interfaces can extend, and which underlying implementations
* will then implement.
* <p>
* HADOOP-16202 expanded the opt() and must() arguments with
* method overloading, but HADOOP-18724 identified mapping problems:
* passing a long value in to {@code opt()} could end up invoking
* {@code opt(String, double)}, which could then trigger parse failures.
* <p>
* To fix this without forcing existing code to break or be recompiled:
* <ol>
* <li>A new method to explicitly set a long value is added:
* {@link #optLong(String, long)}
* </li>
* <li>A new method to explicitly set a double value is added:
* {@link #optDouble(String, double)}
* </li>
* <li>
* All of {@link #opt(String, long)}, {@link #opt(String, float)} and
* {@link #opt(String, double)} invoke {@link #optLong(String, long)}.
* </li>
* <li>
* The same changes have been applied to {@code must()} methods.
* </li>
* </ol>
* The forwarding of existing double/float setters to the long setters ensures
* that existing code will link, but it is guaranteed to always set a long value.
* If you need to write code which works correctly with all Hadoop releases,
* convert the option to a string explicitly and then call {@link #opt(String, String)}
* or {@link #must(String, String)} as appropriate.
*
* @param <S> Return type on the {@link #build()} call.
* @param <B> type of builder itself.
*/
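As a concrete illustration of the advice above (the key is the standard openfile length option; the values are arbitrary):

// On releases with HADOOP-18724, the explicit long setter is unambiguous:
builder.optLong("fs.option.openfile.length", 1_500_000_000L);

// Portable across all releases, as the javadoc recommends: convert the value
// to a string so no numeric overload resolution (long vs. double) is involved.
builder.opt("fs.option.openfile.length", Long.toString(1_500_000_000L));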
@ -78,9 +50,7 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
* @return generic type B.
* @see #opt(String, String)
*/
default B opt(@Nonnull String key, boolean value) {
return opt(key, Boolean.toString(value));
}
B opt(@Nonnull String key, boolean value);
/**
* Set optional int parameter for the Builder.
@ -90,25 +60,17 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
* @return generic type B.
* @see #opt(String, String)
*/
default B opt(@Nonnull String key, int value) {
return optLong(key, value);
}
B opt(@Nonnull String key, int value);
/**
* This parameter is converted to a long and passed
* to {@link #optLong(String, long)}; all
* decimal precision is lost.
* Set optional float parameter for the Builder.
*
* @param key key.
* @param value value.
* @return generic type B.
* @see #opt(String, String)
* @deprecated use {@link #optDouble(String, double)}
*/
@Deprecated
default B opt(@Nonnull String key, float value) {
return optLong(key, (long) value);
}
B opt(@Nonnull String key, float value);
/**
* Set optional long parameter for the Builder.
@ -116,27 +78,19 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
* @param key key.
* @param value value.
* @return generic type B.
* @deprecated use {@link #optLong(String, long)} where possible.
* @see #opt(String, String)
*/
default B opt(@Nonnull String key, long value) {
return optLong(key, value);
}
B opt(@Nonnull String key, long value);
/**
* Pass an optional double parameter for the Builder.
* This parameter is converted to a long and passed
* to {@link #optLong(String, long)}; all
* decimal precision is lost.
* Set optional double parameter for the Builder.
*
* @param key key.
* @param value value.
* @return generic type B.
* @see #opt(String, String)
* @deprecated use {@link #optDouble(String, double)}
*/
@Deprecated
default B opt(@Nonnull String key, double value) {
return optLong(key, (long) value);
}
B opt(@Nonnull String key, double value);
/**
* Set an array of string values as optional parameter for the Builder.
@ -148,30 +102,6 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
*/
B opt(@Nonnull String key, @Nonnull String... values);
/**
* Set optional long parameter for the Builder.
*
* @param key key.
* @param value value.
* @return generic type B.
* @see #opt(String, String)
*/
default B optLong(@Nonnull String key, long value) {
return opt(key, Long.toString(value));
}
/**
* Set optional double parameter for the Builder.
*
* @param key key.
* @param value value.
* @return generic type B.
* @see #opt(String, String)
*/
default B optDouble(@Nonnull String key, double value) {
return opt(key, Double.toString(value));
}
/**
* Set mandatory option to the Builder.
*
@ -192,9 +122,7 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
* @return generic type B.
* @see #must(String, String)
*/
default B must(@Nonnull String key, boolean value) {
return must(key, Boolean.toString(value));
}
B must(@Nonnull String key, boolean value);
/**
* Set mandatory int option.
@ -204,24 +132,17 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
* @return generic type B.
* @see #must(String, String)
*/
default B must(@Nonnull String key, int value) {
return mustLong(key, value);
}
B must(@Nonnull String key, int value);
/**
* This parameter is converted to a long and passed
* to {@link #mustLong(String, long)}; all
* decimal precision is lost.
* Set mandatory float option.
*
* @param key key.
* @param value value.
* @return generic type B.
* @deprecated use {@link #mustDouble(String, double)} to set floating point.
* @see #must(String, String)
*/
@Deprecated
default B must(@Nonnull String key, float value) {
return mustLong(key, (long) value);
}
B must(@Nonnull String key, float value);
/**
* Set mandatory long option.
@ -231,24 +152,17 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
* @return generic type B.
* @see #must(String, String)
*/
@Deprecated
default B must(@Nonnull String key, long value) {
return mustLong(key, (long) value);
}
B must(@Nonnull String key, long value);
/**
* Set mandatory long option, despite passing in a floating
* point value.
* Set mandatory double option.
*
* @param key key.
* @param value value.
* @return generic type B.
* @see #must(String, String)
*/
@Deprecated
default B must(@Nonnull String key, double value) {
return mustLong(key, (long) value);
}
B must(@Nonnull String key, double value);
/**
* Set a string array as mandatory option.
@ -260,30 +174,6 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
*/
B must(@Nonnull String key, @Nonnull String... values);
/**
* Set mandatory long parameter for the Builder.
*
* @param key key.
* @param value value.
* @return generic type B.
* @see #must(String, String)
*/
default B mustLong(@Nonnull String key, long value) {
return must(key, Long.toString(value));
}
/**
* Set mandatory double parameter for the Builder.
*
* @param key key.
* @param value value.
* @return generic type B.
* @see #must(String, String)
*/
default B mustDouble(@Nonnull String key, double value) {
return must(key, Double.toString(value));
}
/**
* Instantiate the object which was being built.
*

View File

@ -2231,7 +2231,7 @@ public class FileContext implements PathCapabilities {
InputStream in = awaitFuture(openFile(qSrc)
.opt(FS_OPTION_OPENFILE_READ_POLICY,
FS_OPTION_OPENFILE_READ_POLICY_WHOLE_FILE)
.optLong(FS_OPTION_OPENFILE_LENGTH,
.opt(FS_OPTION_OPENFILE_LENGTH,
fs.getLen()) // file length hint for object stores
.build());
try (OutputStream out = create(qDst, createFlag)) {
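This hunk uses the standard openFile() builder pattern: declare a read policy, then pass the known length so object stores can skip a probe for the file size. A self-contained sketch of the same pattern, assuming static imports of the Options.OpenFileOptions constants and of FutureIO.awaitFuture:

FileStatus st = fs.getFileStatus(path);
try (FSDataInputStream in = awaitFuture(fs.openFile(path)
    .opt(FS_OPTION_OPENFILE_READ_POLICY,
        FS_OPTION_OPENFILE_READ_POLICY_WHOLE_FILE)
    .optLong(FS_OPTION_OPENFILE_LENGTH, st.getLen()) // length hint for object stores
    .build())) {
  // read sequentially; the store has been told to expect a whole-file scan
  org.apache.hadoop.io.IOUtils.copyBytes(in, System.out, 4096, false);
}

On releases without optLong (the older side of this diff), the plain opt(String, long) overload serves the same purpose.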

View File

@ -2413,14 +2413,8 @@ public abstract class FileSystem extends Configured
if (stat.isFile()) { // file
curFile = stat;
} else if (recursive) { // directory
try {
RemoteIterator<LocatedFileStatus> newDirItor = listLocatedStatus(stat.getPath());
itors.push(curItor);
curItor = newDirItor;
} catch (FileNotFoundException ignored) {
LOGGER.debug("Directory {} deleted while attempting for recursive listing",
stat.getPath());
}
itors.push(curItor);
curItor = listLocatedStatus(stat.getPath());
}
}
@ -3602,9 +3596,9 @@ public abstract class FileSystem extends Configured
} catch (IOException | RuntimeException e) {
// exception raised during initialization.
// log summary at warn and full stack at debug
LOGGER.warn("Failed to initialize filesystem {}: {}",
LOGGER.warn("Failed to initialize fileystem {}: {}",
uri, e.toString());
LOGGER.debug("Failed to initialize filesystem", e);
LOGGER.debug("Failed to initialize fileystem", e);
// then (robustly) close the FS, so as to invoke any
// cleanup code.
IOUtils.cleanupWithLogger(LOGGER, fs);
@ -3942,7 +3936,6 @@ public abstract class FileSystem extends Configured
private volatile long bytesReadDistanceOfThreeOrFour;
private volatile long bytesReadDistanceOfFiveOrLarger;
private volatile long bytesReadErasureCoded;
private volatile long remoteReadTimeMS;
/**
* Add another StatisticsData object to this one.
@ -3960,7 +3953,6 @@ public abstract class FileSystem extends Configured
this.bytesReadDistanceOfFiveOrLarger +=
other.bytesReadDistanceOfFiveOrLarger;
this.bytesReadErasureCoded += other.bytesReadErasureCoded;
this.remoteReadTimeMS += other.remoteReadTimeMS;
}
/**
@ -3979,7 +3971,6 @@ public abstract class FileSystem extends Configured
this.bytesReadDistanceOfFiveOrLarger =
-this.bytesReadDistanceOfFiveOrLarger;
this.bytesReadErasureCoded = -this.bytesReadErasureCoded;
this.remoteReadTimeMS = -this.remoteReadTimeMS;
}
@Override
@ -4028,10 +4019,6 @@ public abstract class FileSystem extends Configured
public long getBytesReadErasureCoded() {
return bytesReadErasureCoded;
}
public long getRemoteReadTimeMS() {
return remoteReadTimeMS;
}
}
private interface StatisticsAggregator<T> {
@ -4259,14 +4246,6 @@ public abstract class FileSystem extends Configured
}
}
/**
* Increment the time taken to read bytes from remote in the statistics.
* @param durationMS time taken in ms to read bytes from remote
*/
public void increaseRemoteReadTime(final long durationMS) {
getThreadStatistics().remoteReadTimeMS += durationMS;
}
/**
* Apply the given aggregator to all StatisticsData objects associated with
* this Statistics object.
@ -4414,25 +4393,6 @@ public abstract class FileSystem extends Configured
return bytesRead;
}
/**
* Get total time taken in ms for bytes read from remote.
* @return time taken in ms for remote bytes read.
*/
public long getRemoteReadTime() {
return visitAll(new StatisticsAggregator<Long>() {
private long remoteReadTimeMS = 0;
@Override
public void accept(StatisticsData data) {
remoteReadTimeMS += data.remoteReadTimeMS;
}
public Long aggregate() {
return remoteReadTimeMS;
}
});
}
/**
* Get all statistics data.
* MR or other frameworks can use the method to get all statistics at once.

View File

@ -47,8 +47,7 @@ public class FileSystemStorageStatistics extends StorageStatistics {
"bytesReadDistanceOfOneOrTwo",
"bytesReadDistanceOfThreeOrFour",
"bytesReadDistanceOfFiveOrLarger",
"bytesReadErasureCoded",
"remoteReadTimeMS"
"bytesReadErasureCoded"
};
private static class LongStatisticIterator
@ -108,8 +107,6 @@ public class FileSystemStorageStatistics extends StorageStatistics {
return data.getBytesReadDistanceOfFiveOrLarger();
case "bytesReadErasureCoded":
return data.getBytesReadErasureCoded();
case "remoteReadTimeMS":
return data.getRemoteReadTimeMS();
default:
return null;
}
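The keys listed above surface through the StorageStatistics iterator; a short sketch of dumping them for a live FileSystem, using the standard accessor methods:

StorageStatistics stats = fs.getStorageStatistics();
java.util.Iterator<StorageStatistics.LongStatistic> it = stats.getLongStatistics();
while (it.hasNext()) {
  StorageStatistics.LongStatistic s = it.next();
  System.out.printf("%s = %d%n", s.getName(), s.getValue());
}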

View File

@ -484,7 +484,7 @@ public class FileUtil {
in = awaitFuture(srcFS.openFile(src)
.opt(FS_OPTION_OPENFILE_READ_POLICY,
FS_OPTION_OPENFILE_READ_POLICY_WHOLE_FILE)
.optLong(FS_OPTION_OPENFILE_LENGTH,
.opt(FS_OPTION_OPENFILE_LENGTH,
srcStatus.getLen()) // file length hint for object stores
.build());
out = dstFS.create(dst, overwrite);

View File

@ -1,46 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
/**
* Whether the given Path of the FileSystem supports lease recovery operations.
*/
public interface LeaseRecoverable {
/**
* Start the lease recovery of a file.
*
* @param file path to a file.
* @return true if the file is already closed, and it does not require lease recovery.
* @throws IOException if an error occurs during lease recovery.
* @throws UnsupportedOperationException if lease recovery is not supported by this filesystem.
*/
boolean recoverLease(Path file) throws IOException;
/**
* Get the close status of a file.
* @param file The string representation of the path to the file
* @return return true if file is closed
* @throws IOException If an I/O error occurred
* @throws UnsupportedOperationException if isFileClosed is not supported by this filesystem.
*/
boolean isFileClosed(Path file) throws IOException;
}
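A sketch of client-side use, pairing the interface with the fs.capability.lease.recoverable probe from the CommonPathCapabilities hunk above; the instanceof-and-cast pattern is illustrative:

if (fs.hasPathCapability(file, "fs.capability.lease.recoverable")
    && fs instanceof LeaseRecoverable) {
  LeaseRecoverable lr = (LeaseRecoverable) fs;
  if (!lr.isFileClosed(file)) {
    // true means the file was already closed and needed no recovery
    boolean alreadyClosed = lr.recoverLease(file);
  }
}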

View File

@ -465,12 +465,7 @@ public class Path
* @return a new path with the suffix added
*/
public Path suffix(String suffix) {
Path parent = getParent();
if (parent == null) {
return new Path("/", getName() + suffix);
}
return new Path(parent, getName() + suffix);
return new Path(getParent(), getName()+suffix);
}
@Override
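The guard removed in this hunk matters when getParent() returns null, which happens for the root path; with the guard in place:

new Path("/data/file.txt").suffix(".bak"); // -> /data/file.txt.bak (parent non-null)
new Path("/").suffix("marker");            // -> /marker instead of a NullPointerException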

View File

@ -1,50 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.IOException;
/**
* Whether the given filesystem supports safe mode operations.
*/
public interface SafeMode {
/**
* Enter, leave, or get safe mode.
*
* @param action One of {@link SafeModeAction} LEAVE, ENTER, GET, FORCE_EXIT.
* @throws IOException if set safe mode fails to proceed.
* @return true if the action is successfully accepted, otherwise false means rejected.
*/
default boolean setSafeMode(SafeModeAction action) throws IOException {
return setSafeMode(action, false);
}
/**
* Enter, leave, or get safe mode.
*
* @param action One of {@link SafeModeAction} LEAVE, ENTER, GET, FORCE_EXIT.
* @param isChecked If true check only for Active metadata node / NameNode's status,
* else check first metadata node / NameNode's status.
* @throws IOException if set safe mode fails to proceed.
* @return true if the action is successfully accepted, otherwise false means rejected.
*/
boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException;
}
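A minimal sketch of driving this interface from client code; whether a given FileSystem implements SafeMode depends on the implementation:

if (fs instanceof SafeMode) {
  SafeMode sm = (SafeMode) fs;
  sm.setSafeMode(SafeModeAction.ENTER);  // one-arg default passes isChecked = false
  // ... perform maintenance while the filesystem is read-only ...
  sm.setSafeMode(SafeModeAction.LEAVE);
  boolean inSafeMode = sm.setSafeMode(SafeModeAction.GET, true); // query active node
}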

View File

@ -1,41 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
/**
* An identical copy of org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction, which helps
* other file system implementations define {@link SafeMode}.
*/
public enum SafeModeAction {
/**
* Start entering safe mode.
*/
ENTER,
/**
* Gracefully exit from safe mode.
*/
LEAVE,
/**
* Force Exit from safe mode.
*/
FORCE_EXIT,
/**
* Get the status of the safe mode.
*/
GET;
}

View File

@ -115,9 +115,4 @@ public final class AuditConstants {
*/
public static final String PARAM_TIMESTAMP = "ts";
/**
* Num of files to be deleted as part of the bulk delete request.
*/
public static final String DELETE_KEYS_SIZE = "ks";
}

View File

@ -44,13 +44,11 @@ import static org.apache.hadoop.util.Preconditions.checkNotNull;
* with option support.
*
* <code>
* .opt("fs.s3a.open.option.caching", true)
* .opt("fs.option.openfile.read.policy", "random, adaptive")
* .opt("foofs:option.a", true)
* .opt("foofs:option.b", "value")
* .opt("fs.s3a.open.option.etag", "9fe4c37c25b")
* .optLong("fs.option.openfile.length", 1_500_000_000_000)
* .must("fs.option.openfile.buffer.size", 256_000)
* .mustLong("fs.option.openfile.split.start", 256_000_000)
* .mustLong("fs.option.openfile.split.end", 512_000_000)
* .must("foofs:cache", true)
* .must("barfs:cache-size", 256 * 1024 * 1024)
* .build();
* </code>
*
@ -66,7 +64,6 @@ import static org.apache.hadoop.util.Preconditions.checkNotNull;
*/
@InterfaceAudience.Public
@InterfaceStability.Unstable
@SuppressWarnings({"deprecation", "unused"})
public abstract class
AbstractFSBuilderImpl<S, B extends FSBuilder<S, B>>
implements FSBuilder<S, B> {
@ -181,7 +178,10 @@ public abstract class
*/
@Override
public B opt(@Nonnull final String key, boolean value) {
return opt(key, Boolean.toString(value));
mandatoryKeys.remove(key);
optionalKeys.add(key);
options.setBoolean(key, value);
return getThisBuilder();
}
/**
@ -191,17 +191,18 @@ public abstract class
*/
@Override
public B opt(@Nonnull final String key, int value) {
return optLong(key, value);
mandatoryKeys.remove(key);
optionalKeys.add(key);
options.setInt(key, value);
return getThisBuilder();
}
@Override
public B opt(@Nonnull final String key, final long value) {
return optLong(key, value);
}
@Override
public B optLong(@Nonnull final String key, final long value) {
return opt(key, Long.toString(value));
mandatoryKeys.remove(key);
optionalKeys.add(key);
options.setLong(key, value);
return getThisBuilder();
}
/**
@ -211,7 +212,10 @@ public abstract class
*/
@Override
public B opt(@Nonnull final String key, float value) {
return optLong(key, (long) value);
mandatoryKeys.remove(key);
optionalKeys.add(key);
options.setFloat(key, value);
return getThisBuilder();
}
/**
@ -221,17 +225,10 @@ public abstract class
*/
@Override
public B opt(@Nonnull final String key, double value) {
return optLong(key, (long) value);
}
/**
* Set optional double parameter for the Builder.
*
* @see #opt(String, String)
*/
@Override
public B optDouble(@Nonnull final String key, double value) {
return opt(key, Double.toString(value));
mandatoryKeys.remove(key);
optionalKeys.add(key);
options.setDouble(key, value);
return getThisBuilder();
}
/**
@ -267,22 +264,10 @@ public abstract class
*/
@Override
public B must(@Nonnull final String key, boolean value) {
return must(key, Boolean.toString(value));
}
@Override
public B mustLong(@Nonnull final String key, final long value) {
return must(key, Long.toString(value));
}
/**
* Set mandatory double parameter for the Builder.
*
* @see #must(String, String)
*/
@Override
public B mustDouble(@Nonnull final String key, double value) {
return must(key, Double.toString(value));
mandatoryKeys.add(key);
optionalKeys.remove(key);
options.setBoolean(key, value);
return getThisBuilder();
}
/**
@ -292,22 +277,44 @@ public abstract class
*/
@Override
public B must(@Nonnull final String key, int value) {
return mustLong(key, value);
mandatoryKeys.add(key);
optionalKeys.remove(key);
options.setInt(key, value);
return getThisBuilder();
}
@Override
public B must(@Nonnull final String key, final long value) {
return mustLong(key, value);
mandatoryKeys.add(key);
optionalKeys.remove(key);
options.setLong(key, value);
return getThisBuilder();
}
/**
* Set mandatory float option.
*
* @see #must(String, String)
*/
@Override
public B must(@Nonnull final String key, final float value) {
return mustLong(key, (long) value);
public B must(@Nonnull final String key, float value) {
mandatoryKeys.add(key);
optionalKeys.remove(key);
options.setFloat(key, value);
return getThisBuilder();
}
/**
* Set mandatory double option.
*
* @see #must(String, String)
*/
@Override
public B must(@Nonnull final String key, double value) {
return mustLong(key, (long) value);
mandatoryKeys.add(key);
optionalKeys.remove(key);
options.setDouble(key, value);
return getThisBuilder();
}
/**

View File

@ -1,95 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.impl;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.store.LogExactlyOnce;
/**
* Class to help with use of FSBuilder.
*/
public class FSBuilderSupport {
private static final Logger LOG =
LoggerFactory.getLogger(FSBuilderSupport.class);
public static final LogExactlyOnce LOG_PARSE_ERROR = new LogExactlyOnce(LOG);
/**
* Options which are parsed.
*/
private final Configuration options;
/**
* Constructor.
* @param options the configuration options from the builder.
*/
public FSBuilderSupport(final Configuration options) {
this.options = options;
}
public Configuration getOptions() {
return options;
}
/**
* Get a long value with resilience to unparseable values.
* Negative values are replaced with the default.
* @param key key to look up
* @param defVal default value
* @return long value
*/
public long getPositiveLong(String key, long defVal) {
long l = getLong(key, defVal);
if (l < 0) {
LOG.debug("The option {} has a negative value {}, replacing with the default {}",
key, l, defVal);
l = defVal;
}
return l;
}
/**
* Get a long value with resilience to unparseable values.
* @param key key to look up
* @param defVal default value
* @return long value
*/
public long getLong(String key, long defVal) {
final String v = options.getTrimmed(key, "");
if (v.isEmpty()) {
return defVal;
}
try {
return options.getLong(key, defVal);
} catch (NumberFormatException e) {
final String msg = String.format(
"The option %s value \"%s\" is not a long integer; using the default value %s",
key, v, defVal);
// not a long: warn once, with full detail at debug
LOG_PARSE_ERROR.warn(msg);
LOG.debug("{}", msg, e);
return defVal;
}
}
}
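A brief usage sketch; the option key is illustrative, and unparseable or negative values fall back to the default instead of throwing:

Configuration options = new Configuration();
options.set("fs.example.readahead.blocks", "not-a-number"); // illustrative bad value
FSBuilderSupport support = new FSBuilderSupport(options);
// warns once about the parse failure, then returns the default of 8
long blocks = support.getPositiveLong("fs.example.readahead.blocks", 8);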

View File

@ -23,9 +23,6 @@ import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
/**
* Provides functionality necessary for caching blocks of data read from FileSystem.
*/
@ -67,10 +64,7 @@ public interface BlockCache extends Closeable {
*
* @param blockNumber the id of the given block.
* @param buffer contents of the given block to be added to this cache.
* @param conf the configuration.
* @param localDirAllocator the local dir allocator instance.
* @throws IOException if there is an error writing the given block.
*/
void put(int blockNumber, ByteBuffer buffer, Configuration conf,
LocalDirAllocator localDirAllocator) throws IOException;
void put(int blockNumber, ByteBuffer buffer) throws IOException;
}
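A hypothetical caller of the four-argument put() (the newer signature shown above); the buffer-dir key handed to the allocator is illustrative:

LocalDirAllocator allocator = new LocalDirAllocator("fs.example.buffer.dir"); // illustrative key
ByteBuffer block = ByteBuffer.allocate(blockSize);
// ... fill the buffer with the block's bytes, then flip() it for reading ...
blockCache.put(blockNumber, block, conf, allocator);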

View File

@ -33,8 +33,6 @@ import java.util.function.Supplier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.statistics.DurationTracker;
import static java.util.Objects.requireNonNull;
@ -97,10 +95,6 @@ public abstract class CachingBlockManager extends BlockManager {
private final PrefetchingStatistics prefetchingStatistics;
private final Configuration conf;
private final LocalDirAllocator localDirAllocator;
/**
* Constructs an instance of a {@code CachingBlockManager}.
*
@ -108,17 +102,14 @@ public abstract class CachingBlockManager extends BlockManager {
* @param blockData information about each block of the underlying file.
* @param bufferPoolSize size of the in-memory cache in terms of number of blocks.
* @param prefetchingStatistics statistics for this stream.
* @param conf the configuration.
* @param localDirAllocator the local dir allocator instance.
*
* @throws IllegalArgumentException if bufferPoolSize is zero or negative.
*/
public CachingBlockManager(
ExecutorServiceFuturePool futurePool,
BlockData blockData,
int bufferPoolSize,
PrefetchingStatistics prefetchingStatistics,
Configuration conf,
LocalDirAllocator localDirAllocator) {
PrefetchingStatistics prefetchingStatistics) {
super(blockData);
Validate.checkPositiveInteger(bufferPoolSize, "bufferPoolSize");
@ -138,8 +129,6 @@ public abstract class CachingBlockManager extends BlockManager {
this.ops = new BlockOperations();
this.ops.setDebug(false);
this.conf = requireNonNull(conf);
this.localDirAllocator = localDirAllocator;
}
/**
@ -479,8 +468,7 @@ public abstract class CachingBlockManager extends BlockManager {
blockFuture = cf;
}
CachePutTask task =
new CachePutTask(data, blockFuture, this, Instant.now());
CachePutTask task = new CachePutTask(data, blockFuture, this, Instant.now());
Future<Void> actionFuture = futurePool.executeFunction(task);
data.setCaching(actionFuture);
ops.end(op);
@ -566,7 +554,7 @@ public abstract class CachingBlockManager extends BlockManager {
return;
}
cache.put(blockNumber, buffer, conf, localDirAllocator);
cache.put(blockNumber, buffer);
}
private static class CachePutTask implements Supplier<Void> {

View File

@ -27,9 +27,10 @@ import java.nio.channels.WritableByteChannel;
import java.nio.file.Files;
import java.nio.file.OpenOption;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.nio.file.attribute.FileAttribute;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EnumSet;
@ -37,16 +38,10 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
import static java.util.Objects.requireNonNull;
import static org.apache.hadoop.fs.impl.prefetch.Validate.checkNotNull;
@ -72,22 +67,6 @@ public class SingleFilePerBlockCache implements BlockCache {
private final PrefetchingStatistics prefetchingStatistics;
/**
* Timeout to be used by close, while acquiring prefetch block write lock.
*/
private static final int PREFETCH_WRITE_LOCK_TIMEOUT = 5;
/**
* Lock timeout unit to be used by the thread while acquiring prefetch block write lock.
*/
private static final TimeUnit PREFETCH_WRITE_LOCK_TIMEOUT_UNIT = TimeUnit.SECONDS;
/**
* File attributes attached to any intermediate temporary file created during index creation.
*/
private static final Set<PosixFilePermission> TEMP_FILE_ATTRS =
ImmutableSet.of(PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE);
/**
* Cache entry.
* Each block is stored as a separate file.
@ -97,18 +76,12 @@ public class SingleFilePerBlockCache implements BlockCache {
private final Path path;
private final int size;
private final long checksum;
private final ReentrantReadWriteLock lock;
private enum LockType {
READ,
WRITE
}
Entry(int blockNumber, Path path, int size, long checksum) {
this.blockNumber = blockNumber;
this.path = path;
this.size = size;
this.checksum = checksum;
this.lock = new ReentrantReadWriteLock();
}
@Override
@ -117,54 +90,6 @@ public class SingleFilePerBlockCache implements BlockCache {
"([%03d] %s: size = %d, checksum = %d)",
blockNumber, path, size, checksum);
}
/**
* Take the read or write lock.
*
* @param lockType type of the lock.
*/
private void takeLock(LockType lockType) {
if (LockType.READ == lockType) {
lock.readLock().lock();
} else if (LockType.WRITE == lockType) {
lock.writeLock().lock();
}
}
/**
* Release the read or write lock.
*
* @param lockType type of the lock.
*/
private void releaseLock(LockType lockType) {
if (LockType.READ == lockType) {
lock.readLock().unlock();
} else if (LockType.WRITE == lockType) {
lock.writeLock().unlock();
}
}
/**
* Try to take the read or write lock within the given timeout.
*
* @param lockType type of the lock.
* @param timeout the time to wait for the given lock.
* @param unit the time unit of the timeout argument.
* @return true if the lock of the given lock type was acquired.
*/
private boolean takeLock(LockType lockType, long timeout, TimeUnit unit) {
try {
if (LockType.READ == lockType) {
return lock.readLock().tryLock(timeout, unit);
} else if (LockType.WRITE == lockType) {
return lock.writeLock().tryLock(timeout, unit);
}
} catch (InterruptedException e) {
LOG.warn("Thread interrupted while trying to acquire {} lock", lockType, e);
Thread.currentThread().interrupt();
}
return false;
}
}
/**
@ -214,15 +139,11 @@ public class SingleFilePerBlockCache implements BlockCache {
checkNotNull(buffer, "buffer");
Entry entry = getEntry(blockNumber);
entry.takeLock(Entry.LockType.READ);
try {
buffer.clear();
readFile(entry.path, buffer);
buffer.rewind();
validateEntry(entry, buffer);
} finally {
entry.releaseLock(Entry.LockType.READ);
}
buffer.clear();
readFile(entry.path, buffer);
buffer.rewind();
validateEntry(entry, buffer);
}
protected int readFile(Path path, ByteBuffer buffer) throws IOException {
@ -251,17 +172,11 @@ public class SingleFilePerBlockCache implements BlockCache {
/**
* Puts the given block in this cache.
*
* @param blockNumber the block number, used as a key for blocks map.
* @param buffer buffer contents of the given block to be added to this cache.
* @param conf the configuration.
* @param localDirAllocator the local dir allocator instance.
* @throws IOException if either local dir allocator fails to allocate file or if IO error
* occurs while writing the buffer content to the file.
* @throws IllegalArgumentException if buffer is null, or if buffer.limit() is zero or negative.
* @throws IllegalArgumentException if buffer is null.
* @throws IllegalArgumentException if buffer.limit() is zero or negative.
*/
@Override
public void put(int blockNumber, ByteBuffer buffer, Configuration conf,
LocalDirAllocator localDirAllocator) throws IOException {
public void put(int blockNumber, ByteBuffer buffer) throws IOException {
if (closed) {
return;
}
@ -270,18 +185,13 @@ public class SingleFilePerBlockCache implements BlockCache {
if (blocks.containsKey(blockNumber)) {
Entry entry = blocks.get(blockNumber);
entry.takeLock(Entry.LockType.READ);
try {
validateEntry(entry, buffer);
} finally {
entry.releaseLock(Entry.LockType.READ);
}
validateEntry(entry, buffer);
return;
}
Validate.checkPositiveInteger(buffer.limit(), "buffer.limit()");
Path blockFilePath = getCacheFilePath(conf, localDirAllocator);
Path blockFilePath = getCacheFilePath();
long size = Files.size(blockFilePath);
if (size != 0) {
String message =
@ -291,15 +201,10 @@ public class SingleFilePerBlockCache implements BlockCache {
}
writeFile(blockFilePath, buffer);
prefetchingStatistics.blockAddedToFileCache();
long checksum = BufferData.getChecksum(buffer);
Entry entry = new Entry(blockNumber, blockFilePath, buffer.limit(), checksum);
blocks.put(blockNumber, entry);
// Update stream_read_blocks_in_cache stats only after blocks map is updated with new file
// entry to avoid any discrepancy related to the value of stream_read_blocks_in_cache.
// If stream_read_blocks_in_cache is updated before updating the blocks map here, closing of
// the input stream can lead to the removal of the cache file even before the blocks map is
// updated with the new cache file, leading to an incorrect value of stream_read_blocks_in_cache.
prefetchingStatistics.blockAddedToFileCache();
}
private static final Set<? extends OpenOption> CREATE_OPTIONS =
@ -316,19 +221,8 @@ public class SingleFilePerBlockCache implements BlockCache {
writeChannel.close();
}
/**
* Return a temporary file created at the path retrieved from the local dir allocator.
*
* @param conf The configuration object.
* @param localDirAllocator Local dir allocator instance.
* @return Path of the temporary file created.
* @throws IOException if IO error occurs while local dir allocator tries to retrieve path
* from local FS or file creation fails or permission set fails.
*/
protected Path getCacheFilePath(final Configuration conf,
final LocalDirAllocator localDirAllocator)
throws IOException {
return getTempFilePath(conf, localDirAllocator);
protected Path getCacheFilePath() throws IOException {
return getTempFilePath();
}
@Override
@ -343,22 +237,12 @@ public class SingleFilePerBlockCache implements BlockCache {
int numFilesDeleted = 0;
for (Entry entry : blocks.values()) {
boolean lockAcquired = entry.takeLock(Entry.LockType.WRITE, PREFETCH_WRITE_LOCK_TIMEOUT,
PREFETCH_WRITE_LOCK_TIMEOUT_UNIT);
if (!lockAcquired) {
LOG.error("Cache file {} deletion would not be attempted as write lock could not"
+ " be acquired within {} {}", entry.path, PREFETCH_WRITE_LOCK_TIMEOUT,
PREFETCH_WRITE_LOCK_TIMEOUT_UNIT);
continue;
}
try {
Files.deleteIfExists(entry.path);
prefetchingStatistics.blockRemovedFromFileCache();
numFilesDeleted++;
} catch (IOException e) {
LOG.debug("Failed to delete cache file {}", entry.path, e);
} finally {
entry.releaseLock(Entry.LockType.WRITE);
// Ignore while closing so that we can delete as many cache files as possible.
}
}
@ -439,19 +323,9 @@ public class SingleFilePerBlockCache implements BlockCache {
private static final String CACHE_FILE_PREFIX = "fs-cache-";
/**
* Determine if the cache space is available on the local FS.
*
* @param fileSize The size of the file.
* @param conf The configuration.
* @param localDirAllocator Local dir allocator instance.
* @return True if the given file size is less than the available free space on local FS,
* False otherwise.
*/
public static boolean isCacheSpaceAvailable(long fileSize, Configuration conf,
LocalDirAllocator localDirAllocator) {
public static boolean isCacheSpaceAvailable(long fileSize) {
try {
Path cacheFilePath = getTempFilePath(conf, localDirAllocator);
Path cacheFilePath = getTempFilePath();
long freeSpace = new File(cacheFilePath.toString()).getUsableSpace();
LOG.info("fileSize = {}, freeSpace = {}", fileSize, freeSpace);
Files.deleteIfExists(cacheFilePath);
@ -465,25 +339,16 @@ public class SingleFilePerBlockCache implements BlockCache {
// The suffix (file extension) of each serialized index file.
private static final String BINARY_FILE_SUFFIX = ".bin";
/**
* Create a temporary file from the file path retrieved via the local dir allocator
* instance. The file is created with the .bin suffix and is granted the POSIX file
* permissions in TEMP_FILE_ATTRS.
*
* @param conf the configuration.
* @param localDirAllocator the local dir allocator instance.
* @return path of the file created.
* @throws IOException if IO error occurs while local dir allocator tries to retrieve path
* from local FS or file creation fails or permission set fails.
*/
private static Path getTempFilePath(final Configuration conf,
final LocalDirAllocator localDirAllocator) throws IOException {
org.apache.hadoop.fs.Path path =
localDirAllocator.getLocalPathForWrite(CACHE_FILE_PREFIX, conf);
File dir = new File(path.getParent().toUri().getPath());
String prefix = path.getName();
File tmpFile = File.createTempFile(prefix, BINARY_FILE_SUFFIX, dir);
Path tmpFilePath = Paths.get(tmpFile.toURI());
return Files.setPosixFilePermissions(tmpFilePath, TEMP_FILE_ATTRS);
// File attributes attached to any intermediate temporary file created during index creation.
private static final FileAttribute<Set<PosixFilePermission>> TEMP_FILE_ATTRS =
PosixFilePermissions.asFileAttribute(EnumSet.of(PosixFilePermission.OWNER_READ,
PosixFilePermission.OWNER_WRITE));
private static Path getTempFilePath() throws IOException {
return Files.createTempFile(
CACHE_FILE_PREFIX,
BINARY_FILE_SUFFIX,
TEMP_FILE_ATTRS
);
}
}
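As a standalone illustration of the branch-side getTempFilePath(), the following minimal sketch (hypothetical class name; POSIX filesystems only) creates an owner-read/write .bin temp file the same way:

import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.FileAttribute;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.EnumSet;
import java.util.Set;

public class TempFileSketch {
  public static void main(String[] args) throws Exception {
    // Owner-only read/write, matching TEMP_FILE_ATTRS above.
    FileAttribute<Set<PosixFilePermission>> attrs =
        PosixFilePermissions.asFileAttribute(EnumSet.of(
            PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE));
    Path tmp = Files.createTempFile("fs-cache-", ".bin", attrs);
    System.out.println(tmp + " -> " + Files.getPosixFilePermissions(tmp));
    Files.deleteIfExists(tmp);
  }
}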

View File

@ -633,7 +633,7 @@ public class PathData implements Comparable<PathData> {
return awaitFuture(fs.openFile(path)
.opt(FS_OPTION_OPENFILE_READ_POLICY,
policy)
.optLong(FS_OPTION_OPENFILE_LENGTH,
.opt(FS_OPTION_OPENFILE_LENGTH,
stat.getLen()) // file length hint for object stores
.build());
}
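The hunk above reverts optLong() to opt() for the length hint. For reference, a hedged sketch of the trunk-side call chain, assuming the fs, path, stat and policy values already in scope in PathData and the awaitFuture helper it imports:

// Hedged sketch: open with a read policy plus a length hint so object stores
// can skip a HEAD probe; optLong() is the trunk-side variant shown above.
FSDataInputStream in = awaitFuture(
    fs.openFile(path)
        .opt(FS_OPTION_OPENFILE_READ_POLICY, policy)
        .optLong(FS_OPTION_OPENFILE_LENGTH, stat.getLen())
        .build());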

View File

@ -1,75 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.statistics;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Setter for IOStatistics entries.
* These operations have been in the read/write API
* {@code IOStatisticsStore} since IOStatistics
* was added; extracting them into their own interface allows
* {@link IOStatisticsSnapshot} to support them as well.
* These are the simple setters; they don't provide increments,
* decrements, or calculation of min/max/mean etc.
* @since The interface and IOStatisticsSnapshot support was added <i>after</i> Hadoop 3.3.5
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public interface IOStatisticsSetters extends IOStatistics {
/**
* Set a counter.
*
* No-op if the counter is unknown.
* @param key statistics key
* @param value value to set
*/
void setCounter(String key, long value);
/**
* Set a gauge.
*
* @param key statistics key
* @param value value to set
*/
void setGauge(String key, long value);
/**
* Set a maximum.
* @param key statistics key
* @param value value to set
*/
void setMaximum(String key, long value);
/**
* Set a minimum.
* @param key statistics key
* @param value value to set
*/
void setMinimum(String key, long value);
/**
* Set a mean statistic to a given value.
* @param key statistic key
* @param value new value.
*/
void setMeanStatistic(String key, MeanStatistic value);
}
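A hedged usage sketch of the interface this hunk deletes, relying only on methods visible in this diff. The snapshot implementation below writes directly into its underlying maps, so unknown keys are simply created; the MeanStatistic (samples, sum) constructor is assumed:

// Hedged sketch: drive an IOStatisticsSnapshot through the setters API.
IOStatisticsSnapshot snapshot = new IOStatisticsSnapshot();
snapshot.setCounter("stream_read_bytes", 4096);
snapshot.setGauge("stream_active", 1);
snapshot.setMinimum("read_latency_min", 5);
snapshot.setMaximum("read_latency_max", 250);
snapshot.setMeanStatistic("read_latency_mean", new MeanStatistic(10, 800));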

View File

@ -62,8 +62,7 @@ import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.snapshotM
@InterfaceAudience.Public
@InterfaceStability.Evolving
public final class IOStatisticsSnapshot
implements IOStatistics, Serializable, IOStatisticsAggregator,
IOStatisticsSetters {
implements IOStatistics, Serializable, IOStatisticsAggregator {
private static final long serialVersionUID = -1762522703841538084L;
@ -223,33 +222,6 @@ public final class IOStatisticsSnapshot
return meanStatistics;
}
@Override
public synchronized void setCounter(final String key, final long value) {
counters().put(key, value);
}
@Override
public synchronized void setGauge(final String key, final long value) {
gauges().put(key, value);
}
@Override
public synchronized void setMaximum(final String key, final long value) {
maximums().put(key, value);
}
@Override
public synchronized void setMinimum(final String key, final long value) {
minimums().put(key, value);
}
@Override
public void setMeanStatistic(final String key, final MeanStatistic value) {
meanStatistics().put(key, value);
}
@Override
public String toString() {
return ioStatisticsToString(this);

View File

@ -24,7 +24,6 @@ import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.fs.statistics.IOStatistics;
import org.apache.hadoop.fs.statistics.IOStatisticsAggregator;
import org.apache.hadoop.fs.statistics.DurationTrackerFactory;
import org.apache.hadoop.fs.statistics.IOStatisticsSetters;
import org.apache.hadoop.fs.statistics.MeanStatistic;
/**
@ -32,7 +31,6 @@ import org.apache.hadoop.fs.statistics.MeanStatistic;
* use in classes which track statistics for reporting.
*/
public interface IOStatisticsStore extends IOStatistics,
IOStatisticsSetters,
IOStatisticsAggregator,
DurationTrackerFactory {
@ -58,6 +56,24 @@ public interface IOStatisticsStore extends IOStatistics,
*/
long incrementCounter(String key, long value);
/**
* Set a counter.
*
* No-op if the counter is unknown.
* @param key statistics key
* @param value value to set
*/
void setCounter(String key, long value);
/**
* Set a gauge.
*
* No-op if the gauge is unknown.
* @param key statistics key
* @param value value to set
*/
void setGauge(String key, long value);
/**
* Increment a gauge.
* <p>
@ -69,6 +85,14 @@ public interface IOStatisticsStore extends IOStatistics,
*/
long incrementGauge(String key, long value);
/**
* Set a maximum.
* No-op if the maximum is unknown.
* @param key statistics key
* @param value value to set
*/
void setMaximum(String key, long value);
/**
* Increment a maximum.
* <p>
@ -80,6 +104,16 @@ public interface IOStatisticsStore extends IOStatistics,
*/
long incrementMaximum(String key, long value);
/**
* Set a minimum.
* <p>
* No-op if the minimum is unknown.
* </p>
* @param key statistics key
* @param value value to set
*/
void setMinimum(String key, long value);
/**
* Increment a minimum.
* <p>
@ -113,6 +147,16 @@ public interface IOStatisticsStore extends IOStatistics,
*/
void addMaximumSample(String key, long value);
/**
* Set a mean statistic to a given value.
* <p>
* No-op if the key is unknown.
* </p>
* @param key statistic key
* @param value new value.
*/
void setMeanStatistic(String key, MeanStatistic value);
/**
* Add a sample to the mean statistics.
* <p>

View File

@ -67,17 +67,6 @@ public interface IOStatisticsStoreBuilder {
IOStatisticsStoreBuilder withDurationTracking(
String... prefixes);
/**
* A value which is tracked with counter/min/max/mean.
* Similar to {@link #withDurationTracking(String...)}
* but without the failure option and with the same name
* across all categories.
* @param prefixes prefixes to add.
* @return the builder
*/
IOStatisticsStoreBuilder withSampleTracking(
String... prefixes);
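Per the implementation removed further down in this diff, withSampleTracking(p) is shorthand for registering the same name across four categories; the equivalence, as a sketch:

// Equivalent expansion of builder.withSampleTracking("op_read"):
builder.withCounters("op_read")
       .withMinimums("op_read")
       .withMaximums("op_read")
       .withMeanStatistics("op_read");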
/**
* Build the collector.
* @return a new collector.

View File

@ -92,18 +92,6 @@ final class IOStatisticsStoreBuilderImpl implements
return this;
}
@Override
public IOStatisticsStoreBuilderImpl withSampleTracking(
final String... prefixes) {
for (String p : prefixes) {
withCounters(p);
withMinimums(p);
withMaximums(p);
withMeanStatistics(p);
}
return this;
}
@Override
public IOStatisticsStore build() {
return new IOStatisticsStoreImpl(counters, gauges, minimums,

View File

@ -497,12 +497,7 @@ public final class HttpServer2 implements FilterContainer {
prefix -> this.conf.get(prefix + "type")
.equals(PseudoAuthenticationHandler.TYPE))
) {
server.initSpnego(
conf,
hostName,
getFilterProperties(conf, authFilterConfigurationPrefixes),
usernameConfKey,
keytabConfKey);
server.initSpnego(conf, hostName, usernameConfKey, keytabConfKey);
}
for (URI ep : endpoints) {
@ -1345,12 +1340,8 @@ public final class HttpServer2 implements FilterContainer {
}
private void initSpnego(Configuration conf, String hostName,
Properties authFilterConfigurationPrefixes, String usernameConfKey, String keytabConfKey)
throws IOException {
String usernameConfKey, String keytabConfKey) throws IOException {
Map<String, String> params = new HashMap<>();
for (Map.Entry<Object, Object> entry : authFilterConfigurationPrefixes.entrySet()) {
params.put(String.valueOf(entry.getKey()), String.valueOf(entry.getValue()));
}
String principalInConf = conf.get(usernameConfKey);
if (principalInConf != null && !principalInConf.isEmpty()) {
params.put("kerberos.principal", SecurityUtil.getServerPrincipal(

View File

@ -2006,7 +2006,7 @@ public class SequenceFile {
FS_OPTION_OPENFILE_READ_POLICY_SEQUENTIAL)
.opt(FS_OPTION_OPENFILE_BUFFER_SIZE, bufferSize);
if (length >= 0) {
builder.optLong(FS_OPTION_OPENFILE_LENGTH, length);
builder.opt(FS_OPTION_OPENFILE_LENGTH, length);
}
return awaitFuture(builder.build());
}

View File

@ -78,11 +78,6 @@ public final class CodecUtil {
public static final String IO_ERASURECODE_CODEC_XOR_RAWCODERS_KEY =
IO_ERASURECODE_CODEC + "xor.rawcoders";
public static final String IO_ERASURECODE_CODEC_NATIVE_ENABLED_KEY =
IO_ERASURECODE_CODEC + "native.enabled";
public static final boolean IO_ERASURECODE_CODEC_NATIVE_ENABLED_DEFAULT = true;
private CodecUtil() { }
/**
@ -175,14 +170,8 @@ public final class CodecUtil {
private static RawErasureEncoder createRawEncoderWithFallback(
Configuration conf, String codecName, ErasureCoderOptions coderOptions) {
boolean nativeEncoderEnabled = conf.getBoolean(IO_ERASURECODE_CODEC_NATIVE_ENABLED_KEY,
IO_ERASURECODE_CODEC_NATIVE_ENABLED_DEFAULT);
String[] rawCoderNames = getRawCoderNames(conf, codecName);
for (String rawCoderName : rawCoderNames) {
if (!nativeEncoderEnabled && rawCoderName.contains("native")) {
LOG.debug("Disable the encoder with ISA-L.");
continue;
}
try {
if (rawCoderName != null) {
RawErasureCoderFactory fact = createRawCoderFactory(
@ -203,14 +192,8 @@ public final class CodecUtil {
private static RawErasureDecoder createRawDecoderWithFallback(
Configuration conf, String codecName, ErasureCoderOptions coderOptions) {
boolean nativeDecoderEnabled = conf.getBoolean(IO_ERASURECODE_CODEC_NATIVE_ENABLED_KEY,
IO_ERASURECODE_CODEC_NATIVE_ENABLED_DEFAULT);
String[] coders = getRawCoderNames(conf, codecName);
for (String rawCoderName : coders) {
if (!nativeDecoderEnabled && rawCoderName.contains("native")) {
LOG.debug("Disable the decoder with ISA-L.");
continue;
}
try {
if (rawCoderName != null) {
RawErasureCoderFactory fact = createRawCoderFactory(

View File

@ -50,7 +50,7 @@ public final class CallerContext {
public static final String CLIENT_ID_STR = "clientId";
public static final String CLIENT_CALL_ID_STR = "clientCallId";
public static final String REAL_USER_STR = "realUser";
public static final String PROXY_USER_PORT = "proxyUserPort";
/** The caller context.
*
* It will be truncated if it exceeds the maximum allowed length in

View File

@ -590,8 +590,9 @@ public class Client implements AutoCloseable {
InetSocketAddress currentAddr = NetUtils.createSocketAddrForHost(
server.getHostName(), server.getPort());
if (!currentAddr.isUnresolved() && !server.equals(currentAddr)) {
LOG.warn("Address change detected. Old: {} New: {}", server, currentAddr);
if (!server.equals(currentAddr)) {
LOG.warn("Address change detected. Old: " + server.toString() +
" New: " + currentAddr.toString());
server = currentAddr;
// Update the remote address so that reconnections are with the updated address.
// This avoids thrashing.

View File

@ -29,19 +29,5 @@ import org.apache.hadoop.security.UserGroupInformation;
public interface Schedulable {
public UserGroupInformation getUserGroupInformation();
/**
* This is overridden only in {@link Server.Call}.
* The CallerContext field is used to carry information
* about the user in cases where UGI proves insufficient.
* Any other class that tries to use this method
* will get an UnsupportedOperationException.
*
* @return an instance of CallerContext if the method
* is overridden; otherwise an UnsupportedOperationException is thrown
*/
default CallerContext getCallerContext() {
throw new UnsupportedOperationException("Invalid operation.");
}
int getPriorityLevel();
}
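Given the default implementation removed above, any caller that is not certain it holds a Server.Call has to guard the call. A hedged sketch:

// Hedged sketch: only Server.Call overrides getCallerContext().
CallerContext ctx = null;
try {
  ctx = schedulable.getCallerContext();
} catch (UnsupportedOperationException e) {
  // Not a Server.Call; fall back to UGI-based information only.
}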

View File

@ -627,11 +627,8 @@ public abstract class Server {
details.get(Timing.PROCESSING, rpcMetrics.getMetricsTimeUnit());
long waitTime =
details.get(Timing.LOCKWAIT, rpcMetrics.getMetricsTimeUnit());
long responseTime =
details.get(Timing.RESPONSE, rpcMetrics.getMetricsTimeUnit());
rpcMetrics.addRpcLockWaitTime(waitTime);
rpcMetrics.addRpcProcessingTime(processingTime);
rpcMetrics.addRpcResponseTime(responseTime);
// don't include lock wait for detailed metrics.
processingTime -= waitTime;
String name = call.getDetailedMetricsName();
@ -1089,11 +1086,6 @@ public abstract class Server {
return getRemoteUser();
}
@Override
public CallerContext getCallerContext() {
return this.callerContext;
}
@Override
public int getPriorityLevel() {
return this.priorityLevel;

View File

@ -75,8 +75,6 @@ public class RpcMetrics {
new MutableQuantiles[intervals.length];
rpcProcessingTimeQuantiles =
new MutableQuantiles[intervals.length];
rpcResponseTimeQuantiles =
new MutableQuantiles[intervals.length];
deferredRpcProcessingTimeQuantiles =
new MutableQuantiles[intervals.length];
for (int i = 0; i < intervals.length; i++) {
@ -92,10 +90,6 @@ public class RpcMetrics {
"rpcProcessingTime" + interval + "s",
"rpc processing time in " + metricsTimeUnit, "ops",
"latency", interval);
rpcResponseTimeQuantiles[i] = registry.newQuantiles(
"rpcResponseTime" + interval + "s",
"rpc response time in " + metricsTimeUnit, "ops",
"latency", interval);
deferredRpcProcessingTimeQuantiles[i] = registry.newQuantiles(
"deferredRpcProcessingTime" + interval + "s",
"deferred rpc processing time in " + metricsTimeUnit, "ops",
@ -120,8 +114,6 @@ public class RpcMetrics {
MutableQuantiles[] rpcLockWaitTimeQuantiles;
@Metric("Processing time") MutableRate rpcProcessingTime;
MutableQuantiles[] rpcProcessingTimeQuantiles;
@Metric("Response time") MutableRate rpcResponseTime;
MutableQuantiles[] rpcResponseTimeQuantiles;
@Metric("Deferred Processing time") MutableRate deferredRpcProcessingTime;
MutableQuantiles[] deferredRpcProcessingTimeQuantiles;
@Metric("Number of authentication failures")
@ -290,15 +282,6 @@ public class RpcMetrics {
}
}
public void addRpcResponseTime(long responseTime) {
rpcResponseTime.add(responseTime);
if (rpcQuantileEnable) {
for (MutableQuantiles q : rpcResponseTimeQuantiles) {
q.add(responseTime);
}
}
}
public void addDeferredRpcProcessingTime(long processingTime) {
deferredRpcProcessingTime.add(processingTime);
if (rpcQuantileEnable) {

View File

@ -0,0 +1,263 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.log;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.MappingJsonFactory;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ObjectReader;
import com.fasterxml.jackson.databind.node.ContainerNode;
import org.apache.log4j.Layout;
import org.apache.log4j.helpers.ISO8601DateFormat;
import org.apache.log4j.spi.LoggingEvent;
import org.apache.log4j.spi.ThrowableInformation;
import java.io.IOException;
import java.io.StringWriter;
import java.io.Writer;
import java.text.DateFormat;
import java.util.Date;
/**
* This offers a log layout for JSON, with some test entry points. Its purpose is
* to allow Log4J to generate events that are easy for other programs to parse, but which are somewhat
* human-readable.
*
* Some features:
*
* <ol>
* <li>Every event is a standalone JSON clause</li>
* <li>Time is published as a time_t event since 1/1/1970
* -this is the fastest to generate.</li>
* <li>An ISO date is generated, but this is cached and will only be accurate to within a second</li>
* <li>the stack trace is included as an array</li>
* </ol>
*
* A simple log event will resemble the following
* <pre>
* {"name":"test","time":1318429136789,"date":"2011-10-12 15:18:56,789","level":"INFO","thread":"main","message":"test message"}
* </pre>
*
* An event with an error will contain data similar to that below (which has been reformatted to be multi-line).
*
* <pre>
* {
* "name":"testException",
* "time":1318429136789,
* "date":"2011-10-12 15:18:56,789",
* "level":"INFO",
* "thread":"quoted\"",
* "message":"new line\n and {}",
* "exceptionclass":"java.net.NoRouteToHostException",
* "stack":[
* "java.net.NoRouteToHostException: that box caught fire 3 years ago",
* "\tat org.apache.hadoop.log.TestLog4Json.testException(TestLog4Json.java:49)",
* "\tat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)",
* "\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)",
* "\tat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)",
* "\tat java.lang.reflect.Method.invoke(Method.java:597)",
* "\tat junit.framework.TestCase.runTest(TestCase.java:168)",
* "\tat junit.framework.TestCase.runBare(TestCase.java:134)",
* "\tat junit.framework.TestResult$1.protect(TestResult.java:110)",
* "\tat junit.framework.TestResult.runProtected(TestResult.java:128)",
* "\tat junit.framework.TestResult.run(TestResult.java:113)",
* "\tat junit.framework.TestCase.run(TestCase.java:124)",
* "\tat junit.framework.TestSuite.runTest(TestSuite.java:232)",
* "\tat junit.framework.TestSuite.run(TestSuite.java:227)",
* "\tat org.junit.internal.runners.JUnit38ClassRunner.run(JUnit38ClassRunner.java:83)",
* "\tat org.apache.maven.surefire.junit4.JUnit4TestSet.execute(JUnit4TestSet.java:59)",
* "\tat org.apache.maven.surefire.suite.AbstractDirectoryTestSuite.executeTestSet(AbstractDirectoryTestSuite.java:120)",
* "\tat org.apache.maven.surefire.suite.AbstractDirectoryTestSuite.execute(AbstractDirectoryTestSuite.java:145)",
* "\tat org.apache.maven.surefire.Surefire.run(Surefire.java:104)",
* "\tat sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)",
* "\tat sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:39)",
* "\tat sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:25)",
* "\tat java.lang.reflect.Method.invoke(Method.java:597)",
* "\tat org.apache.maven.surefire.booter.SurefireBooter.runSuitesInProcess(SurefireBooter.java:290)",
* "\tat org.apache.maven.surefire.booter.SurefireBooter.main(SurefireBooter.java:1017)"
* ]
* }
* </pre>
*/
public class Log4Json extends Layout {
/**
* Jackson factories are thread safe when constructing parsers and generators.
* They are not thread safe in configure methods; if there is to be any
* configuration it must be done in a static initializer block.
*/
private static final JsonFactory factory = new MappingJsonFactory();
private static final ObjectReader READER = new ObjectMapper(factory).reader();
public static final String DATE = "date";
public static final String EXCEPTION_CLASS = "exceptionclass";
public static final String LEVEL = "level";
public static final String MESSAGE = "message";
public static final String NAME = "name";
public static final String STACK = "stack";
public static final String THREAD = "thread";
public static final String TIME = "time";
public static final String JSON_TYPE = "application/json";
private final DateFormat dateFormat;
public Log4Json() {
dateFormat = new ISO8601DateFormat();
}
/**
* @return the mime type of JSON
*/
@Override
public String getContentType() {
return JSON_TYPE;
}
@Override
public String format(LoggingEvent event) {
try {
return toJson(event);
} catch (IOException e) {
//this really should not happen, and rather than throw an exception
//which may hide the real problem, the exception class name is printed
//in JSON format. The classname is used to ensure valid JSON is
//returned without playing escaping games
return "{ \"logfailure\":\"" + e.getClass().toString() + "\"}";
}
}
/**
* Convert an event to JSON
*
* @param event the event -must not be null
* @return a string value
* @throws IOException on problems generating the JSON
*/
public String toJson(LoggingEvent event) throws IOException {
StringWriter writer = new StringWriter();
toJson(writer, event);
return writer.toString();
}
/**
* Convert an event to JSON
*
* @param writer the destination writer
* @param event the event -must not be null
* @return the writer
* @throws IOException on problems generating the JSON
*/
public Writer toJson(final Writer writer, final LoggingEvent event)
throws IOException {
ThrowableInformation ti = event.getThrowableInformation();
toJson(writer,
event.getLoggerName(),
event.getTimeStamp(),
event.getLevel().toString(),
event.getThreadName(),
event.getRenderedMessage(),
ti);
return writer;
}
/**
* Build a JSON entry from the parameters. This is public for testing.
*
* @param writer destination
* @param loggerName logger name
* @param timeStamp time_t value
* @param level level string
* @param threadName name of the thread
* @param message rendered message
* @param ti nullable thrown information
* @return the writer
* @throws IOException on any problem
*/
public Writer toJson(final Writer writer,
final String loggerName,
final long timeStamp,
final String level,
final String threadName,
final String message,
final ThrowableInformation ti) throws IOException {
JsonGenerator json = factory.createGenerator(writer);
json.writeStartObject();
json.writeStringField(NAME, loggerName);
json.writeNumberField(TIME, timeStamp);
Date date = new Date(timeStamp);
json.writeStringField(DATE, dateFormat.format(date));
json.writeStringField(LEVEL, level);
json.writeStringField(THREAD, threadName);
json.writeStringField(MESSAGE, message);
if (ti != null) {
//there is some throwable info, but if the log event has been sent over the wire,
//there may not be a throwable inside it, just a summary.
Throwable thrown = ti.getThrowable();
String eclass = (thrown != null) ?
thrown.getClass().getName()
: "";
json.writeStringField(EXCEPTION_CLASS, eclass);
String[] stackTrace = ti.getThrowableStrRep();
json.writeArrayFieldStart(STACK);
for (String row : stackTrace) {
json.writeString(row);
}
json.writeEndArray();
}
json.writeEndObject();
json.flush();
json.close();
return writer;
}
/**
* This appender does not ignore throwables
*
* @return false, always
*/
@Override
public boolean ignoresThrowable() {
return false;
}
/**
* Do nothing
*/
@Override
public void activateOptions() {
}
/**
* For use in tests
*
* @param json incoming JSON to parse
* @return a node tree
* @throws IOException on any parsing problems
*/
public static ContainerNode parse(String json) throws IOException {
JsonNode jsonNode = READER.readTree(json);
if (!(jsonNode instanceof ContainerNode)) {
throw new IOException("Wrong JSON data: " + json);
}
return (ContainerNode) jsonNode;
}
}
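A hedged test-style sketch of the layout, mirroring the class's own test entry points rather than production wiring (the log4j 1.x LoggingEvent constructor signature is assumed):

// Hedged sketch: render one event through the layout and parse it back.
Log4Json layout = new Log4Json();
Logger logger = Logger.getLogger("test");
LoggingEvent event = new LoggingEvent(
    Logger.class.getName(), logger, Level.INFO, "test message", null);
String json = layout.format(event);        // one standalone JSON clause
ContainerNode tree = Log4Json.parse(json); // round-trip via the test helper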

View File

@ -34,8 +34,6 @@ import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -46,7 +44,6 @@ import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.GenericsUtil;
import org.apache.hadoop.util.ServletUtil;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
@ -341,18 +338,14 @@ public class LogLevel {
out.println(MARKER
+ "Submitted Class Name: <b>" + logName + "</b><br />");
org.slf4j.Logger log = LoggerFactory.getLogger(logName);
Logger log = Logger.getLogger(logName);
out.println(MARKER
+ "Log Class: <b>" + log.getClass().getName() +"</b><br />");
if (level != null) {
out.println(MARKER + "Submitted Level: <b>" + level + "</b><br />");
}
if (GenericsUtil.isLog4jLogger(logName)) {
process(Logger.getLogger(logName), level, out);
} else {
out.println("Sorry, setting log level is only supported for log4j loggers.<br />");
}
process(log, level, out);
}
out.println(FORMS);

View File

@ -227,29 +227,6 @@ public class MetricsRegistry {
return ret;
}
/**
* Create a mutable inverse metric that estimates inverse quantiles of a stream of values
* @param name of the metric
* @param desc metric description
* @param sampleName of the metric (e.g., "Ops")
* @param valueName of the metric (e.g., "Rate")
* @param interval rollover interval of estimator in seconds
* @return a new inverse quantile estimator object
* @throws MetricsException if interval is not a positive integer
*/
public synchronized MutableQuantiles newInverseQuantiles(String name, String desc,
String sampleName, String valueName, int interval) {
checkMetricName(name);
if (interval <= 0) {
throw new MetricsException("Interval should be positive. Value passed" +
" is: " + interval);
}
MutableQuantiles ret =
new MutableInverseQuantiles(name, desc, sampleName, valueName, interval);
metricsMap.put(name, ret);
return ret;
}
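A hedged registration sketch for the method removed above (the registry and metric names are made up for illustration):

// Hedged sketch: register an inverse-quantiles metric and feed it samples.
MetricsRegistry registry = new MetricsRegistry("datanode");
MutableQuantiles transferRate = registry.newInverseQuantiles(
    "transferRate", "data transfer rate", "Ops", "Rate", 60 /* seconds */);
transferRate.add(512L * 1024);   // e.g. one observed rate sample; higher is better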
/**
* Create a mutable metric with stats
* @param name of the metric
@ -301,7 +278,7 @@ public class MetricsRegistry {
}
/**
* Create a mutable rate metric (for throughput measurement).
* Create a mutable rate metric (for throughput measurement)
* @param name of the metric
* @param desc description
* @param extended produce extended stat (stdev/min/max etc.) if true

View File

@ -1,93 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.lib;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.metrics2.util.Quantile;
import java.text.DecimalFormat;
import static org.apache.hadoop.metrics2.lib.Interns.info;
/**
* Watches a stream of long values, maintaining online estimates of specific
* quantiles with provably low error bounds. Inverse quantiles are meant for
* highly accurate low-percentile (e.g. 1st, 5th) metrics.
* InverseQuantiles are used for metrics where the higher the value, the better
* (e.g. data transfer rate).
* The 1st percentile here corresponds to the 99th inverse percentile metric,
* the 5th percentile to the 95th, and so on.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class MutableInverseQuantiles extends MutableQuantiles{
static class InversePercentile extends Quantile {
InversePercentile(double inversePercentile) {
super(inversePercentile/100, inversePercentile/1000);
}
}
@VisibleForTesting
public static final Quantile[] INVERSE_QUANTILES = {new InversePercentile(50),
new InversePercentile(25), new InversePercentile(10),
new InversePercentile(5), new InversePercentile(1)};
/**
* Instantiates a new {@link MutableInverseQuantiles} for a metric that rolls itself
* over on the specified time interval.
*
* @param name of the metric
* @param description long-form textual description of the metric
* @param sampleName type of items in the stream (e.g., "Ops")
* @param valueName type of the values
* @param intervalSecs rollover interval (in seconds) of the estimator
*/
public MutableInverseQuantiles(String name, String description, String sampleName,
String valueName, int intervalSecs) {
super(name, description, sampleName, valueName, intervalSecs);
}
/**
* Sets quantileInfo.
*
* @param ucName capitalized name of the metric
* @param uvName capitalized type of the values
* @param desc uncapitalized long-form textual description of the metric
* @param lvName uncapitalized type of the values
* @param df Number formatter for inverse percentile value
*/
void setQuantiles(String ucName, String uvName, String desc, String lvName, DecimalFormat df) {
for (int i = 0; i < INVERSE_QUANTILES.length; i++) {
double inversePercentile = 100 * (1 - INVERSE_QUANTILES[i].quantile);
String nameTemplate = ucName + df.format(inversePercentile) + "thInversePercentile" + uvName;
String descTemplate = df.format(inversePercentile) + " inverse percentile " + lvName
+ " with " + getInterval() + " second interval for " + desc;
addQuantileInfo(i, info(nameTemplate, descTemplate));
}
}
/**
* Returns the array of Inverse Quantiles declared in MutableInverseQuantiles.
*
* @return array of Inverse Quantiles
*/
public synchronized Quantile[] getQuantiles() {
return INVERSE_QUANTILES;
}
}
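The percentile naming in setQuantiles() above can be checked with standalone arithmetic; a minimal sketch with a hypothetical class name and no Hadoop dependencies:

import java.text.DecimalFormat;

public class InversePercentileMath {
  public static void main(String[] args) {
    DecimalFormat df = new DecimalFormat("###.####");
    // InversePercentile(p) stores quantile p/100; the published name uses
    // 100 * (1 - quantile), so 1 maps to the 99thInversePercentile, etc.
    for (double p : new double[] {50, 25, 10, 5, 1}) {
      double quantile = p / 100;
      double reported = 100 * (1 - quantile);
      System.out.println(df.format(p) + " -> quantile " + quantile
          + " -> " + df.format(reported) + "thInversePercentile");
    }
  }
}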

View File

@ -20,7 +20,6 @@ package org.apache.hadoop.metrics2.lib;
import static org.apache.hadoop.metrics2.lib.Interns.info;
import java.text.DecimalFormat;
import java.util.Map;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
@ -49,14 +48,13 @@ import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFact
public class MutableQuantiles extends MutableMetric {
@VisibleForTesting
public static final Quantile[] QUANTILES = {new Quantile(0.50, 0.050),
public static final Quantile[] quantiles = { new Quantile(0.50, 0.050),
new Quantile(0.75, 0.025), new Quantile(0.90, 0.010),
new Quantile(0.95, 0.005), new Quantile(0.99, 0.001)};
new Quantile(0.95, 0.005), new Quantile(0.99, 0.001) };
private MetricsInfo numInfo;
private MetricsInfo[] quantileInfos;
private int intervalSecs;
private static DecimalFormat decimalFormat = new DecimalFormat("###.####");
private final MetricsInfo numInfo;
private final MetricsInfo[] quantileInfos;
private final int interval;
private QuantileEstimator estimator;
private long previousCount = 0;
@ -93,49 +91,35 @@ public class MutableQuantiles extends MutableMetric {
String lsName = StringUtils.uncapitalize(sampleName);
String lvName = StringUtils.uncapitalize(valueName);
setInterval(interval);
setNumInfo(info(ucName + "Num" + usName, String.format(
"Number of %s for %s with %ds interval", lsName, desc, interval)));
numInfo = info(ucName + "Num" + usName, String.format(
"Number of %s for %s with %ds interval", lsName, desc, interval));
// Construct the MetricsInfos for the quantiles, converting to percentiles
quantileInfos = new MetricsInfo[quantiles.length];
String nameTemplate = ucName + "%dthPercentile" + uvName;
String descTemplate = "%d percentile " + lvName + " with " + interval
+ " second interval for " + desc;
for (int i = 0; i < quantiles.length; i++) {
int percentile = (int) (100 * quantiles[i].quantile);
quantileInfos[i] = info(String.format(nameTemplate, percentile),
String.format(descTemplate, percentile));
}
estimator = new SampleQuantiles(quantiles);
this.interval = interval;
scheduledTask = scheduler.scheduleWithFixedDelay(new RolloverSample(this),
interval, interval, TimeUnit.SECONDS);
// Construct the MetricsInfos for the quantiles, converting to percentiles
Quantile[] quantilesArray = getQuantiles();
setQuantileInfos(quantilesArray.length);
setQuantiles(ucName, uvName, desc, lvName, decimalFormat);
setEstimator(new SampleQuantiles(quantilesArray));
}
/**
* Sets quantileInfo.
*
* @param ucName capitalized name of the metric
* @param uvName capitalized type of the values
* @param desc uncapitalized long-form textual description of the metric
* @param lvName uncapitalized type of the values
* @param pDecimalFormat Number formatter for percentile value
*/
void setQuantiles(String ucName, String uvName, String desc, String lvName, DecimalFormat pDecimalFormat) {
for (int i = 0; i < QUANTILES.length; i++) {
double percentile = 100 * QUANTILES[i].quantile;
String nameTemplate = ucName + pDecimalFormat.format(percentile) + "thPercentile" + uvName;
String descTemplate = pDecimalFormat.format(percentile) + " percentile " + lvName
+ " with " + getInterval() + " second interval for " + desc;
addQuantileInfo(i, info(nameTemplate, descTemplate));
}
}
public MutableQuantiles() {}
@Override
public synchronized void snapshot(MetricsRecordBuilder builder, boolean all) {
Quantile[] quantilesArray = getQuantiles();
if (all || changed()) {
builder.addGauge(numInfo, previousCount);
for (int i = 0; i < quantilesArray.length; i++) {
for (int i = 0; i < quantiles.length; i++) {
long newValue = 0;
// If snapshot is null, we failed to update since the window was empty
if (previousSnapshot != null) {
newValue = previousSnapshot.get(quantilesArray[i]);
newValue = previousSnapshot.get(quantiles[i]);
}
builder.addGauge(quantileInfos[i], newValue);
}
@ -149,59 +133,8 @@ public class MutableQuantiles extends MutableMetric {
estimator.insert(value);
}
/**
* Returns the array of Quantiles declared in MutableQuantiles.
*
* @return array of Quantiles
*/
public synchronized Quantile[] getQuantiles() {
return QUANTILES;
}
/**
* Set info about the metrics.
*
* @param pNumInfo info about the metrics.
*/
public synchronized void setNumInfo(MetricsInfo pNumInfo) {
this.numInfo = pNumInfo;
}
/**
* Initialize quantileInfos array.
*
* @param length of the quantileInfos array.
*/
public synchronized void setQuantileInfos(int length) {
this.quantileInfos = new MetricsInfo[length];
}
/**
* Add entry to quantileInfos array.
*
* @param i array index.
* @param info info to be added to quantileInfos array.
*/
public synchronized void addQuantileInfo(int i, MetricsInfo info) {
this.quantileInfos[i] = info;
}
/**
* Set the rollover interval (in seconds) of the estimator.
*
* @param pIntervalSecs of the estimator.
*/
public synchronized void setInterval(int pIntervalSecs) {
this.intervalSecs = pIntervalSecs;
}
/**
* Get the rollover interval (in seconds) of the estimator.
*
* @return intervalSecs of the estimator.
*/
public synchronized int getInterval() {
return intervalSecs;
public int getInterval() {
return interval;
}
public void stop() {

View File

@ -136,7 +136,6 @@ public class Token<T extends TokenIdentifier> implements Writable {
while (tokenIdentifiers.hasNext()) {
try {
TokenIdentifier id = tokenIdentifiers.next();
LOG.debug("Added {}:{} into tokenKindMap", id.getKind(), id.getClass());
tokenKindMap.put(id.getKind(), id.getClass());
} catch (ServiceConfigurationError | LinkageError e) {
// failure to load a token implementation

View File

@ -33,6 +33,7 @@ import org.slf4j.LoggerFactory;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.audit.CommonAuditContext;
@ -361,28 +362,29 @@ public class ServiceLauncher<S extends Service>
/**
* Override point: create an options instance to combine with the
* standard options set.
* <i>Important. Synchronize uses of {@link Option}</i>
* with {@code Option.class}
* <i>Important. Synchronize uses of {@link OptionBuilder}</i>
* with {@code OptionBuilder.class}
* @return the new options
*/
@SuppressWarnings("static-access")
protected Options createOptions() {
synchronized (Option.class) {
synchronized (OptionBuilder.class) {
Options options = new Options();
Option oconf = Option.builder(ARG_CONF_SHORT).argName("configuration file")
Option oconf = OptionBuilder.withArgName("configuration file")
.hasArg()
.desc("specify an application configuration file")
.longOpt(ARG_CONF)
.build();
Option confclass = Option.builder(ARG_CONFCLASS_SHORT).argName("configuration classname")
.withDescription("specify an application configuration file")
.withLongOpt(ARG_CONF)
.create(ARG_CONF_SHORT);
Option confclass = OptionBuilder.withArgName("configuration classname")
.hasArg()
.desc("Classname of a Hadoop Configuration subclass to load")
.longOpt(ARG_CONFCLASS)
.build();
Option property = Option.builder("D").argName("property=value")
.withDescription(
"Classname of a Hadoop Configuration subclass to load")
.withLongOpt(ARG_CONFCLASS)
.create(ARG_CONFCLASS_SHORT);
Option property = OptionBuilder.withArgName("property=value")
.hasArg()
.desc("use value for given property")
.build();
.withDescription("use value for given property")
.create('D');
options.addOption(oconf);
options.addOption(property);
options.addOption(confclass);

View File

@ -46,6 +46,7 @@ import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.MissingArgumentException;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.classification.InterfaceAudience;
@ -229,8 +230,8 @@ public final class ConfTest {
GenericOptionsParser genericParser = new GenericOptionsParser(args);
String[] remainingArgs = genericParser.getRemainingArgs();
Option conf = Option.builder("conffile").hasArg().build();
Option help = Option.builder("h").longOpt("help").hasArg().build();
Option conf = OptionBuilder.hasArg().create("conffile");
Option help = OptionBuilder.withLongOpt("help").create('h');
Options opts = new Options().addOption(conf).addOption(help);
CommandLineParser specificParser = new GnuParser();
CommandLine cmd = null;

View File

@ -32,6 +32,7 @@ import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.classification.InterfaceAudience;
@ -224,50 +225,51 @@ public class GenericOptionsParser {
/**
* @return Specify properties of each generic option.
* <i>Important</i>: as {@link Option} is not thread safe, subclasses
* must synchronize use on {@code Option.class}
* <i>Important</i>: as {@link OptionBuilder} is not thread safe, subclasses
* must synchronize use on {@code OptionBuilder.class}
* @param opts input opts.
*/
@SuppressWarnings("static-access")
protected Options buildGeneralOptions(Options opts) {
synchronized (Option.class) {
Option fs = Option.builder("fs").argName("file:///|hdfs://namenode:port")
synchronized (OptionBuilder.class) {
Option fs = OptionBuilder.withArgName("file:///|hdfs://namenode:port")
.hasArg()
.desc("specify default filesystem URL to use, "
.withDescription("specify default filesystem URL to use, "
+ "overrides 'fs.defaultFS' property from configurations.")
.build();
Option jt = Option.builder("jt").argName("local|resourcemanager:port")
.create("fs");
Option jt = OptionBuilder.withArgName("local|resourcemanager:port")
.hasArg()
.desc("specify a ResourceManager")
.build();
Option oconf = Option.builder("conf").argName("configuration file")
.withDescription("specify a ResourceManager")
.create("jt");
Option oconf = OptionBuilder.withArgName("configuration file")
.hasArg()
.desc("specify an application configuration file")
.build();
Option property = Option.builder("D").argName("property=value")
.withDescription("specify an application configuration file")
.create("conf");
Option property = OptionBuilder.withArgName("property=value")
.hasArg()
.desc("use value for given property")
.build();
Option libjars = Option.builder("libjars").argName("paths")
.withDescription("use value for given property")
.create('D');
Option libjars = OptionBuilder.withArgName("paths")
.hasArg()
.desc("comma separated jar files to include in the classpath.")
.build();
Option files = Option.builder("files").argName("paths")
.withDescription(
"comma separated jar files to include in the classpath.")
.create("libjars");
Option files = OptionBuilder.withArgName("paths")
.hasArg()
.desc("comma separated files to be copied to the " +
.withDescription("comma separated files to be copied to the " +
"map reduce cluster")
.build();
Option archives = Option.builder("archives").argName("paths")
.create("files");
Option archives = OptionBuilder.withArgName("paths")
.hasArg()
.desc("comma separated archives to be unarchived" +
.withDescription("comma separated archives to be unarchived" +
" on the compute machines.")
.build();
.create("archives");
// file with security tokens
Option tokensFile = Option.builder("tokenCacheFile").argName("tokensFile")
Option tokensFile = OptionBuilder.withArgName("tokensFile")
.hasArg()
.desc("name of the file with the tokens")
.build();
.withDescription("name of the file with the tokens")
.create("tokenCacheFile");
opts.addOption(fs);
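The two styles interleaved in this hunk correspond to commons-cli 1.3+ (Option.builder, trunk side) and commons-cli 1.2 (the deprecated static OptionBuilder, branch side; its shared static state is why the javadoc requires synchronizing on OptionBuilder.class). Side by side as a sketch:

// commons-cli 1.3+ fluent builder (trunk side of this hunk):
Option conf13 = Option.builder("conf")
    .argName("configuration file")
    .hasArg()
    .desc("specify an application configuration file")
    .build();

// commons-cli 1.2 static builder (branch side; not thread safe):
Option conf12 = OptionBuilder.withArgName("configuration file")
    .hasArg()
    .withDescription("specify an application configuration file")
    .create("conf");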

View File

@ -20,7 +20,6 @@ package org.apache.hadoop.util;
import java.lang.reflect.Array;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -34,14 +33,6 @@ import org.slf4j.LoggerFactory;
@InterfaceStability.Unstable
public class GenericsUtil {
private static final String SLF4J_LOG4J_ADAPTER_CLASS = "org.slf4j.impl.Log4jLoggerAdapter";
/**
* Set to false only if the log4j adapter class is not found in the classpath. Once set to false,
* the utility method does not bother re-loading the class again.
*/
private static final AtomicBoolean IS_LOG4J_LOGGER = new AtomicBoolean(true);
/**
* Returns the Class object (of type <code>Class&lt;T&gt;</code>) of the
* argument of type <code>T</code>.
@ -96,27 +87,12 @@ public class GenericsUtil {
if (clazz == null) {
return false;
}
return isLog4jLogger(clazz.getName());
}
/**
* Determine whether the log of the given logger is of Log4J implementation.
*
* @param logger the logger name, usually class name as string.
* @return true if the logger uses Log4J implementation.
*/
public static boolean isLog4jLogger(String logger) {
if (logger == null || !IS_LOG4J_LOGGER.get()) {
return false;
}
Logger log = LoggerFactory.getLogger(logger);
Logger log = LoggerFactory.getLogger(clazz);
try {
Class<?> log4jClass = Class.forName(SLF4J_LOG4J_ADAPTER_CLASS);
Class log4jClass = Class.forName("org.slf4j.impl.Log4jLoggerAdapter");
return log4jClass.isInstance(log);
} catch (ClassNotFoundException e) {
IS_LOG4J_LOGGER.set(false);
return false;
}
}
}
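A hedged sketch of the string-based overload this hunk removes, in the style of its LogLevel caller earlier in this diff:

// Hedged sketch: only touch log4j-specific APIs when the logger is log4j-backed.
String name = "org.apache.hadoop.ipc.Server";
if (GenericsUtil.isLog4jLogger(name)) {
  org.apache.log4j.Logger.getLogger(name).setLevel(org.apache.log4j.Level.DEBUG);
} else {
  System.out.println("setting log level is only supported for log4j loggers");
}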

View File

@ -23,7 +23,6 @@ import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import org.apache.commons.lang3.StringUtils;
import org.apache.curator.framework.AuthInfo;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
@ -40,17 +39,13 @@ import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.client.ZKClientConfig;
import org.apache.zookeeper.common.ClientX509Util;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.util.Preconditions;
import javax.naming.ConfigurationException;
/**
* Helper class that provides utility methods specific to ZK operations.
*/
@ -127,7 +122,7 @@ public final class ZKCuratorManager {
* Start the connection to the ZooKeeper ensemble.
* @throws IOException If the connection cannot be started.
*/
public void start() throws IOException{
public void start() throws IOException {
this.start(new ArrayList<>());
}
@ -137,20 +132,6 @@ public final class ZKCuratorManager {
* @throws IOException If the connection cannot be started.
*/
public void start(List<AuthInfo> authInfos) throws IOException {
this.start(authInfos, false);
}
/**
* Start the connection to the ZooKeeper ensemble.
*
* @param authInfos List of authentication keys.
* @param sslEnabled If the connection should be SSL/TLS encrypted.
* @throws IOException If the connection cannot be started.
*/
public void start(List<AuthInfo> authInfos, boolean sslEnabled)
throws IOException{
ZKClientConfig zkClientConfig = new ZKClientConfig();
// Connect to the ZooKeeper ensemble
String zkHostPort = conf.get(CommonConfigurationKeys.ZK_ADDRESS);
@ -158,8 +139,6 @@ public final class ZKCuratorManager {
throw new IOException(
CommonConfigurationKeys.ZK_ADDRESS + " is not configured.");
}
LOG.debug("Configured {} as {}", CommonConfigurationKeys.ZK_ADDRESS, zkHostPort);
int numRetries = conf.getInt(CommonConfigurationKeys.ZK_NUM_RETRIES,
CommonConfigurationKeys.ZK_NUM_RETRIES_DEFAULT);
int zkSessionTimeout = conf.getInt(CommonConfigurationKeys.ZK_TIMEOUT_MS,
@ -177,49 +156,21 @@ public final class ZKCuratorManager {
for (ZKUtil.ZKAuthInfo zkAuth : zkAuths) {
authInfos.add(new AuthInfo(zkAuth.getScheme(), zkAuth.getAuth()));
}
if (sslEnabled) {
validateSslConfiguration(conf);
}
CuratorFramework client = CuratorFrameworkFactory.builder().connectString(zkHostPort)
.zookeeperFactory(
new HadoopZookeeperFactory(conf.get(CommonConfigurationKeys.ZK_SERVER_PRINCIPAL),
conf.get(CommonConfigurationKeys.ZK_KERBEROS_PRINCIPAL),
conf.get(CommonConfigurationKeys.ZK_KERBEROS_KEYTAB), sslEnabled,
new TruststoreKeystore(conf))).zkClientConfig(zkClientConfig)
.sessionTimeoutMs(zkSessionTimeout).retryPolicy(retryPolicy)
.authorization(authInfos).build();
CuratorFramework client = CuratorFrameworkFactory.builder()
.connectString(zkHostPort)
.zookeeperFactory(new HadoopZookeeperFactory(
conf.get(CommonConfigurationKeys.ZK_SERVER_PRINCIPAL),
conf.get(CommonConfigurationKeys.ZK_KERBEROS_PRINCIPAL),
conf.get(CommonConfigurationKeys.ZK_KERBEROS_KEYTAB)))
.sessionTimeoutMs(zkSessionTimeout)
.retryPolicy(retryPolicy)
.authorization(authInfos)
.build();
client.start();
this.curator = client;
}
/* Check the SSL/TLS client connection requirements and report the name of any
missing configuration, which improves supportability. */
private void validateSslConfiguration(Configuration config) throws IOException {
if (StringUtils.isEmpty(config.get(CommonConfigurationKeys.ZK_SSL_KEYSTORE_LOCATION))) {
throw new IOException(
"The SSL encryption is enabled for the component's ZooKeeper client connection, "
+ "however the " + CommonConfigurationKeys.ZK_SSL_KEYSTORE_LOCATION + " " +
"parameter is empty.");
}
if (StringUtils.isEmpty(config.get(CommonConfigurationKeys.ZK_SSL_KEYSTORE_PASSWORD))) {
throw new IOException(
"The SSL encryption is enabled for the component's " + "ZooKeeper client connection, "
+ "however the " + CommonConfigurationKeys.ZK_SSL_KEYSTORE_PASSWORD + " " +
"parameter is empty.");
}
if (StringUtils.isEmpty(config.get(CommonConfigurationKeys.ZK_SSL_TRUSTSTORE_LOCATION))) {
throw new IOException(
"The SSL encryption is enabled for the component's ZooKeeper client connection, "
+ "however the " + CommonConfigurationKeys.ZK_SSL_TRUSTSTORE_LOCATION + " " +
"parameter is empty.");
}
if (StringUtils.isEmpty(config.get(CommonConfigurationKeys.ZK_SSL_TRUSTSTORE_PASSWORD))) {
throw new IOException(
"The SSL encryption is enabled for the component's ZooKeeper client connection, "
+ "however the " + CommonConfigurationKeys.ZK_SSL_TRUSTSTORE_PASSWORD + " " +
"parameter is empty.");
}
}
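Putting the removed pieces together, a hedged sketch of enabling an SSL/TLS ZooKeeper client connection. The paths and passwords are placeholders, and the single-argument ZKCuratorManager constructor is assumed from the surrounding class:

// Hedged sketch: all four keystore/truststore values must be set,
// or validateSslConfiguration() fails fast in start().
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.ZK_ADDRESS, "zk1:2281");
conf.set(CommonConfigurationKeys.ZK_SSL_KEYSTORE_LOCATION, "/etc/security/zk-ks.jks");
conf.set(CommonConfigurationKeys.ZK_SSL_KEYSTORE_PASSWORD, "keystore-secret");
conf.set(CommonConfigurationKeys.ZK_SSL_TRUSTSTORE_LOCATION, "/etc/security/zk-ts.jks");
conf.set(CommonConfigurationKeys.ZK_SSL_TRUSTSTORE_PASSWORD, "truststore-secret");

ZKCuratorManager manager = new ZKCuratorManager(conf);
manager.start(new ArrayList<>(), true);   // sslEnabled = true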
/**
* Get ACLs for a ZNode.
@ -463,14 +414,14 @@ public final class ZKCuratorManager {
throws Exception {
this.fencingNodePath = fencingNodePath;
curatorOperations.add(curator.transactionOp().create()
.withMode(CreateMode.PERSISTENT)
.withACL(fencingACL)
.forPath(fencingNodePath, new byte[0]));
.withMode(CreateMode.PERSISTENT)
.withACL(fencingACL)
.forPath(fencingNodePath, new byte[0]));
}
public void commit() throws Exception {
curatorOperations.add(curator.transactionOp().delete()
.forPath(fencingNodePath));
.forPath(fencingNodePath));
curator.transaction().forOperations(curatorOperations);
curatorOperations.clear();
}
@ -478,21 +429,21 @@ public final class ZKCuratorManager {
public void create(String path, byte[] data, List<ACL> acl, CreateMode mode)
throws Exception {
curatorOperations.add(curator.transactionOp().create()
.withMode(mode)
.withACL(acl)
.forPath(path, data));
.withMode(mode)
.withACL(acl)
.forPath(path, data));
}
public void delete(String path) throws Exception {
curatorOperations.add(curator.transactionOp().delete()
.forPath(path));
.forPath(path));
}
public void setData(String path, byte[] data, int version)
throws Exception {
curatorOperations.add(curator.transactionOp().setData()
.withVersion(version)
.forPath(path, data));
.withVersion(version)
.forPath(path, data));
}
}
@ -501,53 +452,21 @@ public final class ZKCuratorManager {
private final String zkPrincipal;
private final String kerberosPrincipal;
private final String kerberosKeytab;
private final Boolean sslEnabled;
private final TruststoreKeystore truststoreKeystore;
/**
* Constructor for the helper class to configure the ZooKeeper client connection.
* @param zkPrincipal Optional.
*/
public HadoopZookeeperFactory(String zkPrincipal) {
this(zkPrincipal, null, null);
}
/**
* Constructor for the helper class to configure the ZooKeeper client connection.
* @param zkPrincipal Optional.
* @param kerberosPrincipal Optional. Use along with kerberosKeytab.
* @param kerberosKeytab Optional. Use along with kerberosPrincipal.
*/
public HadoopZookeeperFactory(String zkPrincipal, String kerberosPrincipal,
String kerberosKeytab) {
this(zkPrincipal, kerberosPrincipal, kerberosKeytab, false,
new TruststoreKeystore(new Configuration()));
}
/**
* Constructor for the helper class to configure the ZooKeeper client connection.
*
* @param zkPrincipal Optional.
* @param kerberosPrincipal Optional. Use along with kerberosKeytab.
* @param kerberosKeytab Optional. Use along with kerberosPrincipal.
* @param sslEnabled Flag to enable SSL/TLS ZK client connection for each component
* independently.
* @param truststoreKeystore TruststoreKeystore object containing the keystoreLocation,
* keystorePassword, truststoreLocation, truststorePassword for
* SSL/TLS connection when sslEnabled is set to true.
*/
public HadoopZookeeperFactory(String zkPrincipal, String kerberosPrincipal,
String kerberosKeytab, boolean sslEnabled, TruststoreKeystore truststoreKeystore) {
this.zkPrincipal = zkPrincipal;
this.kerberosPrincipal = kerberosPrincipal;
this.kerberosKeytab = kerberosKeytab;
this.sslEnabled = sslEnabled;
this.truststoreKeystore = truststoreKeystore;
}
@Override
public ZooKeeper newZooKeeper(String connectString, int sessionTimeout,
Watcher watcher, boolean canBeReadOnly
Watcher watcher, boolean canBeReadOnly
) throws Exception {
ZKClientConfig zkClientConfig = new ZKClientConfig();
if (zkPrincipal != null) {
@ -559,65 +478,10 @@ public final class ZKCuratorManager {
if (zkClientConfig.isSaslClientEnabled() && !isJaasConfigurationSet(zkClientConfig)) {
setJaasConfiguration(zkClientConfig);
}
if (sslEnabled) {
setSslConfiguration(zkClientConfig);
}
return new ZooKeeper(connectString, sessionTimeout, watcher,
canBeReadOnly, zkClientConfig);
}
/**
* Configure ZooKeeper Client with SSL/TLS connection.
* @param zkClientConfig ZooKeeper Client configuration
*/
private void setSslConfiguration(ZKClientConfig zkClientConfig) throws ConfigurationException {
this.setSslConfiguration(zkClientConfig, new ClientX509Util());
}
private void setSslConfiguration(ZKClientConfig zkClientConfig, ClientX509Util x509Util)
throws ConfigurationException {
validateSslConfiguration();
LOG.info("Configuring the ZooKeeper client to use SSL/TLS encryption for connecting to the "
+ "ZooKeeper server.");
LOG.debug("Configuring the ZooKeeper client with {} location: {}.",
this.truststoreKeystore.keystoreLocation,
CommonConfigurationKeys.ZK_SSL_KEYSTORE_LOCATION);
LOG.debug("Configuring the ZooKeeper client with {} location: {}.",
this.truststoreKeystore.truststoreLocation,
CommonConfigurationKeys.ZK_SSL_TRUSTSTORE_LOCATION);
zkClientConfig.setProperty(ZKClientConfig.SECURE_CLIENT, "true");
zkClientConfig.setProperty(ZKClientConfig.ZOOKEEPER_CLIENT_CNXN_SOCKET,
"org.apache.zookeeper.ClientCnxnSocketNetty");
zkClientConfig.setProperty(x509Util.getSslKeystoreLocationProperty(),
this.truststoreKeystore.keystoreLocation);
zkClientConfig.setProperty(x509Util.getSslKeystorePasswdProperty(),
this.truststoreKeystore.keystorePassword);
zkClientConfig.setProperty(x509Util.getSslTruststoreLocationProperty(),
this.truststoreKeystore.truststoreLocation);
zkClientConfig.setProperty(x509Util.getSslTruststorePasswdProperty(),
this.truststoreKeystore.truststorePassword);
}
private void validateSslConfiguration() throws ConfigurationException {
if (StringUtils.isEmpty(this.truststoreKeystore.keystoreLocation)) {
throw new ConfigurationException(
"The keystore location parameter is empty for the ZooKeeper client connection.");
}
if (StringUtils.isEmpty(this.truststoreKeystore.keystorePassword)) {
throw new ConfigurationException(
"The keystore password parameter is empty for the ZooKeeper client connection.");
}
if (StringUtils.isEmpty(this.truststoreKeystore.truststoreLocation)) {
throw new ConfigurationException(
"The truststore location parameter is empty for the ZooKeeper client connection.");
}
if (StringUtils.isEmpty(this.truststoreKeystore.truststorePassword)) {
throw new ConfigurationException(
"The truststore password parameter is empty for the ZooKeeper client connection.");
}
}
private boolean isJaasConfigurationSet(ZKClientConfig zkClientConfig) {
String clientConfig = zkClientConfig.getProperty(ZKClientConfig.LOGIN_CONTEXT_NAME_KEY,
ZKClientConfig.LOGIN_CONTEXT_NAME_KEY_DEFAULT);
@ -639,44 +503,4 @@ public final class ZKCuratorManager {
zkClientConfig.setProperty(ZKClientConfig.LOGIN_CONTEXT_NAME_KEY, JAAS_CLIENT_ENTRY);
}
}
/**
* Helper class to contain the Truststore/Keystore paths for the ZK client connection over
* SSL/TLS.
*/
public static class TruststoreKeystore {
private final String keystoreLocation;
private final String keystorePassword;
private final String truststoreLocation;
private final String truststorePassword;
/**
* Configuration for the ZooKeeper connection when SSL/TLS is enabled.
* When a value is not configured, ensure that an empty string is set instead of null.
*
* @param conf ZooKeeper Client configuration
*/
public TruststoreKeystore(Configuration conf) {
keystoreLocation = conf.get(CommonConfigurationKeys.ZK_SSL_KEYSTORE_LOCATION, "");
keystorePassword = conf.get(CommonConfigurationKeys.ZK_SSL_KEYSTORE_PASSWORD, "");
truststoreLocation = conf.get(CommonConfigurationKeys.ZK_SSL_TRUSTSTORE_LOCATION, "");
truststorePassword = conf.get(CommonConfigurationKeys.ZK_SSL_TRUSTSTORE_PASSWORD, "");
}
public String getKeystoreLocation() {
return keystoreLocation;
}
public String getKeystorePassword() {
return keystorePassword;
}
public String getTruststoreLocation() {
return truststoreLocation;
}
public String getTruststorePassword() {
return truststorePassword;
}
}
}
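For context, a minimal usage sketch of the factory above; the connect string, store paths, and secrets below are placeholders, not values from this change:

```java
// Hypothetical wiring of HadoopZookeeperFactory with SSL enabled.
// All paths, passwords and the connect string are placeholders.
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.ZK_SSL_KEYSTORE_LOCATION, "/etc/security/zk-client.jks");
conf.set(CommonConfigurationKeys.ZK_SSL_KEYSTORE_PASSWORD, "keystore-secret");
conf.set(CommonConfigurationKeys.ZK_SSL_TRUSTSTORE_LOCATION, "/etc/security/zk-trust.jks");
conf.set(CommonConfigurationKeys.ZK_SSL_TRUSTSTORE_PASSWORD, "truststore-secret");

TruststoreKeystore stores = new TruststoreKeystore(conf);
HadoopZookeeperFactory factory = new HadoopZookeeperFactory(
    null /* zkPrincipal */, null /* kerberosPrincipal */, null /* kerberosKeytab */,
    true /* sslEnabled */, stores);
// Creates a client that connects over TLS via ClientCnxnSocketNetty.
ZooKeeper zk = factory.newZooKeeper("zk1.example.com:2281", 30_000,
    event -> { }, false);
```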

View File

@ -191,37 +191,6 @@ public final class RemoteIterators {
return new CloseRemoteIterator<>(iterator, toClose);
}
/**
* Wrap an iterator with one which adds a continuation probe.
* This allows work to exit fast without complicated breakout logic.
* @param iterator source
* @param continueWork predicate which will trigger a fast halt if it returns false.
* @param <S> source type.
* @return a new iterator
*/
public static <S> RemoteIterator<S> haltableRemoteIterator(
final RemoteIterator<S> iterator,
final CallableRaisingIOE<Boolean> continueWork) {
return new HaltableRemoteIterator<>(iterator, continueWork);
}
/**
* A remote iterator which simply counts up, stopping once the
* value reaches {@code excludedFinish}.
* This is primarily for tests or when submitting work into a TaskPool.
* Equivalent to
* <pre>
* for(long l = start; l &lt; excludedFinish; l++) yield l;
* </pre>
* @param start start value
* @param excludedFinish excluded finish
* @return an iterator which returns longs from [start, excludedFinish)
*/
public static RemoteIterator<Long> rangeExcludingIterator(
final long start, final long excludedFinish) {
return new RangeExcludingLongIterator(start, excludedFinish);
}
/**
* Build a list from a RemoteIterator.
* @param source source iterator
@ -422,12 +391,10 @@ public final class RemoteIterators {
/**
* Wrapper of another remote iterator; IOStatistics
* and Closeable methods are passed down if implemented.
* This class may be subclassed within the hadoop codebase
* if custom iterators are needed.
* @param <S> source type
* @param <T> type of returned value
*/
public static abstract class WrappingRemoteIterator<S, T>
private static abstract class WrappingRemoteIterator<S, T>
implements RemoteIterator<T>, IOStatisticsSource, Closeable {
/**
@ -748,93 +715,4 @@ public final class RemoteIterators {
}
}
}
/**
* An iterator which allows for a fast exit predicate.
* @param <S> source type
*/
private static final class HaltableRemoteIterator<S>
extends WrappingRemoteIterator<S, S> {
/**
* Probe as to whether work should continue.
*/
private final CallableRaisingIOE<Boolean> continueWork;
/**
* Wrap an iterator with one which adds a continuation probe.
* The probe will be called in the {@link #hasNext()} method, before
* the source iterator is itself checked and in {@link #next()}
* before retrieval.
* That is: it may be called multiple times per iteration.
* @param source source iterator.
* @param continueWork predicate which will trigger a fast halt if it returns false.
*/
private HaltableRemoteIterator(
final RemoteIterator<S> source,
final CallableRaisingIOE<Boolean> continueWork) {
super(source);
this.continueWork = continueWork;
}
@Override
public boolean hasNext() throws IOException {
return sourceHasNext();
}
@Override
public S next() throws IOException {
return sourceNext();
}
@Override
protected boolean sourceHasNext() throws IOException {
return continueWork.apply() && super.sourceHasNext();
}
}
/**
* A remote iterator which simply counts up, stopping once the
* value reaches the excluded finish.
* This is primarily for tests or when submitting work into a TaskPool.
*/
private static final class RangeExcludingLongIterator implements RemoteIterator<Long> {
/**
* Current value.
*/
private long current;
/**
* End value.
*/
private final long excludedFinish;
/**
* Construct.
* @param start start value.
* @param excludedFinish halt the iterator once the current value is equal
* to or greater than this.
*/
private RangeExcludingLongIterator(final long start, final long excludedFinish) {
this.current = start;
this.excludedFinish = excludedFinish;
}
@Override
public boolean hasNext() throws IOException {
return current < excludedFinish;
}
@Override
public Long next() throws IOException {
if (!hasNext()) {
throw new NoSuchElementException();
}
final long s = current;
current++;
return s;
}
}
}
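For context, a short sketch combining the two helpers above; the deadline predicate and the `process()` call are illustrative placeholders:

```java
// Iterate [0, 1000), but halt early once a wall-clock deadline passes.
long deadline = System.currentTimeMillis() + 5_000;
RemoteIterator<Long> it = RemoteIterators.haltableRemoteIterator(
    RemoteIterators.rangeExcludingIterator(0, 1000),
    () -> System.currentTimeMillis() < deadline);
while (it.hasNext()) {     // the continuation probe runs before each check
  process(it.next());      // process() is a placeholder for real work
}
```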

View File

@ -920,16 +920,6 @@
</description>
</property>
<property>
<name>io.erasurecode.codec.native.enabled</name>
<value>true</value>
<description>
Used to decide whether to enable the native codec. If set to false, the native
codec is not created and ISA-L support is disabled. Setting this to false is
recommended when your CPU does not support ISA-L.
</description>
</property>
<!-- file system properties -->
<property>
@ -3924,35 +3914,6 @@ The switch to turn S3A auditing on or off.
the ZK CLI).
</description>
</property>
<property>
<name>hadoop.zk.ssl.keystore.location</name>
<description>
Keystore location for ZooKeeper client connection over SSL.
</description>
</property>
<property>
<name>hadoop.zk.ssl.keystore.password</name>
<description>
Keystore password for ZooKeeper client connection over SSL.
</description>
</property>
<property>
<name>hadoop.zk.ssl.truststore.location</name>
<description>
Truststore location for ZooKeeper client connection over SSL.
</description>
</property>
<property>
<name>hadoop.zk.ssl.truststore.password</name>
<description>
Truststore password for ZooKeeper client connection over SSL.
</description>
</property>
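As an illustration, the same four keys can also be set programmatically; the values below are placeholders:

```java
// Hypothetical programmatic equivalent of the four XML properties above.
Configuration conf = new Configuration();
conf.set("hadoop.zk.ssl.keystore.location", "/etc/security/zk-client.jks");
conf.set("hadoop.zk.ssl.keystore.password", "keystore-secret");
conf.set("hadoop.zk.ssl.truststore.location", "/etc/security/zk-trust.jks");
conf.set("hadoop.zk.ssl.truststore.password", "truststore-secret");
```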
<property>
<name>hadoop.system.tags</name>
<value>YARN,HDFS,NAMENODE,DATANODE,REQUIRED,SECURITY,KERBEROS,PERFORMANCE,CLIENT

View File

@ -592,19 +592,17 @@ StateStoreMetrics
-----------------
StateStoreMetrics shows the statistics of the State Store component in Router-based federation.
| Name | Description |
|:------------------------------------------|:-----------------------------------------------------------------------------------|
| `ReadsNumOps` | Number of GET transactions for State Store within an interval time of metric |
| `ReadsAvgTime` | Average time of GET transactions for State Store in milliseconds |
| `WritesNumOps` | Number of PUT transactions for State Store within an interval time of metric |
| `WritesAvgTime` | Average time of PUT transactions for State Store in milliseconds |
| `RemovesNumOps` | Number of REMOVE transactions for State Store within an interval time of metric |
| `RemovesAvgTime` | Average time of REMOVE transactions for State Store in milliseconds |
| `FailuresNumOps` | Number of failed transactions for State Store within an interval time of metric |
| `FailuresAvgTime` | Average time of failed transactions for State Store in milliseconds |
| `Cache`*BaseRecord*`Size` | Number of store records to cache in State Store |
| `Cache`*BaseRecord*`LoadNumOps` | Number of times store records are loaded in the State Store Cache from State Store |
| `Cache`*BaseRecord*`LoadAvgTime` | Average time of loading State Store Cache from State Store in milliseconds |
| Name | Description |
|:---- |:---- |
| `ReadsNumOps` | Number of GET transactions for State Store within an interval time of metric |
| `ReadsAvgTime` | Average time of GET transactions for State Store in milliseconds |
| `WritesNumOps` | Number of PUT transactions for State Store within an interval time of metric |
| `WritesAvgTime` | Average time of PUT transactions for State Store in milliseconds |
| `RemovesNumOps` | Number of REMOVE transactions for State Store within an interval time of metric |
| `RemovesAvgTime` | Average time of REMOVE transactions for State Store in milliseconds |
| `FailuresNumOps` | Number of failed transactions for State Store within an interval time of metric |
| `FailuresAvgTime` | Average time of failed transactions for State Store in milliseconds |
| `Cache`*BaseRecord*`Size` | Number of store records to cache in State Store |
yarn context
============

View File

@ -157,7 +157,8 @@ The following instructions are to run a MapReduce job locally. If you want to ex
4. Make the HDFS directories required to execute MapReduce jobs:
$ bin/hdfs dfs -mkdir -p /user/<username>
$ bin/hdfs dfs -mkdir /user
$ bin/hdfs dfs -mkdir /user/<username>
5. Copy the input files into the distributed filesystem:

View File

@ -25,55 +25,6 @@ references to `FSDataInputStream` and its subclasses.
It is used to initiate a (potentially asynchronous) operation to open an existing
file for reading.
## <a name="History"></a> History
### Hadoop 3.3.0: API introduced
[HADOOP-15229](https://issues.apache.org/jira/browse/HADOOP-15229)
_Add FileSystem builder-based openFile() API to match createFile()_
* No `opt(String key, long value)` method was available.
* The `withFileStatus(status)` call required a non-null parameter.
* The sole filesystem to process options and file status was S3A.
* The only S3A-specific options were the S3 Select option and `fs.s3a.experimental.input.fadvise`.
* S3A Filesystem raised `IllegalArgumentException` if a file status was passed in
and the path of the filestatus did not match the path of the `openFile(path)` call.
This is the baseline implementation. To write code guaranteed to compile against this version,
use the `opt(String, String)` and `must(String, String)` methods, converting numbers to
string explicitly.
```java
fs.openFile(new Path("s3a://bucket/file"))
.opt("fs.option.openfile.length", Long.toString(length))
.build().get()
```
### Hadoop 3.3.5: standardization and expansion
[HADOOP-16202](https://issues.apache.org/jira/browse/HADOOP-16202)
_Enhance openFile() for better read performance against object stores_
* `withFileStatus(null)` required to be accepted (and ignored)
* Only the filename part of any supplied FileStatus path must match the
filename passed in on `openFile(path)`.
* An `opt(String key, long value)` option was added. *This is now deprecated as it
  caused regressions.*
* Standard `fs.option.openfile` options defined.
* S3A FS to use openfile length option, seek start/end options not _yet_ used.
* Azure ABFS connector takes a supplied `VersionedFileStatus` and omits any
HEAD probe for the object.
### Hadoop 3.3.6: API change to address operator overload bugs.
New `optLong()`, `optDouble()`, `mustLong()` and `mustDouble()` builder methods.
* See [HADOOP-18724](https://issues.apache.org/jira/browse/HADOOP-18724) _Open file fails with NumberFormatException for S3AFileSystem_,
  which was caused by the overloaded `opt()` binding to `opt(String, double)` rather than `opt(String, long)`.
* Specification updated to declare that unparseable numbers MUST be treated as "unset" and the default
value used instead.
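For illustration, a sketch against this version of the API; `fs`, `path`, and `status` are assumed to already exist:

```java
// Illustrative: the typed builder methods avoid the long/double
// overload ambiguity behind HADOOP-18724.
FSDataInputStream in = fs.openFile(path)
    .opt("fs.option.openfile.read.policy", "random")
    .optLong("fs.option.openfile.length", status.getLen())
    .build()
    .get();
```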
## Invariants
The `FutureDataInputStreamBuilder` interface does not require parameters or
@ -85,7 +36,7 @@ Some aspects of the state of the filesystem, MAY be checked in the initial
change between `openFile()` and the `build().get()` sequence. For example,
path validation.
## <a name="parameters"></a> Implementation-agnostic parameters.
## Implementation-agnostic parameters.
### <a name="Builder.bufferSize"></a> `FutureDataInputStreamBuilder bufferSize(int bufSize)`
@ -138,20 +89,10 @@ operations. This is to support wrapper filesystems and serialization/deserializa
of the status.
### <a name="optional"></a> Set optional or mandatory parameters
### Set optional or mandatory parameters
```java
FutureDataInputStreamBuilder opt(String key, String value)
FutureDataInputStreamBuilder opt(String key, int value)
FutureDataInputStreamBuilder opt(String key, boolean value)
FutureDataInputStreamBuilder optLong(String key, long value)
FutureDataInputStreamBuilder optDouble(String key, double value)
FutureDataInputStreamBuilder must(String key, String value)
FutureDataInputStreamBuilder must(String key, int value)
FutureDataInputStreamBuilder must(String key, boolean value)
FutureDataInputStreamBuilder mustLong(String key, long value)
FutureDataInputStreamBuilder mustDouble(String key, double value)
```
FutureDataInputStreamBuilder opt(String key, ...)
FutureDataInputStreamBuilder must(String key, ...)
Set optional or mandatory parameters to the builder. Using `opt()` or `must()`,
a client can specify FS-specific parameters without inspecting the concrete type
@ -162,7 +103,7 @@ Example:
```java
out = fs.openFile(path)
.must("fs.option.openfile.read.policy", "random")
.optLong("fs.http.connection.timeout", 30_000L)
.opt("fs.http.connection.timeout", 30_000L)
.withFileStatus(statusFromListing)
.build()
.get();
@ -174,9 +115,9 @@ An http-specific option has been supplied which may be interpreted by any store;
If the filesystem opening the file does not recognize the option, it can safely be
ignored.
### <a name="usage"></a> When to use `opt` versus `must`
### When to use `opt()` versus `must()`
The difference between `opt` and `must` is how the FileSystem opening
The difference between `opt()` and `must()` is how the FileSystem opening
the file must react to an option which it does not recognize.
```python
@ -203,7 +144,7 @@ irrespective of how the (key, value) pair was declared.
defined in this filesystem specification, validated through contract
tests.
## <a name="implementation"></a> Implementation Notes
#### Implementation Notes
Checking for supported options must be performed in the `build()` operation.
@ -214,13 +155,6 @@ Checking for supported options must be performed in the `build()` operation.
a feature which is recognized but not supported in the specific
`FileSystem`/`FileContext` instance `UnsupportedException` MUST be thrown.
Parsing of numeric values SHOULD trim any string and if the value
cannot be parsed as a number, downgrade to any default value supplied.
This is to address [HADOOP-18724](https://issues.apache.org/jira/browse/HADOOP-18724)
_Open file fails with NumberFormatException for S3AFileSystem_, which was caused by the overloaded `opt()`
builder parameter binding to `opt(String, double)` rather than `opt(String, long)` when a long
value was passed in.
The behavior of resolving the conflicts between the parameters set by
builder methods (i.e., `bufferSize()`) and `opt()`/`must()` is as follows:
@ -247,7 +181,7 @@ Even if no values of the status are used, the presence of the argument
can be interpreted as the caller declaring that they believe the file
to be present and of the given size.
## <a name="builder"></a> Builder interface
## Builder interface
### <a name="build"></a> `CompletableFuture<FSDataInputStream> build()`
@ -405,7 +339,7 @@ _Further reading_
* [Linux fadvise()](https://linux.die.net/man/2/fadvise).
* [Windows `CreateFile()`](https://docs.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilea#caching-behavior)
#### <a name="read.policy.adaptive"></a> Read Policy `adaptive`
#### <a name="read.policy."></a> Read Policy `adaptive`
Try to adapt the seek policy to the read pattern of the application.
@ -495,7 +429,7 @@ If this option is used by the FileSystem implementation
*Implementor's Notes*
* A value of `fs.option.openfile.length` &lt; 0 MUST be ignored.
* A value of `fs.option.openfile.length` &lt; 0 MUST be rejected.
* If a file status is supplied along with a value in `fs.option.openfile.length`,
the file status values take precedence.
@ -532,11 +466,11 @@ than that value.
The S3A Connector supports custom options for readahead and seek policy.
| Name | Type | Meaning |
|--------------------------------------|----------|---------------------------------------------------------------------------|
| `fs.s3a.readahead.range` | `long` | readahead range in bytes |
| `fs.s3a.experimental.input.fadvise`  | `String` | seek policy. Superseded by `fs.option.openfile.read.policy`                |
| `fs.s3a.input.async.drain.threshold` | `long` | threshold to switch to asynchronous draining of the stream. (Since 3.3.5) |
| Name | Type | Meaning |
|--------------------------------------|----------|-------------------------------------------------------------|
| `fs.s3a.readahead.range` | `long` | readahead range in bytes |
| `fs.s3a.input.async.drain.threshold` | `long` | threshold to switch to asynchronous draining of the stream |
| `fs.s3a.experimental.input.fadvise`  | `String` | seek policy. Superseded by `fs.option.openfile.read.policy` |
If the option set contains a SQL statement in the `fs.s3a.select.sql` option,
then the file is opened as an S3 Select query.
@ -576,8 +510,8 @@ protected SeekableInputStream newStream(Path path, FileStatus stat,
.opt("fs.option.openfile.read.policy", "vector, random")
.withFileStatus(stat);
builder.optLong("fs.option.openfile.split.start", splitStart);
builder.optLong("fs.option.openfile.split.end", splitEnd);
builder.opt("fs.option.openfile.split.start", splitStart);
builder.opt("fs.option.openfile.split.end", splitEnd);
CompletableFuture<FSDataInputStream> streamF = builder.build();
return HadoopStreams.wrap(FutureIO.awaitFuture(streamF));
}
@ -684,8 +618,8 @@ An example of a record reader passing in options to the file it opens.
file.getFileSystem(job).openFile(file);
// the start and end of the split may be used to build
// an input strategy.
builder.optLong("fs.option.openfile.split.start", start);
builder.optLong("fs.option.openfile.split.end", end);
builder.opt("fs.option.openfile.split.start", start);
builder.opt("fs.option.openfile.split.end", end);
FutureIO.propagateOptions(builder, job,
"mapreduce.job.input.file.option",
"mapreduce.job.input.file.must");
@ -699,7 +633,7 @@ An example of a record reader passing in options to the file it opens.
### `FileContext.openFile`
From `org.apache.hadoop.fs.AvroFSInput`; a file is opened with sequential input.
Because the file length has already been probed for, the length is passed down
```java
public AvroFSInput(FileContext fc, Path p) throws IOException {
@ -708,7 +642,7 @@ Because the file length has already been probed for, the length is passed down
this.stream = awaitFuture(fc.openFile(p)
.opt("fs.option.openfile.read.policy",
"sequential")
.optLong("fs.option.openfile.length",
.opt("fs.option.openfile.length",
Long.toString(status.getLen()))
.build());
fc.open(p);
@ -748,3 +682,8 @@ public T load(FileSystem fs,
}
}
```
*Note:* in Hadoop 3.3.2 and earlier, the `withFileStatus(status)` call
required a non-null parameter; this has since been relaxed.
For maximum compatibility across versions, only invoke the method
when the file status is known to be non-null.

View File

@ -41,6 +41,4 @@ HDFS as these are commonly expected by Hadoop client applications.
2. [Extending the specification and its tests](extending.html)
1. [Uploading a file using Multiple Parts](multipartuploader.html)
1. [IOStatistics](iostatistics.html)
1. [openFile()](openfile.html)
1. [SafeMode](safemode.html)
1. [LeaseRecoverable](leaserecoverable.html)
1. [openFile()](openfile.html).

View File

@ -1,52 +0,0 @@
<!---
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
# <a name="LeaseRecoverable"></a> interface `LeaseRecoverable`
The `LeaseRecoverable` interface tells whether a given path of current filesystem can perform lease
recovery for open file that a lease is not explicitly renewed or the client holding it goes away.
This interface should be implemented accordingly when necessary to any Filesystem that supports
lease recovery, e.g. `DistributedFileSystem` (HDFS) and `ViewDistributedFileSystem`.
```java
public interface LeaseRecoverable {
boolean recoverLease(Path file) throws IOException;
boolean isFileClosed(Path file) throws IOException;
}
```
There are two main functions in this interface: one performs lease recovery and the other
verifies whether a file has been closed.
### boolean recoverLease(Path file)
This function performs the lease recovery for the given file path, and it does not support
directory path recovery.
1. Return `true` if the file has already been closed, or does not require lease recovery.
1. Return `false` if the lease recovery is not yet completed.
1. Throw `IOException` if a directory path is given as input.
### boolean isFileClosed(Path file)
This function only checks if the given file path has been closed, and it does not support directory
verification.
1. Return `true` if the file has been closed.
1. Return `false` if the file is still open.
1. Throw `IOException` if a directory path is given as input.
### Path Capabilities SHOULD BE declared
If a filesystem supports `LeaseRecoverable`, it should return `true` to
`PathCapabilities.hasPathCapability(path, "fs.capability.lease.recoverable")` for a given path.
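As an illustration, a client might guard and drive lease recovery as in the following sketch; the retry interval is arbitrary and `fs`/`file` are assumed to exist:

```java
// Illustrative recovery loop guarded by the path capability above.
if (fs instanceof LeaseRecoverable
    && fs.hasPathCapability(file, "fs.capability.lease.recoverable")) {
  LeaseRecoverable recoverable = (LeaseRecoverable) fs;
  while (!recoverable.recoverLease(file)) {
    Thread.sleep(1000);  // recovery not yet complete; retry
  }
}
```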

View File

@ -31,7 +31,7 @@ There are a number of goals here:
having to invoke them.
1. Allow filesystems with their own optional per-instance features to declare
whether or not they are active for the specific instance.
1. Allow for filesystem connectors which work with object stores to expose the
fundamental difference in semantics of these stores (e.g. files not visible
until closed, file rename being `O(data)`, directory rename being non-atomic,
etc.)
@ -122,7 +122,7 @@ will be permitted on that path by the caller.
*Duration of availability*
As the state of a remote store changes, so may path capabilities. This
may be due to changes in the local state of the filesystem (e.g. symbolic links
or mount points changing), or changes in its functionality (e.g. a feature
becoming available/unavailable due to operational changes, system upgrades, etc.)

View File

@ -1,45 +0,0 @@
<!---
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
# <a name="SafeMode"></a> interface `SafeMode`
The `SafeMode` interface provides a way to perform safe mode actions and obtain the
status after such actions performed to the `FileSystem`.
This is admin only interface, should be implemented accordingly when necessary to
Filesystem that support safe mode, e.g. `DistributedFileSystem` (HDFS) and
`ViewDistributedFileSystem`.
```java
public interface SafeMode {
default boolean setSafeMode(SafeModeAction action) throws IOException {
return setSafeMode(action, false);
}
boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException;
}
```
The goal of this interface is to allow any file system implementation to share the
same concept of safe mode with the following actions and states.
### Safe mode actions
1. `GET`, get the safe mode status of the file system.
1. `ENTER`, enter the safe mode for the file system.
1. `LEAVE`, exit safe mode for the file system gracefully.
1. `FORCE_EXIT`, exit safe mode for the file system even if there is any ongoing data process.
### Safe mode states
1. Return `true` when safe mode is on.
1. Return `false` when safe mode is off, usually as the result of the safe mode actions
   `GET`, `LEAVE`, or `FORCE_EXIT`.
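For illustration, driving safe mode through this interface might look like the following sketch, assuming `fs` implements `SafeMode`:

```java
// Illustrative admin sequence; action names follow the list above.
SafeMode safeMode = (SafeMode) fs;
safeMode.setSafeMode(SafeModeAction.ENTER);   // returns true: safe mode is on
// ... perform maintenance while writes are blocked ...
safeMode.setSafeMode(SafeModeAction.LEAVE);   // returns false: safe mode is off
boolean on = safeMode.setSafeMode(SafeModeAction.GET);
```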

View File

@ -1,359 +0,0 @@
<!---
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-->
# Apache Hadoop Changelog
## Release 3.3.5 - 2023-03-14
### IMPORTANT ISSUES:
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HADOOP-17956](https://issues.apache.org/jira/browse/HADOOP-17956) | Replace all default Charset usage with UTF-8 | Major | . | Viraj Jasani | Viraj Jasani |
| [HADOOP-18621](https://issues.apache.org/jira/browse/HADOOP-18621) | CryptoOutputStream::close leak when encrypted zones + quota exceptions | Critical | fs | Colm Dougan | Colm Dougan |
### NEW FEATURES:
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HADOOP-18003](https://issues.apache.org/jira/browse/HADOOP-18003) | Add a method appendIfAbsent for CallerContext | Minor | . | Tao Li | Tao Li |
| [HDFS-16331](https://issues.apache.org/jira/browse/HDFS-16331) | Make dfs.blockreport.intervalMsec reconfigurable | Major | . | Tao Li | Tao Li |
| [HDFS-16371](https://issues.apache.org/jira/browse/HDFS-16371) | Exclude slow disks when choosing volume | Major | . | Tao Li | Tao Li |
| [HDFS-16400](https://issues.apache.org/jira/browse/HDFS-16400) | Reconfig DataXceiver parameters for datanode | Major | . | Tao Li | Tao Li |
| [HDFS-16399](https://issues.apache.org/jira/browse/HDFS-16399) | Reconfig cache report parameters for datanode | Major | . | Tao Li | Tao Li |
| [HDFS-16398](https://issues.apache.org/jira/browse/HDFS-16398) | Reconfig block report parameters for datanode | Major | . | Tao Li | Tao Li |
| [HDFS-16396](https://issues.apache.org/jira/browse/HDFS-16396) | Reconfig slow peer parameters for datanode | Major | . | Tao Li | Tao Li |
| [HDFS-16397](https://issues.apache.org/jira/browse/HDFS-16397) | Reconfig slow disk parameters for datanode | Major | . | Tao Li | Tao Li |
| [MAPREDUCE-7341](https://issues.apache.org/jira/browse/MAPREDUCE-7341) | Add a task-manifest output committer for Azure and GCS | Major | client | Steve Loughran | Steve Loughran |
| [HADOOP-18163](https://issues.apache.org/jira/browse/HADOOP-18163) | hadoop-azure support for the Manifest Committer of MAPREDUCE-7341 | Major | fs/azure | Steve Loughran | Steve Loughran |
| [HDFS-16413](https://issues.apache.org/jira/browse/HDFS-16413) | Reconfig dfs usage parameters for datanode | Major | . | Tao Li | Tao Li |
| [HDFS-16521](https://issues.apache.org/jira/browse/HDFS-16521) | DFS API to retrieve slow datanodes | Major | . | Viraj Jasani | Viraj Jasani |
| [HDFS-16568](https://issues.apache.org/jira/browse/HDFS-16568) | dfsadmin -reconfig option to start/query reconfig on all live datanodes | Major | . | Viraj Jasani | Viraj Jasani |
| [HDFS-16582](https://issues.apache.org/jira/browse/HDFS-16582) | Expose aggregate latency of slow node as perceived by the reporting node | Major | . | Viraj Jasani | Viraj Jasani |
| [HDFS-16595](https://issues.apache.org/jira/browse/HDFS-16595) | Slow peer metrics - add median, mad and upper latency limits | Major | . | Viraj Jasani | Viraj Jasani |
| [YARN-11241](https://issues.apache.org/jira/browse/YARN-11241) | Add uncleaning option for local app log file with log-aggregation enabled | Major | log-aggregation | Ashutosh Gupta | Ashutosh Gupta |
| [HADOOP-18103](https://issues.apache.org/jira/browse/HADOOP-18103) | High performance vectored read API in Hadoop | Major | common, fs, fs/adl, fs/s3 | Mukund Thakur | Mukund Thakur |
### IMPROVEMENTS:
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HADOOP-17276](https://issues.apache.org/jira/browse/HADOOP-17276) | Extend CallerContext to make it include many items | Major | . | Hui Fei | Hui Fei |
| [HDFS-15745](https://issues.apache.org/jira/browse/HDFS-15745) | Make DataNodePeerMetrics#LOW\_THRESHOLD\_MS and MIN\_OUTLIER\_DETECTION\_NODES configurable | Major | . | Haibin Huang | Haibin Huang |
| [HDFS-16266](https://issues.apache.org/jira/browse/HDFS-16266) | Add remote port information to HDFS audit log | Major | . | Tao Li | Tao Li |
| [YARN-10997](https://issues.apache.org/jira/browse/YARN-10997) | Revisit allocation and reservation logging | Major | . | Andras Gyori | Andras Gyori |
| [HDFS-16310](https://issues.apache.org/jira/browse/HDFS-16310) | RBF: Add client port to CallerContext for Router | Major | . | Tao Li | Tao Li |
| [HDFS-16352](https://issues.apache.org/jira/browse/HDFS-16352) | return the real datanode numBlocks in #getDatanodeStorageReport | Major | . | qinyuren | qinyuren |
| [HDFS-16426](https://issues.apache.org/jira/browse/HDFS-16426) | fix nextBlockReportTime when trigger full block report force | Major | . | qinyuren | qinyuren |
| [HDFS-16430](https://issues.apache.org/jira/browse/HDFS-16430) | Validate maximum blocks in EC group when adding an EC policy | Minor | ec, erasure-coding | daimin | daimin |
| [HDFS-16403](https://issues.apache.org/jira/browse/HDFS-16403) | Improve FUSE IO performance by supporting FUSE parameter max\_background | Minor | fuse-dfs | daimin | daimin |
| [HDFS-16262](https://issues.apache.org/jira/browse/HDFS-16262) | Async refresh of cached locations in DFSInputStream | Major | . | Bryan Beaudreault | Bryan Beaudreault |
| [HADOOP-18093](https://issues.apache.org/jira/browse/HADOOP-18093) | Better exception handling for testFileStatusOnMountLink() in ViewFsBaseTest.java | Trivial | . | Xing Lin | Xing Lin |
| [HDFS-16423](https://issues.apache.org/jira/browse/HDFS-16423) | balancer should not get blocks on stale storages | Major | balancer & mover | qinyuren | qinyuren |
| [HADOOP-18139](https://issues.apache.org/jira/browse/HADOOP-18139) | Allow configuration of zookeeper server principal | Major | auth | Owen O'Malley | Owen O'Malley |
| [YARN-11076](https://issues.apache.org/jira/browse/YARN-11076) | Upgrade jQuery version in Yarn UI2 | Major | yarn-ui-v2 | Tamas Domok | Tamas Domok |
| [HDFS-16495](https://issues.apache.org/jira/browse/HDFS-16495) | RBF should prepend the client ip rather than append it. | Major | . | Owen O'Malley | Owen O'Malley |
| [HADOOP-18144](https://issues.apache.org/jira/browse/HADOOP-18144) | getTrashRoot/s in ViewFileSystem should return viewFS path, not targetFS path | Major | common | Xing Lin | Xing Lin |
| [HADOOP-18162](https://issues.apache.org/jira/browse/HADOOP-18162) | hadoop-common enhancements for the Manifest Committer of MAPREDUCE-7341 | Major | fs | Steve Loughran | Steve Loughran |
| [HDFS-16529](https://issues.apache.org/jira/browse/HDFS-16529) | Remove unnecessary setObserverRead in TestConsistentReadsObserver | Trivial | test | Zhaohui Wang | Zhaohui Wang |
| [HDFS-16530](https://issues.apache.org/jira/browse/HDFS-16530) | setReplication debug log creates a new string even if debug is disabled | Major | namenode | Stephen O'Donnell | Stephen O'Donnell |
| [HDFS-16457](https://issues.apache.org/jira/browse/HDFS-16457) | Make fs.getspaceused.classname reconfigurable | Major | namenode | yanbin.zhang | yanbin.zhang |
| [HDFS-16427](https://issues.apache.org/jira/browse/HDFS-16427) | Add debug log for BlockManager#chooseExcessRedundancyStriped | Minor | erasure-coding | Tao Li | Tao Li |
| [HDFS-16497](https://issues.apache.org/jira/browse/HDFS-16497) | EC: Add param comment for liveBusyBlockIndices with HDFS-14768 | Minor | erasure-coding, namanode | caozhiqiang | caozhiqiang |
| [HDFS-16389](https://issues.apache.org/jira/browse/HDFS-16389) | Improve NNThroughputBenchmark test mkdirs | Major | benchmarks, namenode | JiangHua Zhu | JiangHua Zhu |
| [HADOOP-17551](https://issues.apache.org/jira/browse/HADOOP-17551) | Upgrade maven-site-plugin to 3.11.0 | Major | . | Akira Ajisaka | Ashutosh Gupta |
| [HDFS-16519](https://issues.apache.org/jira/browse/HDFS-16519) | Add throttler to EC reconstruction | Minor | datanode, ec | daimin | daimin |
| [HDFS-14478](https://issues.apache.org/jira/browse/HDFS-14478) | Add libhdfs APIs for openFile | Major | hdfs-client, libhdfs, native | Sahil Takiar | Sahil Takiar |
| [HADOOP-16202](https://issues.apache.org/jira/browse/HADOOP-16202) | Enhance openFile() for better read performance against object stores | Major | fs, fs/s3, tools/distcp | Steve Loughran | Steve Loughran |
| [YARN-11116](https://issues.apache.org/jira/browse/YARN-11116) | Migrate Times util from SimpleDateFormat to thread-safe DateTimeFormatter class | Minor | . | Jonathan Turner Eagles | Jonathan Turner Eagles |
| [HDFS-16520](https://issues.apache.org/jira/browse/HDFS-16520) | Improve EC pread: avoid potential reading whole block | Major | dfsclient, ec, erasure-coding | daimin | daimin |
| [HADOOP-18167](https://issues.apache.org/jira/browse/HADOOP-18167) | Add metrics to track delegation token secret manager operations | Major | . | Hector Sandoval Chaverri | Hector Sandoval Chaverri |
| [YARN-10080](https://issues.apache.org/jira/browse/YARN-10080) | Support show app id on localizer thread pool | Major | nodemanager | zhoukang | Ashutosh Gupta |
| [HADOOP-18172](https://issues.apache.org/jira/browse/HADOOP-18172) | Change scope of getRootFallbackLink for InodeTree to make them accessible from outside package | Minor | . | Xing Lin | Xing Lin |
| [HDFS-16588](https://issues.apache.org/jira/browse/HDFS-16588) | Backport HDFS-16584 to branch-3.3. | Major | balancer & mover, namenode | JiangHua Zhu | JiangHua Zhu |
| [HADOOP-18240](https://issues.apache.org/jira/browse/HADOOP-18240) | Upgrade Yetus to 0.14.0 | Major | build | Akira Ajisaka | Ashutosh Gupta |
| [HDFS-16585](https://issues.apache.org/jira/browse/HDFS-16585) | Add @VisibleForTesting in Dispatcher.java after HDFS-16268 | Trivial | . | Wei-Chiu Chuang | Ashutosh Gupta |
| [HADOOP-18244](https://issues.apache.org/jira/browse/HADOOP-18244) | Fix Hadoop-Common JavaDoc Error on branch-3.3 | Major | common | Shilun Fan | Shilun Fan |
| [HADOOP-18269](https://issues.apache.org/jira/browse/HADOOP-18269) | Misleading method name in DistCpOptions | Minor | tools/distcp | guophilipse | guophilipse |
| [HADOOP-18275](https://issues.apache.org/jira/browse/HADOOP-18275) | update os-maven-plugin to 1.7.0 | Minor | build | Steve Loughran | Steve Loughran |
| [HDFS-16610](https://issues.apache.org/jira/browse/HDFS-16610) | Make fsck read timeout configurable | Major | hdfs-client | Stephen O'Donnell | Stephen O'Donnell |
| [HDFS-16576](https://issues.apache.org/jira/browse/HDFS-16576) | Remove unused imports in HDFS project | Minor | . | Ashutosh Gupta | Ashutosh Gupta |
| [HDFS-16629](https://issues.apache.org/jira/browse/HDFS-16629) | [JDK 11] Fix javadoc warnings in hadoop-hdfs module | Minor | hdfs | Shilun Fan | Shilun Fan |
| [YARN-11172](https://issues.apache.org/jira/browse/YARN-11172) | Fix testDelegationToken | Major | test | zhengchenyu | zhengchenyu |
| [HADOOP-17833](https://issues.apache.org/jira/browse/HADOOP-17833) | Improve Magic Committer Performance | Minor | fs/s3 | Steve Loughran | Steve Loughran |
| [HADOOP-18288](https://issues.apache.org/jira/browse/HADOOP-18288) | Total requests and total requests per sec served by RPC servers | Major | . | Viraj Jasani | Viraj Jasani |
| [HADOOP-18336](https://issues.apache.org/jira/browse/HADOOP-18336) | tag FSDataInputStream.getWrappedStream() @Public/@Stable | Minor | fs | Steve Loughran | Ashutosh Gupta |
| [HADOOP-13144](https://issues.apache.org/jira/browse/HADOOP-13144) | Enhancing IPC client throughput via multiple connections per user | Minor | ipc | Jason Kace | Íñigo Goiri |
| [HDFS-16712](https://issues.apache.org/jira/browse/HDFS-16712) | Fix incorrect placeholder in DataNode.java | Major | . | ZanderXu | ZanderXu |
| [HDFS-16702](https://issues.apache.org/jira/browse/HDFS-16702) | MiniDFSCluster should report cause of exception in assertion error | Minor | hdfs | Steve Vaughan | Steve Vaughan |
| [HADOOP-18365](https://issues.apache.org/jira/browse/HADOOP-18365) | Updated addresses are still accessed using the old IP address | Major | common | Steve Vaughan | Steve Vaughan |
| [HDFS-16687](https://issues.apache.org/jira/browse/HDFS-16687) | RouterFsckServlet replicates code from DfsServlet base class | Major | federation | Steve Vaughan | Steve Vaughan |
| [HADOOP-18333](https://issues.apache.org/jira/browse/HADOOP-18333) | hadoop-client-runtime impact by CVE-2022-2047 CVE-2022-2048 due to shaded jetty | Major | build | phoebe chen | Ashutosh Gupta |
| [HADOOP-18406](https://issues.apache.org/jira/browse/HADOOP-18406) | Adds alignment context to call path for creating RPC proxy with multiple connections per user. | Major | ipc | Simbarashe Dzinamarira | Simbarashe Dzinamarira |
| [HDFS-16684](https://issues.apache.org/jira/browse/HDFS-16684) | Exclude self from JournalNodeSyncer when using a bind host | Major | journal-node | Steve Vaughan | Steve Vaughan |
| [HDFS-16686](https://issues.apache.org/jira/browse/HDFS-16686) | GetJournalEditServlet fails to authorize valid Kerberos request | Major | journal-node | Steve Vaughan | Steve Vaughan |
| [YARN-11303](https://issues.apache.org/jira/browse/YARN-11303) | Upgrade jquery ui to 1.13.2 | Major | security | D M Murali Krishna Reddy | Ashutosh Gupta |
| [HADOOP-16769](https://issues.apache.org/jira/browse/HADOOP-16769) | LocalDirAllocator to provide diagnostics when file creation fails | Minor | util | Ramesh Kumar Thangarajan | Ashutosh Gupta |
| [HADOOP-18341](https://issues.apache.org/jira/browse/HADOOP-18341) | upgrade commons-configuration2 to 2.8.0 and commons-text to 1.9 | Major | . | PJ Fanning | PJ Fanning |
| [HDFS-16776](https://issues.apache.org/jira/browse/HDFS-16776) | Erasure Coding: The length of targets should be checked when DN gets a reconstruction task | Major | . | Kidd5368 | Kidd5368 |
| [HADOOP-18469](https://issues.apache.org/jira/browse/HADOOP-18469) | Add XMLUtils methods to centralise code that creates secure XML parsers | Major | . | PJ Fanning | PJ Fanning |
| [HADOOP-18442](https://issues.apache.org/jira/browse/HADOOP-18442) | Remove the hadoop-openstack module | Major | build, fs, fs/swift | Steve Loughran | Steve Loughran |
| [HADOOP-18468](https://issues.apache.org/jira/browse/HADOOP-18468) | upgrade jettison json jar due to fix CVE-2022-40149 | Major | build | PJ Fanning | PJ Fanning |
| [HADOOP-17779](https://issues.apache.org/jira/browse/HADOOP-17779) | Lock File System Creator Semaphore Uninterruptibly | Minor | fs | David Mollitor | David Mollitor |
| [HADOOP-18360](https://issues.apache.org/jira/browse/HADOOP-18360) | Update commons-csv from 1.0 to 1.9.0. | Minor | common | Shilun Fan | Shilun Fan |
| [HADOOP-18493](https://issues.apache.org/jira/browse/HADOOP-18493) | update jackson-databind 2.12.7.1 due to CVE fixes | Major | . | PJ Fanning | PJ Fanning |
| [HADOOP-17563](https://issues.apache.org/jira/browse/HADOOP-17563) | Update Bouncy Castle to 1.68 or later | Major | build | Takanobu Asanuma | PJ Fanning |
| [HADOOP-18497](https://issues.apache.org/jira/browse/HADOOP-18497) | Upgrade commons-text version to fix CVE-2022-42889 | Major | build | Xiaoqiao He | PJ Fanning |
| [HDFS-16795](https://issues.apache.org/jira/browse/HDFS-16795) | Use secure XML parser utils in hdfs classes | Major | . | PJ Fanning | PJ Fanning |
| [YARN-11330](https://issues.apache.org/jira/browse/YARN-11330) | Use secure XML parser utils in YARN | Major | . | PJ Fanning | PJ Fanning |
| [MAPREDUCE-7411](https://issues.apache.org/jira/browse/MAPREDUCE-7411) | Use secure XML parser utils in MapReduce | Major | . | PJ Fanning | PJ Fanning |
| [HADOOP-18512](https://issues.apache.org/jira/browse/HADOOP-18512) | upgrade woodstox-core to 5.4.0 for security fix | Major | common | phoebe chen | PJ Fanning |
| [YARN-11363](https://issues.apache.org/jira/browse/YARN-11363) | Remove unused TimelineVersionWatcher and TimelineVersion from hadoop-yarn-server-tests | Major | test, yarn | Ashutosh Gupta | Ashutosh Gupta |
| [YARN-11364](https://issues.apache.org/jira/browse/YARN-11364) | Docker Container to accept docker Image name with sha256 digest | Major | yarn | Ashutosh Gupta | Ashutosh Gupta |
| [HADOOP-18517](https://issues.apache.org/jira/browse/HADOOP-18517) | ABFS: Add fs.azure.enable.readahead option to disable readahead | Major | fs/azure | Steve Loughran | Steve Loughran |
| [HADOOP-18484](https://issues.apache.org/jira/browse/HADOOP-18484) | upgrade hsqldb to v2.7.1 due to CVE | Major | . | PJ Fanning | Ashutosh Gupta |
| [HDFS-16844](https://issues.apache.org/jira/browse/HDFS-16844) | [RBF] The routers should be resiliant against exceptions from StateStore | Major | rbf | Owen O'Malley | Owen O'Malley |
| [HADOOP-18573](https://issues.apache.org/jira/browse/HADOOP-18573) | Improve error reporting on non-standard kerberos names | Blocker | security | Steve Loughran | Steve Loughran |
| [HADOOP-18561](https://issues.apache.org/jira/browse/HADOOP-18561) | CVE-2021-37533 on commons-net is included in hadoop common and hadoop-client-runtime | Blocker | build | phoebe chen | Steve Loughran |
| [HADOOP-18067](https://issues.apache.org/jira/browse/HADOOP-18067) | Über-jira: S3A Hadoop 3.3.5 features | Major | fs/s3 | Steve Loughran | Mukund Thakur |
| [YARN-10444](https://issues.apache.org/jira/browse/YARN-10444) | Node Manager to use openFile() with whole-file read policy for localizing files. | Minor | nodemanager | Steve Loughran | Steve Loughran |
| [HADOOP-18661](https://issues.apache.org/jira/browse/HADOOP-18661) | Fix bin/hadoop usage script terminology | Blocker | scripts | Steve Loughran | Steve Loughran |
### BUG FIXES:
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HADOOP-17116](https://issues.apache.org/jira/browse/HADOOP-17116) | Skip Retry INFO logging on first failover from a proxy | Major | ha | Hanisha Koneru | Hanisha Koneru |
| [YARN-10553](https://issues.apache.org/jira/browse/YARN-10553) | Refactor TestDistributedShell | Major | distributed-shell, test | Ahmed Hussein | Ahmed Hussein |
| [HDFS-15839](https://issues.apache.org/jira/browse/HDFS-15839) | RBF: Cannot get method setBalancerBandwidth on Router Client | Major | rbf | Yang Yun | Yang Yun |
| [HADOOP-17588](https://issues.apache.org/jira/browse/HADOOP-17588) | CryptoInputStream#close() should be synchronized | Major | . | Renukaprasad C | Renukaprasad C |
| [HADOOP-17836](https://issues.apache.org/jira/browse/HADOOP-17836) | Improve logging on ABFS error reporting | Minor | fs/azure | Steve Loughran | Steve Loughran |
| [HADOOP-17989](https://issues.apache.org/jira/browse/HADOOP-17989) | ITestAzureBlobFileSystemDelete failing "Operations has null HTTP response" | Major | fs/azure, test | Steve Loughran | Steve Loughran |
| [YARN-11055](https://issues.apache.org/jira/browse/YARN-11055) | In cgroups-operations.c some fprintf format strings don't end with "\\n" | Minor | nodemanager | Gera Shegalov | Gera Shegalov |
| [YARN-11065](https://issues.apache.org/jira/browse/YARN-11065) | Bump follow-redirects from 1.13.3 to 1.14.7 in hadoop-yarn-ui | Major | yarn-ui-v2 | Akira Ajisaka | |
| [HDFS-16303](https://issues.apache.org/jira/browse/HDFS-16303) | Losing over 100 datanodes in state decommissioning results in full blockage of all datanode decommissioning | Major | . | Kevin Wikant | Kevin Wikant |
| [HDFS-16443](https://issues.apache.org/jira/browse/HDFS-16443) | Fix edge case where DatanodeAdminDefaultMonitor doubly enqueues a DatanodeDescriptor on exception | Major | hdfs | Kevin Wikant | Kevin Wikant |
| [HDFS-16449](https://issues.apache.org/jira/browse/HDFS-16449) | Fix hadoop web site release notes and changelog not available | Minor | documentation | guophilipse | guophilipse |
| [YARN-10788](https://issues.apache.org/jira/browse/YARN-10788) | TestCsiClient fails | Major | test | Akira Ajisaka | Akira Ajisaka |
| [HADOOP-18126](https://issues.apache.org/jira/browse/HADOOP-18126) | Update junit 5 version due to build issues | Major | build | PJ Fanning | PJ Fanning |
| [YARN-11033](https://issues.apache.org/jira/browse/YARN-11033) | isAbsoluteResource is not correct for dynamically created queues | Minor | yarn | Tamas Domok | Tamas Domok |
| [YARN-10894](https://issues.apache.org/jira/browse/YARN-10894) | Follow up YARN-10237: fix the new test case in TestRMWebServicesCapacitySched | Major | . | Tamas Domok | Tamas Domok |
| [YARN-11022](https://issues.apache.org/jira/browse/YARN-11022) | Fix the documentation for max-parallel-apps in CS | Major | capacity scheduler | Tamas Domok | Tamas Domok |
| [HADOOP-18150](https://issues.apache.org/jira/browse/HADOOP-18150) | Fix ITestAuditManagerDisabled after S3A audit logging was enabled in HADOOP-18091 | Major | fs/s3 | Mehakmeet Singh | Mehakmeet Singh |
| [HADOOP-17976](https://issues.apache.org/jira/browse/HADOOP-17976) | abfs etag extraction inconsistent between LIST and HEAD calls | Minor | fs/azure | Steve Loughran | Steve Loughran |
| [HADOOP-18129](https://issues.apache.org/jira/browse/HADOOP-18129) | Change URI[] in INodeLink to String[] to reduce memory footprint of ViewFileSystem | Major | . | Abhishek Das | Abhishek Das |
| [HADOOP-18145](https://issues.apache.org/jira/browse/HADOOP-18145) | Fileutil's unzip method causes unzipped files to lose their original permissions | Major | common | jingxiong zhong | jingxiong zhong |
| [HDFS-16518](https://issues.apache.org/jira/browse/HDFS-16518) | KeyProviderCache close cached KeyProvider with Hadoop ShutdownHookManager | Major | hdfs | Lei Yang | Lei Yang |
| [HADOOP-18169](https://issues.apache.org/jira/browse/HADOOP-18169) | getDelegationTokens in ViewFs should also fetch the token from the fallback FS | Major | . | Xing Lin | Xing Lin |
| [HDFS-16479](https://issues.apache.org/jira/browse/HDFS-16479) | EC: NameNode should not send a reconstruction work when the source datanodes are insufficient | Critical | ec, erasure-coding | Yuanbo Liu | Takanobu Asanuma |
| [HDFS-16509](https://issues.apache.org/jira/browse/HDFS-16509) | Fix decommission UnsupportedOperationException: Remove unsupported | Major | namenode | daimin | daimin |
| [HDFS-16456](https://issues.apache.org/jira/browse/HDFS-16456) | EC: Decommission a rack with only one dn will fail when the rack number is equal with replication | Critical | ec, namenode | caozhiqiang | caozhiqiang |
| [HADOOP-18201](https://issues.apache.org/jira/browse/HADOOP-18201) | Remove base and bucket overrides for endpoint in ITestS3ARequesterPays.java | Major | fs/s3 | Mehakmeet Singh | Daniel Carl Jones |
| [HDFS-16536](https://issues.apache.org/jira/browse/HDFS-16536) | TestOfflineImageViewer fails on branch-3.3 | Major | test | Akira Ajisaka | Ashutosh Gupta |
| [HDFS-16538](https://issues.apache.org/jira/browse/HDFS-16538) | EC decoding failed due to not enough valid inputs | Major | erasure-coding | qinyuren | qinyuren |
| [HDFS-16544](https://issues.apache.org/jira/browse/HDFS-16544) | EC decoding failed due to invalid buffer | Major | erasure-coding | qinyuren | qinyuren |
| [HADOOP-17564](https://issues.apache.org/jira/browse/HADOOP-17564) | Fix typo in UnixShellGuide.html | Trivial | . | Takanobu Asanuma | Ashutosh Gupta |
| [HDFS-16552](https://issues.apache.org/jira/browse/HDFS-16552) | Fix NPE for TestBlockManager | Major | . | Tao Li | Tao Li |
| [MAPREDUCE-7246](https://issues.apache.org/jira/browse/MAPREDUCE-7246) | In MapredAppMasterRest#Mapreduce\_Application\_Master\_Info\_API, the datatype of appId should be "string". | Major | documentation | jenny | Ashutosh Gupta |
| [YARN-10187](https://issues.apache.org/jira/browse/YARN-10187) | Removing hadoop-yarn-project/hadoop-yarn/README as it is no longer maintained. | Minor | documentation | N Sanketh Reddy | Ashutosh Gupta |
| [HADOOP-16515](https://issues.apache.org/jira/browse/HADOOP-16515) | Update the link to compatibility guide | Minor | documentation | Akira Ajisaka | Ashutosh Gupta |
| [HDFS-16185](https://issues.apache.org/jira/browse/HDFS-16185) | Fix comment in LowRedundancyBlocks.java | Minor | documentation | Akira Ajisaka | Ashutosh Gupta |
| [HADOOP-17479](https://issues.apache.org/jira/browse/HADOOP-17479) | Fix the examples of hadoop config prefix | Minor | documentation | Akira Ajisaka | Ashutosh Gupta |
| [HADOOP-18222](https://issues.apache.org/jira/browse/HADOOP-18222) | Prevent DelegationTokenSecretManagerMetrics from registering multiple times | Major | . | Hector Sandoval Chaverri | Hector Sandoval Chaverri |
| [HDFS-16540](https://issues.apache.org/jira/browse/HDFS-16540) | Data locality is lost when DataNode pod restarts in kubernetes | Major | namenode | Huaxiang Sun | Huaxiang Sun |
| [YARN-11133](https://issues.apache.org/jira/browse/YARN-11133) | YarnClient gets the wrong EffectiveMinCapacity value | Major | api | Zilong Zhu | Zilong Zhu |
| [YARN-10850](https://issues.apache.org/jira/browse/YARN-10850) | TimelineService v2 lists containers for all attempts when filtering for one | Major | timelinereader | Benjamin Teke | Benjamin Teke |
| [YARN-11141](https://issues.apache.org/jira/browse/YARN-11141) | Capacity Scheduler does not support ambiguous queue names when moving application across queues | Major | capacity scheduler | András Győri | András Győri |
| [HDFS-16586](https://issues.apache.org/jira/browse/HDFS-16586) | Purge FsDatasetAsyncDiskService threadgroup; it causes BPServiceActor$CommandProcessingThread IllegalThreadStateException 'fatal exception and exit' | Major | datanode | Michael Stack | Michael Stack |
| [HADOOP-18251](https://issues.apache.org/jira/browse/HADOOP-18251) | Fix failure of extracting JIRA id from commit message in git\_jira\_fix\_version\_check.py | Minor | build | Masatake Iwasaki | Masatake Iwasaki |
| [YARN-11128](https://issues.apache.org/jira/browse/YARN-11128) | Fix comments in TestProportionalCapacityPreemptionPolicy\* | Minor | capacityscheduler, documentation | Ashutosh Gupta | Ashutosh Gupta |
| [HADOOP-18234](https://issues.apache.org/jira/browse/HADOOP-18234) | s3a access point xml examples are wrong | Minor | documentation, fs/s3 | Steve Loughran | Ashutosh Gupta |
| [HADOOP-18238](https://issues.apache.org/jira/browse/HADOOP-18238) | Fix reentrancy check in SFTPFileSystem.close() | Major | common | yi liu | Ashutosh Gupta |
| [HDFS-16583](https://issues.apache.org/jira/browse/HDFS-16583) | DatanodeAdminDefaultMonitor can get stuck in an infinite loop | Major | . | Stephen O'Donnell | Stephen O'Donnell |
| [HDFS-16608](https://issues.apache.org/jira/browse/HDFS-16608) | Fix the link in TestClientProtocolForPipelineRecovery | Minor | documentation | Samrat Deb | Samrat Deb |
| [HDFS-16563](https://issues.apache.org/jira/browse/HDFS-16563) | Namenode WebUI prints sensitive information on Token Expiry | Major | namanode, security, webhdfs | Renukaprasad C | Renukaprasad C |
| [HDFS-16623](https://issues.apache.org/jira/browse/HDFS-16623) | IllegalArgumentException in LifelineSender | Major | . | ZanderXu | ZanderXu |
| [HDFS-16064](https://issues.apache.org/jira/browse/HDFS-16064) | Determine when to invalidate corrupt replicas based on number of usable replicas | Major | datanode, namenode | Kevin Wikant | Kevin Wikant |
| [HADOOP-18255](https://issues.apache.org/jira/browse/HADOOP-18255) | fsdatainputstreambuilder.md refers to hadoop 3.3.3, when it shouldn't | Minor | documentation | Steve Loughran | Ashutosh Gupta |
| [MAPREDUCE-7387](https://issues.apache.org/jira/browse/MAPREDUCE-7387) | Fix TestJHSSecurity#testDelegationToken AssertionError due to HDFS-16563 | Major | . | Shilun Fan | Shilun Fan |
| [MAPREDUCE-7369](https://issues.apache.org/jira/browse/MAPREDUCE-7369) | MapReduce tasks timing out when spends more time on MultipleOutputs#close | Major | . | Prabhu Joseph | Ashutosh Gupta |
| [MAPREDUCE-7391](https://issues.apache.org/jira/browse/MAPREDUCE-7391) | TestLocalDistributedCacheManager failing after HADOOP-16202 | Major | test | Steve Loughran | Steve Loughran |
| [HDFS-16591](https://issues.apache.org/jira/browse/HDFS-16591) | StateStoreZooKeeper fails to initialize | Major | rbf | Hector Sandoval Chaverri | Hector Sandoval Chaverri |
| [HADOOP-18321](https://issues.apache.org/jira/browse/HADOOP-18321) | Fix when to read an additional record from a BZip2 text file split | Critical | io | Ashutosh Gupta | Ashutosh Gupta |
| [HADOOP-18100](https://issues.apache.org/jira/browse/HADOOP-18100) | Change scope of inner classes in InodeTree to make them accessible outside package | Major | . | Abhishek Das | Abhishek Das |
| [HADOOP-18217](https://issues.apache.org/jira/browse/HADOOP-18217) | shutdownhookmanager should not be multithreaded (deadlock possible) | Minor | util | Catherinot Remi | |
| [MAPREDUCE-7372](https://issues.apache.org/jira/browse/MAPREDUCE-7372) | MapReduce set permission too late in copyJar method | Major | mrv2 | Zhang Dongsheng | |
| [HADOOP-18330](https://issues.apache.org/jira/browse/HADOOP-18330) | S3AFileSystem removes Path when calling createS3Client | Minor | fs/s3 | Ashutosh Pant | Ashutosh Pant |
| [HADOOP-18390](https://issues.apache.org/jira/browse/HADOOP-18390) | Fix out of sync import for HADOOP-18321 | Minor | . | Ashutosh Gupta | Ashutosh Gupta |
| [HADOOP-18340](https://issues.apache.org/jira/browse/HADOOP-18340) | deleteOnExit does not work with S3AFileSystem | Minor | fs/s3 | Huaxiang Sun | Huaxiang Sun |
| [HADOOP-18383](https://issues.apache.org/jira/browse/HADOOP-18383) | Codecs with @DoNotPool annotation are not closed causing memory leak | Major | common | Kevin Sewell | Kevin Sewell |
| [HDFS-16729](https://issues.apache.org/jira/browse/HDFS-16729) | RBF: fix some unreasonably annotated docs | Major | documentation, rbf | JiangHua Zhu | JiangHua Zhu |
| [HADOOP-18398](https://issues.apache.org/jira/browse/HADOOP-18398) | Prevent AvroRecord\*.class from being included non-test jar | Major | common | YUBI LEE | YUBI LEE |
| [HDFS-4043](https://issues.apache.org/jira/browse/HDFS-4043) | Namenode Kerberos Login does not use proper hostname for host qualified hdfs principal name. | Major | security | Ahad Rana | Steve Vaughan |
| [MAPREDUCE-7403](https://issues.apache.org/jira/browse/MAPREDUCE-7403) | Support spark dynamic partitioning in the Manifest Committer | Major | mrv2 | Steve Loughran | Steve Loughran |
| [HDFS-16732](https://issues.apache.org/jira/browse/HDFS-16732) | [SBN READ] Avoid get location from observer when the block report is delayed. | Critical | hdfs | zhengchenyu | zhengchenyu |
| [HADOOP-18375](https://issues.apache.org/jira/browse/HADOOP-18375) | Fix failure of shelltest for hadoop\_add\_ldlibpath | Minor | test | Masatake Iwasaki | Masatake Iwasaki |
| [HDFS-16755](https://issues.apache.org/jira/browse/HDFS-16755) | TestQJMWithFaults.testUnresolvableHostName() can fail due to unexpected host resolution | Minor | test | Steve Vaughan | Steve Vaughan |
| [HADOOP-18400](https://issues.apache.org/jira/browse/HADOOP-18400) | Fix file split duplicating records from a succeeding split when reading BZip2 text files | Critical | . | Ashutosh Gupta | Ashutosh Gupta |
| [HADOOP-18242](https://issues.apache.org/jira/browse/HADOOP-18242) | ABFS Rename Failure when tracking metadata is in incomplete state | Major | fs/azure | Mehakmeet Singh | Mehakmeet Singh |
| [HADOOP-18456](https://issues.apache.org/jira/browse/HADOOP-18456) | NullPointerException in ObjectListingIterator's constructor | Blocker | fs/s3 | Quanlong Huang | Steve Loughran |
| [HADOOP-18444](https://issues.apache.org/jira/browse/HADOOP-18444) | Add Support for localized trash for ViewFileSystem in Trash.moveToAppropriateTrash | Major | . | Xing Lin | Xing Lin |
| [HADOOP-18443](https://issues.apache.org/jira/browse/HADOOP-18443) | Upgrade snakeyaml to 1.32 | Major | security | Ashutosh Gupta | Ashutosh Gupta |
| [HDFS-16766](https://issues.apache.org/jira/browse/HDFS-16766) | hdfs ec command loads (administrator provided) erasure code policy files without disabling xml entity expansion | Major | security | Jing | Ashutosh Gupta |
| [HDFS-13369](https://issues.apache.org/jira/browse/HDFS-13369) | FSCK Report broken with RequestHedgingProxyProvider | Major | hdfs | Harshakiran Reddy | Ranith Sardar |
| [YARN-11039](https://issues.apache.org/jira/browse/YARN-11039) | LogAggregationFileControllerFactory::getFileControllerForRead can leak threads | Blocker | log-aggregation | Rajesh Balamohan | Steve Loughran |
| [HADOOP-18499](https://issues.apache.org/jira/browse/HADOOP-18499) | S3A to support HTTPS web proxies | Major | fs/s3 | Mehakmeet Singh | Mehakmeet Singh |
| [HADOOP-18233](https://issues.apache.org/jira/browse/HADOOP-18233) | Possible race condition with TemporaryAWSCredentialsProvider | Major | auth, fs/s3 | Jason Sleight | Jimmy Wong |
| [MAPREDUCE-7425](https://issues.apache.org/jira/browse/MAPREDUCE-7425) | Document Fix for yarn.app.mapreduce.client-am.ipc.max-retries | Major | yarn | teng wang | teng wang |
| [HADOOP-18528](https://issues.apache.org/jira/browse/HADOOP-18528) | Disable abfs prefetching by default | Major | fs/azure | Mehakmeet Singh | Mehakmeet Singh |
| [HDFS-16836](https://issues.apache.org/jira/browse/HDFS-16836) | StandbyCheckpointer can still trigger rollback fs image after RU is finalized | Major | hdfs | Lei Yang | Lei Yang |
| [HADOOP-18324](https://issues.apache.org/jira/browse/HADOOP-18324) | Interrupting RPC Client calls can lead to thread exhaustion | Critical | ipc | Owen O'Malley | Owen O'Malley |
| [HDFS-16832](https://issues.apache.org/jira/browse/HDFS-16832) | [SBN READ] Fix NPE when check the block location of empty directory | Major | . | zhengchenyu | zhengchenyu |
| [HADOOP-18498](https://issues.apache.org/jira/browse/HADOOP-18498) | [ABFS]: Error introduced when SAS Token containing '?' prefix is passed | Minor | fs/azure | Sree Bhattacharyya | Sree Bhattacharyya |
| [HDFS-16847](https://issues.apache.org/jira/browse/HDFS-16847) | RBF: StateStore writer should not commit tmp file if there was an error in writing the file. | Critical | hdfs, rbf | Simbarashe Dzinamarira | Simbarashe Dzinamarira |
| [HADOOP-18401](https://issues.apache.org/jira/browse/HADOOP-18401) | No ARM binaries in branch-3.3.x releases | Minor | build | Ling Xu | |
| [HADOOP-18408](https://issues.apache.org/jira/browse/HADOOP-18408) | [ABFS]: ITestAbfsManifestCommitProtocol fails on nonHNS configuration | Minor | fs/azure, test | Pranav Saxena | Sree Bhattacharyya |
| [HADOOP-18402](https://issues.apache.org/jira/browse/HADOOP-18402) | S3A committer NPE in spark job abort | Blocker | fs/s3 | Steve Loughran | Steve Loughran |
| [HADOOP-18569](https://issues.apache.org/jira/browse/HADOOP-18569) | NFS Gateway may release buffer too early | Blocker | nfs | Attila Doroszlai | Attila Doroszlai |
| [HADOOP-18574](https://issues.apache.org/jira/browse/HADOOP-18574) | Changing log level of IOStatistics increment to make the DEBUG logs less noisy | Major | fs/s3 | Mehakmeet Singh | Mehakmeet Singh |
| [HADOOP-18521](https://issues.apache.org/jira/browse/HADOOP-18521) | ABFS ReadBufferManager buffer sharing across concurrent HTTP requests | Critical | fs/azure | Steve Loughran | Steve Loughran |
| [MAPREDUCE-7375](https://issues.apache.org/jira/browse/MAPREDUCE-7375) | JobSubmissionFiles doesn't set the right permission after mkdirs | Major | mrv2 | Zhang Dongsheng | |
| [HADOOP-17717](https://issues.apache.org/jira/browse/HADOOP-17717) | Update wildfly openssl to 1.1.3.Final | Major | . | Wei-Chiu Chuang | Wei-Chiu Chuang |
| [HADOOP-18598](https://issues.apache.org/jira/browse/HADOOP-18598) | maven site generation doesn't include javadocs | Blocker | site | Steve Loughran | Steve Loughran |
| [HDFS-16895](https://issues.apache.org/jira/browse/HDFS-16895) | NamenodeHeartbeatService should use credentials of logged in user | Major | rbf | Hector Sandoval Chaverri | Hector Sandoval Chaverri |
| [HDFS-16853](https://issues.apache.org/jira/browse/HDFS-16853) | The UT TestLeaseRecovery2#testHardLeaseRecoveryAfterNameNodeRestart failed because of HADOOP-18324 | Blocker | . | ZanderXu | ZanderXu |
| [HADOOP-18641](https://issues.apache.org/jira/browse/HADOOP-18641) | cyclonedx maven plugin breaks builds on recent maven releases (3.9.0) | Major | build | Steve Loughran | Steve Loughran |
| [HDFS-16923](https://issues.apache.org/jira/browse/HDFS-16923) | The getListing RPC will throw NPE if the path does not exist | Critical | . | ZanderXu | ZanderXu |
| [HDFS-16896](https://issues.apache.org/jira/browse/HDFS-16896) | HDFS Client hedged read has a higher failure rate than without hedged read | Major | hdfs-client | Tom McCormick | Tom McCormick |
| [YARN-11383](https://issues.apache.org/jira/browse/YARN-11383) | Workflow priority mappings are case sensitive | Major | yarn | Aparajita Choudhary | Aparajita Choudhary |
| [HDFS-16939](https://issues.apache.org/jira/browse/HDFS-16939) | Fix the thread safety bug in LowRedundancyBlocks | Major | namenode | Shuyan Zhang | Shuyan Zhang |
| [HDFS-16934](https://issues.apache.org/jira/browse/HDFS-16934) | org.apache.hadoop.hdfs.tools.TestDFSAdmin#testAllDatanodesReconfig regression | Minor | dfsadmin, test | Steve Loughran | Shilun Fan |
### TESTS:
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HDFS-16573](https://issues.apache.org/jira/browse/HDFS-16573) | Fix test TestDFSStripedInputStreamWithRandomECPolicy | Minor | test | daimin | daimin |
| [HDFS-16637](https://issues.apache.org/jira/browse/HDFS-16637) | TestHDFSCLI#testAll consistently failing | Major | . | Viraj Jasani | Viraj Jasani |
| [YARN-11248](https://issues.apache.org/jira/browse/YARN-11248) | Add unit test for FINISHED\_CONTAINERS\_PULLED\_BY\_AM event on DECOMMISSIONING | Major | test | Ashutosh Gupta | Ashutosh Gupta |
| [HDFS-16625](https://issues.apache.org/jira/browse/HDFS-16625) | Unit tests aren't checking for PMDK availability | Major | test | Steve Vaughan | Steve Vaughan |
### SUB-TASKS:
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HDFS-13293](https://issues.apache.org/jira/browse/HDFS-13293) | RBF: The RouterRPCServer should transfer client IP via CallerContext to NamenodeRpcServer | Major | rbf | Baolong Mao | Hui Fei |
| [HDFS-15630](https://issues.apache.org/jira/browse/HDFS-15630) | RBF: Fix wrong client IP info in CallerContext when requests mount points with multi-destinations. | Major | rbf | Chengwei Wang | Chengwei Wang |
| [HADOOP-17152](https://issues.apache.org/jira/browse/HADOOP-17152) | Implement wrapper for guava newArrayList and newLinkedList | Major | common | Ahmed Hussein | Viraj Jasani |
| [HADOOP-17851](https://issues.apache.org/jira/browse/HADOOP-17851) | S3A to support user-specified content encoding | Minor | fs/s3 | Holden Karau | Holden Karau |
| [HADOOP-17492](https://issues.apache.org/jira/browse/HADOOP-17492) | abfs listLocatedStatus to support incremental/async page fetching | Major | fs/azure | Steve Loughran | Steve Loughran |
| [HADOOP-17409](https://issues.apache.org/jira/browse/HADOOP-17409) | Remove S3Guard - no longer needed | Major | fs/s3 | Steve Loughran | Steve Loughran |
| [HADOOP-18084](https://issues.apache.org/jira/browse/HADOOP-18084) | ABFS: Add testfilePath while verifying test contents are read correctly | Minor | fs/azure, test | Anmol Asrani | Anmol Asrani |
| [HDFS-16169](https://issues.apache.org/jira/browse/HDFS-16169) | Fix TestBlockTokenWithDFSStriped#testEnd2End failure | Major | test | Hui Fei | secfree |
| [HADOOP-18091](https://issues.apache.org/jira/browse/HADOOP-18091) | S3A auditing leaks memory through ThreadLocal references | Major | fs/s3 | Steve Loughran | Steve Loughran |
| [HADOOP-18071](https://issues.apache.org/jira/browse/HADOOP-18071) | ABFS: Set driver global timeout for ITestAzureBlobFileSystemBasics | Major | fs/azure | Sumangala Patki | Sumangala Patki |
| [HADOOP-17765](https://issues.apache.org/jira/browse/HADOOP-17765) | ABFS: Use Unique File Paths in Tests | Major | fs/azure | Sumangala Patki | Sumangala Patki |
| [HADOOP-17862](https://issues.apache.org/jira/browse/HADOOP-17862) | ABFS: Fix unchecked cast compiler warning for AbfsListStatusRemoteIterator | Major | fs/azure | Sumangala Patki | Sumangala Patki |
| [HADOOP-18075](https://issues.apache.org/jira/browse/HADOOP-18075) | ABFS: Fix failure caused by listFiles() in ITestAbfsRestOperationException | Major | fs/azure | Sumangala Patki | Sumangala Patki |
| [HADOOP-18112](https://issues.apache.org/jira/browse/HADOOP-18112) | Implement paging during S3 multi object delete. | Critical | fs/s3 | Mukund Thakur | Mukund Thakur |
| [HADOOP-16204](https://issues.apache.org/jira/browse/HADOOP-16204) | ABFS tests to include terasort | Minor | fs/azure, test | Steve Loughran | Steve Loughran |
| [HDFS-13248](https://issues.apache.org/jira/browse/HDFS-13248) | RBF: Namenode need to choose block location for the client | Major | . | Wu Weiwei | Owen O'Malley |
| [HADOOP-13704](https://issues.apache.org/jira/browse/HADOOP-13704) | S3A getContentSummary() to move to listFiles(recursive) to count children; instrument use | Minor | fs/s3 | Steve Loughran | Ahmar Suhail |
| [HADOOP-14661](https://issues.apache.org/jira/browse/HADOOP-14661) | S3A to support Requester Pays Buckets | Minor | common, util | Mandus Momberg | Daniel Carl Jones |
| [HDFS-16484](https://issues.apache.org/jira/browse/HDFS-16484) | [SPS]: Fix an infinite loop bug in SPSPathIdProcessor thread | Major | . | qinyuren | qinyuren |
| [HADOOP-17682](https://issues.apache.org/jira/browse/HADOOP-17682) | ABFS: Support FileStatus input to OpenFileWithOptions() via OpenFileParameters | Major | fs/azure | Sumangala Patki | Sumangala Patki |
| [HADOOP-15983](https://issues.apache.org/jira/browse/HADOOP-15983) | Use jersey-json that is built to use jackson2 | Major | build | Akira Ajisaka | PJ Fanning |
| [HADOOP-18104](https://issues.apache.org/jira/browse/HADOOP-18104) | Add configs to configure minSeekForVectorReads and maxReadSizeForVectorReads | Major | common, fs | Mukund Thakur | Mukund Thakur |
| [HADOOP-18168](https://issues.apache.org/jira/browse/HADOOP-18168) | ITestMarkerTool.testRunLimitedLandsatAudit failing due to most of bucket content purged | Minor | fs/s3, test | Steve Loughran | Daniel Carl Jones |
| [HADOOP-12020](https://issues.apache.org/jira/browse/HADOOP-12020) | Support configuration of different S3 storage classes | Major | fs/s3 | Yann Landrin-Schweitzer | Monthon Klongklaew |
| [HADOOP-18105](https://issues.apache.org/jira/browse/HADOOP-18105) | Implement a variant of ElasticByteBufferPool which uses weak references for garbage collection. | Major | common, fs | Mukund Thakur | Mukund Thakur |
| [HADOOP-18107](https://issues.apache.org/jira/browse/HADOOP-18107) | Vectored IO support for large S3 files. | Major | fs/s3 | Mukund Thakur | Mukund Thakur |
| [HADOOP-18106](https://issues.apache.org/jira/browse/HADOOP-18106) | Handle memory fragmentation in S3 Vectored IO implementation. | Major | fs/s3 | Mukund Thakur | Mukund Thakur |
| [HADOOP-17461](https://issues.apache.org/jira/browse/HADOOP-17461) | Add thread-level IOStatistics Context | Major | fs, fs/azure, fs/s3 | Steve Loughran | Mehakmeet Singh |
| [HADOOP-18372](https://issues.apache.org/jira/browse/HADOOP-18372) | ILoadTestS3ABulkDeleteThrottling failing | Minor | fs/s3, test | Steve Loughran | Ahmar Suhail |
| [HADOOP-18368](https://issues.apache.org/jira/browse/HADOOP-18368) | ITestCustomSigner fails when access point name has '-' | Minor | . | Ahmar Suhail | Ahmar Suhail |
| [HADOOP-15964](https://issues.apache.org/jira/browse/HADOOP-15964) | Add S3A support for Async Scatter/Gather IO | Major | fs/s3 | Steve Loughran | Mukund Thakur |
| [HADOOP-18366](https://issues.apache.org/jira/browse/HADOOP-18366) | ITestS3Select.testSelectSeekFullLandsat is timing out | Minor | . | Ahmar Suhail | Ahmar Suhail |
| [HADOOP-18373](https://issues.apache.org/jira/browse/HADOOP-18373) | IOStatisticsContext tuning | Minor | fs/s3, test | Steve Loughran | Viraj Jasani |
| [HADOOP-18227](https://issues.apache.org/jira/browse/HADOOP-18227) | Add input stream IOstats for vectored IO api in S3A. | Major | fs/s3 | Mukund Thakur | Mukund Thakur |
| [HADOOP-18392](https://issues.apache.org/jira/browse/HADOOP-18392) | Propagate vectored s3a input stream stats to file system stats. | Major | fs/s3 | Mukund Thakur | Mukund Thakur |
| [HADOOP-18355](https://issues.apache.org/jira/browse/HADOOP-18355) | Update previous index properly while validating overlapping ranges. | Major | common, fs/s3 | Mukund Thakur | Mukund Thakur |
| [HADOOP-18371](https://issues.apache.org/jira/browse/HADOOP-18371) | s3a FS init logs at warn if fs.s3a.create.storage.class is unset | Blocker | fs/s3 | Steve Loughran | Viraj Jasani |
| [HADOOP-18385](https://issues.apache.org/jira/browse/HADOOP-18385) | ITestS3ACannedACLs failure; not in a span | Major | fs/s3, test | Steve Loughran | Ashutosh Gupta |
| [HADOOP-18403](https://issues.apache.org/jira/browse/HADOOP-18403) | Fix FileSystem leak in ITestS3AAWSCredentialsProvider | Minor | fs/s3 | Viraj Jasani | Viraj Jasani |
| [HADOOP-17882](https://issues.apache.org/jira/browse/HADOOP-17882) | distcp to use openFile() with sequential IO; ranges of reads | Major | tools/distcp | Steve Loughran | Steve Loughran |
| [HADOOP-18391](https://issues.apache.org/jira/browse/HADOOP-18391) | Improve VectoredReadUtils#readVectored() for direct buffers | Major | fs | Steve Loughran | Mukund Thakur |
| [HADOOP-18407](https://issues.apache.org/jira/browse/HADOOP-18407) | Improve vectored IO api spec. | Minor | fs, fs/s3 | Mukund Thakur | Mukund Thakur |
| [HADOOP-18339](https://issues.apache.org/jira/browse/HADOOP-18339) | S3A storage class option only picked up when buffering writes to disk | Major | fs/s3 | Steve Loughran | Monthon Klongklaew |
| [HADOOP-18410](https://issues.apache.org/jira/browse/HADOOP-18410) | S3AInputStream.unbuffer() async drain not releasing http connections | Blocker | fs/s3 | Steve Loughran | Steve Loughran |
| [HADOOP-18439](https://issues.apache.org/jira/browse/HADOOP-18439) | Fix VectoredIO for LocalFileSystem when checksum is enabled. | Major | common | Mukund Thakur | Mukund Thakur |
| [HADOOP-18416](https://issues.apache.org/jira/browse/HADOOP-18416) | ITestS3AIOStatisticsContext failure | Major | fs/s3, test | Steve Loughran | Mehakmeet Singh |
| [HADOOP-18347](https://issues.apache.org/jira/browse/HADOOP-18347) | Restrict vectoredIO threadpool to reduce memory pressure | Major | common, fs, fs/adl, fs/s3 | Rajesh Balamohan | Mukund Thakur |
| [HADOOP-18463](https://issues.apache.org/jira/browse/HADOOP-18463) | Add an integration test to process data asynchronously during vectored read. | Major | . | Mukund Thakur | Mukund Thakur |
| [HADOOP-15460](https://issues.apache.org/jira/browse/HADOOP-15460) | S3A FS to add "fs.s3a.create.performance" to the builder file creation option set | Major | fs/s3 | Steve Loughran | Steve Loughran |
| [HADOOP-18382](https://issues.apache.org/jira/browse/HADOOP-18382) | Upgrade AWS SDK to V2 - Prerequisites | Minor | . | Ahmar Suhail | Ahmar Suhail |
| [HADOOP-18480](https://issues.apache.org/jira/browse/HADOOP-18480) | upgrade AWS SDK to 1.12.316 | Major | build, fs/s3 | Steve Loughran | Steve Loughran |
| [HADOOP-18460](https://issues.apache.org/jira/browse/HADOOP-18460) | ITestS3AContractVectoredRead.testStopVectoredIoOperationsUnbuffer failing | Minor | fs/s3, test | Steve Loughran | Mukund Thakur |
| [HADOOP-18488](https://issues.apache.org/jira/browse/HADOOP-18488) | Cherrypick HADOOP-11245 to branch-3.3 | Major | . | Wei-Chiu Chuang | Ashutosh Gupta |
| [HADOOP-18481](https://issues.apache.org/jira/browse/HADOOP-18481) | AWS v2 SDK upgrade log to not warn of use of standard AWS Credential Providers | Major | fs/s3 | Steve Loughran | Ahmar Suhail |
| [HADOOP-18476](https://issues.apache.org/jira/browse/HADOOP-18476) | Abfs and S3A FileContext bindings to close wrapped filesystems in finalizer | Blocker | fs/azure, fs/s3 | Steve Loughran | Steve Loughran |
| [HADOOP-18304](https://issues.apache.org/jira/browse/HADOOP-18304) | Improve S3A committers documentation clarity | Trivial | documentation | Daniel Carl Jones | Daniel Carl Jones |
| [HADOOP-18465](https://issues.apache.org/jira/browse/HADOOP-18465) | S3A server-side encryption tests fail before checking encryption tests should skip | Minor | fs/s3, test | Daniel Carl Jones | Daniel Carl Jones |
| [HADOOP-18530](https://issues.apache.org/jira/browse/HADOOP-18530) | ChecksumFileSystem::readVectored might return byte buffers not positioned at 0 | Blocker | fs | Harshit Gupta | Harshit Gupta |
| [HADOOP-18457](https://issues.apache.org/jira/browse/HADOOP-18457) | ABFS: Support for account level throttling | Major | . | Anmol Asrani | Anmol Asrani |
| [HADOOP-18560](https://issues.apache.org/jira/browse/HADOOP-18560) | AvroFSInput opens a stream twice and discards the second one without closing | Blocker | fs | Steve Loughran | Steve Loughran |
| [HADOOP-18526](https://issues.apache.org/jira/browse/HADOOP-18526) | Leak of S3AInstrumentation instances via hadoop Metrics references | Blocker | fs/s3 | Steve Loughran | Steve Loughran |
| [HADOOP-18546](https://issues.apache.org/jira/browse/HADOOP-18546) | disable purging list of in-progress reads when abfs stream is closed | Blocker | fs/azure | Steve Loughran | Pranav Saxena |
| [HADOOP-18577](https://issues.apache.org/jira/browse/HADOOP-18577) | ABFS: add probes of readahead fix | Major | fs/azure | Steve Loughran | Steve Loughran |
| [HADOOP-11867](https://issues.apache.org/jira/browse/HADOOP-11867) | Add a high-performance vectored read API. | Major | fs, fs/azure, fs/s3, hdfs-client | Gopal Vijayaraghavan | Mukund Thakur |
| [HADOOP-18507](https://issues.apache.org/jira/browse/HADOOP-18507) | VectorIO FileRange type to support a "reference" field | Major | fs | Steve Loughran | Steve Loughran |
| [HADOOP-18627](https://issues.apache.org/jira/browse/HADOOP-18627) | site intro docs to make clear Kerberos is mandatory for secure clusters | Major | site | Steve Loughran | Arnout Engelen |
| [HADOOP-17584](https://issues.apache.org/jira/browse/HADOOP-17584) | s3a magic committer may commit more data | Major | fs/s3 | yinan zhan | Steve Loughran |
| [HADOOP-18642](https://issues.apache.org/jira/browse/HADOOP-18642) | Cut excess dependencies from hadoop-azure, hadoop-aliyun transitive imports; fix LICENSE-binary | Blocker | build, fs/azure, fs/oss | Steve Loughran | Steve Loughran |
### OTHER:
| JIRA | Summary | Priority | Component | Reporter | Contributor |
|:---- |:---- | :--- |:---- |:---- |:---- |
| [HDFS-15854](https://issues.apache.org/jira/browse/HDFS-15854) | Make some parameters configurable for SlowDiskTracker and SlowPeerTracker | Major | . | Tao Li | Tao Li |
| [YARN-10747](https://issues.apache.org/jira/browse/YARN-10747) | Bump YARN CSI protobuf version to 3.7.1 | Major | . | Siyao Meng | Siyao Meng |
| [HDFS-16139](https://issues.apache.org/jira/browse/HDFS-16139) | Update BPServiceActor Scheduler's nextBlockReportTime atomically | Major | . | Viraj Jasani | Viraj Jasani |
| [HADOOP-18014](https://issues.apache.org/jira/browse/HADOOP-18014) | CallerContext should not include some characters | Major | . | Takanobu Asanuma | Takanobu Asanuma |
| [MAPREDUCE-7371](https://issues.apache.org/jira/browse/MAPREDUCE-7371) | DistributedCache alternative APIs should not use DistributedCache APIs internally | Major | . | Viraj Jasani | Viraj Jasani |
| [HADOOP-18114](https://issues.apache.org/jira/browse/HADOOP-18114) | Documentation Syntax Error Fix \> AWS Assumed Roles | Trivial | documentation, fs/s3 | Joey Krabacher | Joey Krabacher |
| [HDFS-16481](https://issues.apache.org/jira/browse/HDFS-16481) | Provide support to set Http and Rpc ports in MiniJournalCluster | Major | . | Viraj Jasani | Viraj Jasani |
| [HDFS-16502](https://issues.apache.org/jira/browse/HDFS-16502) | Reconfigure Block Invalidate limit | Major | . | Viraj Jasani | Viraj Jasani |
| [HDFS-16522](https://issues.apache.org/jira/browse/HDFS-16522) | Set Http and Ipc ports for Datanodes in MiniDFSCluster | Major | . | Viraj Jasani | Viraj Jasani |
| [HADOOP-18191](https://issues.apache.org/jira/browse/HADOOP-18191) | Log retry count while handling exceptions in RetryInvocationHandler | Minor | . | Viraj Jasani | Viraj Jasani |
| [HDFS-16551](https://issues.apache.org/jira/browse/HDFS-16551) | Backport HADOOP-17588 to 3.3 and other active old branches. | Major | . | Renukaprasad C | Renukaprasad C |
| [HDFS-16618](https://issues.apache.org/jira/browse/HDFS-16618) | sync\_file\_range error should include more volume and file info | Minor | . | Viraj Jasani | Viraj Jasani |
| [HADOOP-18300](https://issues.apache.org/jira/browse/HADOOP-18300) | Update google-gson to 2.9.0 | Minor | build | Igor Dvorzhak | Igor Dvorzhak |
| [HADOOP-18397](https://issues.apache.org/jira/browse/HADOOP-18397) | Shutdown AWSSecurityTokenService when its resources are no longer in use | Major | fs/s3 | Viraj Jasani | Viraj Jasani |
| [HADOOP-18575](https://issues.apache.org/jira/browse/HADOOP-18575) | Make XML transformer factory more lenient | Major | common | PJ Fanning | PJ Fanning |
| [HADOOP-18586](https://issues.apache.org/jira/browse/HADOOP-18586) | Update the year to 2023 | Major | . | Ayush Saxena | Ayush Saxena |
| [HADOOP-18587](https://issues.apache.org/jira/browse/HADOOP-18587) | upgrade to jettison 1.5.3 to fix CVE-2022-40150 | Major | common | PJ Fanning | PJ Fanning |

View File

@ -1,89 +0,0 @@
<!---
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-->
# Apache Hadoop 3.3.5 Release Notes
These release notes cover new developer and user-facing incompatibilities, important issues, features, and major improvements.
---
* [HADOOP-17956](https://issues.apache.org/jira/browse/HADOOP-17956) | *Major* | **Replace all default Charset usage with UTF-8**
All default charset usages have been replaced with UTF-8. If the default charset of your environment is not UTF-8, the behavior may differ.
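A minimal illustration (plain Java, not Hadoop code) of the difference this removes: explicit UTF-8 behaves identically on every JVM, while the platform default varies with the environment.

```java
import java.nio.charset.StandardCharsets;

public final class CharsetDemo {
  public static void main(String[] args) {
    String s = "héllo";
    // Length depends on the JVM's default charset (5 on ISO-8859-1, 6 on UTF-8).
    byte[] platformDefault = s.getBytes();
    // Always 6 bytes: 'é' encodes to two bytes in UTF-8.
    byte[] explicitUtf8 = s.getBytes(StandardCharsets.UTF_8);
    System.out.println(platformDefault.length + " vs " + explicitUtf8.length);
  }
}
```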
---
* [HADOOP-15983](https://issues.apache.org/jira/browse/HADOOP-15983) | *Major* | **Use jersey-json that is built to use jackson2**
Hadoop now uses a modified jersey-json 1.20 (https://github.com/pjfanning/jersey-1.x/tree/v1.20) that is built against Jackson 2.x; with this change, the Jackson 1.x dependency has been removed from Hadoop.
Downstream applications which explicitly exclude jersey from transitive dependencies must now also exclude com.github.pjfanning:jersey-json.
---
* [HDFS-16595](https://issues.apache.org/jira/browse/HDFS-16595) | *Major* | **Slow peer metrics - add median, mad and upper latency limits**
Namenode metrics that represent the Slownode JSON now include three important factors (median, median absolute deviation, upper latency limit) that can help users determine how urgently a given slownode requires manual intervention.
---
* [HADOOP-17833](https://issues.apache.org/jira/browse/HADOOP-17833) | *Minor* | **Improve Magic Committer Performance**
The S3A filesystem's createFile() operation supports an option to disable all safety checks when creating a file. Consult the documentation and use with care.
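An illustrative sketch of the builder call involved; the bucket and path below are hypothetical, and the option key is the one introduced by HADOOP-15460 (listed under SUB-TASKS above):

```java
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class FastCreateSketch {
  // Writes a small file with create-time safety checks disabled for speed.
  static void writeFast(FileSystem fs) throws Exception {
    Path path = new Path("s3a://example-bucket/output/part-0000"); // hypothetical
    try (FSDataOutputStream out = fs.createFile(path)
        .overwrite(true)
        // opt() is only a hint: stores which do not recognise the key ignore it.
        .opt("fs.s3a.create.performance", true)
        .build()) {
      out.write("example".getBytes(StandardCharsets.UTF_8));
    }
  }
}
```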
---
* [HADOOP-18382](https://issues.apache.org/jira/browse/HADOOP-18382) | *Minor* | **Upgrade AWS SDK to V2 - Prerequisites**
In preparation for an (incompatible but necessary) move to the AWS SDK v2, some internal/deprecated uses of AWS classes/interfaces are logged as warnings, though only once during the life of a JVM. Set the log "org.apache.hadoop.fs.s3a.SDKV2Upgrade" to only log at INFO to hide these.
---
* [HADOOP-18442](https://issues.apache.org/jira/browse/HADOOP-18442) | *Major* | **Remove the hadoop-openstack module**
The swift:// connector for OpenStack support has been removed. It had fundamental problems (notably swift's handling of files \> 4GB). A subset of the S3 protocol is now exported by almost all object store services; please use that through the s3a connector instead. The hadoop-openstack jar remains, only now it is empty of code. This is to ensure that projects which declare the JAR a dependency will still have successful builds.
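As a hedged sketch of the migration path, a store's S3-compatible endpoint can typically be reached through s3a by overriding the endpoint; the URL and bucket below are placeholders:

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public final class S3ACompatSketch {
  static FileSystem open() throws Exception {
    Configuration conf = new Configuration();
    // Point s3a at a third-party S3-compatible endpoint (placeholder URL).
    conf.set("fs.s3a.endpoint", "https://objectstore.example.com");
    // Many non-AWS stores require path-style access rather than virtual hosting.
    conf.setBoolean("fs.s3a.path.style.access", true);
    return FileSystem.get(URI.create("s3a://example-bucket/"), conf);
  }
}
```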
---
* [HADOOP-17563](https://issues.apache.org/jira/browse/HADOOP-17563) | *Major* | **Update Bouncy Castle to 1.68 or later**
Bouncy Castle 1.68+ is a multi-release JAR containing Java classes compiled for different target JREs. Older versions of asm.jar and the Maven shade plugin may have problems with these; the fix is to upgrade those dependencies.
---
* [HADOOP-18528](https://issues.apache.org/jira/browse/HADOOP-18528) | *Major* | **Disable abfs prefetching by default**
ABFS block prefetching has been disabled to avoid HADOOP-18521 and buffer sharing on multithreaded processes (Hive, Spark, etc.). This will have little/no performance impact on queries against Parquet or ORC data, but can slow down sequential stream processing, including CSV files; however, the data read will be correct.
It may slow down distcp downloads, where the race condition does not arise. For maximum distcp performance, re-enable readahead by setting fs.azure.enable.readahead to true.
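A sketch of re-enabling readahead programmatically, using the property name from the note above (it can equally be set in core-site.xml; the account/container URI is a placeholder):

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public final class ReadaheadSketch {
  static FileSystem openWithReadahead() throws Exception {
    Configuration conf = new Configuration();
    // Safe for single-threaded sequential reads such as distcp downloads.
    conf.setBoolean("fs.azure.enable.readahead", true);
    return FileSystem.get(
        URI.create("abfs://container@account.dfs.core.windows.net/"), conf);
  }
}
```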
---
* [HADOOP-18621](https://issues.apache.org/jira/browse/HADOOP-18621) | *Critical* | **CryptoOutputStream::close leak when encrypted zones + quota exceptions**
**WARNING: No release note provided for this change.**

View File

@ -19,7 +19,6 @@
package org.apache.hadoop.conf;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.test.ReflectionUtils;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
@ -91,8 +90,6 @@ public abstract class TestConfigurationFieldsBase {
private static final Logger LOG_XML = LoggerFactory.getLogger(
"org.apache.hadoop.conf.TestConfigurationFieldsBase.xml");
private static final String VALID_PROP_REGEX = "^[A-Za-z][A-Za-z0-9_-]+(\\.[A-Za-z%s0-9_-]+)+$";
private static final Pattern validPropertiesPattern = Pattern.compile(VALID_PROP_REGEX);
/**
* Member variable for storing xml filename.
@ -143,17 +140,17 @@ public abstract class TestConfigurationFieldsBase {
/**
* Member variable to store Configuration variables for later comparison.
*/
private Map<String, String> configurationMemberVariables = null;
private Map<String,String> configurationMemberVariables = null;
/**
* Member variable to store Configuration variables for later reference.
*/
private Map<String, String> configurationDefaultVariables = null;
private Map<String,String> configurationDefaultVariables = null;
/**
* Member variable to store XML properties for later comparison.
*/
private Map<String, String> xmlKeyValueMap = null;
private Map<String,String> xmlKeyValueMap = null;
/**
* Member variable to store Configuration variables that are not in the
@ -188,38 +185,36 @@ public abstract class TestConfigurationFieldsBase {
* @param fields The class member variables
* @return HashMap containing (StringValue, MemberVariableName) entries
*/
private Map<String, String>
private HashMap<String,String>
extractMemberVariablesFromConfigurationFields(Field[] fields) {
// Sanity Check
if (fields == null) {
if (fields == null)
return null;
}
Map<String, String> validConfigProperties = new HashMap<>();
HashMap<String,String> retVal = new HashMap<>();
// Setup regexp for valid properties
String propRegex = "^[A-Za-z][A-Za-z0-9_-]+(\\.[A-Za-z%s0-9_-]+)+$";
Pattern p = Pattern.compile(propRegex);
// Iterate through class member variables
String value;
Set<String> fieldsNotPassedRegex = new HashSet<>();
for (Field f : fields) {
LOG_CONFIG.debug("Field: {}", f);
// Filter out anything that isn't "public static final"
if (!Modifier.isStatic(f.getModifiers()) ||
!Modifier.isPublic(f.getModifiers()) ||
!Modifier.isFinal(f.getModifiers())) {
LOG_CONFIG.debug(" Is skipped as it is not public static final");
continue;
}
// Filter out anything that isn't a string. int/float are generally
// default values
if (!f.getType().getName().equals("java.lang.String")) {
LOG_CONFIG.debug(" Is skipped as it is not type of String");
continue;
}
// filter out default-value fields
if (isFieldADefaultValue(f)) {
LOG_CONFIG.debug(" Is skipped as it is a 'default value field'");
continue;
}
@ -227,7 +222,6 @@ public abstract class TestConfigurationFieldsBase {
try {
value = (String) f.get(null);
} catch (IllegalAccessException iaException) {
LOG_CONFIG.debug(" Is skipped as it cannot be converted to a String");
continue;
}
LOG_CONFIG.debug(" Value: {}", value);
@ -235,13 +229,10 @@ public abstract class TestConfigurationFieldsBase {
// or file properties (ending in .xml)
if (value.endsWith(".xml") ||
value.endsWith(".") ||
value.endsWith("-")) {
LOG_CONFIG.debug(" Is skipped as it a 'partial property'");
value.endsWith("-"))
continue;
}
// Ignore known configuration props
if (configurationPropsToSkipCompare.contains(value)) {
LOG_CONFIG.debug(" Is skipped as it is registered as a property to be skipped");
continue;
}
// Ignore known configuration prefixes
@ -249,8 +240,6 @@ public abstract class TestConfigurationFieldsBase {
for (String cfgPrefix : configurationPrefixToSkipCompare) {
if (value.startsWith(cfgPrefix)) {
skipPrefix = true;
LOG_CONFIG.debug(" Is skipped as it is starts with a " +
"registered property prefix to skip: {}", cfgPrefix);
break;
}
}
@ -259,23 +248,22 @@ public abstract class TestConfigurationFieldsBase {
}
// Positive Filter: Look only for property values. Expect it to look
// something like: blah.blah2(.blah3.blah4...)
Matcher m = validPropertiesPattern.matcher(value);
Matcher m = p.matcher(value);
if (!m.find()) {
LOG_CONFIG.debug(" Passes Regex: false");
fieldsNotPassedRegex.add(f.getName());
continue;
}
LOG_CONFIG.debug(" Passes Regex: true");
if (!validConfigProperties.containsKey(value)) {
validConfigProperties.put(value, f.getName());
// Save member variable/value as hash
if (!retVal.containsKey(value)) {
retVal.put(value,f.getName());
} else {
LOG_CONFIG.debug("ERROR: Already found key for property " + value);
}
}
LOG_CONFIG.debug("Listing fields did not pass regex pattern: {}", fieldsNotPassedRegex);
return validConfigProperties;
return retVal;
}
/**
@ -284,7 +272,7 @@ public abstract class TestConfigurationFieldsBase {
* @param filename XML filename
* @return HashMap containing &lt;Property,Value&gt; entries from XML file
*/
private Map<String, String> extractPropertiesFromXml(String filename) {
private HashMap<String,String> extractPropertiesFromXml(String filename) {
if (filename == null) {
return null;
}
@ -294,10 +282,10 @@ public abstract class TestConfigurationFieldsBase {
conf.setAllowNullValueProperties(true);
conf.addResource(filename);
Map<String, String> retVal = new HashMap<>();
Iterator<Map.Entry<String, String>> kvItr = conf.iterator();
HashMap<String,String> retVal = new HashMap<>();
Iterator<Map.Entry<String,String>> kvItr = conf.iterator();
while (kvItr.hasNext()) {
Map.Entry<String, String> entry = kvItr.next();
Map.Entry<String,String> entry = kvItr.next();
String key = entry.getKey();
// Ignore known xml props
if (xmlPropsToSkipCompare.contains(key)) {
@ -311,11 +299,11 @@ public abstract class TestConfigurationFieldsBase {
}
if (conf.onlyKeyExists(key)) {
retVal.put(key, null);
LOG_XML.debug(" XML Key, Null Value: " + key);
LOG_XML.debug(" XML Key,Null Value: " + key);
} else {
if (conf.get(key) != null) {
retVal.put(key, entry.getValue());
LOG_XML.debug(" XML Key, Valid Value: " + key);
LOG_XML.debug(" XML Key,Valid Value: " + key);
}
}
kvItr.remove();
@ -341,18 +329,22 @@ public abstract class TestConfigurationFieldsBase {
* @param fields The class member variables
* @return HashMap containing (DefaultVariableName, DefaultValue) entries
*/
private Map<String, String>
private HashMap<String,String>
extractDefaultVariablesFromConfigurationFields(Field[] fields) {
// Sanity Check
if (fields == null) {
return null;
}
Map<String, String> retVal = new HashMap<>();
HashMap<String,String> retVal = new HashMap<String,String>();
// Setup regexp for valid properties
String propRegex = "^[A-Za-z][A-Za-z0-9_-]+(\\.[A-Za-z0-9_-]+)+$";
Pattern p = Pattern.compile(propRegex);
// Iterate through class member variables
int totalFields = 0;
String value;
for (Field f : fields) {
// Filter out anything that isn't "public static final"
if (!Modifier.isStatic(f.getModifiers()) ||
@ -367,8 +359,31 @@ public abstract class TestConfigurationFieldsBase {
continue;
}
try {
String s = ReflectionUtils.getStringValueOfField(f);
retVal.put(f.getName(), s);
if (f.getType().getName().equals("java.lang.String")) {
String sValue = (String) f.get(null);
retVal.put(f.getName(),sValue);
} else if (f.getType().getName().equals("short")) {
short shValue = (short) f.get(null);
retVal.put(f.getName(),Integer.toString(shValue));
} else if (f.getType().getName().equals("int")) {
int iValue = (int) f.get(null);
retVal.put(f.getName(),Integer.toString(iValue));
} else if (f.getType().getName().equals("long")) {
long lValue = (long) f.get(null);
retVal.put(f.getName(),Long.toString(lValue));
} else if (f.getType().getName().equals("float")) {
float fValue = (float) f.get(null);
retVal.put(f.getName(),Float.toString(fValue));
} else if (f.getType().getName().equals("double")) {
double dValue = (double) f.get(null);
retVal.put(f.getName(),Double.toString(dValue));
} else if (f.getType().getName().equals("boolean")) {
boolean bValue = (boolean) f.get(null);
retVal.put(f.getName(),Boolean.toString(bValue));
} else {
LOG.debug("Config variable {} has unknown type {}",
f.getName(), f.getType().getName());
}
} catch (IllegalAccessException iaException) {
LOG.error("{}", f, iaException);
}
@ -386,7 +401,7 @@ public abstract class TestConfigurationFieldsBase {
* @return Returns set operation keyMap1-keyMap2
*/
private static Set<String> compareConfigurationToXmlFields(
Map<String, String> keyMap1, Map<String, String> keyMap2) {
Map<String,String> keyMap1, Map<String,String> keyMap2) {
Set<String> retVal = new HashSet<>(keyMap1.keySet());
retVal.removeAll(keyMap2.keySet());
@ -398,19 +413,19 @@ public abstract class TestConfigurationFieldsBase {
* class and the XML properties file.
*/
@Before
public void setupTestConfigurationFields() {
public void setupTestConfigurationFields() throws Exception {
initializeMemberVariables();
// Error if subclass hasn't set class members
assertNotNull("XML file name is null", xmlFilename);
assertNotNull("Configuration classes array is null", configurationClasses);
assertNotNull(xmlFilename);
assertNotNull(configurationClasses);
// Create class member/value map
configurationMemberVariables = new HashMap<>();
LOG_CONFIG.debug("Reading configuration classes\n");
for (Class c : configurationClasses) {
Field[] fields = c.getDeclaredFields();
Map<String, String> memberMap =
Map<String,String> memberMap =
extractMemberVariablesFromConfigurationFields(fields);
if (memberMap != null) {
configurationMemberVariables.putAll(memberMap);
@ -434,12 +449,12 @@ public abstract class TestConfigurationFieldsBase {
LOG.debug("\n=====\n");
// Find class members not in the XML file
configurationFieldsMissingInXmlFile = compareConfigurationToXmlFields(
configurationMemberVariables, xmlKeyValueMap);
configurationFieldsMissingInXmlFile = compareConfigurationToXmlFields
(configurationMemberVariables, xmlKeyValueMap);
// Find XML properties not in the class
xmlFieldsMissingInConfiguration = compareConfigurationToXmlFields(
xmlKeyValueMap, configurationMemberVariables);
xmlFieldsMissingInConfiguration = compareConfigurationToXmlFields
(xmlKeyValueMap, configurationMemberVariables);
}
/**
@ -449,16 +464,15 @@ public abstract class TestConfigurationFieldsBase {
@Test
public void testCompareConfigurationClassAgainstXml() {
// Error if subclass hasn't set class members
assertNotNull("XML file name is null", xmlFilename);
assertNotNull("Configuration classes array is null", configurationClasses);
assertNotNull(xmlFilename);
assertNotNull(configurationClasses);
final int missingXmlSize = configurationFieldsMissingInXmlFile.size();
for (Class c : configurationClasses) {
LOG.info("Configuration class: {}", c.toString());
LOG.info(c.toString());
}
LOG.info("({} member variables)\n", configurationMemberVariables.size());
StringBuilder xmlErrorMsg = new StringBuilder();
for (Class c : configurationClasses) {
xmlErrorMsg.append(c);
@ -469,7 +483,6 @@ public abstract class TestConfigurationFieldsBase {
xmlErrorMsg.append(" variables missing in ");
xmlErrorMsg.append(xmlFilename);
LOG.error(xmlErrorMsg.toString());
if (missingXmlSize == 0) {
LOG.info(" (None)");
} else {
@ -503,8 +516,8 @@ public abstract class TestConfigurationFieldsBase {
@Test
public void testCompareXmlAgainstConfigurationClass() {
// Error if subclass hasn't set class members
assertNotNull("XML file name is null", xmlFilename);
assertNotNull("Configuration classes array is null", configurationClasses);
assertNotNull(xmlFilename);
assertNotNull(configurationClasses);
final int missingConfigSize = xmlFieldsMissingInConfiguration.size();
@ -535,17 +548,19 @@ public abstract class TestConfigurationFieldsBase {
@Test
public void testXmlAgainstDefaultValuesInConfigurationClass() {
// Error if subclass hasn't set class members
assertNotNull("XML file name is null", xmlFilename);
assertNotNull("Configuration member variables is null", configurationMemberVariables);
assertNotNull("Configuration default variables is null", configurationMemberVariables);
assertNotNull(xmlFilename);
assertNotNull(configurationMemberVariables);
assertNotNull(configurationDefaultVariables);
Set<String> xmlPropertiesWithEmptyValue = new TreeSet<>();
Set<String> configPropertiesWithNoDefaultConfig = new TreeSet<>();
Map<String, String> xmlPropertiesMatchingConfigDefault = new HashMap<>();
TreeSet<String> xmlPropertiesWithEmptyValue = new TreeSet<>();
TreeSet<String> configPropertiesWithNoDefaultConfig = new TreeSet<>();
HashMap<String,String> xmlPropertiesMatchingConfigDefault =
new HashMap<>();
// Ugly solution. Should have tuple-based solution.
Map<Map<String, String>, Map<String, String>> mismatchingXmlConfig = new HashMap<>();
HashMap<HashMap<String,String>, HashMap<String,String>> mismatchingXmlConfig
= new HashMap<>();
for (Map.Entry<String, String> xEntry : xmlKeyValueMap.entrySet()) {
for (Map.Entry<String,String> xEntry : xmlKeyValueMap.entrySet()) {
String xmlProperty = xEntry.getKey();
String xmlDefaultValue = xEntry.getValue();
String configProperty = configurationMemberVariables.get(xmlProperty);
@ -586,9 +601,9 @@ public abstract class TestConfigurationFieldsBase {
if (xmlDefaultValue == null) {
xmlPropertiesWithEmptyValue.add(xmlProperty);
} else if (!xmlDefaultValue.equals(defaultConfigValue)) {
Map<String, String> xmlEntry = new HashMap<>();
HashMap<String, String> xmlEntry = new HashMap<>();
xmlEntry.put(xmlProperty, xmlDefaultValue);
Map<String, String> configEntry = new HashMap<>();
HashMap<String, String> configEntry = new HashMap<>();
configEntry.put(defaultConfigName, defaultConfigValue);
mismatchingXmlConfig.put(xmlEntry, configEntry);
} else {
@ -607,18 +622,18 @@ public abstract class TestConfigurationFieldsBase {
if (mismatchingXmlConfig.isEmpty()) {
LOG.info(" (None)");
} else {
for (Map.Entry<Map<String, String>, Map<String, String>> xcEntry :
mismatchingXmlConfig.entrySet()) {
xcEntry.getKey().forEach((key, value) -> {
LOG.info("XML Property: {}", key);
LOG.info("XML Value: {}", value);
});
xcEntry.getValue().forEach((key, value) -> {
LOG.info("Config Name: {}", key);
LOG.info("Config Value: {}", value);
});
LOG.info("");
}
for (Map.Entry<HashMap<String,String>,HashMap<String,String>> xcEntry :
mismatchingXmlConfig.entrySet()) {
xcEntry.getKey().forEach((key, value) -> {
LOG.info("XML Property: {}", key);
LOG.info("XML Value: {}", value);
});
xcEntry.getValue().forEach((key, value) -> {
LOG.info("Config Name: {}", key);
LOG.info("Config Value: {}", value);
});
LOG.info("");
}
}
LOG.info("\n");
@ -694,5 +709,7 @@ public abstract class TestConfigurationFieldsBase {
}
LOG.info("Checked {} default values for collision.", valuesChecked);
}
}
}

View File

@ -52,8 +52,7 @@ public class TestFileSystemStorageStatistics {
"bytesReadDistanceOfOneOrTwo",
"bytesReadDistanceOfThreeOrFour",
"bytesReadDistanceOfFiveOrLarger",
"bytesReadErasureCoded",
"remoteReadTimeMS"
"bytesReadErasureCoded"
};
private FileSystem.Statistics statistics =
@ -75,7 +74,6 @@ public class TestFileSystemStorageStatistics {
statistics.incrementBytesReadByDistance(1, RandomUtils.nextInt(0, 100));
statistics.incrementBytesReadByDistance(3, RandomUtils.nextInt(0, 100));
statistics.incrementBytesReadErasureCoded(RandomUtils.nextInt(0, 100));
statistics.increaseRemoteReadTime(RandomUtils.nextInt(0, 100));
}
@Test
@ -130,8 +128,6 @@ public class TestFileSystemStorageStatistics {
return statistics.getBytesReadByDistance(5);
case "bytesReadErasureCoded":
return statistics.getBytesReadErasureCoded();
case "remoteReadTimeMS":
return statistics.getRemoteReadTime();
default:
return 0;
}

View File

@ -528,11 +528,4 @@ public class TestPath {
}
}
@Test(timeout = 30000)
public void testSuffixFromRoot() {
Path root = new Path("/");
Assert.assertNull(root.getParent());
Assert.assertEquals(new Path("/bar"), root.suffix("bar"));
}
}

View File

@ -1,90 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract;
import java.io.FileNotFoundException;
import java.io.IOException;
import org.assertj.core.api.Assertions;
import org.junit.Test;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LeaseRecoverable;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.LambdaTestUtils;
import static org.apache.hadoop.fs.CommonPathCapabilities.LEASE_RECOVERABLE;
public abstract class AbstractContractLeaseRecoveryTest extends
AbstractFSContractTestBase {
@Test
public void testLeaseRecovery() throws Throwable {
final Path path = methodPath();
final FileSystem fs = getFileSystem();
ContractTestUtils.touch(fs, path);
LeaseRecoverable leaseRecoverableFs = verifyAndGetLeaseRecoverableInstance(fs, path);
Assertions.assertThat(leaseRecoverableFs.recoverLease(path))
.describedAs("Issuing lease recovery on a closed file must be successful")
.isTrue();
Assertions.assertThat(leaseRecoverableFs.isFileClosed(path))
.describedAs("Get the isFileClose status on a closed file must be successful")
.isTrue();
}
@Test
public void testLeaseRecoveryFileNotExist() throws Throwable {
final Path path = new Path("notExist");
final FileSystem fs = getFileSystem();
LeaseRecoverable leaseRecoverableFs = verifyAndGetLeaseRecoverableInstance(fs, path);
LambdaTestUtils.intercept(FileNotFoundException.class, "File does not exist",
() -> leaseRecoverableFs.recoverLease(path));
LambdaTestUtils.intercept(FileNotFoundException.class, "File does not exist",
() -> leaseRecoverableFs.isFileClosed(path));
}
@Test
public void testLeaseRecoveryFileOnDirectory() throws Throwable {
final Path path = methodPath();
final FileSystem fs = getFileSystem();
LeaseRecoverable leaseRecoverableFs = verifyAndGetLeaseRecoverableInstance(fs, path);
final Path parentDirectory = path.getParent();
LambdaTestUtils.intercept(FileNotFoundException.class, "Path is not a file",
() -> leaseRecoverableFs.recoverLease(parentDirectory));
LambdaTestUtils.intercept(FileNotFoundException.class, "Path is not a file",
() -> leaseRecoverableFs.isFileClosed(parentDirectory));
}
private LeaseRecoverable verifyAndGetLeaseRecoverableInstance(FileSystem fs, Path path)
throws IOException {
Assertions.assertThat(fs.hasPathCapability(path, LEASE_RECOVERABLE))
.describedAs("path capability %s of %s", LEASE_RECOVERABLE, path)
.isTrue();
Assertions.assertThat(fs)
.describedAs("filesystem %s", fs)
.isInstanceOf(LeaseRecoverable.class);
return (LeaseRecoverable) fs;
}
}

View File

@ -43,7 +43,6 @@ import static org.apache.hadoop.test.LambdaTestUtils.intercept;
import static org.apache.hadoop.test.LambdaTestUtils.interceptFuture;
import static org.apache.hadoop.util.functional.FutureIO.awaitFuture;
import org.assertj.core.api.Assertions;
import org.junit.Test;
/**
@ -187,7 +186,7 @@ public abstract class AbstractContractOpenTest
@Test
public void testOpenFileReadZeroByte() throws Throwable {
describe("create & read a 0 byte file through the builders; use a negative length");
describe("create & read a 0 byte file through the builders");
Path path = path("zero.txt");
FileSystem fs = getFileSystem();
fs.createFile(path).overwrite(true).build().close();
@ -195,7 +194,6 @@ public abstract class AbstractContractOpenTest
.opt("fs.test.something", true)
.opt("fs.test.something2", 3)
.opt("fs.test.something3", "3")
.optLong(FS_OPTION_OPENFILE_LENGTH, -1L)
.build().get()) {
assertMinusOne("initial byte read", is.read());
}
@ -212,17 +210,6 @@ public abstract class AbstractContractOpenTest
() -> builder.build());
}
@Test
public void testOpenFileUnknownOptionLong() throws Throwable {
describe("calling openFile fails when a 'must()' option is unknown");
FutureDataInputStreamBuilder builder =
getFileSystem().openFile(path("testOpenFileUnknownOption"))
.optLong("fs.test.something", 1L)
.mustLong("fs.test.something2", 1L);
intercept(IllegalArgumentException.class,
() -> builder.build());
}
@Test
public void testOpenFileLazyFail() throws Throwable {
describe("openFile fails on a missing file in the get() and not before");
@ -333,22 +320,16 @@ public abstract class AbstractContractOpenTest
describe("verify that async accept callbacks are evaluated");
Path path = path("testOpenFileApplyAsyncRead");
FileSystem fs = getFileSystem();
final int len = 512;
createFile(fs, path, true,
dataset(len, 0x40, 0x80));
CompletableFuture<FSDataInputStream> future = fs.openFile(path)
.mustDouble(FS_OPTION_OPENFILE_LENGTH, 43.2e60) // pass in a double
.build();
dataset(4, 0x40, 0x80));
CompletableFuture<FSDataInputStream> future = fs.openFile(path).build();
AtomicBoolean accepted = new AtomicBoolean(false);
final Long bytes = future.thenApply(stream -> {
future.thenApply(stream -> {
accepted.set(true);
return ContractTestUtils.readStream(stream);
}).get();
return stream;
}).get().close();
assertTrue("async accept operation not invoked",
accepted.get());
Assertions.assertThat(bytes)
.describedAs("bytes read from stream")
.isEqualTo(len);
}
/**
@ -376,8 +357,8 @@ public abstract class AbstractContractOpenTest
.withFileStatus(null)
.opt(FS_OPTION_OPENFILE_READ_POLICY,
"unknown, sequential, random")
.optLong(FS_OPTION_OPENFILE_BUFFER_SIZE, 32768)
.optLong(FS_OPTION_OPENFILE_LENGTH, len)
.opt(FS_OPTION_OPENFILE_BUFFER_SIZE, 32768)
.opt(FS_OPTION_OPENFILE_LENGTH, len)
.build();
try (FSDataInputStream in = future.get()) {
@ -386,26 +367,4 @@ public abstract class AbstractContractOpenTest
compareByteArrays(dataset, result, len);
}
/**
* open a file with a length set as a double; verifies resilience
* of the parser.
*/
@Test
public void testFloatingPointLength() throws Throwable {
describe("Open file with a length");
Path path = methodPath();
FileSystem fs = getFileSystem();
int len = 4096;
createFile(fs, path, true,
dataset(len, 0x40, 0x80));
final Long l = fs.openFile(path)
.mustDouble(FS_OPTION_OPENFILE_LENGTH, len)
.build()
.thenApply(ContractTestUtils::readStream)
.get();
Assertions.assertThat(l)
.describedAs("bytes read from file %s", path)
.isEqualTo(len);
}
}

View File

@ -195,9 +195,10 @@ public abstract class AbstractContractRootDirectoryTest extends AbstractFSContra
for (FileStatus status : statuses) {
ContractTestUtils.assertDeleted(fs, status.getPath(), false, true, false);
}
Assertions.assertThat(fs.listStatus(root))
.describedAs("ls /")
.hasSize(0);
FileStatus[] rootListStatus = fs.listStatus(root);
assertEquals("listStatus on empty root-directory returned found: "
+ join("\n", rootListStatus),
0, rootListStatus.length);
assertNoElements("listFiles(/, false)",
fs.listFiles(root, false));
assertNoElements("listFiles(/, true)",

View File

@ -1,54 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.contract;
import org.assertj.core.api.Assertions;
import org.junit.Test;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.SafeMode;
import org.apache.hadoop.fs.SafeModeAction;
public abstract class AbstractContractSafeModeTest extends AbstractFSContractTestBase {
@Test
public void testSafeMode() throws Throwable {
final FileSystem fs = getFileSystem();
SafeMode fsWithSafeMode = verifyAndGetSafeModeInstance(fs);
Assertions.assertThat(fsWithSafeMode.setSafeMode(SafeModeAction.GET))
.describedAs("Getting the status of safe mode before entering should be off.").isFalse();
Assertions.assertThat(fsWithSafeMode.setSafeMode(SafeModeAction.ENTER))
.describedAs("Entering Safe mode and safe mode turns on.").isTrue();
Assertions.assertThat(fsWithSafeMode.setSafeMode(SafeModeAction.GET))
.describedAs("Getting the status of safe mode after entering, safe mode should be on.")
.isTrue();
Assertions.assertThat(fsWithSafeMode.setSafeMode(SafeModeAction.LEAVE))
.describedAs("Leaving safe mode, and safe mode switches off.").isFalse();
Assertions.assertThat(fsWithSafeMode.setSafeMode(SafeModeAction.FORCE_EXIT))
.describedAs("Force exist safe mode at any time, safe mode should always switches off.")
.isFalse();
}
private SafeMode verifyAndGetSafeModeInstance(FileSystem fs) {
Assertions.assertThat(fs)
.describedAs("File system %s must be an instance of %s", fs, SafeMode.class.getClass())
.isInstanceOf(SafeMode.class);
return (SafeMode) fs;
}
}

View File

@ -23,11 +23,8 @@ import java.nio.ByteBuffer;
import org.junit.Test;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.test.AbstractHadoopTestBase;
import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_TMP_DIR;
import static org.apache.hadoop.test.LambdaTestUtils.intercept;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@ -39,8 +36,6 @@ public class TestBlockCache extends AbstractHadoopTestBase {
private static final int BUFFER_SIZE = 16;
private static final Configuration CONF = new Configuration();
@Test
public void testArgChecks() throws Exception {
// Should not throw.
@ -51,7 +46,7 @@ public class TestBlockCache extends AbstractHadoopTestBase {
// Verify it throws correctly.
intercept(IllegalArgumentException.class, "'buffer' must not be null",
() -> cache.put(42, null, null, null));
() -> cache.put(42, null));
intercept(NullPointerException.class, null,
@ -72,7 +67,7 @@ public class TestBlockCache extends AbstractHadoopTestBase {
assertEquals(0, cache.size());
assertFalse(cache.containsBlock(0));
cache.put(0, buffer1, CONF, new LocalDirAllocator(HADOOP_TMP_DIR));
cache.put(0, buffer1);
assertEquals(1, cache.size());
assertTrue(cache.containsBlock(0));
ByteBuffer buffer2 = ByteBuffer.allocate(BUFFER_SIZE);
@ -82,7 +77,7 @@ public class TestBlockCache extends AbstractHadoopTestBase {
assertEquals(1, cache.size());
assertFalse(cache.containsBlock(1));
cache.put(1, buffer1, CONF, new LocalDirAllocator(HADOOP_TMP_DIR));
cache.put(1, buffer1);
assertEquals(2, cache.size());
assertTrue(cache.containsBlock(1));
ByteBuffer buffer3 = ByteBuffer.allocate(BUFFER_SIZE);

View File

@ -1,189 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.statistics;
import java.util.Arrays;
import java.util.Collection;
import org.assertj.core.api.Assertions;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.apache.hadoop.fs.statistics.impl.ForwardingIOStatisticsStore;
import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore;
import org.apache.hadoop.test.AbstractHadoopTestBase;
import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.assertThatStatisticCounter;
import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.assertThatStatisticGauge;
import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.assertThatStatisticMaximum;
import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.assertThatStatisticMean;
import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.assertThatStatisticMinimum;
import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.iostatisticsStore;
/**
* Test the {@link IOStatisticsSetters} interface implementations through
* a parameterized run with each implementation.
* For each of the setters, the value is set, verified,
* updated, verified again.
* An option known to be undefined in all created IOStatisticsStore instances
* is set, to verify it is harmless.
*/
@RunWith(Parameterized.class)
public class TestIOStatisticsSetters extends AbstractHadoopTestBase {
public static final String COUNTER = "counter";
public static final String GAUGE = "gauge";
public static final String MAXIMUM = "max";
public static final String MINIMUM = "min";
public static final String MEAN = "mean";
private final IOStatisticsSetters ioStatistics;
private final boolean createsNewEntries;
@Parameterized.Parameters(name="{0}")
public static Collection<Object[]> params() {
return Arrays.asList(new Object[][]{
{"IOStatisticsSnapshot", new IOStatisticsSnapshot(), true},
{"IOStatisticsStore", createTestStore(), false},
{"ForwardingIOStatisticsStore", new ForwardingIOStatisticsStore(createTestStore()), false},
});
}
/**
* Create a test store with the stats used for testing set up.
* @return a set up store
*/
private static IOStatisticsStore createTestStore() {
return iostatisticsStore()
.withCounters(COUNTER)
.withGauges(GAUGE)
.withMaximums(MAXIMUM)
.withMinimums(MINIMUM)
.withMeanStatistics(MEAN)
.build();
}
public TestIOStatisticsSetters(
String source,
IOStatisticsSetters ioStatisticsSetters,
boolean createsNewEntries) {
this.ioStatistics = ioStatisticsSetters;
this.createsNewEntries = createsNewEntries;
}
@Test
public void testCounter() throws Throwable {
// write
ioStatistics.setCounter(COUNTER, 1);
assertThatStatisticCounter(ioStatistics, COUNTER)
.isEqualTo(1);
// update
ioStatistics.setCounter(COUNTER, 2);
assertThatStatisticCounter(ioStatistics, COUNTER)
.isEqualTo(2);
// unknown value
final String unknown = "unknown";
ioStatistics.setCounter(unknown, 3);
if (createsNewEntries) {
assertThatStatisticCounter(ioStatistics, unknown)
.isEqualTo(3);
} else {
Assertions.assertThat(ioStatistics.counters())
.describedAs("Counter map in {}", ioStatistics)
.doesNotContainKey(unknown);
}
}
@Test
public void testMaximum() throws Throwable {
// write
ioStatistics.setMaximum(MAXIMUM, 1);
assertThatStatisticMaximum(ioStatistics, MAXIMUM)
.isEqualTo(1);
// update
ioStatistics.setMaximum(MAXIMUM, 2);
assertThatStatisticMaximum(ioStatistics, MAXIMUM)
.isEqualTo(2);
// unknown value
ioStatistics.setMaximum("mm2", 3);
}
@Test
public void testMinimum() throws Throwable {
// write
ioStatistics.setMinimum(MINIMUM, 1);
assertThatStatisticMinimum(ioStatistics, MINIMUM)
.isEqualTo(1);
// update
ioStatistics.setMinimum(MINIMUM, 2);
assertThatStatisticMinimum(ioStatistics, MINIMUM)
.isEqualTo(2);
// unknown value
ioStatistics.setMinimum("c2", 3);
}
@Test
public void testGauge() throws Throwable {
// write
ioStatistics.setGauge(GAUGE, 1);
assertThatStatisticGauge(ioStatistics, GAUGE)
.isEqualTo(1);
// update
ioStatistics.setGauge(GAUGE, 2);
assertThatStatisticGauge(ioStatistics, GAUGE)
.isEqualTo(2);
// unknown value
ioStatistics.setGauge("g2", 3);
}
@Test
public void testMean() throws Throwable {
// write
final MeanStatistic mean11 = new MeanStatistic(1, 1);
ioStatistics.setMeanStatistic(MEAN, mean11);
assertThatStatisticMean(ioStatistics, MEAN)
.isEqualTo(mean11);
// update
final MeanStatistic mean22 = new MeanStatistic(2, 2);
ioStatistics.setMeanStatistic(MEAN, mean22);
assertThatStatisticMean(ioStatistics, MEAN)
.isEqualTo(mean22);
// unknown value
ioStatistics.setMeanStatistic("m2", mean11);
}
}
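
A minimal usage sketch of the setters API exercised above, not part of the patch: the statistic names are illustrative, and only builder and setter calls already shown in this test are assumed to exist.

import org.apache.hadoop.fs.statistics.MeanStatistic;
import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore;
import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.iostatisticsStore;

public class IOStatisticsSettersSketch {
  public static void main(String[] args) {
    // Statistics must be declared up front; a store only accepts
    // updates for keys registered through the builder.
    IOStatisticsStore store = iostatisticsStore()
        .withCounters("bytes.read")           // illustrative key
        .withGauges("active.streams")         // illustrative key
        .withMeanStatistics("read.latency")   // illustrative key
        .build();
    store.setCounter("bytes.read", 1024);
    store.setGauge("active.streams", 2);
    // Assumed (samples, sum) argument ordering, as in the test's MeanStatistic(1, 1).
    store.setMeanStatistic("read.latency", new MeanStatistic(5, 100));
    // Setting an unregistered key is harmless for stores, as testCounter()
    // above verifies; snapshots, by contrast, create new entries on demand.
    store.setCounter("unknown.key", 3);
  }
}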

View File

@ -1,172 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.store;
import java.io.IOException;
import javax.annotation.Nonnull;
import org.junit.Test;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSBuilder;
import org.apache.hadoop.fs.impl.FSBuilderSupport;
import org.apache.hadoop.test.AbstractHadoopTestBase;
import static org.assertj.core.api.Assertions.assertThat;
/**
* Test builder support, forwarding of opt double/float to long,
* resilience.
*/
@SuppressWarnings("deprecation")
public class TestFSBuilderSupport extends AbstractHadoopTestBase {
@Test
public void testOptFloatDoubleForwardsToLong() throws Throwable {
FSBuilderSupport c = builder()
.opt("f", 1.8f)
.opt("d", 2.0e3)
.build();
assertThat(c.getLong("f", 2))
.isEqualTo(1);
assertThat(c.getLong("d", 2))
.isEqualTo(2000);
}
@Test
public void testMustFloatDoubleForwardsToLong() throws Throwable {
FSBuilderSupport c = builder()
.must("f", 1.8f)
.must("d", 2.0e3)
.build();
assertThat(c.getLong("f", 2))
.isEqualTo(1);
assertThat(c.getLong("d", 2))
.isEqualTo(2000);
}
@Test
public void testLongOptStillWorks() throws Throwable {
FSBuilderSupport c = builder()
.opt("o", 1L)
.must("m", 1L)
.build();
assertThat(c.getLong("o", 2))
.isEqualTo(1L);
assertThat(c.getLong("m", 2))
.isEqualTo(1L);
}
@Test
public void testFloatParseFallback() throws Throwable {
FSBuilderSupport c = builder()
.opt("f", "1.8f")
.opt("d", "1.8e20")
.build();
assertThat(c.getLong("f", 2))
.isEqualTo(2);
assertThat(c.getLong("d", 2))
.isEqualTo(2);
}
@Test
public void testNegatives() throws Throwable {
FSBuilderSupport c = builder()
.optLong("-1", -1)
.mustLong("-2", -2)
.build();
// getLong gets the long value
assertThat(c.getLong("-1", 2))
.isEqualTo(-1);
// but getPositiveLong returns the positive default
assertThat(c.getPositiveLong("-1", 2))
.isEqualTo(2);
}
@Test
public void testBoolean() throws Throwable {
final FSBuilderSupport c = builder()
.opt("f", false)
.opt("t", true)
.opt("o", "other")
.build();
assertThat(c.getOptions().getBoolean("f", true))
.isEqualTo(false);
assertThat(c.getOptions().getBoolean("t", false))
.isEqualTo(true);
// this is handled in Configuration itself.
assertThat(c.getOptions().getBoolean("o", true))
.isEqualTo(true);
}
private SimpleBuilder builder() {
return new BuilderImpl();
}
private interface SimpleBuilder
extends FSBuilder<FSBuilderSupport, SimpleBuilder> {
}
/**
* This is a minimal builder which relies on default implementations of the interface.
* If it ever stops compiling, it means a new method has been added to the
* interface which is not backwards compatible with external implementations,
* such as that
* in HBoss (see HBASE-26483).
*
*/
private static final class BuilderImpl
implements SimpleBuilder {
private final Configuration options = new Configuration(false);
@Override
public SimpleBuilder opt(@Nonnull final String key, @Nonnull final String value) {
options.set(key, value);
return this;
}
@Override
public SimpleBuilder opt(@Nonnull final String key, @Nonnull final String... values) {
options.setStrings(key, values);
return this;
}
@Override
public SimpleBuilder must(@Nonnull final String key, @Nonnull final String value) {
return opt(key, value);
}
@Override
public SimpleBuilder must(@Nonnull final String key, @Nonnull final String... values) {
return opt(key, values);
}
@Override
public FSBuilderSupport build()
throws IllegalArgumentException, UnsupportedOperationException, IOException {
return new FSBuilderSupport(options);
}
}
}
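
To make the "resilience" assertions above concrete, here is a minimal sketch, not part of the patch, of how FSBuilderSupport reads back stored options; the key "-1" mirrors the negative-value case in testNegatives().

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.impl.FSBuilderSupport;

public class FSBuilderSupportSketch {
  public static void main(String[] args) {
    // Options as a builder would have stored them.
    Configuration options = new Configuration(false);
    options.setLong("-1", -1L);
    FSBuilderSupport support = new FSBuilderSupport(options);
    System.out.println(support.getLong("-1", 2));         // -1: the raw long value
    System.out.println(support.getPositiveLong("-1", 2)); // 2: negative rejected, default wins
  }
}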

View File

@ -19,10 +19,8 @@ package org.apache.hadoop.http;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AuthenticationFilterInitializer;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.KerberosTestUtils;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
@ -106,9 +104,7 @@ public class TestHttpServerWithSpnego {
*/
@Test
public void testAuthenticationWithProxyUser() throws Exception {
Configuration spnegoConf = getSpnegoConf(new Configuration());
spnegoConf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
ProxyUserAuthenticationFilterInitializer.class.getName());
Configuration spengoConf = getSpengoConf(new Configuration());
//setup logs dir
System.setProperty("hadoop.log.dir", testRootDir.getAbsolutePath());
@ -122,15 +118,15 @@ public class TestHttpServerWithSpnego {
new String[]{"groupC"});
// Make userA impersonate users in groupB
spnegoConf.set("hadoop.proxyuser.userA.hosts", "*");
spnegoConf.set("hadoop.proxyuser.userA.groups", "groupB");
ProxyUsers.refreshSuperUserGroupsConfiguration(spnegoConf);
spengoConf.set("hadoop.proxyuser.userA.hosts", "*");
spengoConf.set("hadoop.proxyuser.userA.groups", "groupB");
ProxyUsers.refreshSuperUserGroupsConfiguration(spengoConf);
HttpServer2 httpServer = null;
try {
// Create http server to test.
httpServer = getCommonBuilder()
.setConf(spnegoConf)
.setConf(spengoConf)
.setACL(new AccessControlList("userA groupA"))
.build();
httpServer.start();
@ -195,48 +191,6 @@ public class TestHttpServerWithSpnego {
}
}
@Test
public void testAuthenticationToAllowList() throws Exception {
Configuration spnegoConf = getSpnegoConf(new Configuration());
String[] allowList = new String[] {"/jmx", "/prom"};
String[] denyList = new String[] {"/conf", "/stacks", "/logLevel"};
spnegoConf.set(PREFIX + "kerberos.endpoint.whitelist", String.join(",", allowList));
spnegoConf.set(CommonConfigurationKeysPublic.HADOOP_PROMETHEUS_ENABLED, "true");
spnegoConf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
AuthenticationFilterInitializer.class.getName());
//setup logs dir
System.setProperty("hadoop.log.dir", testRootDir.getAbsolutePath());
HttpServer2 httpServer = null;
try {
// Create http server to test.
httpServer = getCommonBuilder().setConf(spnegoConf).setSecurityEnabled(true)
.setUsernameConfKey(PREFIX + "kerberos.principal")
.setKeytabConfKey(PREFIX + "kerberos.keytab").build();
httpServer.start();
String serverURL = "http://" + NetUtils.getHostPortString(httpServer.getConnectorAddress(0));
// endpoints in whitelist should not require Kerberos authentication
for (String endpoint : allowList) {
HttpURLConnection conn = (HttpURLConnection) new URL(serverURL + endpoint).openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
}
// endpoints not in whitelist should require Kerberos authentication
for (String endpoint : denyList) {
HttpURLConnection conn = (HttpURLConnection) new URL(serverURL + endpoint).openConnection();
Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode());
}
} finally {
if (httpServer != null) {
httpServer.stop();
}
}
}
private AuthenticatedURL.Token getEncryptedAuthToken(Signer signer,
String user) throws Exception {
AuthenticationToken token =
@ -255,8 +209,10 @@ public class TestHttpServerWithSpnego {
return new Signer(secretProvider);
}
private Configuration getSpnegoConf(Configuration conf) {
private Configuration getSpengoConf(Configuration conf) {
conf = new Configuration();
conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
ProxyUserAuthenticationFilterInitializer.class.getName());
conf.set(PREFIX + "type", "kerberos");
conf.setBoolean(PREFIX + "simple.anonymous.allowed", false);
conf.set(PREFIX + "signature.secret.file",

View File

@ -30,21 +30,17 @@ import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
import org.apache.hadoop.io.erasurecode.rawcoder.XORRawDecoder;
import org.apache.hadoop.io.erasurecode.rawcoder.XORRawEncoder;
import org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawEncoder;
import org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawDecoder;
import org.apache.hadoop.io.erasurecode.rawcoder.XORRawErasureCoderFactory;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
/**
* Test the codec to raw coder mapping.
*/
public class TestCodecRawCoderMapping {
private static Configuration conf;
private static final int numDataUnit = 6;
private static final int numParityUnit = 3;
@ -154,39 +150,4 @@ public class TestCodecRawCoderMapping {
conf, ErasureCodeConstants.XOR_CODEC_NAME, coderOptions);
Assert.assertTrue(decoder instanceof XORRawDecoder);
}
@Test
public void testCodecNativeEnabled() {
assumeTrue(ErasureCodeNative.isNativeCodeLoaded());
ErasureCoderOptions coderOptions = new ErasureCoderOptions(
numDataUnit, numParityUnit);
RawErasureEncoder rsEncoder = CodecUtil.createRawEncoder(
conf, ErasureCodeConstants.RS_CODEC_NAME, coderOptions);
RawErasureDecoder rsDecoder = CodecUtil.createRawDecoder(
conf, ErasureCodeConstants.RS_CODEC_NAME, coderOptions);
RawErasureEncoder xorEncoder = CodecUtil.createRawEncoder(
conf, ErasureCodeConstants.XOR_CODEC_NAME, coderOptions);
RawErasureDecoder xorDecoder = CodecUtil.createRawDecoder(
conf, ErasureCodeConstants.XOR_CODEC_NAME, coderOptions);
assertTrue(rsEncoder instanceof NativeRSRawEncoder);
assertTrue(rsDecoder instanceof NativeRSRawDecoder);
assertTrue(xorEncoder instanceof NativeXORRawEncoder);
assertTrue(xorDecoder instanceof NativeXORRawDecoder);
conf.setBoolean(CodecUtil.IO_ERASURECODE_CODEC_NATIVE_ENABLED_KEY,
false);
rsEncoder = CodecUtil.createRawEncoder(
conf, ErasureCodeConstants.RS_CODEC_NAME, coderOptions);
rsDecoder = CodecUtil.createRawDecoder(
conf, ErasureCodeConstants.RS_CODEC_NAME, coderOptions);
xorEncoder = CodecUtil.createRawEncoder(
conf, ErasureCodeConstants.XOR_CODEC_NAME, coderOptions);
xorDecoder = CodecUtil.createRawDecoder(
conf, ErasureCodeConstants.XOR_CODEC_NAME, coderOptions);
assertTrue(rsEncoder instanceof RSRawEncoder);
assertTrue(rsDecoder instanceof RSRawDecoder);
assertTrue(xorEncoder instanceof XORRawEncoder);
assertTrue(xorDecoder instanceof XORRawDecoder);
}
}
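
The native-coder test above turns on a single switch. A minimal sketch, not part of the patch, of selecting a coder through that switch; the 6+3 layout matches the numDataUnit/numParityUnit constants in this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.erasurecode.CodecUtil;
import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;

public class CoderSelectionSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Force the pure-Java implementation even if native code is loaded.
    conf.setBoolean(CodecUtil.IO_ERASURECODE_CODEC_NATIVE_ENABLED_KEY, false);
    ErasureCoderOptions options = new ErasureCoderOptions(6, 3); // 6 data + 3 parity units
    RawErasureEncoder encoder = CodecUtil.createRawEncoder(
        conf, ErasureCodeConstants.RS_CODEC_NAME, options);
    System.out.println(encoder.getClass().getSimpleName()); // RSRawEncoder
  }
}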

View File

@ -29,9 +29,10 @@ import org.junit.Test;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
@ -271,7 +272,7 @@ public class TestTFileSeek {
try {
Options opts = buildOptions();
CommandLineParser parser = new DefaultParser();
CommandLineParser parser = new GnuParser();
CommandLine line = parser.parse(opts, args, true);
processOptions(line, opts);
validateOptions();
@ -289,56 +290,81 @@ public class TestTFileSeek {
private Options buildOptions() {
Option compress =
Option.builder("c").longOpt("compress").argName("[none|lzo|gz]")
.hasArg().desc("compression scheme").build();
OptionBuilder.withLongOpt("compress").withArgName("[none|lzo|gz]")
.hasArg().withDescription("compression scheme").create('c');
Option fileSize =
Option.builder("s").longOpt("file-size").argName("size-in-MB")
.hasArg().desc("target size of the file (in MB).").build();
OptionBuilder.withLongOpt("file-size").withArgName("size-in-MB")
.hasArg().withDescription("target size of the file (in MB).")
.create('s');
Option fsInputBufferSz =
Option.builder("i").longOpt("fs-input-buffer").argName("size")
.hasArg().desc("size of the file system input buffer (in bytes).").build();
OptionBuilder.withLongOpt("fs-input-buffer").withArgName("size")
.hasArg().withDescription(
"size of the file system input buffer (in bytes).").create(
'i');
Option fsOutputBufferSize =
Option.builder("o").longOpt("fs-output-buffer").argName("size")
.hasArg().desc("size of the file system output buffer (in bytes).").build();
OptionBuilder.withLongOpt("fs-output-buffer").withArgName("size")
.hasArg().withDescription(
"size of the file system output buffer (in bytes).").create(
'o');
Option keyLen =
Option.builder("k").longOpt("key-length").argName("min,max")
.hasArg().desc("the length range of the key (in bytes)").build();
OptionBuilder
.withLongOpt("key-length")
.withArgName("min,max")
.hasArg()
.withDescription(
"the length range of the key (in bytes)")
.create('k');
Option valueLen =
Option.builder("v").longOpt("value-length").argName("min,max")
.hasArg().desc("the length range of the value (in bytes)").build();
OptionBuilder
.withLongOpt("value-length")
.withArgName("min,max")
.hasArg()
.withDescription(
"the length range of the value (in bytes)")
.create('v');
Option blockSz =
Option.builder("b").longOpt("block").argName("size-in-KB").hasArg()
.desc("minimum block size (in KB)").build();
OptionBuilder.withLongOpt("block").withArgName("size-in-KB").hasArg()
.withDescription("minimum block size (in KB)").create('b');
Option seed =
Option.builder("S").longOpt("seed").argName("long-int").hasArg()
.desc("specify the seed").build();
OptionBuilder.withLongOpt("seed").withArgName("long-int").hasArg()
.withDescription("specify the seed").create('S');
Option operation =
Option.builder("x").longOpt("operation").argName("r|w|rw").hasArg()
.desc("action: seek-only, create-only, seek-after-create").build();
OptionBuilder.withLongOpt("operation").withArgName("r|w|rw").hasArg()
.withDescription(
"action: seek-only, create-only, seek-after-create").create(
'x');
Option rootDir =
Option.builder("r").longOpt("root-dir").argName("path").hasArg()
.desc("specify root directory where files will be created.").build();
OptionBuilder.withLongOpt("root-dir").withArgName("path").hasArg()
.withDescription(
"specify root directory where files will be created.")
.create('r');
Option file =
Option.builder("f").longOpt("file").argName("name").hasArg()
.desc("specify the file name to be created or read.").build();
OptionBuilder.withLongOpt("file").withArgName("name").hasArg()
.withDescription("specify the file name to be created or read.")
.create('f');
Option seekCount =
Option.builder("n").longOpt("seek").argName("count").hasArg()
.desc("specify how many seek operations we perform (requires -x r or -x rw.").build();
OptionBuilder
.withLongOpt("seek")
.withArgName("count")
.hasArg()
.withDescription(
"specify how many seek operations we perform (requires -x r or -x rw.")
.create('n');
Option help =
Option.builder("h").longOpt("help").hasArg(false)
.desc("show this screen").build();
OptionBuilder.withLongOpt("help").hasArg(false).withDescription(
"show this screen").create("h");
return new Options().addOption(compress).addOption(fileSize).addOption(
fsInputBufferSz).addOption(fsOutputBufferSize).addOption(keyLen)

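The commons-cli hunks above and below all apply the same mechanical migration: the deprecated static OptionBuilder plus GnuParser of commons-cli 1.2 become the Option.builder(...) fluent API plus DefaultParser in 1.5. A minimal before/after sketch, not part of the patch; the sample arguments are illustrative.

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;

public class CliMigrationSketch {
  public static void main(String[] args) throws Exception {
    // commons-cli 1.2 (deprecated):
    //   Option fileSize = OptionBuilder.withLongOpt("file-size").withArgName("size-in-MB")
    //       .hasArg().withDescription("target size of the file (in MB).").create('s');
    // commons-cli 1.5:
    Option fileSize = Option.builder("s").longOpt("file-size").argName("size-in-MB")
        .hasArg().desc("target size of the file (in MB).").build();
    Options opts = new Options().addOption(fileSize);
    CommandLine line = new DefaultParser().parse(opts, new String[] {"-s", "64"});
    System.out.println(line.getOptionValue("s")); // 64
  }
}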
View File

@ -30,9 +30,10 @@ import org.junit.Test;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.DefaultParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
@ -552,7 +553,7 @@ public class TestTFileSeqFileComparison {
try {
Options opts = buildOptions();
CommandLineParser parser = new DefaultParser();
CommandLineParser parser = new GnuParser();
CommandLine line = parser.parse(opts, args, true);
processOptions(line, opts);
validateOptions();
@ -570,70 +571,87 @@ public class TestTFileSeqFileComparison {
private Options buildOptions() {
Option compress =
Option.builder("c").longOpt("compress").argName("[none|lzo|gz]")
.hasArg().desc("compression scheme").build();
OptionBuilder.withLongOpt("compress").withArgName("[none|lzo|gz]")
.hasArg().withDescription("compression scheme").create('c');
Option dictSize =
Option.builder("d").longOpt("dict").argName("size")
.hasArg().desc("number of dictionary entries").build();
OptionBuilder.withLongOpt("dict").withArgName("size").hasArg()
.withDescription("number of dictionary entries").create('d');
Option fileSize =
Option.builder("s").longOpt("file-size").argName("size-in-MB")
.hasArg().desc("target size of the file (in MB).").build();
OptionBuilder.withLongOpt("file-size").withArgName("size-in-MB")
.hasArg().withDescription("target size of the file (in MB).")
.create('s');
Option format =
Option.builder("f").longOpt("format").argName("[tfile|seqfile]")
.hasArg().desc("choose TFile or SeqFile").build();
OptionBuilder.withLongOpt("format").withArgName("[tfile|seqfile]")
.hasArg().withDescription("choose TFile or SeqFile").create('f');
Option fsInputBufferSz =
Option.builder("i").longOpt("fs-input-buffer").argName("size")
.hasArg().desc("size of the file system input buffer (in bytes).").build();
OptionBuilder.withLongOpt("fs-input-buffer").withArgName("size")
.hasArg().withDescription(
"size of the file system input buffer (in bytes).").create(
'i');
Option fsOutputBufferSize =
Option.builder("o").longOpt("fs-output-buffer").argName("size")
.hasArg().desc("size of the file system output buffer (in bytes).").build();
OptionBuilder.withLongOpt("fs-output-buffer").withArgName("size")
.hasArg().withDescription(
"size of the file system output buffer (in bytes).").create(
'o');
Option keyLen =
Option.builder("o").longOpt("key-length").argName("length")
.hasArg()
.desc("base length of the key (in bytes), actual length varies in [base, 2*base)")
.build();
OptionBuilder
.withLongOpt("key-length")
.withArgName("length")
.hasArg()
.withDescription(
"base length of the key (in bytes), actual length varies in [base, 2*base)")
.create('k');
Option valueLen =
Option.builder("v").longOpt("key-length").argName("length")
.longOpt("value-length").argName("length").hasArg()
.desc("base length of the value (in bytes), actual length varies in [base, 2*base)")
.build();
OptionBuilder
.withLongOpt("value-length")
.withArgName("length")
.hasArg()
.withDescription(
"base length of the value (in bytes), actual length varies in [base, 2*base)")
.create('v');
Option wordLen =
Option.builder("w").longOpt("word-length").argName("min,max")
.hasArg().desc("range of dictionary word length (in bytes)").build();
OptionBuilder.withLongOpt("word-length").withArgName("min,max")
.hasArg().withDescription(
"range of dictionary word length (in bytes)").create('w');
Option blockSz =
Option.builder("b").longOpt("block").argName("size-in-KB").hasArg()
.desc("minimum block size (in KB)").build();
OptionBuilder.withLongOpt("block").withArgName("size-in-KB").hasArg()
.withDescription("minimum block size (in KB)").create('b');
Option seed =
Option.builder("S").longOpt("seed").argName("long-int").hasArg()
.desc("specify the seed").build();
OptionBuilder.withLongOpt("seed").withArgName("long-int").hasArg()
.withDescription("specify the seed").create('S');
Option operation =
Option.builder("x").longOpt("operation").argName("r|w|rw").hasArg()
.desc("action: read-only, create-only, read-after-create").build();
OptionBuilder.withLongOpt("operation").withArgName("r|w|rw").hasArg()
.withDescription(
"action: read-only, create-only, read-after-create").create(
'x');
Option rootDir =
Option.builder("r").longOpt("root-dir").argName("path").hasArg()
.desc("specify root directory where files will be created.").build();
OptionBuilder.withLongOpt("root-dir").withArgName("path").hasArg()
.withDescription(
"specify root directory where files will be created.")
.create('r');
Option help =
Option.builder("h").longOpt("help").hasArg(false)
.desc("show this screen").build();
OptionBuilder.withLongOpt("help").hasArg(false).withDescription(
"show this screen").create("h");
return new Options().addOption(compress).addOption(dictSize).addOption(
fileSize).addOption(format).addOption(fsInputBufferSz).addOption(
fsOutputBufferSize).addOption(keyLen).addOption(wordLen).addOption(
blockSz).addOption(rootDir).addOption(valueLen).addOption(operation)
.addOption(help);
}
private void processOptions(CommandLine line, Options opts)

View File

@ -23,7 +23,7 @@ import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
@ -88,78 +88,59 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
}
}
@SuppressWarnings("static-access")
private Options buildOptions() {
Options opts = new Options();
opts.addOption(
Option.builder("s")
.longOpt("serverThreads")
.hasArg(true)
.argName("numthreads")
.desc("number of server threads (handlers) to run (or 0 to not run server)")
.build());
OptionBuilder.withLongOpt("serverThreads").hasArg(true)
.withArgName("numthreads")
.withDescription("number of server threads (handlers) to run (or 0 to not run server)")
.create("s"));
opts.addOption(
Option.builder("r")
.longOpt("serverReaderThreads")
.hasArg(true)
.argName("threads")
.desc("number of server reader threads to run")
.build());
OptionBuilder.withLongOpt("serverReaderThreads").hasArg(true)
.withArgName("threads")
.withDescription("number of server reader threads to run")
.create("r"));
opts.addOption(
Option.builder("c")
.longOpt("clientThreads")
.hasArg(true)
.argName("numthreads")
.desc("number of client threads to run (or 0 to not run client)")
.build());
opts.addOption(
Option.builder("m")
.longOpt("messageSize")
.hasArg(true)
.argName("bytes")
.desc("size of call parameter in bytes")
.build());
opts.addOption(
Option.builder("t")
.longOpt("time")
.hasArg(true)
.argName("seconds")
.desc("number of seconds to run clients for")
.build());
opts.addOption(
Option.builder("p")
.longOpt("port")
.hasArg(true)
.argName("port")
.desc("port to listen or connect on")
.build());
opts.addOption(
Option.builder("h")
.longOpt("host")
.hasArg(true)
.argName("addr")
.desc("host to listen or connect on")
.build());
opts.addOption(
Option.builder("e")
.longOpt("engine")
.hasArg(true)
.argName("protobuf")
.desc("engine to use")
.build());
OptionBuilder.withLongOpt("clientThreads").hasArg(true)
.withArgName("numthreads")
.withDescription("number of client threads to run (or 0 to not run client)")
.create("c"));
opts.addOption(
OptionBuilder.withLongOpt("messageSize").hasArg(true)
.withArgName("bytes")
.withDescription("size of call parameter in bytes")
.create("m"));
opts.addOption(
OptionBuilder.withLongOpt("time").hasArg(true)
.withArgName("seconds")
.withDescription("number of seconds to run clients for")
.create("t"));
opts.addOption(
OptionBuilder.withLongOpt("port").hasArg(true)
.withArgName("port")
.withDescription("port to listen or connect on")
.create("p"));
opts.addOption(
OptionBuilder.withLongOpt("host").hasArg(true)
.withArgName("addr")
.withDescription("host to listen or connect on")
.create('h'));
opts.addOption(
Option.builder("?")
.longOpt("help")
.hasArg(false)
.desc("show this screen")
.build());
OptionBuilder.withLongOpt("engine").hasArg(true)
.withArgName("protobuf")
.withDescription("engine to use")
.create('e'));
opts.addOption(
OptionBuilder.withLongOpt("help").hasArg(false)
.withDescription("show this screen")
.create('?'));
return opts;
}

View File

@ -1728,47 +1728,6 @@ public class TestIPC {
checkUserBinding(true);
}
@Test(timeout=60000)
public void testUpdateAddressEnsureResolved() throws Exception {
// start server
Server server = new TestServer(1, false);
server.start();
SocketFactory mockFactory = Mockito.mock(SocketFactory.class);
doThrow(new ConnectTimeoutException("fake")).when(mockFactory)
.createSocket();
Client client = new Client(LongWritable.class, conf, mockFactory);
InetSocketAddress address =
new InetSocketAddress("localhost", NetUtils.getFreeSocketPort());
ConnectionId remoteId = getConnectionId(address, 100, conf);
try {
LambdaTestUtils.intercept(IOException.class, (Callable<Void>) () -> {
client.call(RpcKind.RPC_BUILTIN, new LongWritable(RANDOM.nextLong()),
remoteId, RPC.RPC_SERVICE_CLASS_DEFAULT, null);
return null;
});
assertFalse(address.isUnresolved());
assertFalse(remoteId.getAddress().isUnresolved());
assertEquals(System.identityHashCode(remoteId.getAddress()),
System.identityHashCode(address));
NetUtils.addStaticResolution("localhost", "host.invalid");
LambdaTestUtils.intercept(IOException.class, (Callable<Void>) () -> {
client.call(RpcKind.RPC_BUILTIN, new LongWritable(RANDOM.nextLong()),
remoteId, RPC.RPC_SERVICE_CLASS_DEFAULT, null);
return null;
});
assertFalse(remoteId.getAddress().isUnresolved());
assertEquals(System.identityHashCode(remoteId.getAddress()),
System.identityHashCode(address));
} finally {
client.stop();
server.stop();
}
}
private void checkUserBinding(boolean asProxy) throws Exception {
Socket s;
// don't attempt bind with no service host.

View File

@ -20,9 +20,8 @@ package org.apache.hadoop.ipc;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.test.LambdaTestUtils;
import org.junit.Test;
import java.util.List;
@ -34,7 +33,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.conf.Configuration;
public class TestIdentityProviders {
public static class FakeSchedulable implements Schedulable {
public class FakeSchedulable implements Schedulable {
public FakeSchedulable() {
}
@ -62,9 +61,7 @@ public class TestIdentityProviders {
CommonConfigurationKeys.IPC_IDENTITY_PROVIDER_KEY,
IdentityProvider.class);
assertThat(providers)
.describedAs("provider list")
.hasSize(1);
assertTrue(providers.size() == 1);
IdentityProvider ip = providers.get(0);
assertNotNull(ip);
@ -72,20 +69,14 @@ public class TestIdentityProviders {
}
@Test
public void testUserIdentityProvider() throws Exception {
public void testUserIdentityProvider() throws IOException {
UserIdentityProvider uip = new UserIdentityProvider();
FakeSchedulable fakeSchedulable = new FakeSchedulable();
String identity = uip.makeIdentity(fakeSchedulable);
String identity = uip.makeIdentity(new FakeSchedulable());
// Get our username
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
String username = ugi.getUserName();
assertEquals(username, identity);
// FakeSchedulable doesn't override getCallerContext()
// accessing it should throw an UnsupportedOperationException
LambdaTestUtils.intercept(UnsupportedOperationException.class,
"Invalid operation.", fakeSchedulable::getCallerContext);
}
}

View File

@ -1336,16 +1336,12 @@ public class TestRPC extends TestRpcBase {
3000, getLongCounter("RpcProcessingTimeNumOps", rpcMetrics));
assertEquals("Expected correct rpc lock wait count",
3000, getLongCounter("RpcLockWaitTimeNumOps", rpcMetrics));
assertEquals("Expected correct rpc response count",
3000, getLongCounter("RpcResponseTimeNumOps", rpcMetrics));
assertEquals("Expected zero rpc lock wait time",
0, getDoubleGauge("RpcLockWaitTimeAvgTime", rpcMetrics), 0.001);
MetricsAsserts.assertQuantileGauges("RpcQueueTime" + interval + "s",
rpcMetrics);
MetricsAsserts.assertQuantileGauges("RpcProcessingTime" + interval + "s",
rpcMetrics);
MetricsAsserts.assertQuantileGauges("RpcResponseTime" + interval + "s",
rpcMetrics);
String actualUserVsCon = MetricsAsserts
.getStringMetric("NumOpenConnectionsPerUser", rpcMetrics);
String proxyUser =

View File

@ -0,0 +1,264 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.log;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ContainerNode;
import org.junit.Test;
import static org.junit.Assert.*;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Appender;
import org.apache.log4j.Category;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.WriterAppender;
import org.apache.log4j.spi.HierarchyEventListener;
import org.apache.log4j.spi.LoggerFactory;
import org.apache.log4j.spi.LoggerRepository;
import org.apache.log4j.spi.ThrowableInformation;
import java.io.IOException;
import java.io.StringWriter;
import java.io.Writer;
import java.net.NoRouteToHostException;
import java.util.Enumeration;
import java.util.Vector;
public class TestLog4Json {
@Test
public void testConstruction() throws Throwable {
Log4Json l4j = new Log4Json();
String outcome = l4j.toJson(new StringWriter(),
"name", 0, "DEBUG", "thread1",
"hello, world", null).toString();
println("testConstruction", outcome);
}
@Test
public void testException() throws Throwable {
Exception e =
new NoRouteToHostException("that box caught fire 3 years ago");
ThrowableInformation ti = new ThrowableInformation(e);
Log4Json l4j = new Log4Json();
long timeStamp = Time.now();
String outcome = l4j.toJson(new StringWriter(),
"testException",
timeStamp,
"INFO",
"quoted\"",
"new line\n and {}",
ti)
.toString();
println("testException", outcome);
}
@Test
public void testNestedException() throws Throwable {
Exception e =
new NoRouteToHostException("that box caught fire 3 years ago");
Exception ioe = new IOException("Datacenter problems", e);
ThrowableInformation ti = new ThrowableInformation(ioe);
Log4Json l4j = new Log4Json();
long timeStamp = Time.now();
String outcome = l4j.toJson(new StringWriter(),
"testNestedException",
timeStamp,
"INFO",
"quoted\"",
"new line\n and {}",
ti)
.toString();
println("testNestedException", outcome);
ContainerNode rootNode = Log4Json.parse(outcome);
assertEntryEquals(rootNode, Log4Json.LEVEL, "INFO");
assertEntryEquals(rootNode, Log4Json.NAME, "testNestedException");
assertEntryEquals(rootNode, Log4Json.TIME, timeStamp);
assertEntryEquals(rootNode, Log4Json.EXCEPTION_CLASS,
ioe.getClass().getName());
JsonNode node = assertNodeContains(rootNode, Log4Json.STACK);
assertTrue("Not an array: " + node, node.isArray());
node = assertNodeContains(rootNode, Log4Json.DATE);
assertTrue("Not a string: " + node, node.isTextual());
//rather than try to make assertions about the text of the message
//matching another ISO date, this test asserts that the hyphen
//and colon characters are in the string.
String dateText = node.textValue();
assertTrue("No '-' in " + dateText, dateText.contains("-"));
assertTrue("No '-' in " + dateText, dateText.contains(":"));
}
/**
* Create a log instance and log to it.
* @throws Throwable if it all goes wrong
*/
@Test
public void testLog() throws Throwable {
String message = "test message";
Throwable throwable = null;
String json = logOut(message, throwable);
println("testLog", json);
}
/**
* Create a log instance and log to it.
* @throws Throwable if it all goes wrong
*/
@Test
public void testLogExceptions() throws Throwable {
String message = "test message";
Throwable inner = new IOException("Directory / not found");
Throwable throwable = new IOException("startup failure", inner);
String json = logOut(message, throwable);
println("testLogExceptions", json);
}
void assertEntryEquals(ContainerNode rootNode, String key, String value) {
JsonNode node = assertNodeContains(rootNode, key);
assertEquals(value, node.textValue());
}
private JsonNode assertNodeContains(ContainerNode rootNode, String key) {
JsonNode node = rootNode.get(key);
if (node == null) {
fail("No entry of name \"" + key + "\" found in " + rootNode.toString());
}
return node;
}
void assertEntryEquals(ContainerNode rootNode, String key, long value) {
JsonNode node = assertNodeContains(rootNode, key);
assertEquals(value, node.numberValue());
}
/**
* Print out what's going on. The logging APIs aren't used, and the text
* is delimited with '#' markers so the output is easy to pick out.
*
* @param name name of operation
* @param text text to print
*/
private void println(String name, String text) {
System.out.println(name + ": #" + text + "#");
}
private String logOut(String message, Throwable throwable) {
StringWriter writer = new StringWriter();
Logger logger = createLogger(writer);
logger.info(message, throwable);
//remove and close the appender
logger.removeAllAppenders();
return writer.toString();
}
public Logger createLogger(Writer writer) {
TestLoggerRepository repo = new TestLoggerRepository();
Logger logger = repo.getLogger("test");
Log4Json layout = new Log4Json();
WriterAppender appender = new WriterAppender(layout, writer);
logger.addAppender(appender);
return logger;
}
/**
* This test logger avoids integrating with the main runtime's Logger hierarchy
* in ways the reader does not need to know about.
*/
private static class TestLogger extends Logger {
private TestLogger(String name, LoggerRepository repo) {
super(name);
repository = repo;
setLevel(Level.INFO);
}
}
public static class TestLoggerRepository implements LoggerRepository {
@Override
public void addHierarchyEventListener(HierarchyEventListener listener) {
}
@Override
public boolean isDisabled(int level) {
return false;
}
@Override
public void setThreshold(Level level) {
}
@Override
public void setThreshold(String val) {
}
@Override
public void emitNoAppenderWarning(Category cat) {
}
@Override
public Level getThreshold() {
return Level.ALL;
}
@Override
public Logger getLogger(String name) {
return new TestLogger(name, this);
}
@Override
public Logger getLogger(String name, LoggerFactory factory) {
return new TestLogger(name, this);
}
@Override
public Logger getRootLogger() {
return new TestLogger("root", this);
}
@Override
public Logger exists(String name) {
return null;
}
@Override
public void shutdown() {
}
@Override
public Enumeration getCurrentLoggers() {
return new Vector().elements();
}
@Override
public Enumeration getCurrentCategories() {
return new Vector().elements();
}
@Override
public void fireAddAppenderEvent(Category logger, Appender appender) {
}
@Override
public void resetConfiguration() {
}
}
}

View File

@ -52,8 +52,6 @@ public class TestMutableMetrics {
private static final Logger LOG =
LoggerFactory.getLogger(TestMutableMetrics.class);
private static final double EPSILON = 1e-42;
private static final int SLEEP_TIME_MS = 6 * 1000; // 6 seconds.
private static final int SAMPLE_COUNT = 1000;
/**
* Test the snapshot method
@ -397,14 +395,14 @@ public class TestMutableMetrics {
MutableQuantiles quantiles = registry.newQuantiles("foo", "stat", "Ops",
"Latency", 5);
// Push some values in and wait for it to publish
long startTimeMS = System.currentTimeMillis();
for (long i = 1; i <= SAMPLE_COUNT; i++) {
long start = System.nanoTime() / 1000000;
for (long i = 1; i <= 1000; i++) {
quantiles.add(i);
quantiles.add(1001 - i);
}
long endTimeMS = System.currentTimeMillis();
long end = System.nanoTime() / 1000000;
Thread.sleep(SLEEP_TIME_MS - (endTimeMS - startTimeMS));
Thread.sleep(6000 - (end - start));
registry.snapshot(mb, false);
@ -416,8 +414,10 @@ public class TestMutableMetrics {
}
// Verify the results are within our requirements
verify(mb).addGauge(info("FooNumOps", "Number of ops for stat with 5s interval"), 2000L);
Quantile[] quants = MutableQuantiles.QUANTILES;
verify(mb).addGauge(
info("FooNumOps", "Number of ops for stat with 5s interval"),
(long) 2000);
Quantile[] quants = MutableQuantiles.quantiles;
String name = "Foo%dthPercentileLatency";
String desc = "%d percentile latency with 5 second interval for stat";
for (Quantile q : quants) {
@ -431,46 +431,6 @@ public class TestMutableMetrics {
}
}
/**
* Ensure that quantile estimates from {@link MutableInverseQuantiles} are within
* specified error bounds.
*/
@Test(timeout = 30000)
public void testMutableInverseQuantilesError() throws Exception {
MetricsRecordBuilder mb = mockMetricsRecordBuilder();
MetricsRegistry registry = new MetricsRegistry("test");
// Use a 5s rollover period
MutableQuantiles inverseQuantiles = registry.newInverseQuantiles("foo", "stat", "Ops",
"Latency", 5);
// Push some values in and wait for it to publish
long startTimeMS = System.currentTimeMillis();
for (long i = 1; i <= SAMPLE_COUNT; i++) {
inverseQuantiles.add(i);
inverseQuantiles.add(1001 - i);
}
long endTimeMS = System.currentTimeMillis();
Thread.sleep(SLEEP_TIME_MS - (endTimeMS - startTimeMS));
registry.snapshot(mb, false);
// Verify the results are within our requirements
verify(mb).addGauge(
info("FooNumOps", "Number of ops for stat with 5s interval"), 2000L);
Quantile[] inverseQuants = MutableInverseQuantiles.INVERSE_QUANTILES;
String name = "Foo%dthInversePercentileLatency";
String desc = "%d inverse percentile latency with 5 second interval for stat";
for (Quantile q : inverseQuants) {
int inversePercentile = (int) (100 * (1 - q.quantile));
int error = (int) (1000 * q.error);
String n = String.format(name, inversePercentile);
String d = String.format(desc, inversePercentile);
long expected = (long) (q.quantile * 1000);
verify(mb).addGauge(eq(info(n, d)), leq(expected + error));
verify(mb).addGauge(eq(info(n, d)), geq(expected - error));
}
}
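
The inverse-percentile arithmetic used above is compact enough to show in isolation. A worked sketch, not part of the patch, using the test's own formulas:

public class InversePercentileSketch {
  public static void main(String[] args) {
    double quantile = 0.99;   // an illustrative quantile
    int sampleCount = 1000;   // matches SAMPLE_COUNT in the test
    // The gauge name uses 100 * (1 - q): the 0.99 quantile reports as the 1st inverse percentile.
    int inversePercentile = (int) (100 * (1 - quantile)); // 1
    // The expected value over samples 1..1000 is q * 1000.
    long expected = (long) (quantile * sampleCount);      // 990
    System.out.println("Foo" + inversePercentile + "thInversePercentileLatency ~ " + expected);
  }
}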
/**
* Test that {@link MutableQuantiles} rolls the window over at the specified
* interval.
@ -483,21 +443,21 @@ public class TestMutableMetrics {
MutableQuantiles quantiles = registry.newQuantiles("foo", "stat", "Ops",
"Latency", 5);
Quantile[] quants = MutableQuantiles.QUANTILES;
Quantile[] quants = MutableQuantiles.quantiles;
String name = "Foo%dthPercentileLatency";
String desc = "%d percentile latency with 5 second interval for stat";
// Push values for three intervals
long startTimeMS = System.currentTimeMillis();
long start = System.nanoTime() / 1000000;
for (int i = 1; i <= 3; i++) {
// Insert the values
for (long j = 1; j <= SAMPLE_COUNT; j++) {
for (long j = 1; j <= 1000; j++) {
quantiles.add(i);
}
// Sleep until 1s after the next 5s interval, to let the metrics
// roll over
long sleepTimeMS = startTimeMS + (5000L * i) + 1000 - System.currentTimeMillis();
Thread.sleep(sleepTimeMS);
long sleep = (start + (5000 * i) + 1000) - (System.nanoTime() / 1000000);
Thread.sleep(sleep);
// Verify that the window reset, check it has the values we pushed in
registry.snapshot(mb, false);
for (Quantile q : quants) {
@ -510,7 +470,8 @@ public class TestMutableMetrics {
// Verify the metrics were added the right number of times
verify(mb, times(3)).addGauge(
info("FooNumOps", "Number of ops for stat with 5s interval"), 1000L);
info("FooNumOps", "Number of ops for stat with 5s interval"),
(long) 1000);
for (Quantile q : quants) {
int percentile = (int) (100 * q.quantile);
String n = String.format(name, percentile);
@ -520,56 +481,7 @@ public class TestMutableMetrics {
}
/**
* Test that {@link MutableInverseQuantiles} rolls the window over at the specified
* interval.
*/
@Test(timeout = 30000)
public void testMutableInverseQuantilesRollover() throws Exception {
MetricsRecordBuilder mb = mockMetricsRecordBuilder();
MetricsRegistry registry = new MetricsRegistry("test");
// Use a 5s rollover period
MutableQuantiles inverseQuantiles = registry.newInverseQuantiles("foo", "stat", "Ops",
"Latency", 5);
Quantile[] quants = MutableInverseQuantiles.INVERSE_QUANTILES;
String name = "Foo%dthInversePercentileLatency";
String desc = "%d inverse percentile latency with 5 second interval for stat";
// Push values for three intervals
long startTimeMS = System.currentTimeMillis();
for (int i = 1; i <= 3; i++) {
// Insert the values
for (long j = 1; j <= SAMPLE_COUNT; j++) {
inverseQuantiles.add(i);
}
// Sleep until 1s after the next 5s interval, to let the metrics
// roll over
long sleepTimeMS = startTimeMS + (5000L * i) + 1000 - System.currentTimeMillis();
Thread.sleep(sleepTimeMS);
// Verify that the window reset, check it has the values we pushed in
registry.snapshot(mb, false);
for (Quantile q : quants) {
int inversePercentile = (int) (100 * (1 - q.quantile));
String n = String.format(name, inversePercentile);
String d = String.format(desc, inversePercentile);
verify(mb).addGauge(info(n, d), (long) i);
}
}
// Verify the metrics were added the right number of times
verify(mb, times(3)).addGauge(
info("FooNumOps", "Number of ops for stat with 5s interval"), 1000L);
for (Quantile q : quants) {
int inversePercentile = (int) (100 * (1 - q.quantile));
String n = String.format(name, inversePercentile);
String d = String.format(desc, inversePercentile);
verify(mb, times(3)).addGauge(eq(info(n, d)), anyLong());
}
}
/**
* Test that {@link MutableQuantiles} rolls over correctly even if no items.
* Test that {@link MutableQuantiles} rolls over correctly even if no items
* have been added to the window
*/
@Test(timeout = 30000)
@ -583,33 +495,11 @@ public class TestMutableMetrics {
// Check it initially
quantiles.snapshot(mb, true);
verify(mb).addGauge(
info("FooNumOps", "Number of ops for stat with 5s interval"), 0L);
Thread.sleep(SLEEP_TIME_MS);
info("FooNumOps", "Number of ops for stat with 5s interval"), (long) 0);
Thread.sleep(6000);
quantiles.snapshot(mb, false);
verify(mb, times(2)).addGauge(
info("FooNumOps", "Number of ops for stat with 5s interval"), 0L);
}
/**
* Test that {@link MutableInverseQuantiles} rolls over correctly even if no items
* have been added to the window
*/
@Test(timeout = 30000)
public void testMutableInverseQuantilesEmptyRollover() throws Exception {
MetricsRecordBuilder mb = mockMetricsRecordBuilder();
MetricsRegistry registry = new MetricsRegistry("test");
// Use a 5s rollover period
MutableQuantiles inverseQuantiles = registry.newInverseQuantiles("foo", "stat", "Ops",
"Latency", 5);
// Check it initially
inverseQuantiles.snapshot(mb, true);
verify(mb).addGauge(
info("FooNumOps", "Number of ops for stat with 5s interval"), 0L);
Thread.sleep(SLEEP_TIME_MS);
inverseQuantiles.snapshot(mb, false);
verify(mb, times(2)).addGauge(
info("FooNumOps", "Number of ops for stat with 5s interval"), 0L);
info("FooNumOps", "Number of ops for stat with 5s interval"), (long) 0);
}
/**

View File

@ -24,7 +24,6 @@ import java.util.Collections;
import java.util.Map;
import java.util.Random;
import org.apache.hadoop.metrics2.lib.MutableInverseQuantiles;
import org.junit.Before;
import org.junit.Test;
@ -37,7 +36,6 @@ public class TestSampleQuantiles {
new Quantile(0.95, 0.005), new Quantile(0.99, 0.001) };
SampleQuantiles estimator;
final static int NUM_REPEATS = 10;
@Before
public void init() {
@ -93,70 +91,28 @@ public class TestSampleQuantiles {
@Test
public void testQuantileError() throws IOException {
final int count = 100000;
Random rnd = new Random(0xDEADDEAD);
int[] values = new int[count];
Random r = new Random(0xDEADDEAD);
Long[] values = new Long[count];
for (int i = 0; i < count; i++) {
values[i] = i + 1;
values[i] = (long) (i + 1);
}
// Repeat shuffle/insert/check cycles 10 times
for (int i = 0; i < NUM_REPEATS; i++) {
// Shuffle
Collections.shuffle(Arrays.asList(values), rnd);
// Do 10 shuffle/insert/check cycles
for (int i = 0; i < 10; i++) {
System.out.println("Starting run " + i);
Collections.shuffle(Arrays.asList(values), r);
estimator.clear();
// Insert
for (int value : values) {
estimator.insert(value);
for (int j = 0; j < count; j++) {
estimator.insert(values[j]);
}
Map<Quantile, Long> snapshot;
snapshot = estimator.snapshot();
// Check
for (Quantile q : quantiles) {
long actual = (long) (q.quantile * count);
long error = (long) (q.error * count);
long estimate = snapshot.get(q);
assertThat(estimate <= actual + error).isTrue();
assertThat(estimate >= actual - error).isTrue();
}
}
}
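
To make the bound concrete: for the tightest quantile in this test, Quantile(0.99, 0.001) over count = 100000 items, any estimate in [98900, 99100] passes. A one-off sketch of the arithmetic, not part of the patch:

public class QuantileBoundSketch {
  public static void main(String[] args) {
    final int count = 100000;
    double quantile = 0.99, error = 0.001;   // from the quantiles array above
    long actual = (long) (quantile * count); // 99000
    long allowed = (long) (error * count);   // 100
    System.out.println("estimate must lie in ["
        + (actual - allowed) + ", " + (actual + allowed) + "]"); // [98900, 99100]
  }
}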
/**
* Correctness test that checks that absolute error of the estimate for inverse quantiles
* is within specified error bounds for some randomly permuted streams of items.
*/
@Test
public void testInverseQuantiles() throws IOException {
SampleQuantiles inverseQuantilesEstimator =
new SampleQuantiles(MutableInverseQuantiles.INVERSE_QUANTILES);
final int count = 100000;
Random rnd = new Random(0xDEADDEAD);
int[] values = new int[count];
for (int i = 0; i < count; i++) {
values[i] = i + 1;
}
// Repeat shuffle/insert/check cycles 10 times
for (int i = 0; i < NUM_REPEATS; i++) {
// Shuffle
Collections.shuffle(Arrays.asList(values), rnd);
inverseQuantilesEstimator.clear();
// Insert
for (int value : values) {
inverseQuantilesEstimator.insert(value);
}
Map<Quantile, Long> snapshot;
snapshot = inverseQuantilesEstimator.snapshot();
// Check
for (Quantile q : MutableInverseQuantiles.INVERSE_QUANTILES) {
long actual = (long) (q.quantile * count);
long error = (long) (q.error * count);
long estimate = snapshot.get(q);
System.out
.println(String.format("Expected %d with error %d, estimated %d",
actual, error, estimate));
assertThat(estimate <= actual + error).isTrue();
assertThat(estimate >= actual - error).isTrue();
}

View File

@ -392,34 +392,13 @@ public class MetricsAsserts {
*/
public static void assertQuantileGauges(String prefix,
MetricsRecordBuilder rb, String valueName) {
verify(rb).addGauge(eqName(info(prefix + "NumOps", "")), geq(0L));
for (Quantile q : MutableQuantiles.QUANTILES) {
verify(rb).addGauge(eqName(info(prefix + "NumOps", "")), geq(0l));
for (Quantile q : MutableQuantiles.quantiles) {
String nameTemplate = prefix + "%dthPercentile" + valueName;
int percentile = (int) (100 * q.quantile);
verify(rb).addGauge(
eqName(info(String.format(nameTemplate, percentile), "")),
geq(0L));
}
}
/**
* Asserts that the NumOps and inverse quantiles for a metric have been changed at
* some point to a non-zero value, for the specified value name of the
* metrics (e.g., "Rate").
*
* @param prefix of the metric
* @param rb MetricsRecordBuilder with the metric
* @param valueName the value name for the metric
*/
public static void assertInverseQuantileGauges(String prefix,
MetricsRecordBuilder rb, String valueName) {
verify(rb).addGauge(eqName(info(prefix + "NumOps", "")), geq(0L));
for (Quantile q : MutableQuantiles.QUANTILES) {
String nameTemplate = prefix + "%dthInversePercentile" + valueName;
int percentile = (int) (100 * q.quantile);
verify(rb).addGauge(
eqName(info(String.format(nameTemplate, percentile), "")),
geq(0L));
geq(0l));
}
}
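
Typical call sites pair this assertion with MetricsAsserts.getMetrics, as TestRPC does above. A minimal sketch, not part of the patch; the metrics source name is hypothetical.

import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

public class QuantileGaugeAssertSketch {
  static void check() {
    // Hypothetical source name; real tests use e.g. "RpcActivityForPort" + port.
    MetricsRecordBuilder rb = getMetrics("RpcActivityForPort12345");
    // Verifies NumOps and every percentile gauge were published at least once.
    assertQuantileGauges("RpcQueueTime60s", rb);
  }
}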

View File

@ -1,51 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.test;
import java.lang.reflect.Field;
public final class ReflectionUtils {
private ReflectionUtils() {}
public static String getStringValueOfField(Field f) throws IllegalAccessException {
switch (f.getType().getName()) {
case "java.lang.String":
return (String) f.get(null);
case "short":
short shValue = (short) f.get(null);
return Integer.toString(shValue);
case "int":
int iValue = (int) f.get(null);
return Integer.toString(iValue);
case "long":
long lValue = (long) f.get(null);
return Long.toString(lValue);
case "float":
float fValue = (float) f.get(null);
return Float.toString(fValue);
case "double":
double dValue = (double) f.get(null);
return Double.toString(dValue);
case "boolean":
boolean bValue = (boolean) f.get(null);
return Boolean.toString(bValue);
default:
return null;
}
}
}
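
A usage sketch for the helper above, not part of the patch; Integer.MAX_VALUE stands in for any public static field.

import java.lang.reflect.Field;

public class ReflectionUtilsSketch {
  public static void main(String[] args) throws Exception {
    // A public static int field; getType().getName() is "int" here,
    // so the helper returns Integer.toString((int) f.get(null)).
    Field f = Integer.class.getField("MAX_VALUE");
    System.out.println(
        org.apache.hadoop.test.ReflectionUtils.getStringValueOfField(f)); // 2147483647
  }
}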

View File

@ -34,6 +34,7 @@ import java.util.List;
import java.util.Map;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.math3.util.Pair;
import org.apache.hadoop.conf.Configuration;
@ -197,11 +198,10 @@ public class TestGenericOptionsParser {
@Test
public void testCreateWithOptions() throws Exception {
// Create new option newOpt
Option opt = Option.builder("newOpt").argName("int")
.hasArg()
.desc("A new option")
.build();
Option opt = OptionBuilder.withArgName("int")
.hasArg()
.withDescription("A new option")
.create("newOpt");
Options opts = new Options();
opts.addOption(opt);

View File

@ -140,7 +140,7 @@ public class TestGenericsUtil {
@Test
public void testIsLog4jLogger() throws Exception {
assertFalse("False if clazz is null", GenericsUtil.isLog4jLogger((Class<?>) null));
assertFalse("False if clazz is null", GenericsUtil.isLog4jLogger(null));
assertTrue("The implementation is Log4j",
GenericsUtil.isLog4jLogger(TestGenericsUtil.class));
}

View File

@ -1,214 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util.curator;
import java.io.File;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.apache.curator.test.InstanceSpec;
import org.apache.curator.test.TestingServer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.zookeeper.ClientCnxnSocketNetty;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.client.ZKClientConfig;
import org.apache.zookeeper.common.ClientX509Util;
import org.apache.zookeeper.server.NettyServerCnxnFactory;
import static org.apache.hadoop.fs.FileContext.LOG;
import static org.junit.Assert.assertEquals;
/**
* Test the manager for ZooKeeper Curator when SSL/TLS is enabled for the ZK server-client
* connection.
*/
public class TestSecureZKCuratorManager {
public static final boolean DELETE_DATA_DIRECTORY_ON_CLOSE = true;
private TestingServer server;
private ZKCuratorManager curator;
private Configuration hadoopConf;
static final int SECURE_CLIENT_PORT = 2281;
static final int JUTE_MAXBUFFER = 400000000;
static final File ZK_DATA_DIR = new File("testZkSSLClientConnectionDataDir");
private static final int SERVER_ID = 1;
private static final int TICK_TIME = 100;
private static final int MAX_CLIENT_CNXNS = 10;
public static final int ELECTION_PORT = -1;
public static final int QUORUM_PORT = -1;
@Before
public void setup() throws Exception {
// inject values to the ZK configuration file for secure connection
Map<String, Object> customConfiguration = new HashMap<>();
customConfiguration.put("secureClientPort", String.valueOf(SECURE_CLIENT_PORT));
customConfiguration.put("audit.enable", true);
this.hadoopConf = setUpSecureConfig();
InstanceSpec spec =
new InstanceSpec(ZK_DATA_DIR, SECURE_CLIENT_PORT, ELECTION_PORT, QUORUM_PORT,
DELETE_DATA_DIRECTORY_ON_CLOSE, SERVER_ID, TICK_TIME, MAX_CLIENT_CNXNS,
customConfiguration);
this.server = new TestingServer(spec, true);
this.hadoopConf.set(CommonConfigurationKeys.ZK_ADDRESS, this.server.getConnectString());
this.curator = new ZKCuratorManager(this.hadoopConf);
this.curator.start(new ArrayList<>(), true);
}
/**
* A static method to configure the test ZK server to accept secure client connections.
* The self-signed certificates were generated for testing purposes as described below.
* For the ZK client to connect with the ZK server, the ZK server's keystore and truststore
* should be used.
* For testing purposes the keystore and truststore were generated using default values.
* 1. Generate the keystore.jks file:
* # keytool -genkey -alias mockcert -keyalg RSA -keystore keystore.jks -keysize 2048
* 2. Generate the ca-cert and the ca-key:
* # openssl req -new -x509 -keyout ca-key -out ca-cert
* 3. Generate the certificate signing request (cert-file):
* # keytool -keystore keystore.jks -alias mockcert -certreq -file certificate-request
* 4. Generate the ca-cert.srl file and make the cert valid for 10 years:
* # openssl x509 -req -CA ca-cert -CAkey ca-key -in certificate-request -out cert-signed
* -days 3650 -CAcreateserial -passin pass:password
* 5. Add the ca-cert to the keystore.jks:
* # keytool -keystore keystore.jks -alias mockca -import -file ca-cert
* 6. Install the signed certificate into the keystore:
* # keytool -keystore keystore.jks -alias mockcert -import -file cert-signed
* 7. Add the certificate to the truststore:
* # keytool -keystore truststore.jks -alias mockcert -import -file ca-cert
* For our purpose, we only need the end result of this process: the keystore.jks and the
* truststore.jks files.
*
* @return the updated Configuration.
*/
public static Configuration setUpSecureConfig() {
return setUpSecureConfig(new Configuration(),
"src/test/java/org/apache/hadoop/util/curator" + "/resources/data");
}
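/**
* Set up secure configuration for both the test ZK server and the ZK client.
* The system properties configure the embedded server's Netty/SSL stack, while
* the Configuration entries point the Hadoop ZK client at the same test
* keystore and truststore material under testDataPath.
* @param conf the Configuration to update.
* @param testDataPath directory holding the test SSL material.
* @return the updated Configuration.
*/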
public static Configuration setUpSecureConfig(Configuration conf, String testDataPath) {
System.setProperty("zookeeper.serverCnxnFactory",
NettyServerCnxnFactory.class.getCanonicalName());
System.setProperty("zookeeper.ssl.keyStore.location", testDataPath + "keystore.jks");
System.setProperty("zookeeper.ssl.keyStore.password", "password");
System.setProperty("zookeeper.ssl.trustStore.location", testDataPath + "truststore.jks");
System.setProperty("zookeeper.ssl.trustStore.password", "password");
System.setProperty("zookeeper.request.timeout", "12345");
System.setProperty("jute.maxbuffer", String.valueOf(JUTE_MAXBUFFER));
System.setProperty("javax.net.debug", "ssl");
System.setProperty("zookeeper.authProvider.x509",
"org.apache.zookeeper.server.auth.X509AuthenticationProvider");
conf.set(CommonConfigurationKeys.ZK_SSL_KEYSTORE_LOCATION,
testDataPath + "/ssl/keystore.jks");
conf.set(CommonConfigurationKeys.ZK_SSL_KEYSTORE_PASSWORD, "password");
conf.set(CommonConfigurationKeys.ZK_SSL_TRUSTSTORE_LOCATION,
testDataPath + "/ssl/truststore.jks");
conf.set(CommonConfigurationKeys.ZK_SSL_TRUSTSTORE_PASSWORD, "password");
return conf;
}
@After
public void teardown() throws Exception {
this.curator.close();
if (this.server != null) {
this.server.close();
this.server = null;
}
}
@Test
public void testSecureZKConfiguration() throws Exception {
LOG.info("Entered to the testSecureZKConfiguration test case.");
// Validate that HadoopZooKeeperFactory will set ZKConfig with given principals
ZKCuratorManager.HadoopZookeeperFactory factory =
new ZKCuratorManager.HadoopZookeeperFactory(null, null, null, true,
new ZKCuratorManager.TruststoreKeystore(hadoopConf));
ZooKeeper zk = factory.newZooKeeper(this.server.getConnectString(), 1000, null, false);
validateSSLConfiguration(this.hadoopConf.get(CommonConfigurationKeys.ZK_SSL_KEYSTORE_LOCATION),
this.hadoopConf.get(CommonConfigurationKeys.ZK_SSL_KEYSTORE_PASSWORD),
this.hadoopConf.get(CommonConfigurationKeys.ZK_SSL_TRUSTSTORE_LOCATION),
this.hadoopConf.get(CommonConfigurationKeys.ZK_SSL_TRUSTSTORE_PASSWORD), zk);
}
private void validateSSLConfiguration(String keystoreLocation, String keystorePassword,
String truststoreLocation, String truststorePassword, ZooKeeper zk) {
try (ClientX509Util x509Util = new ClientX509Util()) {
//testing if custom values are set properly
assertEquals("Validate that expected clientConfig is set in ZK config", keystoreLocation,
zk.getClientConfig().getProperty(x509Util.getSslKeystoreLocationProperty()));
assertEquals("Validate that expected clientConfig is set in ZK config", keystorePassword,
zk.getClientConfig().getProperty(x509Util.getSslKeystorePasswdProperty()));
assertEquals("Validate that expected clientConfig is set in ZK config", truststoreLocation,
zk.getClientConfig().getProperty(x509Util.getSslTruststoreLocationProperty()));
assertEquals("Validate that expected clientConfig is set in ZK config", truststorePassword,
zk.getClientConfig().getProperty(x509Util.getSslTruststorePasswdProperty()));
}
//testing if constant values hardcoded into the code are set properly
assertEquals("Validate that expected clientConfig is set in ZK config", Boolean.TRUE.toString(),
zk.getClientConfig().getProperty(ZKClientConfig.SECURE_CLIENT));
assertEquals("Validate that expected clientConfig is set in ZK config",
ClientCnxnSocketNetty.class.getCanonicalName(),
zk.getClientConfig().getProperty(ZKClientConfig.ZOOKEEPER_CLIENT_CNXN_SOCKET));
}
@Test
public void testTruststoreKeystoreConfiguration() {
LOG.info("Entered to the testTruststoreKeystoreConfiguration test case.");
/*
By default the truststore/keystore configurations are not set, hence the values are null.
Validate that the null values are converted into empty strings by the class.
*/
Configuration conf = new Configuration();
ZKCuratorManager.TruststoreKeystore truststoreKeystore =
new ZKCuratorManager.TruststoreKeystore(conf);
assertEquals("Validate that null value is converted to empty string.", "",
truststoreKeystore.getKeystoreLocation());
assertEquals("Validate that null value is converted to empty string.", "",
truststoreKeystore.getKeystorePassword());
assertEquals("Validate that null value is converted to empty string.", "",
truststoreKeystore.getTruststoreLocation());
assertEquals("Validate that null value is converted to empty string.", "",
truststoreKeystore.getTruststorePassword());
//Validate that non-null values will remain intact
conf.set(CommonConfigurationKeys.ZK_SSL_KEYSTORE_LOCATION, "/keystore.jks");
conf.set(CommonConfigurationKeys.ZK_SSL_KEYSTORE_PASSWORD, "keystorePassword");
conf.set(CommonConfigurationKeys.ZK_SSL_TRUSTSTORE_LOCATION, "/truststore.jks");
conf.set(CommonConfigurationKeys.ZK_SSL_TRUSTSTORE_PASSWORD, "truststorePassword");
ZKCuratorManager.TruststoreKeystore truststoreKeystore1 =
new ZKCuratorManager.TruststoreKeystore(conf);
assertEquals("Validate that non-null value kept intact.", "/keystore.jks",
truststoreKeystore1.getKeystoreLocation());
assertEquals("Validate that null value is converted to empty string.", "keystorePassword",
truststoreKeystore1.getKeystorePassword());
assertEquals("Validate that null value is converted to empty string.", "/truststore.jks",
truststoreKeystore1.getTruststoreLocation());
assertEquals("Validate that null value is converted to empty string.", "truststorePassword",
truststoreKeystore1.getTruststorePassword());
}
}
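For orientation, here is a minimal usage sketch (not part of the test) of how ZKCuratorManager is driven once the secure configuration above is in place. The endpoint address and znode path are illustrative, the snippet assumes an enclosing method that may throw Exception, and create()/exists() are the manager's convenience helpers.

// assumes: a TLS-enabled ZooKeeper endpoint at localhost:2281 (illustrative)
Configuration conf = TestSecureZKCuratorManager.setUpSecureConfig();
conf.set(CommonConfigurationKeys.ZK_ADDRESS, "localhost:2281");
ZKCuratorManager curator = new ZKCuratorManager(conf);
curator.start(new ArrayList<>(), true); // true enables the SSL client socket
try {
  curator.create("/demo"); // create the znode if absent
  LOG.info("znode exists: {}", curator.exists("/demo"));
} finally {
  curator.close();
}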


@ -22,10 +22,8 @@ import java.io.Closeable;
import java.io.IOException;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.util.Preconditions;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -39,7 +37,6 @@ import org.apache.hadoop.test.AbstractHadoopTestBase;
import static org.apache.hadoop.fs.statistics.IOStatisticAssertions.extractStatistics;
import static org.apache.hadoop.test.LambdaTestUtils.intercept;
import static org.apache.hadoop.util.functional.RemoteIterators.*;
import static org.apache.hadoop.util.functional.RemoteIterators.haltableRemoteIterator;
import static org.assertj.core.api.Assertions.assertThat;
/**
@ -290,44 +287,6 @@ public class TestRemoteIterators extends AbstractHadoopTestBase {
}
@Test
public void testHaltableIterator() throws Throwable {
final int limit = 4;
AtomicInteger count = new AtomicInteger(limit);
// a countdown of 10, but the halting predicate will fail earlier
// if the value of "count" has dropped to zero
final RemoteIterator<Long> it =
haltableRemoteIterator(
rangeExcludingIterator(0, 10),
() -> count.get() > 0);
verifyInvoked(it, limit, (v) -> count.decrementAndGet());
}
@Test
public void testHaltableIteratorNoHalt() throws Throwable {
// a range of 10 where the halting predicate always returns true,
// so the iterator runs through the full range
final int finish = 10;
final RemoteIterator<Long> it =
haltableRemoteIterator(
rangeExcludingIterator(0, finish),
() -> true);
verifyInvoked(it, finish);
}
@Test
public void testRangeExcludingIterator() throws Throwable {
verifyInvoked(rangeExcludingIterator(0, 0), 0);
verifyInvoked(rangeExcludingIterator(0, -1), 0);
verifyInvoked(rangeExcludingIterator(0, 100), 100);
intercept(NoSuchElementException.class, () ->
rangeExcludingIterator(0, 0).next());
}
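The removed tests above exercise haltableRemoteIterator(source, predicate), which wraps a source iterator and stops yielding as soon as the predicate turns false. A self-contained sketch of that wrapping technique follows; the interface and class names are simplified stand-ins, not Hadoop's actual RemoteIterators code.

import java.io.IOException;
import java.util.NoSuchElementException;
import java.util.function.Supplier;

interface SimpleRemoteIterator<T> {
  boolean hasNext() throws IOException;
  T next() throws IOException;
}

final class Haltable {
  static <T> SimpleRemoteIterator<T> haltable(
      SimpleRemoteIterator<T> source, Supplier<Boolean> shouldContinue) {
    return new SimpleRemoteIterator<T>() {
      @Override
      public boolean hasNext() throws IOException {
        // consult the predicate before the source, so iteration can end
        // early even while the source still has elements
        return shouldContinue.get() && source.hasNext();
      }

      @Override
      public T next() throws IOException {
        if (!hasNext()) {
          throw new NoSuchElementException();
        }
        return source.next();
      }
    };
  }
}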
/**
* assert that the string value of an object contains the
* expected text.
@ -368,19 +327,6 @@ public class TestRemoteIterators extends AbstractHadoopTestBase {
.isEqualTo(length);
}
/**
* Verify that the iteration completes with a given invocation count.
* @param it iterator
* @param <T> type.
* @param length expected size
*/
protected <T> void verifyInvoked(
final RemoteIterator<T> it,
final int length)
throws IOException {
verifyInvoked(it, length, (t) -> { });
}
/**
* Close an iterator if it is iterable.
* @param it iterator


@ -32,6 +32,7 @@ import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.hadoop.conf.Configuration;
@ -245,29 +246,29 @@ public class RegistryCli extends Configured implements Tool, Closeable {
}
public int bind(String[] args) {
Option rest = Option.builder("rest").argName("rest")
Option rest = OptionBuilder.withArgName("rest")
.hasArg()
.desc("rest Option")
.build();
Option webui = Option.builder("webui").argName("webui")
.withDescription("rest Option")
.create("rest");
Option webui = OptionBuilder.withArgName("webui")
.hasArg()
.desc("webui Option")
.build();
Option inet = Option.builder("inet").argName("inet")
.desc("inet Option")
.build();
Option port = Option.builder("p").argName("port")
.withDescription("webui Option")
.create("webui");
Option inet = OptionBuilder.withArgName("inet")
.withDescription("inet Option")
.create("inet");
Option port = OptionBuilder.withArgName("port")
.hasArg()
.desc("port to listen on [9999]")
.build();
Option host = Option.builder("h").argName("host")
.withDescription("port to listen on [9999]")
.create("p");
Option host = OptionBuilder.withArgName("host")
.hasArg()
.desc("host name")
.build();
Option apiOpt = Option.builder("api").argName("api")
.withDescription("host name")
.create("h");
Option apiOpt = OptionBuilder.withArgName("api")
.hasArg()
.desc("api")
.build();
.withDescription("api")
.create("api");
Options inetOption = new Options();
inetOption.addOption(inet);
inetOption.addOption(port);
@ -411,9 +412,9 @@ public class RegistryCli extends Configured implements Tool, Closeable {
@SuppressWarnings("unchecked")
public int rm(String[] args) {
Option recursive = Option.builder("r").argName("recursive")
.desc("delete recursively")
.build();
Option recursive = OptionBuilder.withArgName("recursive")
.withDescription("delete recursively")
.create("r");
Options rmOption = new Options();
rmOption.addOption(recursive);
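These RegistryCli hunks swap the fluent Option.builder(...) API for the older static OptionBuilder, consistent with moving back to an older commons-cli line. For reference, the two styles construct equivalent options; this side-by-side compiles only against commons-cli 1.3+, where both APIs coexist, and reuses the "p"/"port" option from the hunk above.

import org.apache.commons.cli.Option;
import org.apache.commons.cli.OptionBuilder;

public class CliOptionStyles {
  // newer fluent builder (commons-cli 1.3+)
  static Option portNew() {
    return Option.builder("p")
        .argName("port")
        .hasArg()
        .desc("port to listen on [9999]")
        .build();
  }

  // older static builder, deprecated in later releases; it is stateful
  // and not thread-safe, which is why the fluent API replaced it
  @SuppressWarnings("static-access")
  static Option portOld() {
    return OptionBuilder.withArgName("port")
        .hasArg()
        .withDescription("port to listen on [9999]")
        .create("p");
  }
}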


@ -349,8 +349,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
if (numResponseToDrop > 0) {
// This case is used for testing.
LOG.warn("{} is set to {} , this hacked client will proactively drop responses",
DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, numResponseToDrop);
LOG.warn(DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
+ " is set to " + numResponseToDrop
+ ", this hacked client will proactively drop responses");
proxyInfo = NameNodeProxiesClient.createProxyWithLossyRetryHandler(conf,
nameNodeUri, ClientProtocol.class, numResponseToDrop,
nnFallbackToSimpleAuth);
@ -377,9 +378,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
conf.getTrimmedStrings(DFS_CLIENT_LOCAL_INTERFACES);
localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
LOG.debug("Using local interfaces [{}] with addresses [{}]",
Joiner.on(',').join(localInterfaces),
Joiner.on(',').join(localInterfaceAddrs));
LOG.debug("Using local interfaces [" +
Joiner.on(',').join(localInterfaces)+ "] with addresses [" +
Joiner.on(',').join(localInterfaceAddrs) + "]");
}
Boolean readDropBehind =
@ -622,9 +623,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
// Abort if the lease has already expired.
final long elapsed = Time.monotonicNow() - getLastLeaseRenewal();
if (elapsed > dfsClientConf.getleaseHardLimitPeriod()) {
LOG.warn("Failed to renew lease for {} for {} seconds (>= hard-limit ={} seconds.) "
+ "Closing all files being written ...", clientName, (elapsed/1000),
(dfsClientConf.getleaseHardLimitPeriod() / 1000), e);
LOG.warn("Failed to renew lease for " + clientName + " for "
+ (elapsed/1000) + " seconds (>= hard-limit ="
+ (dfsClientConf.getleaseHardLimitPeriod() / 1000) + " seconds.) "
+ "Closing all files being written ...", e);
closeAllFilesBeingWritten(true);
} else {
// Let the lease renewer handle it and retry.
@ -662,8 +664,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
out.close();
}
} catch(IOException ie) {
LOG.error("Failed to {} file: {} with renewLeaseKey: {}",
(abort ? "abort" : "close"), out.getSrc(), key, ie);
LOG.error("Failed to " + (abort ? "abort" : "close") + " file: "
+ out.getSrc() + " with renewLeaseKey: " + key, ie);
}
}
}
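The DFSClient hunks above replace SLF4J parameterized logging with plain string concatenation. The practical difference is when the message gets built: with placeholders, formatting is deferred until the level is known to be enabled. A small illustration; the logger name is illustrative.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoggingStyles {
  private static final Logger LOG = LoggerFactory.getLogger(LoggingStyles.class);

  static void demo(String key, int dropped) {
    // parameterized: the message is only formatted if WARN is enabled
    LOG.warn("{} is set to {}, this hacked client will proactively drop responses",
        key, dropped);
    // concatenation: the string is always built before the call
    LOG.warn(key + " is set to " + dropped
        + ", this hacked client will proactively drop responses");
  }
}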
@ -755,9 +757,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
namenode.getDelegationToken(renewer);
if (token != null) {
token.setService(this.dtService);
LOG.info("Created {}", DelegationTokenIdentifier.stringifyToken(token));
LOG.info("Created " + DelegationTokenIdentifier.stringifyToken(token));
} else {
LOG.info("Cannot get delegation token from {}", renewer);
LOG.info("Cannot get delegation token from " + renewer);
}
return token;
}
@ -773,7 +775,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
@Deprecated
public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
throws IOException {
LOG.info("Renewing {}", DelegationTokenIdentifier.stringifyToken(token));
LOG.info("Renewing " + DelegationTokenIdentifier.stringifyToken(token));
try {
return token.renew(conf);
} catch (InterruptedException ie) {
@ -793,7 +795,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
@Deprecated
public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
throws IOException {
LOG.info("Cancelling {}", DelegationTokenIdentifier.stringifyToken(token));
LOG.info("Cancelling " + DelegationTokenIdentifier.stringifyToken(token));
try {
token.cancel(conf);
} catch (InterruptedException ie) {
@ -837,7 +839,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public void cancel(Token<?> token, Configuration conf) throws IOException {
Token<DelegationTokenIdentifier> delToken =
(Token<DelegationTokenIdentifier>) token;
LOG.info("Cancelling {}", DelegationTokenIdentifier.stringifyToken(delToken));
LOG.info("Cancelling " +
DelegationTokenIdentifier.stringifyToken(delToken));
ClientProtocol nn = getNNProxy(delToken, conf);
try {
nn.cancelDelegationToken(delToken);
@ -2053,7 +2056,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
}
}
public static long getStateAtIndex(long[] states, int index) {
private long getStateAtIndex(long[] states, int index) {
return states.length > index ? states[index] : -1;
}
@ -2706,8 +2709,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
try {
reportBadBlocks(lblocks);
} catch (IOException ie) {
LOG.info("Found corruption while reading {}"
+ ". Error repairing corrupt blocks. Bad blocks remain.", file, ie);
LOG.info("Found corruption while reading " + file
+ ". Error repairing corrupt blocks. Bad blocks remain.", ie);
}
}
@ -3087,14 +3090,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
}
}
void updateFileSystemReadStats(int distance, int readBytes, long readTimeMS) {
void updateFileSystemReadStats(int distance, int nRead) {
if (stats != null) {
stats.incrementBytesRead(readBytes);
stats.incrementBytesReadByDistance(distance, readBytes);
if (distance > 0) {
//remote read
stats.increaseRemoteReadTime(readTimeMS);
}
stats.incrementBytesRead(nRead);
stats.incrementBytesReadByDistance(distance, nRead);
}
}


@ -851,9 +851,8 @@ public class DFSInputStream extends FSInputStream
locatedBlocks.getFileLength() - pos);
}
}
long beginReadMS = Time.monotonicNow();
int result = readBuffer(strategy, realLen, corruptedBlocks);
long readTimeMS = Time.monotonicNow() - beginReadMS;
if (result >= 0) {
pos += result;
} else {
@ -862,7 +861,7 @@ public class DFSInputStream extends FSInputStream
}
updateReadStatistics(readStatistics, result, blockReader);
dfsClient.updateFileSystemReadStats(blockReader.getNetworkDistance(),
result, readTimeMS);
result);
if (readStatistics.getBlockType() == BlockType.STRIPED) {
dfsClient.updateFileSystemECReadStats(result);
}
@ -1185,7 +1184,6 @@ public class DFSInputStream extends FSInputStream
ByteBuffer tmp = buf.duplicate();
tmp.limit(tmp.position() + len);
tmp = tmp.slice();
long beginReadMS = Time.monotonicNow();
int nread = 0;
int ret;
while (true) {
@ -1195,12 +1193,11 @@ public class DFSInputStream extends FSInputStream
}
nread += ret;
}
long readTimeMS = Time.monotonicNow() - beginReadMS;
buf.position(buf.position() + nread);
IOUtilsClient.updateReadStatistics(readStatistics, nread, reader);
dfsClient.updateFileSystemReadStats(
reader.getNetworkDistance(), nread, readTimeMS);
reader.getNetworkDistance(), nread);
if (readStatistics.getBlockType() == BlockType.STRIPED) {
dfsClient.updateFileSystemECReadStats(nread);
}


@ -331,17 +331,15 @@ public class DFSStripedInputStream extends DFSInputStream {
* its ThreadLocal.
*
* @param stats striped read stats
* @param readTimeMS read time metrics in ms
*
*/
void updateReadStats(final StripedBlockUtil.BlockReadStats stats, long readTimeMS) {
void updateReadStats(final StripedBlockUtil.BlockReadStats stats) {
if (stats == null) {
return;
}
updateReadStatistics(readStatistics, stats.getBytesRead(),
stats.isShortCircuit(), stats.getNetworkDistance());
dfsClient.updateFileSystemReadStats(stats.getNetworkDistance(),
stats.getBytesRead(), readTimeMS);
stats.getBytesRead());
assert readStatistics.getBlockType() == BlockType.STRIPED;
dfsClient.updateFileSystemECReadStats(stats.getBytesRead());
}
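The DFSInputStream and DFSStripedInputStream hunks remove a read-latency measurement in which the blocking read was bracketed with Time.monotonicNow() and the elapsed time was recorded only for remote reads (network distance greater than zero). A sketch of that pattern follows; apart from Time.monotonicNow(), the names are illustrative stand-ins for the DFSClient statistics plumbing.

import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.util.Time;

public class ReadTimingSketch {
  interface ReadStats {
    void incrementBytesRead(long bytes);
    void increaseRemoteReadTime(long millis);
  }

  static int timedRead(InputStream in, byte[] buf, int distance, ReadStats stats)
      throws IOException {
    long beginMS = Time.monotonicNow();
    int nRead = in.read(buf); // the blocking read being measured
    long elapsedMS = Time.monotonicNow() - beginMS;
    if (nRead > 0) {
      stats.incrementBytesRead(nRead);
      if (distance > 0) {
        // only reads served from a remote node count toward remote read time
        stats.increaseRemoteReadTime(elapsedMS);
      }
    }
    return nRead;
  }
}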

Some files were not shown because too many files have changed in this diff.