Merge branch 'trunk' into HDFS-16792

commit 61a3bd71c5
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+

dev-support/docker/Dockerfile_windows_10
BUILDING.txt
@@ -492,39 +492,66 @@ Building on CentOS 8

 ----------------------------------------------------------------------------------

-Building on Windows
+Building on Windows 10

 ----------------------------------------------------------------------------------
 Requirements:

-* Windows System
+* Windows 10
 * JDK 1.8
-* Maven 3.0 or later
+* Maven 3.0 or later (maven.apache.org)
-* Boost 1.72
+* Boost 1.72 (boost.org)
-* Protocol Buffers 3.7.1
+* Protocol Buffers 3.7.1 (https://github.com/protocolbuffers/protobuf/releases)
-* CMake 3.19 or newer
+* CMake 3.19 or newer (cmake.org)
-* Visual Studio 2010 Professional or Higher
+* Visual Studio 2019 (visualstudio.com)
-* Windows SDK 8.1 (if building CPU rate control for the container executor)
+* Windows SDK 8.1 (optional, if building CPU rate control for the container executor. Get this from
-* zlib headers (if building native code bindings for zlib)
+http://msdn.microsoft.com/en-us/windows/bg162891.aspx)
+* Zlib (zlib.net, if building native code bindings for zlib)
+* Git (preferably, get this from https://git-scm.com/download/win since the package also contains
+Unix command-line tools that are needed during packaging).
+* Python (python.org, for generation of docs using 'mvn site')
 * Internet connection for first build (to fetch all Maven and Hadoop dependencies)
-* Unix command-line tools from GnuWin32: sh, mkdir, rm, cp, tar, gzip. These
-tools must be present on your PATH.
-* Python ( for generation of docs using 'mvn site')

-Unix command-line tools are also included with the Windows Git package which
-can be downloaded from http://git-scm.com/downloads

-If using Visual Studio, it must be Professional level or higher.
-Do not use Visual Studio Express. It does not support compiling for 64-bit,
-which is problematic if running a 64-bit system.

-The Windows SDK 8.1 is available to download at:

-http://msdn.microsoft.com/en-us/windows/bg162891.aspx

-Cygwin is not required.

 ----------------------------------------------------------------------------------

+Building guidelines:

+Hadoop repository provides the Dockerfile for building Hadoop on Windows 10, located at
+dev-support/docker/Dockerfile_windows_10. It is highly recommended to use this and create the
+Docker image for building Hadoop on Windows 10, since you don't have to install anything else
+other than Docker and no additional steps are required in terms of aligning the environment with
+the necessary paths etc.

+However, if you still prefer taking the route of not using Docker, this Dockerfile_windows_10 will
+still be immensely useful as a raw guide for all the steps involved in creating the environment
+needed to build Hadoop on Windows 10.

+Building using the Docker:
+We first need to build the Docker image for building Hadoop on Windows 10. Run this command from
+the root of the Hadoop repository.
+> docker build -t hadoop-windows-10-builder -f .\dev-support\docker\Dockerfile_windows_10 .\dev-support\docker\

+Start the container with the image that we just built.
+> docker run --rm -it hadoop-windows-10-builder

+You can now clone the Hadoop repo inside this container and proceed with the build.

+NOTE:
+While one may perceive the idea of mounting the locally cloned (on the host filesystem) Hadoop
+repository into the container (using the -v option), we have seen the build to fail owing to some
+files not being able to be located by Maven. Thus, we suggest cloning the Hadoop repository to a
+non-mounted folder inside the container and proceed with the build. When the build is completed,
+you may use the "docker cp" command to copy the built Hadoop tar.gz file from the docker container
+to the host filesystem. If you still would like to mount the Hadoop codebase, a workaround would
+be to copy the mounted Hadoop codebase into another folder (which doesn't point to a mount) in the
+container's filesystem and use this for building.

+However, we noticed no build issues when the Maven repository from the host filesystem was mounted
+into the container. One may use this to greatly reduce the build time. Assuming that the Maven
+repository is located at D:\Maven\Repository in the host filesystem, one can use the following
+command to mount the same onto the default Maven repository location while launching the container.
+> docker run --rm -v D:\Maven\Repository:C:\Users\ContainerAdministrator\.m2\repository -it hadoop-windows-10-builder

 Building:

 Keep the source code tree in a short path to avoid running into problems related

@@ -540,6 +567,24 @@ configure the bit-ness of the build, and set several optional components.
 Several tests require that the user must have the Create Symbolic Links
 privilege.

+To simplify the installation of Boost, Protocol buffers, OpenSSL and Zlib dependencies we can use
+vcpkg (https://github.com/Microsoft/vcpkg.git). Upon cloning the vcpkg repo, checkout the commit
+7ffa425e1db8b0c3edf9c50f2f3a0f25a324541d to get the required versions of the dependencies
+mentioned above.
+> git clone https://github.com/Microsoft/vcpkg.git
+> cd vcpkg
+> git checkout 7ffa425e1db8b0c3edf9c50f2f3a0f25a324541d
+> .\bootstrap-vcpkg.bat
+> .\vcpkg.exe install boost:x64-windows
+> .\vcpkg.exe install protobuf:x64-windows
+> .\vcpkg.exe install openssl:x64-windows
+> .\vcpkg.exe install zlib:x64-windows

+Set the following environment variables -
+(Assuming that vcpkg was checked out at C:\vcpkg)
+> set PROTOBUF_HOME=C:\vcpkg\installed\x64-windows
+> set MAVEN_OPTS=-Xmx2048M -Xss128M

 All Maven goals are the same as described above with the exception that
 native code is built by enabling the 'native-win' Maven profile. -Pnative-win
 is enabled by default when building on Windows since the native components

@@ -557,6 +602,24 @@ the zlib 1.2.7 source tree.

 http://www.zlib.net/


+Build command:
+The following command builds all the modules in the Hadoop project and generates the tar.gz file in
+hadoop-dist/target upon successful build. Run these commands from an
+"x64 Native Tools Command Prompt for VS 2019" which can be found under "Visual Studio 2019" in the
+Windows start menu. If you're using the Docker image from Dockerfile_windows_10, you'll be
+logged into "x64 Native Tools Command Prompt for VS 2019" automatically when you start the
+container.

+> set classpath=
+> set PROTOBUF_HOME=C:\vcpkg\installed\x64-windows
+> mvn clean package -Dhttps.protocols=TLSv1.2 -DskipTests -DskipDocs -Pnative-win,dist^
+-Drequire.openssl -Drequire.test.libhadoop -Pyarn-ui -Dshell-executable=C:\Git\bin\bash.exe^
+-Dtar -Dopenssl.prefix=C:\vcpkg\installed\x64-windows^
+-Dcmake.prefix.path=C:\vcpkg\installed\x64-windows^
+-Dwindows.cmake.toolchain.file=C:\vcpkg\scripts\buildsystems\vcpkg.cmake -Dwindows.cmake.build.type=RelWithDebInfo^
+-Dwindows.build.hdfspp.dll=off -Dwindows.no.sasl=on -Duse.platformToolsetVersion=v142

 ----------------------------------------------------------------------------------
 Building distributions:

@@ -215,17 +215,17 @@ com.aliyun:aliyun-java-sdk-ecs:4.2.0
 com.aliyun:aliyun-java-sdk-ram:3.0.0
 com.aliyun:aliyun-java-sdk-sts:3.0.0
 com.aliyun.oss:aliyun-sdk-oss:3.13.2
-com.amazonaws:aws-java-sdk-bundle:1.12.262
+com.amazonaws:aws-java-sdk-bundle:1.12.316
 com.cedarsoftware:java-util:1.9.0
 com.cedarsoftware:json-io:2.5.1
 com.fasterxml.jackson.core:jackson-annotations:2.12.7
 com.fasterxml.jackson.core:jackson-core:2.12.7
-com.fasterxml.jackson.core:jackson-databind:2.12.7
+com.fasterxml.jackson.core:jackson-databind:2.12.7.1
 com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:2.12.7
 com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.12.7
 com.fasterxml.jackson.module:jackson-module-jaxb-annotations:2.12.7
 com.fasterxml.uuid:java-uuid-generator:3.1.4
-com.fasterxml.woodstox:woodstox-core:5.3.0
+com.fasterxml.woodstox:woodstox-core:5.4.0
 com.github.davidmoten:rxjava-extras:0.8.0.17
 com.github.stephenc.jcip:jcip-annotations:1.0-1
 com.google:guice:4.0

@@ -241,17 +241,17 @@ com.google.guava:guava:27.0-jre
 com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
 com.microsoft.azure:azure-storage:7.0.0
 com.nimbusds:nimbus-jose-jwt:9.8.1
-com.squareup.okhttp3:okhttp:4.9.3
+com.squareup.okhttp3:okhttp:4.10.0
-com.squareup.okio:okio:1.6.0
+com.squareup.okio:okio:3.2.0
 com.zaxxer:HikariCP:4.0.3
-commons-beanutils:commons-beanutils:1.9.3
+commons-beanutils:commons-beanutils:1.9.4
 commons-cli:commons-cli:1.2
 commons-codec:commons-codec:1.11
 commons-collections:commons-collections:3.2.2
 commons-daemon:commons-daemon:1.0.13
 commons-io:commons-io:2.8.0
 commons-logging:commons-logging:1.1.3
-commons-net:commons-net:3.8.0
+commons-net:commons-net:3.9.0
 de.ruedigermoeller:fst:2.50
 io.grpc:grpc-api:1.26.0
 io.grpc:grpc-context:1.26.0

@@ -260,7 +260,6 @@ io.grpc:grpc-netty:1.26.0
 io.grpc:grpc-protobuf:1.26.0
 io.grpc:grpc-protobuf-lite:1.26.0
 io.grpc:grpc-stub:1.26.0
-io.netty:netty:3.10.6.Final
 io.netty:netty-all:4.1.77.Final
 io.netty:netty-buffer:4.1.77.Final
 io.netty:netty-codec:4.1.77.Final

@@ -306,11 +305,11 @@ org.apache.avro:avro:1.9.2
 org.apache.commons:commons-collections4:4.2
 org.apache.commons:commons-compress:1.21
 org.apache.commons:commons-configuration2:2.8.0
-org.apache.commons:commons-csv:1.0
+org.apache.commons:commons-csv:1.9.0
 org.apache.commons:commons-digester:1.8.1
 org.apache.commons:commons-lang3:3.12.0
 org.apache.commons:commons-math3:3.6.1
-org.apache.commons:commons-text:1.9
+org.apache.commons:commons-text:1.10.0
 org.apache.commons:commons-validator:1.6
 org.apache.curator:curator-client:5.2.0
 org.apache.curator:curator-framework:5.2.0

@@ -324,7 +323,7 @@ org.apache.htrace:htrace-core:3.1.0-incubating
 org.apache.htrace:htrace-core4:4.1.0-incubating
 org.apache.httpcomponents:httpclient:4.5.6
 org.apache.httpcomponents:httpcore:4.4.10
-org.apache.kafka:kafka-clients:2.8.1
+org.apache.kafka:kafka-clients:2.8.2
 org.apache.kerby:kerb-admin:2.0.2
 org.apache.kerby:kerb-client:2.0.2
 org.apache.kerby:kerb-common:2.0.2

@@ -343,7 +342,7 @@ org.apache.kerby:token-provider:2.0.2
 org.apache.solr:solr-solrj:8.8.2
 org.apache.yetus:audience-annotations:0.5.0
 org.apache.zookeeper:zookeeper:3.6.3
-org.codehaus.jettison:jettison:1.1
+org.codehaus.jettison:jettison:1.5.3
 org.eclipse.jetty:jetty-annotations:9.4.48.v20220622
 org.eclipse.jetty:jetty-http:9.4.48.v20220622
 org.eclipse.jetty:jetty-io:9.4.48.v20220622

@@ -362,8 +361,8 @@ org.ehcache:ehcache:3.3.1
 org.lz4:lz4-java:1.7.1
 org.objenesis:objenesis:2.6
 org.xerial.snappy:snappy-java:1.0.5
-org.yaml:snakeyaml:1.32
+org.yaml:snakeyaml:1.33
-org.wildfly.openssl:wildfly-openssl:1.0.7.Final
+org.wildfly.openssl:wildfly-openssl:1.1.3.Final


 --------------------------------------------------------------------------------

@@ -427,7 +426,7 @@ hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/bootstrap.min.js
 hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js
 hadoop-tools/hadoop-sls/src/main/html/css/bootstrap.min.css
 hadoop-tools/hadoop-sls/src/main/html/css/bootstrap-responsive.min.css
-hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/*
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.11.5/*
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jt/jquery.jstree.js
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/TERMINAL

@@ -435,7 +434,7 @@ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanage
 bootstrap v3.3.6
 broccoli-asset-rev v2.4.2
 broccoli-funnel v1.0.1
-datatables v1.10.19
+datatables v1.11.5
 em-helpers v0.5.13
 em-table v0.1.6
 ember v2.2.0

@@ -523,7 +522,7 @@ junit:junit:4.13.2
 HSQL License
 ------------

-org.hsqldb:hsqldb:2.5.2
+org.hsqldb:hsqldb:2.7.1


 JDOM License

@@ -252,7 +252,7 @@ hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/bootstrap.min.js
 hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js
 hadoop-tools/hadoop-sls/src/main/html/css/bootstrap.min.css
 hadoop-tools/hadoop-sls/src/main/html/css/bootstrap-responsive.min.css
-hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/*
+hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.11.5/*
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jt/jquery.jstree.js
 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/TERMINAL
@@ -0,0 +1,81 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Dockerfile for installing the necessary dependencies for building Hadoop.
+# See BUILDING.txt.
+
+FROM mcr.microsoft.com/windows:ltsc2019
+
+# Need to disable the progress bar for speeding up the downloads.
+# hadolint ignore=SC2086
+RUN powershell $Global:ProgressPreference = 'SilentlyContinue'
+
+# Restore the default Windows shell for correct batch processing.
+SHELL ["cmd", "/S", "/C"]
+
+# Install Visual Studio 2019 Build Tools.
+RUN curl -SL --output vs_buildtools.exe https://aka.ms/vs/16/release/vs_buildtools.exe \
+&& (start /w vs_buildtools.exe --quiet --wait --norestart --nocache \
+--installPath "%ProgramFiles(x86)%\Microsoft Visual Studio\2019\BuildTools" \
+--add Microsoft.VisualStudio.Workload.VCTools \
+--add Microsoft.VisualStudio.Component.VC.ASAN \
+--add Microsoft.VisualStudio.Component.VC.Tools.x86.x64 \
+--add Microsoft.VisualStudio.Component.Windows10SDK.19041 \
+|| IF "%ERRORLEVEL%"=="3010" EXIT 0) \
+&& del /q vs_buildtools.exe
+
+# Install Chocolatey.
+RUN powershell -NoProfile -ExecutionPolicy Bypass -Command "iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))"
+RUN setx PATH "%PATH%;%ALLUSERSPROFILE%\chocolatey\bin"
+
+# Install git.
+RUN choco install git.install -y
+RUN powershell Copy-Item -Recurse -Path 'C:\Program Files\Git' -Destination C:\Git
+
+# Install vcpkg.
+# hadolint ignore=DL3003
+RUN powershell git clone https://github.com/microsoft/vcpkg.git \
+&& cd vcpkg \
+&& git checkout 7ffa425e1db8b0c3edf9c50f2f3a0f25a324541d \
+&& .\bootstrap-vcpkg.bat
+RUN powershell .\vcpkg\vcpkg.exe install boost:x64-windows
+RUN powershell .\vcpkg\vcpkg.exe install protobuf:x64-windows
+RUN powershell .\vcpkg\vcpkg.exe install openssl:x64-windows
+RUN powershell .\vcpkg\vcpkg.exe install zlib:x64-windows
+ENV PROTOBUF_HOME "C:\vcpkg\installed\x64-windows"
+
+# Install Azul Java 8 JDK.
+RUN powershell Invoke-WebRequest -URI https://cdn.azul.com/zulu/bin/zulu8.62.0.19-ca-jdk8.0.332-win_x64.zip -OutFile $Env:TEMP\zulu8.62.0.19-ca-jdk8.0.332-win_x64.zip
+RUN powershell Expand-Archive -Path $Env:TEMP\zulu8.62.0.19-ca-jdk8.0.332-win_x64.zip -DestinationPath "C:\Java"
+ENV JAVA_HOME "C:\Java\zulu8.62.0.19-ca-jdk8.0.332-win_x64"
+RUN setx PATH "%PATH%;%JAVA_HOME%\bin"
+
+# Install Apache Maven.
+RUN powershell Invoke-WebRequest -URI https://dlcdn.apache.org/maven/maven-3/3.8.6/binaries/apache-maven-3.8.6-bin.zip -OutFile $Env:TEMP\apache-maven-3.8.6-bin.zip
+RUN powershell Expand-Archive -Path $Env:TEMP\apache-maven-3.8.6-bin.zip -DestinationPath "C:\Maven"
+RUN setx PATH "%PATH%;C:\Maven\apache-maven-3.8.6\bin"
+ENV MAVEN_OPTS '-Xmx2048M -Xss128M'
+
+# Install CMake 3.19.0.
+RUN powershell Invoke-WebRequest -URI https://cmake.org/files/v3.19/cmake-3.19.0-win64-x64.zip -OutFile $Env:TEMP\cmake-3.19.0-win64-x64.zip
+RUN powershell Expand-Archive -Path $Env:TEMP\cmake-3.19.0-win64-x64.zip -DestinationPath "C:\CMake"
+RUN setx PATH "%PATH%;C:\CMake\cmake-3.19.0-win64-x64\bin"
+
+# We get strange Javadoc errors without this.
+RUN setx classpath ""
+
+# Define the entry point for the docker container.
+ENTRYPOINT ["C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\BuildTools\\VC\\Auxiliary\\Build\\vcvars64.bat", "&&", "cmd.exe"]
@@ -148,6 +148,7 @@
 <!-- Leave javax APIs that are stable -->
 <!-- the jdk ships part of the javax.annotation namespace, so if we want to relocate this we'll have to care it out by class :( -->
 <exclude>com.google.code.findbugs:jsr305</exclude>
+<exclude>io.netty:*</exclude>
 <exclude>io.dropwizard.metrics:metrics-core</exclude>
 <exclude>org.eclipse.jetty:jetty-servlet</exclude>
 <exclude>org.eclipse.jetty:jetty-security</exclude>

@@ -156,6 +157,8 @@
 <exclude>org.bouncycastle:*</exclude>
 <!-- Leave snappy that includes native methods which cannot be relocated. -->
 <exclude>org.xerial.snappy:*</exclude>
+<!-- leave out kotlin classes -->
+<exclude>org.jetbrains.kotlin:*</exclude>
 </excludes>
 </artifactSet>
 <filters>
@@ -127,11 +127,6 @@
 <artifactId>hadoop-azure-datalake</artifactId>
 <scope>compile</scope>
 </dependency>
-<dependency>
-<groupId>org.apache.hadoop</groupId>
-<artifactId>hadoop-openstack</artifactId>
-<scope>compile</scope>
-</dependency>
 <dependency>
 <groupId>org.apache.hadoop</groupId>
 <artifactId>hadoop-cos</artifactId>
@@ -18,6 +18,10 @@

 package org.apache.hadoop.util;

+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.util.Arrays;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;

@@ -33,10 +37,10 @@ public class PlatformName {
 * per the java-vm.
 */
 public static final String PLATFORM_NAME =
-(System.getProperty("os.name").startsWith("Windows")
+(System.getProperty("os.name").startsWith("Windows") ?
-? System.getenv("os") : System.getProperty("os.name"))
+System.getenv("os") : System.getProperty("os.name"))
-+ "-" + System.getProperty("os.arch")
++ "-" + System.getProperty("os.arch") + "-"
-+ "-" + System.getProperty("sun.arch.data.model");
++ System.getProperty("sun.arch.data.model");

 /**
 * The java vendor name used in this platform.

@@ -44,10 +48,60 @@ public class PlatformName {
 public static final String JAVA_VENDOR_NAME = System.getProperty("java.vendor");

 /**
-* A public static variable to indicate the current java vendor is
+* Define a system class accessor that is open to changes in underlying implementations
-* IBM java or not.
+* of the system class loader modules.
 */
-public static final boolean IBM_JAVA = JAVA_VENDOR_NAME.contains("IBM");
+private static final class SystemClassAccessor extends ClassLoader {
+public Class<?> getSystemClass(String className) throws ClassNotFoundException {
+return findSystemClass(className);
+}
+}
+
+/**
+* A public static variable to indicate the current java vendor is
+* IBM and the type is Java Technology Edition which provides its
+* own implementations of many security packages and Cipher suites.
+* Note that these are not provided in Semeru runtimes:
+* See https://developer.ibm.com/languages/java/semeru-runtimes for details.
+*/
+public static final boolean IBM_JAVA = JAVA_VENDOR_NAME.contains("IBM") &&
+hasIbmTechnologyEditionModules();
+
+private static boolean hasIbmTechnologyEditionModules() {
+return Arrays.asList(
+"com.ibm.security.auth.module.JAASLoginModule",
+"com.ibm.security.auth.module.Win64LoginModule",
+"com.ibm.security.auth.module.NTLoginModule",
+"com.ibm.security.auth.module.AIX64LoginModule",
+"com.ibm.security.auth.module.LinuxLoginModule",
+"com.ibm.security.auth.module.Krb5LoginModule"
+).stream().anyMatch((module) -> isSystemClassAvailable(module));
+}
+
+/**
+* In rare cases where different behaviour is performed based on the JVM vendor
+* this method should be used to test for a unique JVM class provided by the
+* vendor rather than using the vendor method. For example if on JVM provides a
+* different Kerberos login module testing for that login module being loadable
+* before configuring to use it is preferable to using the vendor data.
+*
+* @param className the name of a class in the JVM to test for
+* @return true if the class is available, false otherwise.
+*/
+private static boolean isSystemClassAvailable(String className) {
+return AccessController.doPrivileged((PrivilegedAction<Boolean>) () -> {
+try {
+// Using ClassLoader.findSystemClass() instead of
+// Class.forName(className, false, null) because Class.forName with a null
+// ClassLoader only looks at the boot ClassLoader with Java 9 and above
+// which doesn't look at all the modules available to the findSystemClass.
+new SystemClassAccessor().getSystemClass(className);
+return true;
+} catch (Exception ignored) {
+return false;
+}
+});
+}
+
 public static void main(String[] args) {
 System.out.println(PLATFORM_NAME);
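The javadoc added above recommends probing for a vendor-specific class instead of branching on the vendor string alone. A minimal, self-contained sketch of that pattern follows; the helper class and the fallback module name are illustrative assumptions, not code from this patch, and unlike the patch it probes the application class loader rather than going through ClassLoader.findSystemClass():

    // Sketch only: choose a Kerberos login module by probing for the IBM-specific
    // class and falling back to the standard JDK module when it is absent.
    public final class LoginModuleProbe {
      private LoginModuleProbe() {
      }

      static boolean classAvailable(String className) {
        try {
          // Probe without initializing the class; a missing class or linkage
          // problem simply means "not available" here.
          Class.forName(className, false, LoginModuleProbe.class.getClassLoader());
          return true;
        } catch (ClassNotFoundException | LinkageError e) {
          return false;
        }
      }

      public static String krb5LoginModuleName() {
        return classAvailable("com.ibm.security.auth.module.Krb5LoginModule")
            ? "com.ibm.security.auth.module.Krb5LoginModule"
            : "com.sun.security.auth.module.Krb5LoginModule";
      }
    }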
@@ -24,7 +24,7 @@ This filter must be configured in front of all the web application resources tha

 The Hadoop Auth and dependent JAR files must be in the web application classpath (commonly the `WEB-INF/lib` directory).

-Hadoop Auth uses SLF4J-API for logging. Auth Maven POM dependencies define the SLF4J API dependency but it does not define the dependency on a concrete logging implementation, this must be addded explicitly to the web application. For example, if the web applicationan uses Log4j, the SLF4J-LOG4J12 and LOG4J jar files must be part part of the web application classpath as well as the Log4j configuration file.
+Hadoop Auth uses SLF4J-API for logging. Auth Maven POM dependencies define the SLF4J API dependency but it does not define the dependency on a concrete logging implementation, this must be addded explicitly to the web application. For example, if the web applicationan uses Log4j, the SLF4J-LOG4J12 and LOG4J jar files must be part of the web application classpath as well as the Log4j configuration file.

 ### Common Configuration parameters

@@ -379,21 +379,6 @@
 <Bug code="JLM" />
 </Match>

-<!--
-OpenStack Swift FS module -closes streams in a different method
-from where they are opened.
--->
-<Match>
-<Class name="org.apache.hadoop.fs.swift.snative.SwiftNativeOutputStream"/>
-<Method name="uploadFileAttempt"/>
-<Bug pattern="OBL_UNSATISFIED_OBLIGATION"/>
-</Match>
-<Match>
-<Class name="org.apache.hadoop.fs.swift.snative.SwiftNativeOutputStream"/>
-<Method name="uploadFilePartAttempt"/>
-<Bug pattern="OBL_UNSATISFIED_OBLIGATION"/>
-</Match>

 <!-- code from maven source, null value is checked at callee side. -->
 <Match>
 <Class name="org.apache.hadoop.util.ComparableVersion$ListItem" />
@@ -383,6 +383,11 @@
 <artifactId>mockwebserver</artifactId>
 <scope>test</scope>
 </dependency>
+<dependency>
+<groupId>com.squareup.okio</groupId>
+<artifactId>okio-jvm</artifactId>
+<scope>test</scope>
+</dependency>
 <dependency>
 <groupId>dnsjava</groupId>
 <artifactId>dnsjava</artifactId>
@@ -24,7 +24,6 @@ import com.ctc.wstx.io.SystemId;
 import com.ctc.wstx.stax.WstxInputFactory;
 import com.fasterxml.jackson.core.JsonFactory;
 import com.fasterxml.jackson.core.JsonGenerator;
-import org.apache.hadoop.classification.VisibleForTesting;

 import java.io.BufferedInputStream;
 import java.io.DataInput;

@@ -87,6 +86,7 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
 import org.apache.commons.collections.map.UnmodifiableMap;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;

@@ -98,18 +98,19 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.alias.CredentialProvider;
 import org.apache.hadoop.security.alias.CredentialProvider.CredentialEntry;
 import org.apache.hadoop.security.alias.CredentialProviderFactory;
+import org.apache.hadoop.thirdparty.com.google.common.base.Strings;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringInterner;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.XMLUtils;
+
 import org.codehaus.stax2.XMLStreamReader2;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;

-import org.apache.hadoop.util.Preconditions;
-import org.apache.hadoop.thirdparty.com.google.common.base.Strings;

 import static org.apache.commons.lang3.StringUtils.isBlank;
 import static org.apache.commons.lang3.StringUtils.isNotBlank;

@@ -3604,7 +3605,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
 try {
 DOMSource source = new DOMSource(doc);
 StreamResult result = new StreamResult(out);
-TransformerFactory transFactory = TransformerFactory.newInstance();
+TransformerFactory transFactory = XMLUtils.newSecureTransformerFactory();
 Transformer transformer = transFactory.newTransformer();

 // Important to not hold Configuration log while writing result, since
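For context, the change above swaps TransformerFactory.newInstance() for XMLUtils.newSecureTransformerFactory(). A rough sketch of the general hardening technique such a helper typically applies is shown below; this is an assumption about the approach, not the actual XMLUtils implementation:

    import javax.xml.XMLConstants;
    import javax.xml.transform.TransformerConfigurationException;
    import javax.xml.transform.TransformerFactory;

    // Sketch: enable secure processing and disable external resource resolution
    // so that DTDs and stylesheets cannot be fetched during transformation.
    public final class SecureXmlSketch {
      private SecureXmlSketch() {
      }

      public static TransformerFactory newSecureTransformerFactory()
          throws TransformerConfigurationException {
        TransformerFactory factory = TransformerFactory.newInstance();
        factory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
        // May throw IllegalArgumentException on very old JAXP implementations.
        factory.setAttribute(XMLConstants.ACCESS_EXTERNAL_DTD, "");
        factory.setAttribute(XMLConstants.ACCESS_EXTERNAL_STYLESHEET, "");
        return factory;
      }
    }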
@@ -639,13 +639,14 @@ public abstract class KeyProvider implements Closeable {
 public abstract void flush() throws IOException;

 /**
-* Split the versionName in to a base name. Converts "/aaa/bbb/3" to
+* Split the versionName in to a base name. Converts "/aaa/bbb@3" to
 * "/aaa/bbb".
 * @param versionName the version name to split
 * @return the base name of the key
 * @throws IOException raised on errors performing I/O.
 */
 public static String getBaseName(String versionName) throws IOException {
+Objects.requireNonNull(versionName, "VersionName cannot be null");
 int div = versionName.lastIndexOf('@');
 if (div == -1) {
 throw new IOException("No version in key path " + versionName);
@@ -60,7 +60,6 @@ public class AvroFSInput implements Closeable, SeekableInput {
 FS_OPTION_OPENFILE_READ_POLICY_SEQUENTIAL)
 .withFileStatus(status)
 .build());
-fc.open(p);
 }

 @Override
@@ -55,6 +55,15 @@ public interface FileRange {
 */
 void setData(CompletableFuture<ByteBuffer> data);

+/**
+* Get any reference passed in to the file range constructor.
+* This is not used by any implementation code; it is to help
+* bind this API to libraries retrieving multiple stripes of
+* data in parallel.
+* @return a reference or null.
+*/
+Object getReference();
+
 /**
 * Factory method to create a FileRange object.
 * @param offset starting offset of the range.

@@ -62,6 +71,17 @@ public interface FileRange {
 * @return a new instance of FileRangeImpl.
 */
 static FileRange createFileRange(long offset, int length) {
-return new FileRangeImpl(offset, length);
+return new FileRangeImpl(offset, length, null);
+}
+
+/**
+* Factory method to create a FileRange object.
+* @param offset starting offset of the range.
+* @param length length of the range.
+* @param reference nullable reference to store in the range.
+* @return a new instance of FileRangeImpl.
+*/
+static FileRange createFileRange(long offset, int length, Object reference) {
+return new FileRangeImpl(offset, length, reference);
 }
 }
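A short sketch of how a caller might use the new reference-carrying factory method together with a vectored read. The readVectored() entry point and the MyStripe type are assumptions for illustration; only createFileRange(), getData() and getReference() come from the interface above:

    import java.nio.ByteBuffer;
    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileRange;

    // Sketch: attach an application-side "stripe" object to each range so the
    // caller can tell which stripe a completed buffer belongs to.
    final class StripedReadExample {

      interface MyStripe {           // hypothetical caller-side type
        void accept(ByteBuffer data);
      }

      static void readStripes(FSDataInputStream in, MyStripe s1, MyStripe s2) throws Exception {
        List<FileRange> ranges = Arrays.asList(
            FileRange.createFileRange(0, 1024, s1),       // reference = first stripe
            FileRange.createFileRange(65536, 1024, s2));  // reference = second stripe
        in.readVectored(ranges, ByteBuffer::allocate);    // assumed vectored-read entry point
        for (FileRange r : ranges) {
          ByteBuffer data = r.getData().join();           // wait for this range to complete
          ((MyStripe) r.getReference()).accept(data);     // recover the caller's object
        }
      }
    }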
@@ -402,7 +402,8 @@ public class FileStatus implements Writable, Comparable<Object>,
 }

 /**
-* Compare this FileStatus to another FileStatus
+* Compare this FileStatus to another FileStatus based on lexicographical
+* order of path.
 * @param o the FileStatus to be compared.
 * @return a negative integer, zero, or a positive integer as this object
 * is less than, equal to, or greater than the specified object.

@@ -412,7 +413,8 @@ public class FileStatus implements Writable, Comparable<Object>,
 }

 /**
-* Compare this FileStatus to another FileStatus.
+* Compare this FileStatus to another FileStatus based on lexicographical
+* order of path.
 * This method was added back by HADOOP-14683 to keep binary compatibility.
 *
 * @param o the FileStatus to be compared.
@@ -21,7 +21,6 @@ import javax.annotation.Nonnull;
 import java.io.Closeable;
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.lang.ref.WeakReference;
 import java.lang.ref.ReferenceQueue;
 import java.net.URI;

@@ -1544,6 +1543,39 @@ public abstract class FileSystem extends Configured
 public abstract FSDataOutputStream append(Path f, int bufferSize,
 Progressable progress) throws IOException;

+/**
+* Append to an existing file (optional operation).
+* @param f the existing file to be appended.
+* @param appendToNewBlock whether to append data to a new block
+* instead of the end of the last partial block
+* @throws IOException IO failure
+* @throws UnsupportedOperationException if the operation is unsupported
+* (default).
+* @return output stream.
+*/
+public FSDataOutputStream append(Path f, boolean appendToNewBlock) throws IOException {
+return append(f, getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
+IO_FILE_BUFFER_SIZE_DEFAULT), null, appendToNewBlock);
+}
+
+/**
+* Append to an existing file (optional operation).
+* This function is used for being overridden by some FileSystem like DistributedFileSystem
+* @param f the existing file to be appended.
+* @param bufferSize the size of the buffer to be used.
+* @param progress for reporting progress if it is not null.
+* @param appendToNewBlock whether to append data to a new block
+* instead of the end of the last partial block
+* @throws IOException IO failure
+* @throws UnsupportedOperationException if the operation is unsupported
+* (default).
+* @return output stream.
+*/
+public FSDataOutputStream append(Path f, int bufferSize,
+Progressable progress, boolean appendToNewBlock) throws IOException {
+return append(f, bufferSize, progress);
+}
+
 /**
 * Concat existing files together.
 * @param trg the path to the target destination.

@@ -3647,11 +3679,7 @@ public abstract class FileSystem extends Configured
 // to construct an instance.
 try (DurationInfo d = new DurationInfo(LOGGER, false,
 "Acquiring creator semaphore for %s", uri)) {
-creatorPermits.acquire();
+creatorPermits.acquireUninterruptibly();
-} catch (InterruptedException e) {
-// acquisition was interrupted; convert to an IOE.
-throw (IOException)new InterruptedIOException(e.toString())
-.initCause(e);
 }
 FileSystem fsToClose = null;
 try {
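A brief usage sketch of the new append overload from client code; the path and payload below are placeholders:

    import java.nio.charset.StandardCharsets;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch: append a record, asking the filesystem to start a new block instead
    // of filling the last partial block. Filesystems that do not override the new
    // overload fall back to the plain append path and ignore the flag.
    final class AppendNewBlockExample {
      static void appendRecord(Configuration conf) throws Exception {
        FileSystem fs = FileSystem.get(conf);
        Path file = new Path("/tmp/example.log");               // placeholder path
        try (FSDataOutputStream out = fs.append(file, true)) {  // true = append to a new block
          out.write("one more record\n".getBytes(StandardCharsets.UTF_8));
        }
      }
    }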
@@ -69,7 +69,7 @@ public class Trash extends Configured {
 * Hence we get the file system of the fully-qualified resolved-path and
 * then move the path p to the trashbin in that volume,
 * @param fs - the filesystem of path p
-* @param p - the path being deleted - to be moved to trasg
+* @param p - the path being deleted - to be moved to trash
 * @param conf - configuration
 * @return false if the item is already in the trash or trash is disabled
 * @throws IOException on error
@@ -307,9 +307,16 @@ public final class VectoredReadUtils {
 FileRange request) {
 int offsetChange = (int) (request.getOffset() - readOffset);
 int requestLength = request.getLength();
+// Create a new buffer that is backed by the original contents
+// The buffer will have position 0 and the same limit as the original one
 readData = readData.slice();
+// Change the offset and the limit of the buffer as the reader wants to see
+// only relevant data
 readData.position(offsetChange);
 readData.limit(offsetChange + requestLength);
+// Create a new buffer after the limit change so that only that portion of the data is
+// returned to the reader.
+readData = readData.slice();
 return readData;
 }

@@ -90,6 +90,11 @@ public final class AuditConstants {
 */
 public static final String PARAM_PROCESS = "ps";

+/**
+* Header: Range for GET request data: {@value}.
+*/
+public static final String PARAM_RANGE = "rg";
+
 /**
 * Task Attempt ID query header: {@value}.
 */
@@ -29,10 +29,10 @@ import java.util.List;
 * together into a single read for efficiency.
 */
 public class CombinedFileRange extends FileRangeImpl {
-private ArrayList<FileRange> underlying = new ArrayList<>();
+private List<FileRange> underlying = new ArrayList<>();

 public CombinedFileRange(long offset, long end, FileRange original) {
-super(offset, (int) (end - offset));
+super(offset, (int) (end - offset), null);
 this.underlying.add(original);
 }

@@ -34,9 +34,21 @@ public class FileRangeImpl implements FileRange {
 private int length;
 private CompletableFuture<ByteBuffer> reader;

-public FileRangeImpl(long offset, int length) {
+/**
+* nullable reference to store in the range.
+*/
+private final Object reference;
+
+/**
+* Create.
+* @param offset offset in file
+* @param length length of data to read.
+* @param reference nullable reference to store in the range.
+*/
+public FileRangeImpl(long offset, int length, Object reference) {
 this.offset = offset;
 this.length = length;
+this.reference = reference;
 }

 @Override

@@ -71,4 +83,9 @@ public class FileRangeImpl implements FileRange {
 public CompletableFuture<ByteBuffer> getData() {
 return reader;
 }

+@Override
+public Object getReference() {
+return reference;
+}
 }
@@ -0,0 +1,97 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.fs.impl;
+
+import java.lang.ref.WeakReference;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.metrics2.MetricsSource;
+
+import static java.util.Objects.requireNonNull;
+
+/**
+* A weak referenced metrics source which avoids hanging on to large objects
+* if somehow they don't get fully closed/cleaned up.
+* The JVM may clean up all objects which are only weakly referenced whenever
+* it does a GC, <i>even if there is no memory pressure</i>.
+* To avoid these refs being removed, always keep a strong reference around
+* somewhere.
+*/
+@InterfaceAudience.Private
+public class WeakRefMetricsSource implements MetricsSource {
+
+/**
+* Name to know when unregistering.
+*/
+private final String name;
+
+/**
+* Underlying metrics source.
+*/
+private final WeakReference<MetricsSource> sourceWeakReference;
+
+/**
+* Constructor.
+* @param name Name to know when unregistering.
+* @param source metrics source
+*/
+public WeakRefMetricsSource(final String name, final MetricsSource source) {
+this.name = name;
+this.sourceWeakReference = new WeakReference<>(requireNonNull(source));
+}
+
+/**
+* If the weak reference is non null, update the metrics.
+* @param collector to contain the resulting metrics snapshot
+* @param all if true, return all metrics even if unchanged.
+*/
+@Override
+public void getMetrics(final MetricsCollector collector, final boolean all) {
+MetricsSource metricsSource = sourceWeakReference.get();
+if (metricsSource != null) {
+metricsSource.getMetrics(collector, all);
+}
+}
+
+/**
+* Name to know when unregistering.
+* @return the name passed in during construction.
+*/
+public String getName() {
+return name;
+}
+
+/**
+* Get the source, will be null if the reference has been GC'd
+* @return the source reference
+*/
+public MetricsSource getSource() {
+return sourceWeakReference.get();
+}
+
+@Override
+public String toString() {
+return "WeakRefMetricsSource{" +
+"name='" + name + '\'' +
+", sourceWeakReference is " +
+(sourceWeakReference.get() == null ? "unset" : "set") +
+'}';
+}
+}
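A sketch of how a weak-referenced source of this kind might be registered and unregistered. The metrics2 registration calls are the standard Hadoop API, but the source name, description and owning class here are assumptions:

    import org.apache.hadoop.fs.impl.WeakRefMetricsSource;
    import org.apache.hadoop.metrics2.MetricsSource;
    import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

    // Sketch: wrap a per-stream MetricsSource in a WeakRefMetricsSource before
    // registering it, so a leaked owner can still be garbage collected. The owner
    // keeps the only strong reference to the real source.
    final class MetricsRegistrationExample {
      private final MetricsSource realSource;  // strong reference held by the owner
      private final String name;

      MetricsRegistrationExample(String name, MetricsSource realSource) {
        this.name = name;                      // assumed unique registration name
        this.realSource = realSource;
        DefaultMetricsSystem.instance().register(
            name, "example statistics", new WeakRefMetricsSource(name, realSource));
      }

      void close() {
        DefaultMetricsSystem.instance().unregisterSource(name);
      }
    }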
@@ -116,7 +116,7 @@ public final class FilePosition {
 readOffset,
 "readOffset",
 startOffset,
-startOffset + bufferData.getBuffer().limit() - 1);
+startOffset + bufferData.getBuffer().limit());

 data = bufferData;
 buffer = bufferData.getBuffer().duplicate();

@@ -182,7 +182,7 @@ public final class FilePosition {
 */
 public boolean isWithinCurrentBuffer(long pos) {
 throwIfInvalidBuffer();
-long bufferEndOffset = bufferStartOffset + buffer.limit() - 1;
+long bufferEndOffset = bufferStartOffset + buffer.limit();
 return (pos >= bufferStartOffset) && (pos <= bufferEndOffset);
 }

@@ -15,6 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Filesystem implementations that allow Hadoop to read directly from
+ * the local file system.
+ */
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Unstable
 package org.apache.hadoop.fs.local;
@@ -333,15 +333,24 @@ class CopyCommands {
    */
   public static class AppendToFile extends CommandWithDestination {
     public static final String NAME = "appendToFile";
-    public static final String USAGE = "<localsrc> ... <dst>";
+    public static final String USAGE = "[-n] <localsrc> ... <dst>";
     public static final String DESCRIPTION =
         "Appends the contents of all the given local files to the " +
         "given dst file. The dst file will be created if it does " +
         "not exist. If <localSrc> is -, then the input is read " +
-        "from stdin.";
+        "from stdin. Option -n represents that use NEW_BLOCK create flag to append file.";

     private static final int DEFAULT_IO_LENGTH = 1024 * 1024;
     boolean readStdin = false;
+    private boolean appendToNewBlock = false;
+
+    public boolean isAppendToNewBlock() {
+      return appendToNewBlock;
+    }
+
+    public void setAppendToNewBlock(boolean appendToNewBlock) {
+      this.appendToNewBlock = appendToNewBlock;
+    }

     // commands operating on local paths have no need for glob expansion
     @Override
@@ -372,6 +381,9 @@ class CopyCommands {
         throw new IOException("missing destination argument");
       }

+      CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "n");
+      cf.parse(args);
+      appendToNewBlock = cf.getOpt("n");
       getRemoteDestination(args);
       super.processOptions(args);
     }
@@ -385,7 +397,8 @@ class CopyCommands {
       }

       InputStream is = null;
-      try (FSDataOutputStream fos = dst.fs.append(dst.path)) {
+      try (FSDataOutputStream fos = appendToNewBlock ?
+          dst.fs.append(dst.path, true) : dst.fs.append(dst.path)) {
         if (readStdin) {
           if (args.size() == 0) {
             IOUtils.copyBytes(System.in, fos, DEFAULT_IO_LENGTH);
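
For reference, a rough programmatic equivalent of the new `hadoop fs -appendToFile -n <localsrc> <dst>` behaviour. The two-argument boolean append overload is taken from the hunk above and assumed to be available on this version of FileSystem; the paths are illustrative:

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IOUtils;

    public class AppendToNewBlockExample {
      public static void append(String localFile, String dst, boolean newBlock)
          throws Exception {
        Configuration conf = new Configuration();
        Path target = new Path(dst);
        FileSystem fs = target.getFileSystem(conf);
        // Mirrors the shell command: when -n is given, the appended data is
        // asked to start in a fresh block.
        try (InputStream in = Files.newInputStream(Paths.get(localFile));
             FSDataOutputStream out = newBlock
                 ? fs.append(target, true) : fs.append(target)) {
          IOUtils.copyBytes(in, out, 1024 * 1024);
        }
      }
    }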
@@ -47,7 +47,6 @@ import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.io.compress.CompressionCodecFactory;
 import org.apache.hadoop.util.ReflectionUtils;
@@ -217,8 +216,8 @@ class Display extends FsCommand {

   protected class TextRecordInputStream extends InputStream {
     SequenceFile.Reader r;
-    Writable key;
-    Writable val;
+    Object key;
+    Object val;

     DataInputBuffer inbuf;
     DataOutputBuffer outbuf;
@@ -228,10 +227,8 @@ class Display extends FsCommand {
       final Configuration lconf = getConf();
       r = new SequenceFile.Reader(lconf,
           SequenceFile.Reader.file(fpath));
-      key = ReflectionUtils.newInstance(
-          r.getKeyClass().asSubclass(Writable.class), lconf);
-      val = ReflectionUtils.newInstance(
-          r.getValueClass().asSubclass(Writable.class), lconf);
+      key = ReflectionUtils.newInstance(r.getKeyClass(), lconf);
+      val = ReflectionUtils.newInstance(r.getValueClass(), lconf);
       inbuf = new DataInputBuffer();
       outbuf = new DataOutputBuffer();
     }
@@ -240,8 +237,11 @@ class Display extends FsCommand {
     public int read() throws IOException {
       int ret;
       if (null == inbuf || -1 == (ret = inbuf.read())) {
-        if (!r.next(key, val)) {
+        key = r.next(key);
+        if (key == null) {
           return -1;
+        } else {
+          val = r.getCurrentValue(val);
         }
         byte[] tmp = key.toString().getBytes(StandardCharsets.UTF_8);
         outbuf.write(tmp, 0, tmp.length);
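
For reference, a small sketch of reading a SequenceFile with the untyped reader calls used above, so keys and values are no longer required to be Writable. The file path and the printing are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.util.ReflectionUtils;

    public class SequenceFileDump {
      public static void dump(Configuration conf, Path file) throws Exception {
        try (SequenceFile.Reader reader = new SequenceFile.Reader(conf,
            SequenceFile.Reader.file(file))) {
          // The untyped next()/getCurrentValue() calls work with whatever
          // serialization the file was written with, not only Writable.
          Object key = ReflectionUtils.newInstance(reader.getKeyClass(), conf);
          Object val = ReflectionUtils.newInstance(reader.getValueClass(), conf);
          while ((key = reader.next(key)) != null) {
            val = reader.getCurrentValue(val);
            System.out.println(key + "\t" + val);
          }
        }
      }
    }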
@@ -15,6 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Support for the execution of a file system command.
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 package org.apache.hadoop.fs.shell;
@@ -190,7 +190,7 @@ final class IOStatisticsStoreImpl extends WrappedIOStatistics
       return counter.get();
     } else {
       long l = incAtomicLong(counter, value);
-      LOG.debug("Incrementing counter {} by {} with final value {}",
+      LOG.trace("Incrementing counter {} by {} with final value {}",
           key, value, l);
       return l;
     }
@@ -15,6 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Support for embedded HTTP services.
+ */
 @InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
 @InterfaceStability.Unstable
 package org.apache.hadoop.http;
@@ -158,6 +158,9 @@ public class DefaultStringifier<T> implements Stringifier<T> {
   public static <K> void storeArray(Configuration conf, K[] items,
       String keyName) throws IOException {

+    if (items.length == 0) {
+      throw new IndexOutOfBoundsException();
+    }
     DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf,
         GenericsUtil.getClass(items[0]));
     try {
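
For reference, a minimal round trip through storeArray/loadArray; with the guard above, an empty array now fails fast with IndexOutOfBoundsException rather than failing inside GenericsUtil.getClass(items[0]). The key name is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.DefaultStringifier;
    import org.apache.hadoop.io.IntWritable;

    public class StoreArrayExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        IntWritable[] values = { new IntWritable(1), new IntWritable(2) };
        // Serializes the array into the configuration under the given key.
        DefaultStringifier.storeArray(conf, values, "example.ints");
        IntWritable[] restored =
            DefaultStringifier.loadArray(conf, "example.ints", IntWritable.class);
        System.out.println(restored.length);   // 2
      }
    }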
@@ -15,6 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Implementation of compression/decompression for the BZip2
+ * compression algorithm.
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 package org.apache.hadoop.io.compress.bzip2;
@@ -15,6 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Implementation of compression/decompression for the LZ4
+ * compression algorithm.
+ *
+ * @see <a href="http://code.google.com/p/lz4/">LZ4</a>
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 package org.apache.hadoop.io.compress.lz4;
@@ -15,6 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Implementation of compression/decompression for the Snappy
+ * compression algorithm.
+ *
+ * @see <a href="http://code.google.com/p/snappy/">Snappy</a>
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 package org.apache.hadoop.io.compress.snappy;
@@ -15,6 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Implementation of compression/decompression based on the popular
+ * gzip compressed file format.
+ *
+ * @see <a href="http://www.gzip.org/">gzip</a>
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 package org.apache.hadoop.io.compress.zlib;
@@ -15,6 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Implementation of compression/decompression based on the zStandard
+ * compression algorithm.
+ *
+ * @see <a href="https://github.com/facebook/zstd">zStandard</a>
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 package org.apache.hadoop.io.compress.zstd;
@@ -15,6 +15,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Various native IO-related calls not available in Java. These
+ * functions should generally be used alongside a fallback to another
+ * more portable mechanism.
+ */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 package org.apache.hadoop.io.nativeio;
@@ -140,12 +140,8 @@ public final class CallerContext {
     }

     public Builder(String context, Configuration conf) {
-      if (isValid(context)) {
-        sb.append(context);
-      }
-      fieldSeparator = conf.get(HADOOP_CALLER_CONTEXT_SEPARATOR_KEY,
-          HADOOP_CALLER_CONTEXT_SEPARATOR_DEFAULT);
-      checkFieldSeparator(fieldSeparator);
+      this(context, conf.get(HADOOP_CALLER_CONTEXT_SEPARATOR_KEY,
+          HADOOP_CALLER_CONTEXT_SEPARATOR_DEFAULT));
     }

     public Builder(String context, String separator) {
@@ -18,10 +18,10 @@

 package org.apache.hadoop.ipc;

+import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.util.Preconditions;
-import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -166,73 +166,6 @@ public class Client implements AutoCloseable {
   private final int maxAsyncCalls;
   private final AtomicInteger asyncCallCounter = new AtomicInteger(0);

-  /**
-   * Executor on which IPC calls' parameters are sent.
-   * Deferring the sending of parameters to a separate
-   * thread isolates them from thread interruptions in the
-   * calling code.
-   */
-  private final ExecutorService sendParamsExecutor;
-  private final static ClientExecutorServiceFactory clientExcecutorFactory =
-      new ClientExecutorServiceFactory();
-
-  private static class ClientExecutorServiceFactory {
-    private int executorRefCount = 0;
-    private ExecutorService clientExecutor = null;
-
-    /**
-     * Get Executor on which IPC calls' parameters are sent.
-     * If the internal reference counter is zero, this method
-     * creates the instance of Executor. If not, this method
-     * just returns the reference of clientExecutor.
-     *
-     * @return An ExecutorService instance
-     */
-    synchronized ExecutorService refAndGetInstance() {
-      if (executorRefCount == 0) {
-        clientExecutor = Executors.newCachedThreadPool(
-            new ThreadFactoryBuilder()
-            .setDaemon(true)
-            .setNameFormat("IPC Parameter Sending Thread #%d")
-            .build());
-      }
-      executorRefCount++;
-
-      return clientExecutor;
-    }
-
-    /**
-     * Cleanup Executor on which IPC calls' parameters are sent.
-     * If reference counter is zero, this method discards the
-     * instance of the Executor. If not, this method
-     * just decrements the internal reference counter.
-     *
-     * @return An ExecutorService instance if it exists.
-     *   Null is returned if not.
-     */
-    synchronized ExecutorService unrefAndCleanup() {
-      executorRefCount--;
-      assert(executorRefCount >= 0);
-
-      if (executorRefCount == 0) {
-        clientExecutor.shutdown();
-        try {
-          if (!clientExecutor.awaitTermination(1, TimeUnit.MINUTES)) {
-            clientExecutor.shutdownNow();
-          }
-        } catch (InterruptedException e) {
-          LOG.warn("Interrupted while waiting for clientExecutor" +
-              " to stop");
-          clientExecutor.shutdownNow();
-          Thread.currentThread().interrupt();
-        }
-        clientExecutor = null;
-      }
-
-      return clientExecutor;
-    }
-  }
-
   /**
    * set the ping interval value in configuration
    *
@@ -301,11 +234,6 @@ public class Client implements AutoCloseable {
     conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_KEY, timeout);
   }

-  @VisibleForTesting
-  public static final ExecutorService getClientExecutor() {
-    return Client.clientExcecutorFactory.clientExecutor;
-  }
-
   /**
    * Increment this client's reference count
    */
@@ -462,8 +390,10 @@ public class Client implements AutoCloseable {
     private AtomicLong lastActivity = new AtomicLong();// last I/O activity time
     private AtomicBoolean shouldCloseConnection = new AtomicBoolean(); // indicate if the connection is closed
     private IOException closeException; // close reason

-    private final Object sendRpcRequestLock = new Object();
+    private final Thread rpcRequestThread;
+    private final SynchronousQueue<Pair<Call, ResponseBuffer>> rpcRequestQueue =
+        new SynchronousQueue<>(true);

     private AtomicReference<Thread> connectingThread = new AtomicReference<>();
     private final Consumer<Connection> removeMethod;
@@ -472,6 +402,9 @@ public class Client implements AutoCloseable {
         Consumer<Connection> removeMethod) {
       this.remoteId = remoteId;
       this.server = remoteId.getAddress();
+      this.rpcRequestThread = new Thread(new RpcRequestSender(),
+          "IPC Parameter Sending Thread for " + remoteId);
+      this.rpcRequestThread.setDaemon(true);

       this.maxResponseLength = remoteId.conf.getInt(
           CommonConfigurationKeys.IPC_MAXIMUM_RESPONSE_LENGTH,
@@ -771,7 +704,7 @@ public class Client implements AutoCloseable {
      * handle that, a relogin is attempted.
      */
     private synchronized void handleSaslConnectionFailure(
-        final int currRetries, final int maxRetries, final Exception ex,
+        final int currRetries, final int maxRetries, final IOException ex,
         final Random rand, final UserGroupInformation ugi) throws IOException,
         InterruptedException {
       ugi.doAs(new PrivilegedExceptionAction<Object>() {
@@ -782,10 +715,7 @@ public class Client implements AutoCloseable {
           disposeSasl();
           if (shouldAuthenticateOverKrb()) {
             if (currRetries < maxRetries) {
-              if(LOG.isDebugEnabled()) {
-                LOG.debug("Exception encountered while connecting to "
-                    + "the server : " + ex);
-              }
+              LOG.debug("Exception encountered while connecting to the server {}", remoteId, ex);
               // try re-login
               if (UserGroupInformation.isLoginKeytabBased()) {
                 UserGroupInformation.getLoginUser().reloginFromKeytab();
@@ -803,7 +733,11 @@ public class Client implements AutoCloseable {
                   + UserGroupInformation.getLoginUser().getUserName() + " to "
                   + remoteId;
               LOG.warn(msg, ex);
-              throw (IOException) new IOException(msg).initCause(ex);
+              throw NetUtils.wrapException(remoteId.getAddress().getHostName(),
+                  remoteId.getAddress().getPort(),
+                  NetUtils.getHostname(),
+                  0,
+                  ex);
             }
           } else {
             // With RequestHedgingProxyProvider, one rpc call will send multiple
@@ -811,11 +745,9 @@ public class Client implements AutoCloseable {
             // all other requests will be interrupted. It's not a big problem,
             // and should not print a warning log.
             if (ex instanceof InterruptedIOException) {
-              LOG.debug("Exception encountered while connecting to the server",
-                  ex);
+              LOG.debug("Exception encountered while connecting to the server {}", remoteId, ex);
             } else {
-              LOG.warn("Exception encountered while connecting to the server ",
-                  ex);
+              LOG.warn("Exception encountered while connecting to the server {}", remoteId, ex);
             }
           }
           if (ex instanceof RemoteException)
@@ -1150,6 +1082,10 @@ public class Client implements AutoCloseable {

     @Override
     public void run() {
+      // Don't start the ipc parameter sending thread until we start this
+      // thread, because the shutdown logic only gets triggered if this
+      // thread is started.
+      rpcRequestThread.start();
       if (LOG.isDebugEnabled())
         LOG.debug(getName() + ": starting, having connections "
             + connections.size());
@@ -1173,9 +1109,52 @@ public class Client implements AutoCloseable {
             + connections.size());
     }

+    /**
+     * A thread to write rpc requests to the socket.
+     */
+    private class RpcRequestSender implements Runnable {
+      @Override
+      public void run() {
+        while (!shouldCloseConnection.get()) {
+          ResponseBuffer buf = null;
+          try {
+            Pair<Call, ResponseBuffer> pair =
+                rpcRequestQueue.poll(maxIdleTime, TimeUnit.MILLISECONDS);
+            if (pair == null || shouldCloseConnection.get()) {
+              continue;
+            }
+            buf = pair.getRight();
+            synchronized (ipcStreams.out) {
+              if (LOG.isDebugEnabled()) {
+                Call call = pair.getLeft();
+                LOG.debug(getName() + "{} sending #{} {}", getName(), call.id,
+                    call.rpcRequest);
+              }
+              // RpcRequestHeader + RpcRequest
+              ipcStreams.sendRequest(buf.toByteArray());
+              ipcStreams.flush();
+            }
+          } catch (InterruptedException ie) {
+            // stop this thread
+            return;
+          } catch (IOException e) {
+            // exception at this point would leave the connection in an
+            // unrecoverable state (eg half a call left on the wire).
+            // So, close the connection, killing any outstanding calls
+            markClosed(e);
+          } finally {
+            //the buffer is just an in-memory buffer, but it is still polite to
+            // close early
+            IOUtils.closeStream(buf);
+          }
+        }
+      }
+    }
+
     /** Initiates a rpc call by sending the rpc request to the remote server.
-     * Note: this is not called from the Connection thread, but by other
-     * threads.
+     * Note: this is not called from the current thread, but by another
+     * thread, so that if the current thread is interrupted that the socket
+     * state isn't corrupted with a partially written message.
      * @param call - the rpc request
      */
     public void sendRpcRequest(final Call call)
@@ -1185,8 +1164,7 @@ public class Client implements AutoCloseable {
       }

       // Serialize the call to be sent. This is done from the actual
-      // caller thread, rather than the sendParamsExecutor thread,
-
+      // caller thread, rather than the rpcRequestThread in the connection,
       // so that if the serialization throws an error, it is reported
       // properly. This also parallelizes the serialization.
       //
@@ -1203,51 +1181,7 @@ public class Client implements AutoCloseable {
       final ResponseBuffer buf = new ResponseBuffer();
       header.writeDelimitedTo(buf);
       RpcWritable.wrap(call.rpcRequest).writeTo(buf);
-      synchronized (sendRpcRequestLock) {
-        Future<?> senderFuture = sendParamsExecutor.submit(new Runnable() {
-          @Override
-          public void run() {
-            try {
-              synchronized (ipcStreams.out) {
-                if (shouldCloseConnection.get()) {
-                  return;
-                }
-                if (LOG.isDebugEnabled()) {
-                  LOG.debug(getName() + " sending #" + call.id
-                      + " " + call.rpcRequest);
-                }
-                // RpcRequestHeader + RpcRequest
-                ipcStreams.sendRequest(buf.toByteArray());
-                ipcStreams.flush();
-              }
-            } catch (IOException e) {
-              // exception at this point would leave the connection in an
-              // unrecoverable state (eg half a call left on the wire).
-              // So, close the connection, killing any outstanding calls
-              markClosed(e);
-            } finally {
-              //the buffer is just an in-memory buffer, but it is still polite to
-              // close early
-              IOUtils.closeStream(buf);
-            }
-          }
-        });
-
-        try {
-          senderFuture.get();
-        } catch (ExecutionException e) {
-          Throwable cause = e.getCause();
-
-          // cause should only be a RuntimeException as the Runnable above
-          // catches IOException
-          if (cause instanceof RuntimeException) {
-            throw (RuntimeException) cause;
-          } else {
-            throw new RuntimeException("unexpected checked exception", cause);
-          }
-        }
-      }
+      rpcRequestQueue.put(Pair.of(call, buf));
     }

     /* Receive a response.
@@ -1396,7 +1330,6 @@ public class Client implements AutoCloseable {
         CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_DEFAULT);

     this.clientId = ClientId.getClientId();
-    this.sendParamsExecutor = clientExcecutorFactory.refAndGetInstance();
     this.maxAsyncCalls = conf.getInt(
         CommonConfigurationKeys.IPC_CLIENT_ASYNC_CALLS_MAX_KEY,
         CommonConfigurationKeys.IPC_CLIENT_ASYNC_CALLS_MAX_DEFAULT);
@@ -1440,6 +1373,7 @@ public class Client implements AutoCloseable {
     // wake up all connections
     for (Connection conn : connections.values()) {
       conn.interrupt();
+      conn.rpcRequestThread.interrupt();
       conn.interruptConnectingThread();
     }

@@ -1456,7 +1390,6 @@ public class Client implements AutoCloseable {
         }
       }
     }
-    clientExcecutorFactory.unrefAndCleanup();
   }

   /**
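
The refactor above replaces the JVM-wide sendParamsExecutor with one daemon sender thread per connection, fed through a SynchronousQueue, so an interrupt delivered to a calling thread can no longer leave half a request on the wire. A standalone sketch of the same hand-off pattern, built only from JDK types rather than the Hadoop classes:

    import java.io.IOException;
    import java.io.OutputStream;
    import java.util.concurrent.SynchronousQueue;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicBoolean;

    public class SenderThreadSketch {
      private final SynchronousQueue<byte[]> queue = new SynchronousQueue<>(true);
      private final AtomicBoolean closed = new AtomicBoolean(false);
      private final Thread sender;

      public SenderThreadSketch(OutputStream socketOut) {
        sender = new Thread(() -> {
          while (!closed.get()) {
            try {
              byte[] frame = queue.poll(10, TimeUnit.SECONDS);
              if (frame == null) {
                continue;               // idle; re-check the closed flag
              }
              // Only this thread ever writes to the socket, so an interrupt of
              // a request thread cannot leave a partially written frame.
              synchronized (socketOut) {
                socketOut.write(frame);
                socketOut.flush();
              }
            } catch (InterruptedException e) {
              return;                   // connection is shutting down
            } catch (IOException e) {
              closed.set(true);         // socket state is unrecoverable
            }
          }
        }, "request-sender");
        sender.setDaemon(true);
        sender.start();
      }

      /** Request threads only hand the serialized frame to the sender. */
      public void send(byte[] frame) throws InterruptedException {
        queue.put(frame);
      }

      public void close() {
        closed.set(true);
        sender.interrupt();
      }
    }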
@@ -123,6 +123,7 @@ import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.hadoop.tracing.Span;
 import org.apache.hadoop.tracing.SpanContext;
 import org.apache.hadoop.tracing.TraceScope;
@@ -153,6 +154,13 @@ public abstract class Server {
   private ExceptionsHandler exceptionsHandler = new ExceptionsHandler();
   private Tracer tracer;
   private AlignmentContext alignmentContext;
+
+  /**
+   * Allow server to do force Kerberos re-login once after failure irrespective
+   * of the last login time.
+   */
+  private final AtomicBoolean canTryForceLogin = new AtomicBoolean(true);
+
   /**
    * Logical name of the server used in metrics and monitor.
    */
@@ -1393,8 +1401,7 @@ public abstract class Server {
       bind(acceptChannel.socket(), address, backlogLength, conf, portRangeConfig);
       //Could be an ephemeral port
       this.listenPort = acceptChannel.socket().getLocalPort();
-      Thread.currentThread().setName("Listener at " +
-          bindAddress + "/" + this.listenPort);
+      LOG.info("Listener at {}:{}", bindAddress, this.listenPort);
       // create a selector;
       selector= Selector.open();
       readers = new Reader[readThreads];
@@ -2207,7 +2214,23 @@ public abstract class Server {
           AUDITLOG.warn(AUTH_FAILED_FOR + this.toString() + ":"
               + attemptingUser + " (" + e.getLocalizedMessage()
               + ") with true cause: (" + tce.getLocalizedMessage() + ")");
-          throw tce;
+          if (!UserGroupInformation.getLoginUser().isLoginSuccess()) {
+            doKerberosRelogin();
+            try {
+              // try processing message again
+              LOG.debug("Reprocessing sasl message for {}:{} after re-login",
+                  this.toString(), attemptingUser);
+              saslResponse = processSaslMessage(saslMessage);
+              AUDITLOG.info("Retry {}{}:{} after failure", AUTH_SUCCESSFUL_FOR,
+                  this.toString(), attemptingUser);
+              canTryForceLogin.set(true);
+            } catch (IOException exp) {
+              tce = (IOException) getTrueCause(e);
+              throw tce;
+            }
+          } else {
+            throw tce;
+          }
         }

         if (saslServer != null && saslServer.isComplete()) {
@@ -3323,6 +3346,26 @@ public abstract class Server {
         metricsUpdaterInterval, metricsUpdaterInterval, TimeUnit.MILLISECONDS);
   }

+  private synchronized void doKerberosRelogin() throws IOException {
+    if(UserGroupInformation.getLoginUser().isLoginSuccess()){
+      return;
+    }
+    LOG.warn("Initiating re-login from IPC Server");
+    if (canTryForceLogin.compareAndSet(true, false)) {
+      if (UserGroupInformation.isLoginKeytabBased()) {
+        UserGroupInformation.getLoginUser().forceReloginFromKeytab();
+      } else if (UserGroupInformation.isLoginTicketBased()) {
+        UserGroupInformation.getLoginUser().forceReloginFromTicketCache();
+      }
+    } else {
+      if (UserGroupInformation.isLoginKeytabBased()) {
+        UserGroupInformation.getLoginUser().reloginFromKeytab();
+      } else if (UserGroupInformation.isLoginTicketBased()) {
+        UserGroupInformation.getLoginUser().reloginFromTicketCache();
+      }
+    }
+  }
+
   public synchronized void addAuxiliaryListener(int auxiliaryPort)
       throws IOException {
     if (auxiliaryListenerMap == null) {
@@ -65,7 +65,7 @@ import org.apache.hadoop.util.Timer;
  * <p>This class can also be used to coordinate multiple logging points; see
  * {@link #record(String, long, double...)} for more details.
  *
- * <p>This class is not thread-safe.
+ * <p>This class is thread-safe.
  */
 public class LogThrottlingHelper {

@@ -192,7 +192,7 @@ public class LogThrottlingHelper {
    * @return A LogAction indicating whether or not the caller should write to
    *     its log.
    */
-  public LogAction record(double... values) {
+  public synchronized LogAction record(double... values) {
     return record(DEFAULT_RECORDER_NAME, timer.monotonicNow(), values);
   }

@@ -244,7 +244,7 @@ public class LogThrottlingHelper {
    *
    * @see #record(double...)
    */
-  public LogAction record(String recorderName, long currentTimeMs,
+  public synchronized LogAction record(String recorderName, long currentTimeMs,
       double... values) {
     if (primaryRecorderName == null) {
       primaryRecorderName = recorderName;
@@ -262,9 +262,15 @@ public class LogThrottlingHelper {
     if (primaryRecorderName.equals(recorderName) &&
         currentTimeMs - minLogPeriodMs >= lastLogTimestampMs) {
       lastLogTimestampMs = currentTimeMs;
-      for (LoggingAction log : currentLogs.values()) {
-        log.setShouldLog();
-      }
+      currentLogs.replaceAll((key, log) -> {
+        LoggingAction newLog = log;
+        if (log.hasLogged()) {
+          // create a fresh log since the old one has already been logged
+          newLog = new LoggingAction(log.getValueCount());
+        }
+        newLog.setShouldLog();
+        return newLog;
+      });
     }
     if (currentLog.shouldLog()) {
       currentLog.setHasLogged();
@@ -281,7 +287,7 @@ public class LogThrottlingHelper {
    * @param idx The index value.
    * @return The summary information.
    */
-  public SummaryStatistics getCurrentStats(String recorderName, int idx) {
+  public synchronized SummaryStatistics getCurrentStats(String recorderName, int idx) {
     LoggingAction currentLog = currentLogs.get(recorderName);
     if (currentLog != null) {
       return currentLog.getStats(idx);
@@ -308,6 +314,13 @@ public class LogThrottlingHelper {
     }
   }

+  @VisibleForTesting
+  public synchronized void reset() {
+    primaryRecorderName = null;
+    currentLogs.clear();
+    lastLogTimestampMs = Long.MIN_VALUE;
+  }
+
   /**
    * A standard log action which keeps track of all of the values which have
    * been logged. This is also used for internal bookkeeping via its private
@@ -357,6 +370,10 @@ public class LogThrottlingHelper {
       hasLogged = true;
     }

+    private int getValueCount() {
+      return stats.length;
+    }
+
     private void recordValues(double... values) {
       if (values.length != stats.length) {
         throw new IllegalArgumentException("received " + values.length +
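
For reference, the usual calling pattern, which the added synchronization now makes safe from multiple threads. The 5 second period and the latency metric are illustrative, and the helper is assumed to live in org.apache.hadoop.log with the LogAction accessors shown in its javadoc:

    import org.apache.hadoop.log.LogThrottlingHelper;
    import org.apache.hadoop.log.LogThrottlingHelper.LogAction;

    public class ThrottledLogging {
      // At most one summary line every 5 seconds, however many calls arrive.
      private static final LogThrottlingHelper THROTTLE =
          new LogThrottlingHelper(5000);

      public static void recordLatency(double latencyMs) {
        LogAction action = THROTTLE.record(latencyMs);
        if (action.shouldLog()) {
          System.out.printf("%d ops since last report, mean latency %.1f ms%n",
              action.getCount(), action.getStats(0).getMean());
        }
      }
    }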
@@ -280,7 +280,6 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
       }
       return sink;
     }
-    allSinks.put(name, sink);
     if (config != null) {
       registerSink(name, description, sink);
     }
@@ -301,6 +300,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
         ? newSink(name, desc, sink, conf)
         : newSink(name, desc, sink, config.subset(SINK_KEY));
     sinks.put(name, sa);
+    allSinks.put(name, sink);
     sa.start();
     LOG.info("Registered sink "+ name);
   }
@@ -508,6 +508,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
             conf.getString(DESC_KEY, sinkName), conf);
         sa.start();
         sinks.put(sinkName, sa);
+        allSinks.put(sinkName, sa.sink());
       } catch (Exception e) {
         LOG.warn("Error creating sink '"+ sinkName +"'", e);
       }
@@ -69,7 +69,7 @@ public class MutableGaugeFloat extends MutableGauge {

   private void incr(float delta) {
     while (true) {
-      float current = value.get();
+      float current = Float.intBitsToFloat(value.get());
       float next = current + delta;
       if (compareAndSet(current, next)) {
         setChanged();
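
The gauge keeps the float's IEEE-754 bit pattern inside an AtomicInteger; the fix decodes the stored bits before adding the delta, otherwise the raw int is treated as a float and the compare-and-set loop works on a garbage value. A standalone sketch of the pattern:

    import java.util.concurrent.atomic.AtomicInteger;

    public class AtomicFloat {
      // Store the IEEE-754 bit pattern of the float in an AtomicInteger.
      private final AtomicInteger bits =
          new AtomicInteger(Float.floatToIntBits(0f));

      public float get() {
        return Float.intBitsToFloat(bits.get());
      }

      public void add(float delta) {
        while (true) {
          int currentBits = bits.get();
          // Decode before doing arithmetic; using the raw int here is the
          // mistake the hunk above corrects.
          float current = Float.intBitsToFloat(currentBits);
          float next = current + delta;
          if (bits.compareAndSet(currentBits, Float.floatToIntBits(next))) {
            return;
          }
        }
      }
    }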
@@ -140,14 +140,14 @@ public class MutableStat extends MutableMetric {
     if (all || changed()) {
       numSamples += intervalStat.numSamples();
       builder.addCounter(numInfo, numSamples)
-             .addGauge(avgInfo, lastStat().mean());
+             .addGauge(avgInfo, intervalStat.mean());
       if (extended) {
-        builder.addGauge(stdevInfo, lastStat().stddev())
-               .addGauge(iMinInfo, lastStat().min())
-               .addGauge(iMaxInfo, lastStat().max())
+        builder.addGauge(stdevInfo, intervalStat.stddev())
+               .addGauge(iMinInfo, intervalStat.min())
+               .addGauge(iMaxInfo, intervalStat.max())
                .addGauge(minInfo, minMax.min())
                .addGauge(maxInfo, minMax.max())
-               .addGauge(iNumInfo, lastStat().numSamples());
+               .addGauge(iNumInfo, intervalStat.numSamples());
       }
       if (changed()) {
         if (numSamples > 0) {
@@ -38,6 +38,8 @@ import org.apache.hadoop.thirdparty.com.google.common.collect.HashBiMap;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

+import static org.apache.hadoop.util.Shell.bashQuote;
+
 /**
  * A simple shell-based implementation of {@link IdMappingServiceProvider}
  * Map id to user name or group name. It does update every 15 minutes. Only a
@@ -472,26 +474,27 @@ public class ShellBasedIdMapping implements IdMappingServiceProvider {

     boolean updated = false;
     updateStaticMapping();
+    String name2 = bashQuote(name);

     if (OS.startsWith("Linux") || OS.equals("SunOS") || OS.contains("BSD")) {
       if (isGrp) {
         updated = updateMapInternal(gidNameMap, "group",
-            getName2IdCmdNIX(name, true), ":",
+            getName2IdCmdNIX(name2, true), ":",
             staticMapping.gidMapping);
       } else {
         updated = updateMapInternal(uidNameMap, "user",
-            getName2IdCmdNIX(name, false), ":",
+            getName2IdCmdNIX(name2, false), ":",
             staticMapping.uidMapping);
       }
     } else {
       // Mac
       if (isGrp) {
         updated = updateMapInternal(gidNameMap, "group",
-            getName2IdCmdMac(name, true), "\\s+",
+            getName2IdCmdMac(name2, true), "\\s+",
             staticMapping.gidMapping);
       } else {
         updated = updateMapInternal(uidNameMap, "user",
-            getName2IdCmdMac(name, false), "\\s+",
+            getName2IdCmdMac(name2, false), "\\s+",
             staticMapping.uidMapping);
       }
     }
@@ -529,6 +529,18 @@ public class UserGroupInformation {
     user.setLogin(login);
   }

+  /** This method checks for a successful Kerberos login
+   * and returns true by default if it is not using Kerberos.
+   *
+   * @return true on successful login
+   */
+  public boolean isLoginSuccess() {
+    LoginContext login = user.getLogin();
+    return (login instanceof HadoopLoginContext)
+        ? ((HadoopLoginContext) login).isLoginSuccess()
+        : true;
+  }
+
   /**
    * Set the last login time for logged in user
    * @param loginTime the number of milliseconds since the beginning of time
@@ -1276,6 +1288,23 @@ public class UserGroupInformation {
     relogin(login, ignoreLastLoginTime);
   }

+  /**
+   * Force re-Login a user in from the ticket cache irrespective of the last
+   * login time. This method assumes that login had happened already. The
+   * Subject field of this UserGroupInformation object is updated to have the
+   * new credentials.
+   *
+   * @throws IOException
+   *           raised on errors performing I/O.
+   * @throws KerberosAuthException
+   *           on a failure
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
+  public void forceReloginFromTicketCache() throws IOException {
+    reloginFromTicketCache(true);
+  }
+
   /**
    * Re-Login a user in from the ticket cache. This
    * method assumes that login had happened already.
@@ -1287,6 +1316,11 @@ public class UserGroupInformation {
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
   public void reloginFromTicketCache() throws IOException {
+    reloginFromTicketCache(false);
+  }
+
+  private void reloginFromTicketCache(boolean ignoreLastLoginTime)
+      throws IOException {
     if (!shouldRelogin() || !isFromTicket()) {
       return;
     }
@@ -1294,7 +1328,7 @@ public class UserGroupInformation {
     if (login == null) {
       throw new KerberosAuthException(MUST_FIRST_LOGIN);
     }
-    relogin(login, false);
+    relogin(login, ignoreLastLoginTime);
   }

   private void relogin(HadoopLoginContext login, boolean ignoreLastLoginTime)
@@ -2083,6 +2117,11 @@ public class UserGroupInformation {
       this.conf = conf;
     }

+    /** Get the login status. */
+    public boolean isLoginSuccess() {
+      return isLoggedIn.get();
+    }
+
     String getAppName() {
       return appName;
     }
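
For reference, a sketch of how a long-running service might use these entry points; the keytab/ticket checks mirror the doKerberosRelogin() logic added to Server above, and when to force the re-login is an application decision:

    import java.io.IOException;

    import org.apache.hadoop.security.UserGroupInformation;

    public class ReloginSketch {
      public static void refreshCredentials() throws IOException {
        UserGroupInformation ugi = UserGroupInformation.getLoginUser();
        if (UserGroupInformation.isLoginKeytabBased()) {
          // Honours the configured minimum gap between re-logins.
          ugi.reloginFromKeytab();
        } else if (UserGroupInformation.isLoginTicketBased()) {
          // New in this change: ignore the last-login-time check, e.g. right
          // after an authentication failure has been observed.
          ugi.forceReloginFromTicketCache();
        }
      }
    }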
@@ -15,6 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Support for service-level authorization.
+ */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 package org.apache.hadoop.security.authorize;
@@ -15,6 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Filters for HTTP service security.
+ */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 package org.apache.hadoop.security.http;
@@ -25,7 +25,7 @@ import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import static org.apache.hadoop.util.PlatformName.JAVA_VENDOR_NAME;
+import static org.apache.hadoop.util.PlatformName.IBM_JAVA;

 import javax.net.ssl.HostnameVerifier;
 import javax.net.ssl.HttpsURLConnection;
@@ -102,11 +102,11 @@ public class SSLFactory implements ConnectionConfigurator {
       "ssl.server.exclude.cipher.list";

   public static final String KEY_MANAGER_SSLCERTIFICATE =
-      JAVA_VENDOR_NAME.contains("IBM") ? "ibmX509" :
+      IBM_JAVA ? "ibmX509" :
           KeyManagerFactory.getDefaultAlgorithm();

   public static final String TRUST_MANAGER_SSLCERTIFICATE =
-      JAVA_VENDOR_NAME.contains("IBM") ? "ibmX509" :
+      IBM_JAVA ? "ibmX509" :
           TrustManagerFactory.getDefaultAlgorithm();

   public static final String KEYSTORES_FACTORY_CLASS_KEY =
@@ -15,6 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * ZooKeeper secret manager for TokenIdentifiers and DelegationKeys.
+ */
 @InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 package org.apache.hadoop.security.token.delegation;
@@ -15,6 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Support for delegation tokens.
+ */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 package org.apache.hadoop.security.token;
@@ -15,6 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
+/**
+ * Support for services.
+ */
 @InterfaceAudience.Public
 package org.apache.hadoop.service;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -108,7 +108,7 @@ public class ApplicationClassLoader extends URLClassLoader {
       throws MalformedURLException {
     List<URL> urls = new ArrayList<URL>();
     for (String element : classpath.split(File.pathSeparator)) {
-      if (element.endsWith("/*")) {
+      if (element.endsWith(File.separator + "*")) {
         List<Path> jars = FileUtil.getJarsInDirectory(element);
         if (!jars.isEmpty()) {
           for (Path jar: jars) {
@@ -147,8 +147,8 @@ public class HostsFileReader {
       String filename, InputStream fileInputStream, Map<String, Integer> map)
       throws IOException {
     Document dom;
-    DocumentBuilderFactory builder = DocumentBuilderFactory.newInstance();
     try {
+      DocumentBuilderFactory builder = XMLUtils.newSecureDocumentBuilderFactory();
       DocumentBuilder db = builder.newDocumentBuilder();
       dom = db.parse(fileInputStream);
       // Examples:
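
For reference, the same hardened factory can be reused anywhere untrusted XML is parsed, assuming org.apache.hadoop.util.XMLUtils provides the newSecureDocumentBuilderFactory() helper used above and that it pre-configures the factory against DTD/external-entity attacks:

    import java.io.InputStream;

    import javax.xml.parsers.DocumentBuilder;
    import javax.xml.parsers.DocumentBuilderFactory;

    import org.apache.hadoop.util.XMLUtils;
    import org.w3c.dom.Document;

    public class SecureXmlParse {
      public static Document parse(InputStream in) throws Exception {
        // Assumed: the helper returns a factory with DTDs and external entity
        // resolution disabled, so it is safe for untrusted input.
        DocumentBuilderFactory factory = XMLUtils.newSecureDocumentBuilderFactory();
        DocumentBuilder builder = factory.newDocumentBuilder();
        return builder.parse(in);
      }
    }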
@@ -44,12 +44,7 @@ public class InstrumentedReadLock extends InstrumentedLock {
    * there can be multiple threads that hold the read lock concurrently.
    */
   private final ThreadLocal<Long> readLockHeldTimeStamp =
-      new ThreadLocal<Long>() {
-    @Override
-    protected Long initialValue() {
-      return Long.MAX_VALUE;
-    };
-  };
+      ThreadLocal.withInitial(() -> Long.MAX_VALUE);

   public InstrumentedReadLock(String name, Logger logger,
       ReentrantReadWriteLock readWriteLock,
@ -37,6 +37,9 @@ import org.slf4j.Logger;
|
||||||
@InterfaceStability.Unstable
|
@InterfaceStability.Unstable
|
||||||
public class InstrumentedWriteLock extends InstrumentedLock {
|
public class InstrumentedWriteLock extends InstrumentedLock {
|
||||||
|
|
||||||
|
private final ReentrantReadWriteLock readWriteLock;
|
||||||
|
private volatile long writeLockHeldTimeStamp = 0;
|
||||||
|
|
||||||
public InstrumentedWriteLock(String name, Logger logger,
|
public InstrumentedWriteLock(String name, Logger logger,
|
||||||
ReentrantReadWriteLock readWriteLock,
|
ReentrantReadWriteLock readWriteLock,
|
||||||
long minLoggingGapMs, long lockWarningThresholdMs) {
|
long minLoggingGapMs, long lockWarningThresholdMs) {
|
||||||
|
@ -50,5 +53,28 @@ public class InstrumentedWriteLock extends InstrumentedLock {
|
||||||
long minLoggingGapMs, long lockWarningThresholdMs, Timer clock) {
|
long minLoggingGapMs, long lockWarningThresholdMs, Timer clock) {
|
||||||
super(name, logger, readWriteLock.writeLock(), minLoggingGapMs,
|
super(name, logger, readWriteLock.writeLock(), minLoggingGapMs,
|
||||||
lockWarningThresholdMs, clock);
|
lockWarningThresholdMs, clock);
|
||||||
|
this.readWriteLock = readWriteLock;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void unlock() {
|
||||||
|
boolean needReport = readWriteLock.getWriteHoldCount() == 1;
|
||||||
|
long localWriteReleaseTime = getTimer().monotonicNow();
|
||||||
|
long localWriteAcquireTime = writeLockHeldTimeStamp;
|
||||||
|
getLock().unlock();
|
||||||
|
if (needReport) {
|
||||||
|
writeLockHeldTimeStamp = 0;
|
||||||
|
check(localWriteAcquireTime, localWriteReleaseTime, true);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Starts timing for the instrumented write lock.
|
||||||
|
*/
|
||||||
|
@Override
|
||||||
|
protected void startLockTiming() {
|
||||||
|
if (readWriteLock.getWriteHoldCount() == 1) {
|
||||||
|
writeLockHeldTimeStamp = getTimer().monotonicNow();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
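The new readWriteLock field plus the unlock() and startLockTiming() overrides above record the acquire time only for the outermost hold of a reentrant write lock, and report the hold time once that outermost hold is released. A hedged usage sketch, assuming the five-argument constructor shown above and an SLF4J logger; the thresholds and the work inside the critical section are illustrative:

```java
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.util.InstrumentedWriteLock;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class WriteLockTimingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(WriteLockTimingSketch.class);

  public static void main(String[] args) {
    ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
    // Warn if the write lock is held longer than 1s, but log at most once per 5s.
    InstrumentedWriteLock writeLock =
        new InstrumentedWriteLock("sketchLock", LOG, rwLock, 5000, 1000);

    writeLock.lock();
    try {
      // Critical section; the hold time is measured from the outermost lock().
      doWork();
    } finally {
      // Releasing the outermost hold triggers the hold-time check shown above.
      writeLock.unlock();
    }
  }

  private static void doWork() {
    // placeholder for real work
  }
}
```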
@ -33,7 +33,7 @@ import java.util.function.Consumer;
|
||||||
*
|
*
|
||||||
* This class does not support null element.
|
* This class does not support null element.
|
||||||
*
|
*
|
||||||
* This class is not thread safe.
|
* This class is thread safe.
|
||||||
*
|
*
|
||||||
* @param <K> Key type for looking up the elements
|
* @param <K> Key type for looking up the elements
|
||||||
* @param <E> Element type, which must be
|
* @param <E> Element type, which must be
|
||||||
|
|
|
@ -146,7 +146,8 @@ public abstract class Shell {
|
||||||
* @param arg the argument to quote
|
* @param arg the argument to quote
|
||||||
* @return the quoted string
|
* @return the quoted string
|
||||||
*/
|
*/
|
||||||
static String bashQuote(String arg) {
|
@InterfaceAudience.Private
|
||||||
|
public static String bashQuote(String arg) {
|
||||||
StringBuilder buffer = new StringBuilder(arg.length() + 2);
|
StringBuilder buffer = new StringBuilder(arg.length() + 2);
|
||||||
buffer.append('\'')
|
buffer.append('\'')
|
||||||
.append(arg.replace("'", "'\\''"))
|
.append(arg.replace("'", "'\\''"))
|
||||||
|
|
|
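bashQuote wraps an argument in single quotes and rewrites every embedded single quote as '\'' so the string survives bash word splitting and expansion; making it public lets other modules reuse it. A standalone re-implementation of the same rule (not a call into Hadoop) to show the transformation:

```java
public final class BashQuoteSketch {
  // Mirrors the quoting rule shown above: it's  ->  'it'\''s'
  static String bashQuote(String arg) {
    StringBuilder buffer = new StringBuilder(arg.length() + 2);
    buffer.append('\'')
        .append(arg.replace("'", "'\\''"))
        .append('\'');
    return buffer.toString();
  }

  public static void main(String[] args) {
    System.out.println(bashQuote("it's a test"));  // prints 'it'\''s a test'
  }
}
```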
@ -93,6 +93,10 @@ public class VersionInfo {
|
||||||
return info.getProperty("protocVersion", "Unknown");
|
return info.getProperty("protocVersion", "Unknown");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
protected String _getCompilePlatform() {
|
||||||
|
return info.getProperty("compilePlatform", "Unknown");
|
||||||
|
}
|
||||||
|
|
||||||
private static VersionInfo COMMON_VERSION_INFO = new VersionInfo("common");
|
private static VersionInfo COMMON_VERSION_INFO = new VersionInfo("common");
|
||||||
/**
|
/**
|
||||||
* Get the Hadoop version.
|
* Get the Hadoop version.
|
||||||
|
@ -167,12 +171,21 @@ public class VersionInfo {
|
||||||
return COMMON_VERSION_INFO._getProtocVersion();
|
return COMMON_VERSION_INFO._getProtocVersion();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the OS platform used for the build.
|
||||||
|
* @return the OS platform
|
||||||
|
*/
|
||||||
|
public static String getCompilePlatform() {
|
||||||
|
return COMMON_VERSION_INFO._getCompilePlatform();
|
||||||
|
}
|
||||||
|
|
||||||
public static void main(String[] args) {
|
public static void main(String[] args) {
|
||||||
LOG.debug("version: "+ getVersion());
|
LOG.debug("version: "+ getVersion());
|
||||||
System.out.println("Hadoop " + getVersion());
|
System.out.println("Hadoop " + getVersion());
|
||||||
System.out.println("Source code repository " + getUrl() + " -r " +
|
System.out.println("Source code repository " + getUrl() + " -r " +
|
||||||
getRevision());
|
getRevision());
|
||||||
System.out.println("Compiled by " + getUser() + " on " + getDate());
|
System.out.println("Compiled by " + getUser() + " on " + getDate());
|
||||||
|
System.out.println("Compiled on platform " + getCompilePlatform());
|
||||||
System.out.println("Compiled with protoc " + getProtocVersion());
|
System.out.println("Compiled with protoc " + getProtocVersion());
|
||||||
System.out.println("From source with checksum " + getSrcChecksum());
|
System.out.println("From source with checksum " + getSrcChecksum());
|
||||||
System.out.println("This command was run using " +
|
System.out.println("This command was run using " +
|
||||||
|
|
|
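With the compilePlatform property plumbed through, the build platform can be queried like the other build metadata. A small hedged sketch of programmatic use, assuming hadoop-common on the classpath; the printed values are build-dependent:

```java
import org.apache.hadoop.util.VersionInfo;

public final class PrintBuildInfo {
  public static void main(String[] args) {
    // The same information printed by `hadoop version`, obtained programmatically.
    System.out.println("Version:          " + VersionInfo.getVersion());
    System.out.println("Compiled with:    " + VersionInfo.getProtocVersion());
    System.out.println("Compile platform: " + VersionInfo.getCompilePlatform());
  }
}
```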
@ -18,13 +18,23 @@
|
||||||
|
|
||||||
package org.apache.hadoop.util;
|
package org.apache.hadoop.util;
|
||||||
|
|
||||||
|
import javax.xml.XMLConstants;
|
||||||
|
import javax.xml.parsers.DocumentBuilderFactory;
|
||||||
|
import javax.xml.parsers.ParserConfigurationException;
|
||||||
|
import javax.xml.parsers.SAXParserFactory;
|
||||||
import javax.xml.transform.*;
|
import javax.xml.transform.*;
|
||||||
|
import javax.xml.transform.sax.SAXTransformerFactory;
|
||||||
import javax.xml.transform.stream.*;
|
import javax.xml.transform.stream.*;
|
||||||
|
|
||||||
import org.apache.hadoop.classification.InterfaceAudience;
|
import org.apache.hadoop.classification.InterfaceAudience;
|
||||||
import org.apache.hadoop.classification.InterfaceStability;
|
import org.apache.hadoop.classification.InterfaceStability;
|
||||||
|
|
||||||
|
import org.slf4j.Logger;
|
||||||
|
import org.slf4j.LoggerFactory;
|
||||||
|
import org.xml.sax.SAXException;
|
||||||
|
|
||||||
import java.io.*;
|
import java.io.*;
|
||||||
|
import java.util.concurrent.atomic.AtomicBoolean;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* General xml utilities.
|
* General xml utilities.
|
||||||
|
@ -33,6 +43,28 @@ import java.io.*;
|
||||||
@InterfaceAudience.Private
|
@InterfaceAudience.Private
|
||||||
@InterfaceStability.Unstable
|
@InterfaceStability.Unstable
|
||||||
public class XMLUtils {
|
public class XMLUtils {
|
||||||
|
|
||||||
|
private static final Logger LOG =
|
||||||
|
LoggerFactory.getLogger(XMLUtils.class);
|
||||||
|
|
||||||
|
public static final String DISALLOW_DOCTYPE_DECL =
|
||||||
|
"http://apache.org/xml/features/disallow-doctype-decl";
|
||||||
|
public static final String LOAD_EXTERNAL_DECL =
|
||||||
|
"http://apache.org/xml/features/nonvalidating/load-external-dtd";
|
||||||
|
public static final String EXTERNAL_GENERAL_ENTITIES =
|
||||||
|
"http://xml.org/sax/features/external-general-entities";
|
||||||
|
public static final String EXTERNAL_PARAMETER_ENTITIES =
|
||||||
|
"http://xml.org/sax/features/external-parameter-entities";
|
||||||
|
public static final String CREATE_ENTITY_REF_NODES =
|
||||||
|
"http://apache.org/xml/features/dom/create-entity-ref-nodes";
|
||||||
|
public static final String VALIDATION =
|
||||||
|
"http://xml.org/sax/features/validation";
|
||||||
|
|
||||||
|
private static final AtomicBoolean CAN_SET_TRANSFORMER_ACCESS_EXTERNAL_DTD =
|
||||||
|
new AtomicBoolean(true);
|
||||||
|
private static final AtomicBoolean CAN_SET_TRANSFORMER_ACCESS_EXTERNAL_STYLESHEET =
|
||||||
|
new AtomicBoolean(true);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Transform input xml given a stylesheet.
|
* Transform input xml given a stylesheet.
|
||||||
*
|
*
|
||||||
|
@ -49,7 +81,7 @@ public class XMLUtils {
|
||||||
)
|
)
|
||||||
throws TransformerConfigurationException, TransformerException {
|
throws TransformerConfigurationException, TransformerException {
|
||||||
// Instantiate a TransformerFactory
|
// Instantiate a TransformerFactory
|
||||||
TransformerFactory tFactory = TransformerFactory.newInstance();
|
TransformerFactory tFactory = newSecureTransformerFactory();
|
||||||
|
|
||||||
// Use the TransformerFactory to process the
|
// Use the TransformerFactory to process the
|
||||||
// stylesheet and generate a Transformer
|
// stylesheet and generate a Transformer
|
||||||
|
@ -61,4 +93,118 @@ public class XMLUtils {
|
||||||
// and send the output to a Result object.
|
// and send the output to a Result object.
|
||||||
transformer.transform(new StreamSource(xml), new StreamResult(out));
|
transformer.transform(new StreamSource(xml), new StreamResult(out));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This method should be used if you need a {@link DocumentBuilderFactory}. Use this method
|
||||||
|
* instead of {@link DocumentBuilderFactory#newInstance()}. The factory that is returned has
|
||||||
|
* secure configuration enabled.
|
||||||
|
*
|
||||||
|
* @return a {@link DocumentBuilderFactory} with secure configuration enabled
|
||||||
|
* @throws ParserConfigurationException if the {@code JAXP} parser does not support the
|
||||||
|
* secure configuration
|
||||||
|
*/
|
||||||
|
public static DocumentBuilderFactory newSecureDocumentBuilderFactory()
|
||||||
|
throws ParserConfigurationException {
|
||||||
|
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
|
||||||
|
dbf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
|
||||||
|
dbf.setFeature(DISALLOW_DOCTYPE_DECL, true);
|
||||||
|
dbf.setFeature(LOAD_EXTERNAL_DECL, false);
|
||||||
|
dbf.setFeature(EXTERNAL_GENERAL_ENTITIES, false);
|
||||||
|
dbf.setFeature(EXTERNAL_PARAMETER_ENTITIES, false);
|
||||||
|
dbf.setFeature(CREATE_ENTITY_REF_NODES, false);
|
||||||
|
return dbf;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This method should be used if you need a {@link SAXParserFactory}. Use this method
|
||||||
|
* instead of {@link SAXParserFactory#newInstance()}. The factory that is returned has
|
||||||
|
* secure configuration enabled.
|
||||||
|
*
|
||||||
|
* @return a {@link SAXParserFactory} with secure configuration enabled
|
||||||
|
* @throws ParserConfigurationException if the {@code JAXP} parser does not support the
|
||||||
|
* secure configuration
|
||||||
|
* @throws SAXException if there are another issues when creating the factory
|
||||||
|
*/
|
||||||
|
public static SAXParserFactory newSecureSAXParserFactory()
|
||||||
|
throws SAXException, ParserConfigurationException {
|
||||||
|
SAXParserFactory spf = SAXParserFactory.newInstance();
|
||||||
|
spf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
|
||||||
|
spf.setFeature(DISALLOW_DOCTYPE_DECL, true);
|
||||||
|
spf.setFeature(LOAD_EXTERNAL_DECL, false);
|
||||||
|
spf.setFeature(EXTERNAL_GENERAL_ENTITIES, false);
|
||||||
|
spf.setFeature(EXTERNAL_PARAMETER_ENTITIES, false);
|
||||||
|
return spf;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This method should be used if you need a {@link TransformerFactory}. Use this method
|
||||||
|
* instead of {@link TransformerFactory#newInstance()}. The factory that is returned has
|
||||||
|
* secure configuration enabled.
|
||||||
|
*
|
||||||
|
* @return a {@link TransformerFactory} with secure configuration enabled
|
||||||
|
* @throws TransformerConfigurationException if the {@code JAXP} transformer does not
|
||||||
|
* support the secure configuration
|
||||||
|
*/
|
||||||
|
public static TransformerFactory newSecureTransformerFactory()
|
||||||
|
throws TransformerConfigurationException {
|
||||||
|
TransformerFactory trfactory = TransformerFactory.newInstance();
|
||||||
|
trfactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
|
||||||
|
setOptionalSecureTransformerAttributes(trfactory);
|
||||||
|
return trfactory;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This method should be used if you need a {@link SAXTransformerFactory}. Use this method
|
||||||
|
* instead of {@link SAXTransformerFactory#newInstance()}. The factory that is returned has
|
||||||
|
* secure configuration enabled.
|
||||||
|
*
|
||||||
|
* @return a {@link SAXTransformerFactory} with secure configuration enabled
|
||||||
|
* @throws TransformerConfigurationException if the {@code JAXP} transformer does not
|
||||||
|
* support the secure configuration
|
||||||
|
*/
|
||||||
|
public static SAXTransformerFactory newSecureSAXTransformerFactory()
|
||||||
|
throws TransformerConfigurationException {
|
||||||
|
SAXTransformerFactory trfactory = (SAXTransformerFactory) SAXTransformerFactory.newInstance();
|
||||||
|
trfactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
|
||||||
|
setOptionalSecureTransformerAttributes(trfactory);
|
||||||
|
return trfactory;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* These attributes are recommended for maximum security but some JAXP transformers do
|
||||||
|
* not support them. If at any stage, we fail to set these attributes, then we won't try again
|
||||||
|
* for subsequent transformers.
|
||||||
|
*
|
||||||
|
* @param transformerFactory to update
|
||||||
|
*/
|
||||||
|
private static void setOptionalSecureTransformerAttributes(
|
||||||
|
TransformerFactory transformerFactory) {
|
||||||
|
bestEffortSetAttribute(transformerFactory, CAN_SET_TRANSFORMER_ACCESS_EXTERNAL_DTD,
|
||||||
|
XMLConstants.ACCESS_EXTERNAL_DTD, "");
|
||||||
|
bestEffortSetAttribute(transformerFactory, CAN_SET_TRANSFORMER_ACCESS_EXTERNAL_STYLESHEET,
|
||||||
|
XMLConstants.ACCESS_EXTERNAL_STYLESHEET, "");
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Set an attribute value on a {@link TransformerFactory}. If the TransformerFactory
|
||||||
|
* does not support the attribute, the method just returns <code>false</code> and
|
||||||
|
* logs the issue at debug level.
|
||||||
|
*
|
||||||
|
* @param transformerFactory to update
|
||||||
|
* @param flag that indicates whether to do the update and the flag can be set to
|
||||||
|
* <code>false</code> if an update fails
|
||||||
|
* @param name of the attribute to set
|
||||||
|
* @param value to set on the attribute
|
||||||
|
*/
|
||||||
|
static void bestEffortSetAttribute(TransformerFactory transformerFactory, AtomicBoolean flag,
|
||||||
|
String name, Object value) {
|
||||||
|
if (flag.get()) {
|
||||||
|
try {
|
||||||
|
transformerFactory.setAttribute(name, value);
|
||||||
|
} catch (Throwable t) {
|
||||||
|
flag.set(false);
|
||||||
|
LOG.debug("Issue setting TransformerFactory attribute {}: {}", name, t.toString());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
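Taken together, the new XMLUtils helpers give callers one place to obtain JAXP factories with doctype declarations and external-entity resolution disabled (the HostsFileReader and test changes in this patch switch to them). A hedged usage sketch, assuming the methods added above; the file path is illustrative:

```java
import java.io.File;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.apache.hadoop.util.XMLUtils;
import org.w3c.dom.Document;

public final class SecureParseSketch {
  public static void main(String[] args) throws Exception {
    // Hardened factory: doctype declarations and external entities are refused,
    // so XXE-style payloads fail instead of being resolved.
    DocumentBuilderFactory dbf = XMLUtils.newSecureDocumentBuilderFactory();
    DocumentBuilder db = dbf.newDocumentBuilder();
    Document dom = db.parse(new File("hosts.xml"));  // illustrative path
    System.out.println("root element: " + dom.getDocumentElement().getTagName());
  }
}
```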
@ -1,5 +1,4 @@
|
||||||
/*
|
/*
|
||||||
* *
|
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
* or more contributor license agreements. See the NOTICE file
|
* or more contributor license agreements. See the NOTICE file
|
||||||
* distributed with this work for additional information
|
* distributed with this work for additional information
|
||||||
|
@ -15,9 +14,11 @@
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
* See the License for the specific language governing permissions and
|
* See the License for the specific language governing permissions and
|
||||||
* limitations under the License.
|
* limitations under the License.
|
||||||
* /
|
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Support for concurrent execution.
|
||||||
|
*/
|
||||||
@InterfaceAudience.Private
|
@InterfaceAudience.Private
|
||||||
@InterfaceStability.Unstable
|
@InterfaceStability.Unstable
|
||||||
package org.apache.hadoop.util.concurrent;
|
package org.apache.hadoop.util.concurrent;
|
||||||
|
|
|
@ -24,3 +24,4 @@ date=${version-info.build.time}
|
||||||
url=${version-info.scm.uri}
|
url=${version-info.scm.uri}
|
||||||
srcChecksum=${version-info.source.md5}
|
srcChecksum=${version-info.source.md5}
|
||||||
protocVersion=${hadoop.protobuf.version}
|
protocVersion=${hadoop.protobuf.version}
|
||||||
|
compilePlatform=${os.detected.classifier}
|
||||||
|
|
|
@ -1094,14 +1094,6 @@
|
||||||
</description>
|
</description>
|
||||||
</property>
|
</property>
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.viewfs.overload.scheme.target.swift.impl</name>
|
|
||||||
<value>org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem</value>
|
|
||||||
<description>The SwiftNativeFileSystem for view file system overload scheme
|
|
||||||
when child file system and ViewFSOverloadScheme's schemes are swift.
|
|
||||||
</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
<property>
|
||||||
<name>fs.viewfs.overload.scheme.target.oss.impl</name>
|
<name>fs.viewfs.overload.scheme.target.oss.impl</name>
|
||||||
<value>org.apache.hadoop.fs.aliyun.oss.AliyunOSSFileSystem</value>
|
<value>org.apache.hadoop.fs.aliyun.oss.AliyunOSSFileSystem</value>
|
||||||
|
@ -1211,12 +1203,6 @@
|
||||||
<description>File space usage statistics refresh interval in msec.</description>
|
<description>File space usage statistics refresh interval in msec.</description>
|
||||||
</property>
|
</property>
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.swift.impl</name>
|
|
||||||
<value>org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem</value>
|
|
||||||
<description>The implementation class of the OpenStack Swift Filesystem</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
<property>
|
||||||
<name>fs.automatic.close</name>
|
<name>fs.automatic.close</name>
|
||||||
<value>true</value>
|
<value>true</value>
|
||||||
|
@ -2180,6 +2166,12 @@ The switch to turn S3A auditing on or off.
|
||||||
<description>The AbstractFileSystem for gs: uris.</description>
|
<description>The AbstractFileSystem for gs: uris.</description>
|
||||||
</property>
|
</property>
|
||||||
|
|
||||||
|
<property>
|
||||||
|
<name>fs.azure.enable.readahead</name>
|
||||||
|
<value>true</value>
|
||||||
|
<description>Enables readahead/prefetching in AbfsInputStream.</description>
|
||||||
|
</property>
|
||||||
|
|
||||||
<property>
|
<property>
|
||||||
<name>io.seqfile.compress.blocksize</name>
|
<name>io.seqfile.compress.blocksize</name>
|
||||||
<value>1000000</value>
|
<value>1000000</value>
|
||||||
|
|
|
@ -22,7 +22,17 @@ Purpose
|
||||||
|
|
||||||
This document describes how to install and configure Hadoop clusters ranging from a few nodes to extremely large clusters with thousands of nodes. To play with Hadoop, you may first want to install it on a single machine (see [Single Node Setup](./SingleCluster.html)).
|
This document describes how to install and configure Hadoop clusters ranging from a few nodes to extremely large clusters with thousands of nodes. To play with Hadoop, you may first want to install it on a single machine (see [Single Node Setup](./SingleCluster.html)).
|
||||||
|
|
||||||
This document does not cover advanced topics such as [Security](./SecureMode.html) or High Availability.
|
This document does not cover advanced topics such as High Availability.
|
||||||
|
|
||||||
|
*Important*: all production Hadoop clusters use Kerberos to authenticate callers
|
||||||
|
and secure access to HDFS data as well as restricting access to computation
|
||||||
|
services (YARN etc.).
|
||||||
|
|
||||||
|
These instructions do not cover integration with any Kerberos services,
|
||||||
|
and everyone bringing up a production cluster should include connecting to their
|
||||||
|
organisation's Kerberos infrastructure as a key part of the deployment.
|
||||||
|
|
||||||
|
See [Security](./SecureMode.html) for details on how to secure a cluster.
|
||||||
|
|
||||||
Prerequisites
|
Prerequisites
|
||||||
-------------
|
-------------
|
||||||
|
|
|
@ -208,7 +208,8 @@ The following table lists the configuration property names that are deprecated i
|
||||||
| mapred.task.profile.params | mapreduce.task.profile.params |
|
| mapred.task.profile.params | mapreduce.task.profile.params |
|
||||||
| mapred.task.profile.reduces | mapreduce.task.profile.reduces |
|
| mapred.task.profile.reduces | mapreduce.task.profile.reduces |
|
||||||
| mapred.task.timeout | mapreduce.task.timeout |
|
| mapred.task.timeout | mapreduce.task.timeout |
|
||||||
| mapred.tasktracker.indexcache.mb | mapreduce.tasktracker.indexcache.mb |
|
| mapred.tasktracker.indexcache.mb | mapreduce.reduce.shuffle.indexcache.mb |
|
||||||
|
| mapreduce.tasktracker.indexcache.mb | mapreduce.reduce.shuffle.indexcache.mb |
|
||||||
| mapred.tasktracker.map.tasks.maximum | mapreduce.tasktracker.map.tasks.maximum |
|
| mapred.tasktracker.map.tasks.maximum | mapreduce.tasktracker.map.tasks.maximum |
|
||||||
| mapred.tasktracker.memory\_calculator\_plugin | mapreduce.tasktracker.resourcecalculatorplugin |
|
| mapred.tasktracker.memory\_calculator\_plugin | mapreduce.tasktracker.resourcecalculatorplugin |
|
||||||
| mapred.tasktracker.memorycalculatorplugin | mapreduce.tasktracker.resourcecalculatorplugin |
|
| mapred.tasktracker.memorycalculatorplugin | mapreduce.tasktracker.resourcecalculatorplugin |
|
||||||
|
|
|
@ -59,7 +59,7 @@ Copies source paths to stdout.
|
||||||
|
|
||||||
Options
|
Options
|
||||||
|
|
||||||
* The `-ignoreCrc` option disables checkshum verification.
|
* The `-ignoreCrc` option disables checksum verification.
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
|
@ -73,18 +73,19 @@ Returns 0 on success and -1 on error.
|
||||||
checksum
|
checksum
|
||||||
--------
|
--------
|
||||||
|
|
||||||
Usage: `hadoop fs -checksum [-v] URI`
|
Usage: `hadoop fs -checksum [-v] URI [URI ...]`
|
||||||
|
|
||||||
Returns the checksum information of a file.
|
Returns the checksum information of the file(s).
|
||||||
|
|
||||||
Options
|
Options
|
||||||
|
|
||||||
* The `-v` option displays blocks size for the file.
|
* The `-v` option displays the block size for the file(s).
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
* `hadoop fs -checksum hdfs://nn1.example.com/file1`
|
* `hadoop fs -checksum hdfs://nn1.example.com/file1`
|
||||||
* `hadoop fs -checksum file:///etc/hosts`
|
* `hadoop fs -checksum file:///etc/hosts`
|
||||||
|
* `hadoop fs -checksum file:///etc/hosts hdfs://nn1.example.com/file1`
|
||||||
|
|
||||||
chgrp
|
chgrp
|
||||||
-----
|
-----
|
||||||
|
@ -177,7 +178,7 @@ Returns 0 on success and -1 on error.
|
||||||
cp
|
cp
|
||||||
----
|
----
|
||||||
|
|
||||||
Usage: `hadoop fs -cp [-f] [-p | -p[topax]] [-t <thread count>] [-q <thread pool queue size>] URI [URI ...] <dest>`
|
Usage: `hadoop fs -cp [-f] [-p | -p[topax]] [-d] [-t <thread count>] [-q <thread pool queue size>] URI [URI ...] <dest>`
|
||||||
|
|
||||||
Copy files from source to destination. This command allows multiple sources as well in which case the destination must be a directory.
|
Copy files from source to destination. This command allows multiple sources as well in which case the destination must be a directory.
|
||||||
|
|
||||||
|
@ -187,13 +188,14 @@ Options:
|
||||||
|
|
||||||
* `-f` : Overwrite the destination if it already exists.
|
* `-f` : Overwrite the destination if it already exists.
|
||||||
* `-d` : Skip creation of temporary file with the suffix `._COPYING_`.
|
* `-d` : Skip creation of temporary file with the suffix `._COPYING_`.
|
||||||
* `-p` : Preserve file attributes [topx] (timestamps, ownership, permission, ACL, XAttr). If -p is specified with no *arg*, then preserves timestamps, ownership, permission. If -pa is specified, then preserves permission also because ACL is a super-set of permission. Determination of whether raw namespace extended attributes are preserved is independent of the -p flag.
|
* `-p` : Preserve file attributes [topax] (timestamps, ownership, permission, ACL, XAttr). If -p is specified with no *arg*, then preserves timestamps, ownership, permission. If -pa is specified, then preserves permission also because ACL is a super-set of permission. Determination of whether raw namespace extended attributes are preserved is independent of the -p flag.
|
||||||
* `-t <thread count>` : Number of threads to be used, default is 1. Useful when copying directories containing more than 1 file.
|
* `-t <thread count>` : Number of threads to be used, default is 1. Useful when copying directories containing more than 1 file.
|
||||||
* `-q <thread pool queue size>` : Thread pool queue size to be used, default is 1024. It takes effect only when thread count greater than 1.
|
* `-q <thread pool queue size>` : Thread pool queue size to be used, default is 1024. It takes effect only when the thread count is greater than 1.
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
* `hadoop fs -cp /user/hadoop/file1 /user/hadoop/file2`
|
* `hadoop fs -cp /user/hadoop/file1 /user/hadoop/file2`
|
||||||
|
* `hadoop fs -cp -f -d /user/hadoop/file1 /user/hadoop/file2`
|
||||||
* `hadoop fs -cp /user/hadoop/file1 /user/hadoop/file2 /user/hadoop/dir`
|
* `hadoop fs -cp /user/hadoop/file1 /user/hadoop/file2 /user/hadoop/dir`
|
||||||
* `hadoop fs -cp -t 5 /user/hadoop/file1 /user/hadoop/file2 /user/hadoop/dir`
|
* `hadoop fs -cp -t 5 /user/hadoop/file1 /user/hadoop/file2 /user/hadoop/dir`
|
||||||
* `hadoop fs -cp -t 10 -q 2048 /user/hadoop/file1 /user/hadoop/file2 /user/hadoop/dir`
|
* `hadoop fs -cp -t 10 -q 2048 /user/hadoop/file1 /user/hadoop/file2 /user/hadoop/dir`
|
||||||
|
@ -403,7 +405,7 @@ Returns 0 on success and non-zero on error.
|
||||||
getmerge
|
getmerge
|
||||||
--------
|
--------
|
||||||
|
|
||||||
Usage: `hadoop fs -getmerge [-nl] <src> <localdst>`
|
Usage: `hadoop fs -getmerge [-nl] [-skip-empty-file] <src> <localdst>`
|
||||||
|
|
||||||
Takes a source directory and a destination file as input and concatenates files in src into the destination local file. Optionally -nl can be set to enable adding a newline character (LF) at the end of each file.
|
Takes a source directory and a destination file as input and concatenates files in src into the destination local file. Optionally -nl can be set to enable adding a newline character (LF) at the end of each file.
|
||||||
-skip-empty-file can be used to avoid unwanted newline characters in case of empty files.
|
-skip-empty-file can be used to avoid unwanted newline characters in case of empty files.
|
||||||
|
@ -412,6 +414,7 @@ Examples:
|
||||||
|
|
||||||
* `hadoop fs -getmerge -nl /src /opt/output.txt`
|
* `hadoop fs -getmerge -nl /src /opt/output.txt`
|
||||||
* `hadoop fs -getmerge -nl /src/file1.txt /src/file2.txt /output.txt`
|
* `hadoop fs -getmerge -nl /src/file1.txt /src/file2.txt /output.txt`
|
||||||
|
* `hadoop fs -getmerge -nl -skip-empty-file /src/file1.txt /src/file2.txt /output.txt`
|
||||||
|
|
||||||
Exit Code:
|
Exit Code:
|
||||||
|
|
||||||
|
@ -852,7 +855,7 @@ Return the help for an individual command.
|
||||||
====================================================
|
====================================================
|
||||||
|
|
||||||
The Hadoop FileSystem shell works with Object Stores such as Amazon S3,
|
The Hadoop FileSystem shell works with Object Stores such as Amazon S3,
|
||||||
Azure WASB and OpenStack Swift.
|
Azure ABFS and Google GCS.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
@ -972,7 +975,7 @@ this will be in the bucket; the `rm` operation will then take time proportional
|
||||||
to the size of the data. Furthermore, the deleted files will continue to incur
|
to the size of the data. Furthermore, the deleted files will continue to incur
|
||||||
storage costs.
|
storage costs.
|
||||||
|
|
||||||
To avoid this, use the the `-skipTrash` option.
|
To avoid this, use the `-skipTrash` option.
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
hadoop fs -rm -skipTrash s3a://bucket/dataset
|
hadoop fs -rm -skipTrash s3a://bucket/dataset
|
||||||
|
|
|
@ -220,7 +220,7 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a
|
||||||
| `WarmUpEDEKTimeNumOps` | Total number of warming up EDEK |
|
| `WarmUpEDEKTimeNumOps` | Total number of warming up EDEK |
|
||||||
| `WarmUpEDEKTimeAvgTime` | Average time of warming up EDEK in milliseconds |
|
| `WarmUpEDEKTimeAvgTime` | Average time of warming up EDEK in milliseconds |
|
||||||
| `WarmUpEDEKTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time spent in warming up EDEK in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
|
| `WarmUpEDEKTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time spent in warming up EDEK in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
|
||||||
| `ResourceCheckTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of of NameNode resource check latency in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
|
| `ResourceCheckTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of NameNode resource check latency in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
|
||||||
| `EditLogTailTimeNumOps` | Total number of times the standby NameNode tailed the edit log |
|
| `EditLogTailTimeNumOps` | Total number of times the standby NameNode tailed the edit log |
|
||||||
| `EditLogTailTimeAvgTime` | Average time (in milliseconds) spent by standby NameNode in tailing edit log |
|
| `EditLogTailTimeAvgTime` | Average time (in milliseconds) spent by standby NameNode in tailing edit log |
|
||||||
| `EditLogTailTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time spent in tailing edit logs by standby NameNode in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
|
| `EditLogTailTime`*num*`s(50/75/90/95/99)thPercentileLatency` | The 50/75/90/95/99th percentile of time spent in tailing edit logs by standby NameNode in milliseconds (*num* seconds granularity). Percentile measurement is off by default, by watching no intervals. The intervals are specified by `dfs.metrics.percentiles.intervals`. |
|
||||||
|
|
|
@ -595,7 +595,7 @@ hadoop kdiag \
|
||||||
--keytab zk.service.keytab --principal zookeeper/devix.example.org@REALM
|
--keytab zk.service.keytab --principal zookeeper/devix.example.org@REALM
|
||||||
```
|
```
|
||||||
|
|
||||||
This attempts to to perform all diagnostics without failing early, load in
|
This attempts to perform all diagnostics without failing early, load in
|
||||||
the HDFS and YARN XML resources, require a minimum key length of 1024 bytes,
|
the HDFS and YARN XML resources, require a minimum key length of 1024 bytes,
|
||||||
and log in as the principal `zookeeper/devix.example.org@REALM`, whose key must be in
|
and log in as the principal `zookeeper/devix.example.org@REALM`, whose key must be in
|
||||||
the keytab `zk.service.keytab`
|
the keytab `zk.service.keytab`
|
||||||
|
|
|
@ -26,6 +26,15 @@ Purpose
|
||||||
|
|
||||||
This document describes how to set up and configure a single-node Hadoop installation so that you can quickly perform simple operations using Hadoop MapReduce and the Hadoop Distributed File System (HDFS).
|
This document describes how to set up and configure a single-node Hadoop installation so that you can quickly perform simple operations using Hadoop MapReduce and the Hadoop Distributed File System (HDFS).
|
||||||
|
|
||||||
|
|
||||||
|
*Important*: all production Hadoop clusters use Kerberos to authenticate callers
|
||||||
|
and secure access to HDFS data as well as restricting access to computation
|
||||||
|
services (YARN etc.).
|
||||||
|
|
||||||
|
These instructions do not cover integration with any Kerberos services,
|
||||||
|
and everyone bringing up a production cluster should include connecting to their
|
||||||
|
organisation's Kerberos infrastructure as a key part of the deployment.
|
||||||
|
|
||||||
Prerequisites
|
Prerequisites
|
||||||
-------------
|
-------------
|
||||||
|
|
||||||
|
@ -33,8 +42,6 @@ $H3 Supported Platforms
|
||||||
|
|
||||||
* GNU/Linux is supported as a development and production platform. Hadoop has been demonstrated on GNU/Linux clusters with 2000 nodes.
|
* GNU/Linux is supported as a development and production platform. Hadoop has been demonstrated on GNU/Linux clusters with 2000 nodes.
|
||||||
|
|
||||||
* Windows is also a supported platform but the followings steps are for Linux only. To set up Hadoop on Windows, see [wiki page](http://wiki.apache.org/hadoop/Hadoop2OnWindows).
|
|
||||||
|
|
||||||
$H3 Required Software
|
$H3 Required Software
|
||||||
|
|
||||||
Required software for Linux include:
|
Required software for Linux includes:
|
||||||
|
|
|
@ -501,7 +501,7 @@ Where
|
||||||
def blocks(FS, p, s, s + l) = a list of the blocks containing data(FS, path)[s:s+l]
|
def blocks(FS, p, s, s + l) = a list of the blocks containing data(FS, path)[s:s+l]
|
||||||
|
|
||||||
|
|
||||||
Note that that as `length(FS, f) ` is defined as `0` if `isDir(FS, f)`, the result
|
Note that as `length(FS, f) ` is defined as `0` if `isDir(FS, f)`, the result
|
||||||
of `getFileBlockLocations()` on a directory is `[]`
|
of `getFileBlockLocations()` on a directory is `[]`
|
||||||
|
|
||||||
|
|
||||||
|
@ -701,13 +701,13 @@ The behavior of the returned stream is covered in [Output](outputstream.html).
|
||||||
clients creating files with `overwrite==true` to fail if the file is created
|
clients creating files with `overwrite==true` to fail if the file is created
|
||||||
by another client between the two tests.
|
by another client between the two tests.
|
||||||
|
|
||||||
* S3A, Swift and potentially other Object Stores do not currently change the `FS` state
|
* The S3A and potentially other Object Store connectors do not currently change the `FS` state
|
||||||
until the output stream `close()` operation is completed.
|
until the output stream `close()` operation is completed.
|
||||||
This is a significant difference between the behavior of object stores
|
This is a significant difference between the behavior of object stores
|
||||||
and that of filesystems, as it allows >1 client to create a file with `overwrite=false`,
|
and that of filesystems, as it allows >1 client to create a file with `overwrite=false`,
|
||||||
and potentially confuse file/directory logic. In particular, using `create()` to acquire
|
and potentially confuse file/directory logic. In particular, using `create()` to acquire
|
||||||
an exclusive lock on a file (whoever creates the file without an error is considered
|
an exclusive lock on a file (whoever creates the file without an error is considered
|
||||||
the holder of the lock) may not not a safe algorithm to use when working with object stores.
|
the holder of the lock) may not be a safe algorithm to use when working with object stores.
|
||||||
|
|
||||||
* Object stores may create an empty file as a marker when a file is created.
|
* Object stores may create an empty file as a marker when a file is created.
|
||||||
However, object stores with `overwrite=true` semantics may not implement this atomically,
|
However, object stores with `overwrite=true` semantics may not implement this atomically,
|
||||||
|
@ -1225,7 +1225,7 @@ the parent directories of the destination then exist:
|
||||||
There is a check for and rejection if the `parent(dest)` is a file, but
|
There is a check for and rejection if the `parent(dest)` is a file, but
|
||||||
no checks for any other ancestors.
|
no checks for any other ancestors.
|
||||||
|
|
||||||
*Other Filesystems (including Swift) *
|
*Other Filesystems*
|
||||||
|
|
||||||
Other filesystems strictly reject the operation, raising a `FileNotFoundException`
|
Other filesystems strictly reject the operation, raising a `FileNotFoundException`
|
||||||
|
|
||||||
|
|
|
@ -167,7 +167,7 @@ rather than just any FS-specific subclass implemented by the implementation
|
||||||
custom subclasses.
|
custom subclasses.
|
||||||
|
|
||||||
This is critical to ensure safe use of the feature: directory listing/
|
This is critical to ensure safe use of the feature: directory listing/
|
||||||
status serialization/deserialization can result result in the `withFileStatus()`
|
status serialization/deserialization can result in the `withFileStatus()`
|
||||||
argument not being the custom subclass returned by the Filesystem instance's
|
argument not being the custom subclass returned by the Filesystem instance's
|
||||||
own `getFileStatus()`, `listFiles()`, `listLocatedStatus()` calls, etc.
|
own `getFileStatus()`, `listFiles()`, `listLocatedStatus()` calls, etc.
|
||||||
|
|
||||||
|
@ -686,4 +686,4 @@ public T load(FileSystem fs,
|
||||||
*Note:* : in Hadoop 3.3.2 and earlier, the `withFileStatus(status)` call
|
*Note:* : in Hadoop 3.3.2 and earlier, the `withFileStatus(status)` call
|
||||||
required a non-null parameter; this has since been relaxed.
|
required a non-null parameter; this has since been relaxed.
|
||||||
For maximum compatibility across versions, only invoke the method
|
For maximum compatibility across versions, only invoke the method
|
||||||
when the file status is known to be non-null.
|
when the file status is known to be non-null.
|
||||||
|
|
|
@ -228,7 +228,7 @@ Accordingly: *Use if and only if you are confident that the conditions are met.*
|
||||||
|
|
||||||
### `fs.s3a.create.header` User-supplied header support
|
### `fs.s3a.create.header` User-supplied header support
|
||||||
|
|
||||||
Options with the prefix `fs.s3a.create.header.` will be added to to the
|
Options with the prefix `fs.s3a.create.header.` will be added to the
|
||||||
S3 object metadata as "user defined metadata".
|
S3 object metadata as "user defined metadata".
|
||||||
This metadata is visible to all applications. It can also be retrieved through the
|
This metadata is visible to all applications. It can also be retrieved through the
|
||||||
FileSystem/FileContext `listXAttrs()` and `getXAttrs()` API calls with the prefix `header.`
|
FileSystem/FileContext `listXAttrs()` and `getXAttrs()` API calls with the prefix `header.`
|
||||||
|
@ -236,4 +236,4 @@ FileSystem/FileContext `listXAttrs()` and `getXAttrs()` API calls with the prefi
|
||||||
When an object is renamed, the metadata is propagated the copy created.
|
When an object is renamed, the metadata is propagated to the copy created.
|
||||||
|
|
||||||
It is possible to probe an S3A Filesystem instance for this capability through
|
It is possible to probe an S3A Filesystem instance for this capability through
|
||||||
the `hasPathCapability(path, "fs.s3a.create.header")` check.
|
the `hasPathCapability(path, "fs.s3a.create.header")` check.
|
||||||
|
|
|
@ -30,8 +30,8 @@ are places where HDFS diverges from the expected behaviour of a POSIX
|
||||||
filesystem.
|
filesystem.
|
||||||
|
|
||||||
The bundled S3A FileSystem clients make Amazon's S3 Object Store ("blobstore")
|
The bundled S3A FileSystem clients make Amazon's S3 Object Store ("blobstore")
|
||||||
accessible through the FileSystem API. The Swift FileSystem driver provides similar
|
accessible through the FileSystem API.
|
||||||
functionality for the OpenStack Swift blobstore. The Azure WASB and ADL object
|
The Azure ABFS, WASB and ADL object
|
||||||
storage FileSystems talks to Microsoft's Azure storage. All of these
|
storage FileSystems talk to Microsoft's Azure storage. All of these
|
||||||
bind to object stores, which do have different behaviors, especially regarding
|
bind to object stores, which do have different behaviors, especially regarding
|
||||||
consistency guarantees, and atomicity of operations.
|
consistency guarantees, and atomicity of operations.
|
||||||
|
@ -314,10 +314,10 @@ child entries
|
||||||
|
|
||||||
This specification refers to *Object Stores* in places, often using the
|
This specification refers to *Object Stores* in places, often using the
|
||||||
term *Blobstore*. Hadoop does provide FileSystem client classes for some of these
|
term *Blobstore*. Hadoop does provide FileSystem client classes for some of these
|
||||||
even though they violate many of the requirements. This is why, although
|
even though they violate many of the requirements.
|
||||||
Hadoop can read and write data in an object store, the two which Hadoop ships
|
|
||||||
with direct support for — Amazon S3 and OpenStack Swift — cannot
|
Consult the documentation for a specific store to determine its compatibility
|
||||||
be used as direct replacements for HDFS.
|
with specific applications and services.
|
||||||
|
|
||||||
*What is an Object Store?*
|
*What is an Object Store?*
|
||||||
|
|
||||||
|
|
|
@ -980,7 +980,7 @@ throw `UnsupportedOperationException`.
|
||||||
### `StreamCapabilities`
|
### `StreamCapabilities`
|
||||||
|
|
||||||
Implementors of filesystem clients SHOULD implement the `StreamCapabilities`
|
Implementors of filesystem clients SHOULD implement the `StreamCapabilities`
|
||||||
interface and its `hasCapabilities()` method to to declare whether or not
|
interface and its `hasCapabilities()` method to declare whether or not
|
||||||
an output streams offer the visibility and durability guarantees of `Syncable`.
|
output streams offer the visibility and durability guarantees of `Syncable`.
|
||||||
|
|
||||||
Implementors of `StreamCapabilities.hasCapabilities()` MUST NOT declare that
|
Implementors of `StreamCapabilities.hasCapabilities()` MUST NOT declare that
|
||||||
|
@ -1013,4 +1013,4 @@ all data to the datanodes.
|
||||||
|
|
||||||
1. `close()` SHALL return once the guarantees of `hflush()` are met: the data is
|
1. `close()` SHALL return once the guarantees of `hflush()` are met: the data is
|
||||||
visible to others.
|
visible to others.
|
||||||
1. For durability guarantees, `hsync()` MUST be called first.
|
1. For durability guarantees, `hsync()` MUST be called first.
|
||||||
|
|
|
@ -66,55 +66,6 @@ Example:
|
||||||
</property>
|
</property>
|
||||||
</configuration>
|
</configuration>
|
||||||
|
|
||||||
|
|
||||||
### swift://
|
|
||||||
|
|
||||||
The OpenStack Swift login details must be defined in the file
|
|
||||||
`/hadoop-tools/hadoop-openstack/src/test/resources/contract-test-options.xml`.
|
|
||||||
The standard hadoop-common `contract-test-options.xml` resource file cannot be
|
|
||||||
used, as that file does not get included in `hadoop-common-test.jar`.
|
|
||||||
|
|
||||||
|
|
||||||
In `/hadoop-tools/hadoop-openstack/src/test/resources/contract-test-options.xml`
|
|
||||||
the Swift bucket name must be defined in the property `fs.contract.test.fs.swift`,
|
|
||||||
along with the login details for the specific Swift service provider in which the
|
|
||||||
bucket is posted.
|
|
||||||
|
|
||||||
<configuration>
|
|
||||||
<property>
|
|
||||||
<name>fs.contract.test.fs.swift</name>
|
|
||||||
<value>swift://swiftbucket.rackspace/</value>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.swift.service.rackspace.auth.url</name>
|
|
||||||
<value>https://auth.api.rackspacecloud.com/v2.0/tokens</value>
|
|
||||||
<description>Rackspace US (multiregion)</description>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.swift.service.rackspace.username</name>
|
|
||||||
<value>this-is-your-username</value>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.swift.service.rackspace.region</name>
|
|
||||||
<value>DFW</value>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
<property>
|
|
||||||
<name>fs.swift.service.rackspace.apikey</name>
|
|
||||||
<value>ab0bceyoursecretapikeyffef</value>
|
|
||||||
</property>
|
|
||||||
|
|
||||||
</configuration>
|
|
||||||
|
|
||||||
1. Often the different public cloud Swift infrastructures exhibit different behaviors
|
|
||||||
(authentication and throttling in particular). We recommand that testers create
|
|
||||||
accounts on as many of these providers as possible and test against each of them.
|
|
||||||
1. They can be slow, especially remotely. Remote links are also the most likely
|
|
||||||
to make eventual-consistency behaviors visible, which is a mixed benefit.
|
|
||||||
|
|
||||||
## Testing a new filesystem
|
## Testing a new filesystem
|
||||||
|
|
||||||
The core of adding a new FileSystem to the contract tests is adding a
|
The core of adding a new FileSystem to the contract tests is adding a
|
||||||
|
@ -228,8 +179,6 @@ Passing all the FileSystem contract tests does not mean that a filesystem can be
|
||||||
* Scalability: does it support files as large as HDFS, or as many in a single directory?
|
* Scalability: does it support files as large as HDFS, or as many in a single directory?
|
||||||
* Durability: do files actually last -and how long for?
|
* Durability: do files actually last -and how long for?
|
||||||
|
|
||||||
Proof that this is is true is the fact that the Amazon S3 and OpenStack Swift object stores are eventually consistent object stores with non-atomic rename and delete operations. Single threaded test cases are unlikely to see some of the concurrency issues, while consistency is very often only visible in tests that span a datacenter.
|
|
||||||
|
|
||||||
There are also some specific aspects of the use of the FileSystem API:
|
There are also some specific aspects of the use of the FileSystem API:
|
||||||
|
|
||||||
* Compatibility with the `hadoop -fs` CLI.
|
* Compatibility with the `hadoop -fs` CLI.
|
||||||
|
|
|
@ -143,7 +143,7 @@ too must have this context defined.
|
||||||
|
|
||||||
### Identifying the system accounts `hadoop.registry.system.acls`
|
### Identifying the system accounts `hadoop.registry.system.acls`
|
||||||
|
|
||||||
These are the the accounts which are given full access to the base of the
|
These are the accounts which are given full access to the base of the
|
||||||
registry. The Resource Manager needs this option to create the root paths.
|
registry. The Resource Manager needs this option to create the root paths.
|
||||||
|
|
||||||
Client applications writing to the registry access to the nodes it creates.
|
Client applications writing to the registry need access to the nodes they create.
|
||||||
|
|
|
@ -29,7 +29,7 @@ a secure registry:
|
||||||
1. Allow the RM to create per-user regions of the registration space
|
1. Allow the RM to create per-user regions of the registration space
|
||||||
1. Allow applications belonging to a user to write registry entries
|
1. Allow applications belonging to a user to write registry entries
|
||||||
into their part of the space. These may be short-lived or long-lived
|
into their part of the space. These may be short-lived or long-lived
|
||||||
YARN applications, or they may be be static applications.
|
YARN applications, or they may be static applications.
|
||||||
1. Prevent other users from writing into another user's part of the registry.
|
1. Prevent other users from writing into another user's part of the registry.
|
||||||
1. Allow system services to register to a `/services` section of the registry.
|
1. Allow system services to register to a `/services` section of the registry.
|
||||||
1. Provide read access to clients of a registry.
|
1. Provide read access to clients of a registry.
|
||||||
|
|
|
@ -24,6 +24,8 @@ import org.apache.hadoop.conf.Configuration;
|
||||||
import org.apache.hadoop.fs.CommonConfigurationKeys;
|
import org.apache.hadoop.fs.CommonConfigurationKeys;
|
||||||
import org.apache.hadoop.util.Shell;
|
import org.apache.hadoop.util.Shell;
|
||||||
import org.apache.hadoop.util.StringUtils;
|
import org.apache.hadoop.util.StringUtils;
|
||||||
|
import org.apache.hadoop.util.XMLUtils;
|
||||||
|
|
||||||
import static org.junit.Assert.assertTrue;
|
import static org.junit.Assert.assertTrue;
|
||||||
import static org.junit.Assert.fail;
|
import static org.junit.Assert.fail;
|
||||||
|
|
||||||
|
@ -34,7 +36,6 @@ import org.xml.sax.SAXException;
|
||||||
import org.xml.sax.helpers.DefaultHandler;
|
import org.xml.sax.helpers.DefaultHandler;
|
||||||
|
|
||||||
import javax.xml.parsers.SAXParser;
|
import javax.xml.parsers.SAXParser;
|
||||||
import javax.xml.parsers.SAXParserFactory;
|
|
||||||
import java.io.File;
|
import java.io.File;
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
|
|
||||||
|
@ -76,7 +77,7 @@ public class CLITestHelper {
|
||||||
boolean success = false;
|
boolean success = false;
|
||||||
testConfigFile = TEST_CACHE_DATA_DIR + File.separator + testConfigFile;
|
testConfigFile = TEST_CACHE_DATA_DIR + File.separator + testConfigFile;
|
||||||
try {
|
try {
|
||||||
SAXParser p = (SAXParserFactory.newInstance()).newSAXParser();
|
SAXParser p = XMLUtils.newSecureSAXParserFactory().newSAXParser();
|
||||||
p.parse(testConfigFile, getConfigParser());
|
p.parse(testConfigFile, getConfigParser());
|
||||||
success = true;
|
success = true;
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
|
|
|
@ -135,7 +135,6 @@ public class TestCommonConfigurationFields extends TestConfigurationFieldsBase {
|
||||||
xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.s3a.impl");
|
xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.s3a.impl");
|
||||||
xmlPropsToSkipCompare.
|
xmlPropsToSkipCompare.
|
||||||
add("fs.viewfs.overload.scheme.target.swebhdfs.impl");
|
add("fs.viewfs.overload.scheme.target.swebhdfs.impl");
|
||||||
xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.swift.impl");
|
|
||||||
xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.webhdfs.impl");
|
xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.webhdfs.impl");
|
||||||
xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.wasb.impl");
|
xmlPropsToSkipCompare.add("fs.viewfs.overload.scheme.target.wasb.impl");
|
||||||
|
|
||||||
|
@ -223,8 +222,7 @@ public class TestCommonConfigurationFields extends TestConfigurationFieldsBase {
|
||||||
xmlPropsToSkipCompare.add("hadoop.common.configuration.version");
|
xmlPropsToSkipCompare.add("hadoop.common.configuration.version");
|
||||||
// - org.apache.hadoop.fs.FileSystem
|
// - org.apache.hadoop.fs.FileSystem
|
||||||
xmlPropsToSkipCompare.add("fs.har.impl.disable.cache");
|
xmlPropsToSkipCompare.add("fs.har.impl.disable.cache");
|
||||||
// - org.apache.hadoop.fs.FileSystem#getFileSystemClass()
|
|
||||||
xmlPropsToSkipCompare.add("fs.swift.impl");
|
|
||||||
// - package org.apache.hadoop.tracing.TraceUtils ?
|
// - package org.apache.hadoop.tracing.TraceUtils ?
|
||||||
xmlPropsToSkipCompare.add("hadoop.htrace.span.receiver.classes");
|
xmlPropsToSkipCompare.add("hadoop.htrace.span.receiver.classes");
|
||||||
// Private keys
|
// Private keys
|
||||||
|
|
|
@ -41,9 +41,12 @@ import org.xml.sax.InputSource;
|
||||||
import org.apache.hadoop.thirdparty.com.google.common.base.Strings;
|
import org.apache.hadoop.thirdparty.com.google.common.base.Strings;
|
||||||
|
|
||||||
import org.apache.hadoop.http.HttpServer2;
|
import org.apache.hadoop.http.HttpServer2;
|
||||||
|
import org.apache.hadoop.util.XMLUtils;
|
||||||
|
|
||||||
import org.junit.BeforeClass;
|
import org.junit.BeforeClass;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
import org.mockito.Mockito;
|
import org.mockito.Mockito;
|
||||||
|
|
||||||
import static org.mockito.Mockito.when;
|
import static org.mockito.Mockito.when;
|
||||||
import static org.mockito.Mockito.mock;
|
import static org.mockito.Mockito.mock;
|
||||||
import static org.junit.Assert.*;
|
import static org.junit.Assert.*;
|
||||||
|
@ -223,8 +226,7 @@ public class TestConfServlet {
|
||||||
ConfServlet.writeResponse(getTestConf(), sw, "xml");
|
ConfServlet.writeResponse(getTestConf(), sw, "xml");
|
||||||
String xml = sw.toString();
|
String xml = sw.toString();
|
||||||
|
|
||||||
DocumentBuilderFactory docBuilderFactory
|
DocumentBuilderFactory docBuilderFactory = XMLUtils.newSecureDocumentBuilderFactory();
|
||||||
= DocumentBuilderFactory.newInstance();
|
|
||||||
DocumentBuilder builder = docBuilderFactory.newDocumentBuilder();
|
DocumentBuilder builder = docBuilderFactory.newDocumentBuilder();
|
||||||
Document doc = builder.parse(new InputSource(new StringReader(xml)));
|
Document doc = builder.parse(new InputSource(new StringReader(xml)));
|
||||||
NodeList nameNodes = doc.getElementsByTagName("name");
|
NodeList nameNodes = doc.getElementsByTagName("name");
|
||||||
|
|
|
@ -194,7 +194,7 @@ public abstract class TestConfigurationFieldsBase {
|
||||||
HashMap<String,String> retVal = new HashMap<>();
|
HashMap<String,String> retVal = new HashMap<>();
|
||||||
|
|
||||||
// Setup regexp for valid properties
|
// Setup regexp for valid properties
|
||||||
String propRegex = "^[A-Za-z][A-Za-z0-9_-]+(\\.[A-Za-z0-9_-]+)+$";
|
String propRegex = "^[A-Za-z][A-Za-z0-9_-]+(\\.[A-Za-z%s0-9_-]+)+$";
|
||||||
Pattern p = Pattern.compile(propRegex);
|
Pattern p = Pattern.compile(propRegex);
|
||||||
|
|
||||||
// Iterate through class member variables
|
// Iterate through class member variables
|
||||||
|
|
|
@ -36,6 +36,7 @@ import java.util.HashMap;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
|
|
||||||
|
import static org.apache.hadoop.test.LambdaTestUtils.intercept;
|
||||||
import static org.junit.Assert.assertEquals;
|
import static org.junit.Assert.assertEquals;
|
||||||
import static org.junit.Assert.assertNull;
|
import static org.junit.Assert.assertNull;
|
||||||
import static org.junit.Assert.assertTrue;
|
import static org.junit.Assert.assertTrue;
|
||||||
|
@ -62,6 +63,8 @@ public class TestKeyProvider {
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
assertTrue(true);
|
assertTrue(true);
|
||||||
}
|
}
|
||||||
|
intercept(NullPointerException.class, () ->
|
||||||
|
KeyProvider.getBaseName(null));
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
|
|
|
@ -1321,16 +1321,16 @@ public class TestFileUtil {
|
||||||
if (wildcardPath.equals(classPath)) {
|
if (wildcardPath.equals(classPath)) {
|
||||||
// add wildcard matches
|
// add wildcard matches
|
||||||
for (File wildcardMatch: wildcardMatches) {
|
for (File wildcardMatch: wildcardMatches) {
|
||||||
expectedClassPaths.add(wildcardMatch.toURI().toURL()
|
expectedClassPaths.add(wildcardMatch.getCanonicalFile().toURI().toURL()
|
||||||
.toExternalForm());
|
.toExternalForm());
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
File fileCp = null;
|
File fileCp = null;
|
||||||
if(!new Path(classPath).isAbsolute()) {
|
if(!new Path(classPath).isAbsolute()) {
|
||||||
fileCp = new File(tmp, classPath);
|
fileCp = new File(tmp, classPath).getCanonicalFile();
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
fileCp = new File(classPath);
|
fileCp = new File(classPath).getCanonicalFile();
|
||||||
}
|
}
|
||||||
if (nonExistentSubdir.equals(classPath)) {
|
if (nonExistentSubdir.equals(classPath)) {
|
||||||
// expect to maintain trailing path separator if present in input, even
|
// expect to maintain trailing path separator if present in input, even
|
||||||
|
@ -1385,7 +1385,8 @@ public class TestFileUtil {
|
||||||
for (Path jar: jars) {
|
for (Path jar: jars) {
|
||||||
URL url = jar.toUri().toURL();
|
URL url = jar.toUri().toURL();
|
||||||
assertTrue("the jar should match either of the jars",
|
assertTrue("the jar should match either of the jars",
|
||||||
url.equals(jar1.toURI().toURL()) || url.equals(jar2.toURI().toURL()));
|
url.equals(jar1.getCanonicalFile().toURI().toURL()) ||
|
||||||
|
url.equals(jar2.getCanonicalFile().toURI().toURL()));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -143,6 +143,11 @@ public class TestFilterFileSystem {
         of the filter such as checksums.
      */
     MultipartUploaderBuilder createMultipartUploader(Path basePath);
+
+    FSDataOutputStream append(Path f, boolean appendToNewBlock) throws IOException;
+
+    FSDataOutputStream append(Path f, int bufferSize,
+        Progressable progress, boolean appendToNewBlock) throws IOException;
   }
 
   @Test
@@ -250,6 +250,11 @@ public class TestHarFileSystem {
 
     MultipartUploaderBuilder createMultipartUploader(Path basePath)
         throws IOException;
+
+    FSDataOutputStream append(Path f, boolean appendToNewBlock) throws IOException;
+
+    FSDataOutputStream append(Path f, int bufferSize,
+        Progressable progress, boolean appendToNewBlock) throws IOException;
   }
 
   @Test
@@ -61,6 +61,9 @@ public class TestVectoredReadUtils extends HadoopTestBase {
         .describedAs("Slicing on the same offset shouldn't " +
             "create a new buffer")
         .isEqualTo(slice);
+    Assertions.assertThat(slice.position())
+        .describedAs("Slicing should return buffers starting from position 0")
+        .isEqualTo(0);
 
     // try slicing a range
     final int offset = 100;
@@ -77,6 +80,9 @@ public class TestVectoredReadUtils extends HadoopTestBase {
         .describedAs("Slicing should use the same underlying " +
             "data")
         .isEqualTo(slice.array());
+    Assertions.assertThat(slice.position())
+        .describedAs("Slicing should return buffers starting from position 0")
+        .isEqualTo(0);
     // test the contents of the slice
     intBuffer = slice.asIntBuffer();
     for(int i=0; i < sliceLength / Integer.BYTES; ++i) {
@@ -96,7 +102,10 @@ public class TestVectoredReadUtils extends HadoopTestBase {
 
   @Test
   public void testMerge() {
-    FileRange base = FileRange.createFileRange(2000, 1000);
+    // a reference to use for tracking
+    Object tracker1 = "one";
+    Object tracker2 = "two";
+    FileRange base = FileRange.createFileRange(2000, 1000, tracker1);
     CombinedFileRange mergeBase = new CombinedFileRange(2000, 3000, base);
 
     // test when the gap between is too big
@@ -104,44 +113,48 @@ public class TestVectoredReadUtils extends HadoopTestBase {
         FileRange.createFileRange(5000, 1000), 2000, 4000));
     assertEquals("Number of ranges in merged range shouldn't increase",
         1, mergeBase.getUnderlying().size());
-    assertEquals("post merge offset", 2000, mergeBase.getOffset());
-    assertEquals("post merge length", 1000, mergeBase.getLength());
+    assertFileRange(mergeBase, 2000, 1000);
 
     // test when the total size gets exceeded
     assertFalse("Large size ranges shouldn't get merged", mergeBase.merge(5000, 6000,
         FileRange.createFileRange(5000, 1000), 2001, 3999));
     assertEquals("Number of ranges in merged range shouldn't increase",
         1, mergeBase.getUnderlying().size());
-    assertEquals("post merge offset", 2000, mergeBase.getOffset());
-    assertEquals("post merge length", 1000, mergeBase.getLength());
+    assertFileRange(mergeBase, 2000, 1000);
 
     // test when the merge works
     assertTrue("ranges should get merged ", mergeBase.merge(5000, 6000,
-        FileRange.createFileRange(5000, 1000), 2001, 4000));
+        FileRange.createFileRange(5000, 1000, tracker2),
+        2001, 4000));
     assertEquals("post merge size", 2, mergeBase.getUnderlying().size());
-    assertEquals("post merge offset", 2000, mergeBase.getOffset());
-    assertEquals("post merge length", 4000, mergeBase.getLength());
+    assertFileRange(mergeBase, 2000, 4000);
+    Assertions.assertThat(mergeBase.getUnderlying().get(0).getReference())
+        .describedAs("reference of range %s", mergeBase.getUnderlying().get(0))
+        .isSameAs(tracker1);
+    Assertions.assertThat(mergeBase.getUnderlying().get(1).getReference())
+        .describedAs("reference of range %s", mergeBase.getUnderlying().get(1))
+        .isSameAs(tracker2);
 
     // reset the mergeBase and test with a 10:1 reduction
     mergeBase = new CombinedFileRange(200, 300, base);
-    assertEquals(200, mergeBase.getOffset());
-    assertEquals(100, mergeBase.getLength());
+    assertFileRange(mergeBase, 200, 100);
     assertTrue("ranges should get merged ", mergeBase.merge(500, 600,
         FileRange.createFileRange(5000, 1000), 201, 400));
     assertEquals("post merge size", 2, mergeBase.getUnderlying().size());
-    assertEquals("post merge offset", 200, mergeBase.getOffset());
-    assertEquals("post merge length", 400, mergeBase.getLength());
+    assertFileRange(mergeBase, 200, 400);
   }
 
   @Test
   public void testSortAndMerge() {
     List<FileRange> input = Arrays.asList(
-        FileRange.createFileRange(3000, 100),
-        FileRange.createFileRange(2100, 100),
-        FileRange.createFileRange(1000, 100)
+        FileRange.createFileRange(3000, 100, "1"),
+        FileRange.createFileRange(2100, 100, null),
+        FileRange.createFileRange(1000, 100, "3")
         );
     assertFalse("Ranges are non disjoint", VectoredReadUtils.isOrderedDisjoint(input, 100, 800));
-    List<CombinedFileRange> outputList = VectoredReadUtils.mergeSortedRanges(
+    final List<CombinedFileRange> outputList = VectoredReadUtils.mergeSortedRanges(
         Arrays.asList(sortRanges(input)), 100, 1001, 2500);
     Assertions.assertThat(outputList)
         .describedAs("merged range size")
@@ -150,51 +163,105 @@ public class TestVectoredReadUtils extends HadoopTestBase {
     Assertions.assertThat(output.getUnderlying())
         .describedAs("merged range underlying size")
         .hasSize(3);
-    assertEquals("range[1000,3100)", output.toString());
+    // range[1000,3100)
+    assertFileRange(output, 1000, 2100);
     assertTrue("merged output ranges are disjoint",
         VectoredReadUtils.isOrderedDisjoint(outputList, 100, 800));
 
     // the minSeek doesn't allow the first two to merge
     assertFalse("Ranges are non disjoint",
         VectoredReadUtils.isOrderedDisjoint(input, 100, 1000));
-    outputList = VectoredReadUtils.mergeSortedRanges(Arrays.asList(sortRanges(input)),
+    final List<CombinedFileRange> list2 = VectoredReadUtils.mergeSortedRanges(
+        Arrays.asList(sortRanges(input)),
         100, 1000, 2100);
-    Assertions.assertThat(outputList)
+    Assertions.assertThat(list2)
         .describedAs("merged range size")
         .hasSize(2);
-    assertEquals("range[1000,1100)", outputList.get(0).toString());
-    assertEquals("range[2100,3100)", outputList.get(1).toString());
+    assertFileRange(list2.get(0), 1000, 100);
+    // range[2100,3100)
+    assertFileRange(list2.get(1), 2100, 1000);
 
     assertTrue("merged output ranges are disjoint",
-        VectoredReadUtils.isOrderedDisjoint(outputList, 100, 1000));
+        VectoredReadUtils.isOrderedDisjoint(list2, 100, 1000));
 
     // the maxSize doesn't allow the third range to merge
     assertFalse("Ranges are non disjoint",
         VectoredReadUtils.isOrderedDisjoint(input, 100, 800));
-    outputList = VectoredReadUtils.mergeSortedRanges(Arrays.asList(sortRanges(input)),
+    final List<CombinedFileRange> list3 = VectoredReadUtils.mergeSortedRanges(
+        Arrays.asList(sortRanges(input)),
         100, 1001, 2099);
-    Assertions.assertThat(outputList)
+    Assertions.assertThat(list3)
         .describedAs("merged range size")
         .hasSize(2);
-    assertEquals("range[1000,2200)", outputList.get(0).toString());
-    assertEquals("range[3000,3100)", outputList.get(1).toString());
+    // range[1000,2200)
+    CombinedFileRange range0 = list3.get(0);
+    assertFileRange(range0, 1000, 1200);
+    assertFileRange(range0.getUnderlying().get(0),
+        1000, 100, "3");
+    assertFileRange(range0.getUnderlying().get(1),
+        2100, 100, null);
+    CombinedFileRange range1 = list3.get(1);
+    // range[3000,3100)
+    assertFileRange(range1, 3000, 100);
+    assertFileRange(range1.getUnderlying().get(0),
+        3000, 100, "1");
 
     assertTrue("merged output ranges are disjoint",
-        VectoredReadUtils.isOrderedDisjoint(outputList, 100, 800));
+        VectoredReadUtils.isOrderedDisjoint(list3, 100, 800));
 
     // test the round up and round down (the maxSize doesn't allow any merges)
     assertFalse("Ranges are non disjoint",
         VectoredReadUtils.isOrderedDisjoint(input, 16, 700));
-    outputList = VectoredReadUtils.mergeSortedRanges(Arrays.asList(sortRanges(input)),
+    final List<CombinedFileRange> list4 = VectoredReadUtils.mergeSortedRanges(
+        Arrays.asList(sortRanges(input)),
         16, 1001, 100);
-    Assertions.assertThat(outputList)
+    Assertions.assertThat(list4)
         .describedAs("merged range size")
         .hasSize(3);
-    assertEquals("range[992,1104)", outputList.get(0).toString());
-    assertEquals("range[2096,2208)", outputList.get(1).toString());
-    assertEquals("range[2992,3104)", outputList.get(2).toString());
+    // range[992,1104)
+    assertFileRange(list4.get(0), 992, 112);
+    // range[2096,2208)
+    assertFileRange(list4.get(1), 2096, 112);
+    // range[2992,3104)
+    assertFileRange(list4.get(2), 2992, 112);
     assertTrue("merged output ranges are disjoint",
-        VectoredReadUtils.isOrderedDisjoint(outputList, 16, 700));
+        VectoredReadUtils.isOrderedDisjoint(list4, 16, 700));
   }
 
+  /**
+   * Assert that a file range satisfies the conditions.
+   * @param range range to validate
+   * @param offset offset of range
+   * @param length range length
+   */
+  private void assertFileRange(FileRange range, long offset, int length) {
+    Assertions.assertThat(range)
+        .describedAs("file range %s", range)
+        .isNotNull();
+    Assertions.assertThat(range.getOffset())
+        .describedAs("offset of %s", range)
+        .isEqualTo(offset);
+    Assertions.assertThat(range.getLength())
+        .describedAs("length of %s", range)
+        .isEqualTo(length);
+  }
+
+  /**
+   * Assert that a file range satisfies the conditions.
+   * @param range range to validate
+   * @param offset offset of range
+   * @param length range length
+   * @param reference reference; may be null.
+   */
+  private void assertFileRange(FileRange range, long offset, int length, Object reference) {
+    assertFileRange(range, offset, length);
+    Assertions.assertThat(range.getReference())
+        .describedAs("reference field of file range %s", range)
+        .isEqualTo(reference);
+  }
+
+
   @Test
   public void testSortAndMergeMoreCases() throws Exception {
     List<FileRange> input = Arrays.asList(
@@ -214,7 +281,9 @@ public class TestVectoredReadUtils extends HadoopTestBase {
     Assertions.assertThat(output.getUnderlying())
         .describedAs("merged range underlying size")
         .hasSize(4);
-    assertEquals("range[1000,3110)", output.toString());
+
+    assertFileRange(output, 1000, 2110);
+
     assertTrue("merged output ranges are disjoint",
         VectoredReadUtils.isOrderedDisjoint(outputList, 1, 800));
 
@@ -227,7 +296,8 @@ public class TestVectoredReadUtils extends HadoopTestBase {
     Assertions.assertThat(output.getUnderlying())
         .describedAs("merged range underlying size")
         .hasSize(4);
-    assertEquals("range[1000,3200)", output.toString());
+    assertFileRange(output, 1000, 2200);
+
     assertTrue("merged output ranges are disjoint",
         VectoredReadUtils.isOrderedDisjoint(outputList, 1, 800));
 
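The TestVectoredReadUtils changes above exercise the reference field carried by FileRange through sorting and merging. A small sketch, assuming only the createFileRange(offset, length, reference) factory and getReference() accessor shown in the diff, of how a caller can tag a range and recover the tag later:

import org.apache.hadoop.fs.FileRange;

public class FileRangeReferenceExample {
  public static void main(String[] args) {
    // The tag is an arbitrary caller-side object; "user-request-1" is
    // purely illustrative.
    Object tag = "user-request-1";
    FileRange range = FileRange.createFileRange(4096, 1024, tag);
    // After ranges are merged for a vectored read, each underlying range
    // still reports the reference it was created with.
    System.out.println(range.getReference() == tag);  // prints true
  }
}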
@@ -26,6 +26,7 @@ import org.junit.Test;
 import org.apache.hadoop.test.AbstractHadoopTestBase;
 
 import static org.apache.hadoop.test.LambdaTestUtils.intercept;
+import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -43,6 +44,7 @@ public class TestFilePosition extends AbstractHadoopTestBase {
     new FilePosition(10, 5);
     new FilePosition(5, 10);
     new FilePosition(10, 5).setData(data, 3, 4);
+    new FilePosition(10, 10).setData(data, 3, 13);
 
     // Verify it throws correctly.
 
@@ -94,11 +96,11 @@ public class TestFilePosition extends AbstractHadoopTestBase {
         "'readOffset' must not be negative", () -> pos.setData(data, 4, -4));
 
     intercept(IllegalArgumentException.class,
-        "'readOffset' (15) must be within the range [4, 13]",
+        "'readOffset' (15) must be within the range [4, 14]",
         () -> pos.setData(data, 4, 15));
 
     intercept(IllegalArgumentException.class,
-        "'readOffset' (3) must be within the range [4, 13]",
+        "'readOffset' (3) must be within the range [4, 14]",
         () -> pos.setData(data, 4, 3));
 
   }
@@ -192,4 +194,31 @@ public class TestFilePosition extends AbstractHadoopTestBase {
     }
     assertTrue(pos.bufferFullyRead());
   }
+
+  @Test
+  public void testBounds() {
+    int bufferSize = 8;
+    long fileSize = bufferSize;
+
+    ByteBuffer buffer = ByteBuffer.allocate(bufferSize);
+    BufferData data = new BufferData(0, buffer);
+    FilePosition pos = new FilePosition(fileSize, bufferSize);
+
+    long eofOffset = fileSize;
+    pos.setData(data, 0, eofOffset);
+
+    assertThat(pos.isWithinCurrentBuffer(eofOffset))
+        .describedAs("EOF offset %d should be within the current buffer", eofOffset)
+        .isTrue();
+    assertThat(pos.absolute())
+        .describedAs("absolute() should return the EOF offset")
+        .isEqualTo(eofOffset);
+
+    assertThat(pos.setAbsolute(eofOffset))
+        .describedAs("setAbsolute() should return true on the EOF offset %d", eofOffset)
+        .isTrue();
+    assertThat(pos.absolute())
+        .describedAs("absolute() should return the EOF offset")
+        .isEqualTo(eofOffset);
+  }
 }
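The widened messages above ("within the range [4, 14]" rather than "[4, 13]") reflect that a read offset may now sit exactly at the end of the buffered region, i.e. at EOF. A worked sketch with assumed values (the exact buffer parameters used by that assertion are not shown in this hunk):

public class ReadOffsetBounds {
  public static void main(String[] args) {
    long bufferStart = 4;   // first file offset cached in the buffer (assumed)
    int bufferSize = 10;    // bytes held by the buffer (assumed)
    long firstValid = bufferStart;              // 4
    long lastValid = bufferStart + bufferSize;  // 14, the EOF position
    // Accepting lastValid as a valid position is what the new bound allows.
    System.out.println("[" + firstValid + ", " + lastValid + "]");
  }
}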
@@ -31,6 +31,8 @@ import java.nio.file.Files;
 
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
@@ -46,22 +48,19 @@ public class TestTextCommand {
   private static final String TEXT_FILENAME =
     new File(TEST_ROOT_DIR, "testtextfile.txt").toURI().getPath();
 
+  private static final String SEPARATOR = System.getProperty("line.separator");
+
   /**
    * Tests whether binary Avro data files are displayed correctly.
    */
   @Test (timeout = 30000)
   public void testDisplayForAvroFiles() throws Exception {
     String expectedOutput =
-      "{\"station\":\"011990-99999\",\"time\":-619524000000,\"temp\":0}" +
-      System.getProperty("line.separator") +
-      "{\"station\":\"011990-99999\",\"time\":-619506000000,\"temp\":22}" +
-      System.getProperty("line.separator") +
-      "{\"station\":\"011990-99999\",\"time\":-619484400000,\"temp\":-11}" +
-      System.getProperty("line.separator") +
-      "{\"station\":\"012650-99999\",\"time\":-655531200000,\"temp\":111}" +
-      System.getProperty("line.separator") +
-      "{\"station\":\"012650-99999\",\"time\":-655509600000,\"temp\":78}" +
-      System.getProperty("line.separator");
+      "{\"station\":\"011990-99999\",\"time\":-619524000000,\"temp\":0}" + SEPARATOR
+          + "{\"station\":\"011990-99999\",\"time\":-619506000000,\"temp\":22}" + SEPARATOR
+          + "{\"station\":\"011990-99999\",\"time\":-619484400000,\"temp\":-11}" + SEPARATOR
+          + "{\"station\":\"012650-99999\",\"time\":-655531200000,\"temp\":111}" + SEPARATOR
+          + "{\"station\":\"012650-99999\",\"time\":-655509600000,\"temp\":78}" + SEPARATOR;
 
     String output = readUsingTextCommand(AVRO_FILENAME,
         generateWeatherAvroBinaryData());
@@ -104,11 +103,16 @@ public class TestTextCommand {
       throws Exception {
     createFile(fileName, fileContents);
 
-    // Prepare and call the Text command's protected getInputStream method
-    // using reflection.
     Configuration conf = new Configuration();
     URI localPath = new URI(fileName);
-    PathData pathData = new PathData(localPath, conf);
+    return readUsingTextCommand(localPath, conf);
+  }
+
+  // Read a file using Display.Text class.
+  private String readUsingTextCommand(URI uri, Configuration conf)
+      throws Exception {
+    // Prepare and call the Text command's protected getInputStream method
+    // using reflection.
+    PathData pathData = new PathData(uri, conf);
     Display.Text text = new Display.Text() {
       @Override
       public InputStream getInputStream(PathData item) throws IOException {
@@ -116,7 +120,7 @@ public class TestTextCommand {
       }
     };
     text.setConf(conf);
-    InputStream stream = (InputStream) text.getInputStream(pathData);
+    InputStream stream = text.getInputStream(pathData);
     return inputStreamToString(stream);
   }
 
@@ -232,5 +236,21 @@ public class TestTextCommand {
 
     return contents;
   }
 
+  @Test
+  public void testDisplayForNonWritableSequenceFile() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set("io.serializations", "org.apache.hadoop.io.serializer.JavaSerialization");
+    Path path = new Path(String.valueOf(TEST_ROOT_DIR), "NonWritableSequenceFile");
+    SequenceFile.Writer writer = SequenceFile.createWriter(conf, SequenceFile.Writer.file(path),
+        SequenceFile.Writer.keyClass(String.class), SequenceFile.Writer.valueClass(String.class));
+    writer.append("Key1", "Value1");
+    writer.append("Key2", "Value2");
+    writer.close();
+
+    String expected = "Key1\tValue1" + SEPARATOR + "Key2\tValue2" + SEPARATOR;
+    URI uri = path.toUri();
+    System.out.println(expected);
+    assertEquals(expected, readUsingTextCommand(uri, conf));
+  }
 }
@@ -26,6 +26,7 @@ import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.test.LambdaTestUtils.intercept;
 import static org.junit.Assert.assertEquals;
 
 public class TestDefaultStringifier {
@@ -98,7 +99,7 @@ public class TestDefaultStringifier {
   }
 
   @Test
-  public void testStoreLoadArray() throws IOException {
+  public void testStoreLoadArray() throws Exception {
     LOG.info("Testing DefaultStringifier#storeArray() and #loadArray()");
     conf.set("io.serializations", "org.apache.hadoop.io.serializer.JavaSerialization");
 
@@ -107,6 +108,8 @@ public class TestDefaultStringifier {
     Integer[] array = new Integer[] {1,2,3,4,5};
 
 
+    intercept(IndexOutOfBoundsException.class, () ->
+        DefaultStringifier.storeArray(conf, new Integer[] {}, keyName));
     DefaultStringifier.storeArray(conf, array, keyName);
 
     Integer[] claimedArray = DefaultStringifier.<Integer>loadArray(conf, keyName, Integer.class);
@@ -1216,11 +1216,6 @@ public class TestIPC {
   @Test(timeout=30000)
   public void testInterrupted() {
     Client client = new Client(LongWritable.class, conf);
-    Client.getClientExecutor().submit(new Runnable() {
-      public void run() {
-        while(true);
-      }
-    });
     Thread.currentThread().interrupt();
     client.stop();
     try {
@@ -55,6 +55,7 @@ import org.apache.hadoop.test.MockitoUtil;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -62,13 +63,16 @@ import org.slf4j.event.Level;
 import javax.net.SocketFactory;
 import java.io.Closeable;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.InterruptedIOException;
+import java.io.OutputStream;
 import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.Method;
 import java.lang.reflect.Proxy;
 import java.net.ConnectException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
+import java.net.Socket;
 import java.net.SocketTimeoutException;
 import java.nio.ByteBuffer;
 import java.security.PrivilegedAction;
@@ -89,6 +93,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.ReentrantLock;
 
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
@@ -993,6 +998,196 @@ public class TestRPC extends TestRpcBase {
     }
   }
 
+  /**
+   * This tests the case where the server isn't receiving new data and
+   * multiple threads queue up to send rpc requests. Only one of the requests
+   * should be written and all of the calling threads should be interrupted.
+   *
+   * We use a mock SocketFactory so that we can control when the input and
+   * output streams are frozen.
+   */
+  @Test(timeout=30000)
+  public void testSlowConnection() throws Exception {
+    SocketFactory mockFactory = Mockito.mock(SocketFactory.class);
+    Socket mockSocket = Mockito.mock(Socket.class);
+    Mockito.when(mockFactory.createSocket()).thenReturn(mockSocket);
+    Mockito.when(mockSocket.getPort()).thenReturn(1234);
+    Mockito.when(mockSocket.getLocalPort()).thenReturn(2345);
+    MockOutputStream mockOutputStream = new MockOutputStream();
+    Mockito.when(mockSocket.getOutputStream()).thenReturn(mockOutputStream);
+    // Use an input stream that always blocks
+    Mockito.when(mockSocket.getInputStream()).thenReturn(new InputStream() {
+      @Override
+      public int read() throws IOException {
+        // wait forever
+        while (true) {
+          try {
+            Thread.sleep(TimeUnit.DAYS.toMillis(1));
+          } catch (InterruptedException ie) {
+            Thread.currentThread().interrupt();
+            throw new InterruptedIOException("test");
+          }
+        }
+      }
+    });
+    Configuration clientConf = new Configuration();
+    // disable ping & timeout to minimize traffic
+    clientConf.setBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, false);
+    clientConf.setInt(CommonConfigurationKeys.IPC_CLIENT_RPC_TIMEOUT_KEY, 0);
+    RPC.setProtocolEngine(clientConf, TestRpcService.class, ProtobufRpcEngine.class);
+    // set async mode so that we don't need to implement the input stream
+    final boolean wasAsync = Client.isAsynchronousMode();
+    TestRpcService client = null;
+    try {
+      Client.setAsynchronousMode(true);
+      client = RPC.getProtocolProxy(
+          TestRpcService.class,
+          0,
+          new InetSocketAddress("localhost", 1234),
+          UserGroupInformation.getCurrentUser(),
+          clientConf,
+          mockFactory).getProxy();
+      // The connection isn't actually made until the first call.
+      client.ping(null, newEmptyRequest());
+      mockOutputStream.waitForFlush(1);
+      final long headerAndFirst = mockOutputStream.getBytesWritten();
+      client.ping(null, newEmptyRequest());
+      mockOutputStream.waitForFlush(2);
+      final long second = mockOutputStream.getBytesWritten() - headerAndFirst;
+      // pause the writer thread
+      mockOutputStream.pause();
+      // create a set of threads to create calls that will back up
+      ExecutorService pool = Executors.newCachedThreadPool();
+      Future[] futures = new Future[numThreads];
+      final AtomicInteger doneThreads = new AtomicInteger(0);
+      for(int thread = 0; thread < numThreads; ++thread) {
+        final TestRpcService finalClient = client;
+        futures[thread] = pool.submit(new Callable<Void>() {
+          @Override
+          public Void call() throws Exception {
+            finalClient.ping(null, newEmptyRequest());
+            doneThreads.incrementAndGet();
+            return null;
+          }
+        });
+      }
+      // wait until the threads have started writing
+      mockOutputStream.waitForWriters();
+      // interrupt all the threads
+      for(int thread=0; thread < numThreads; ++thread) {
+        assertTrue("cancel thread " + thread,
+            futures[thread].cancel(true));
+      }
+      // wait until all the writers are cancelled
+      pool.shutdown();
+      pool.awaitTermination(10, TimeUnit.SECONDS);
+      mockOutputStream.resume();
+      // wait for the in flight rpc request to be flushed
+      mockOutputStream.waitForFlush(3);
+      // All the threads should have been interrupted
+      assertEquals(0, doneThreads.get());
+      // make sure that only one additional rpc request was sent
+      assertEquals(headerAndFirst + second * 2,
+          mockOutputStream.getBytesWritten());
+    } finally {
+      Client.setAsynchronousMode(wasAsync);
+      if (client != null) {
+        RPC.stopProxy(client);
+      }
+    }
+  }
+
+  private static final class MockOutputStream extends OutputStream {
+    private long bytesWritten = 0;
+    private AtomicInteger flushCount = new AtomicInteger(0);
+    private ReentrantLock lock = new ReentrantLock(true);
+
+    @Override
+    public synchronized void write(int b) throws IOException {
+      lock.lock();
+      bytesWritten += 1;
+      lock.unlock();
+    }
+
+    @Override
+    public void flush() {
+      flushCount.incrementAndGet();
+    }
+
+    public synchronized long getBytesWritten() {
+      return bytesWritten;
+    }
+
+    public void pause() {
+      lock.lock();
+    }
+
+    public void resume() {
+      lock.unlock();
+    }
+
+    private static final int DELAY_MS = 250;
+
+    /**
+     * Wait for the Nth flush, which we assume will happen exactly when the
+     * Nth RPC request is sent.
+     * @param flush the total flush count to wait for
+     * @throws InterruptedException
+     */
+    public void waitForFlush(int flush) throws InterruptedException {
+      while (flushCount.get() < flush) {
+        Thread.sleep(DELAY_MS);
+      }
+    }
+
+    public void waitForWriters() throws InterruptedException {
+      while (!lock.hasQueuedThreads()) {
+        Thread.sleep(DELAY_MS);
+      }
+    }
+  }
+
+  /**
+   * This test causes an exception in the RPC connection setup to make
+   * sure that threads aren't leaked.
+   */
+  @Test(timeout=30000)
+  public void testBadSetup() throws Exception {
+    SocketFactory mockFactory = Mockito.mock(SocketFactory.class);
+    Mockito.when(mockFactory.createSocket())
+        .thenThrow(new IOException("can't connect"));
+    Configuration clientConf = new Configuration();
+    // Set an illegal value to cause an exception in the constructor
+    clientConf.set(CommonConfigurationKeys.IPC_MAXIMUM_RESPONSE_LENGTH,
+        "xxx");
+    RPC.setProtocolEngine(clientConf, TestRpcService.class,
+        ProtobufRpcEngine.class);
+    TestRpcService client = null;
+    int threadCount = Thread.getAllStackTraces().size();
+    try {
+      try {
+        client = RPC.getProtocolProxy(
+            TestRpcService.class,
+            0,
+            new InetSocketAddress("localhost", 1234),
+            UserGroupInformation.getCurrentUser(),
+            clientConf,
+            mockFactory).getProxy();
+        client.ping(null, newEmptyRequest());
+        assertTrue("Didn't throw exception!", false);
+      } catch (ServiceException nfe) {
+        // ensure no extra threads are running.
+        assertEquals(threadCount, Thread.getAllStackTraces().size());
+      } catch (Throwable t) {
+        assertTrue("wrong exception: " + t, false);
+      }
+    } finally {
+      if (client != null) {
+        RPC.stopProxy(client);
+      }
+    }
+  }
+
   @Test
   public void testConnectionPing() throws Exception {
     Server server;
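The MockOutputStream added above pauses and resumes the RPC writer thread with a fair ReentrantLock and detects queued writers with hasQueuedThreads(). A generic sketch of that idiom, independent of the Hadoop classes (names here are illustrative):

import java.util.concurrent.locks.ReentrantLock;

public class PausableResource {
  private final ReentrantLock lock = new ReentrantLock(true); // fair ordering

  public void write() {
    lock.lock();
    try {
      // perform the guarded write here
    } finally {
      lock.unlock();
    }
  }

  public void pause() { lock.lock(); }      // block subsequent writers
  public void resume() { lock.unlock(); }   // release them

  public boolean hasBlockedWriters() {
    // true once at least one writer is parked behind the pause
    return lock.hasQueuedThreads();
  }
}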
@@ -142,6 +142,18 @@ public class TestLogThrottlingHelper {
     assertTrue(helper.record("bar", 0).shouldLog());
   }
 
+  @Test
+  public void testInfrequentPrimaryAndDependentLoggers() {
+    helper = new LogThrottlingHelper(LOG_PERIOD, "foo", timer);
+
+    assertTrue(helper.record("foo", 0).shouldLog());
+    assertTrue(helper.record("bar", 0).shouldLog());
+
+    // Both should log once the period has elapsed
+    assertTrue(helper.record("foo", LOG_PERIOD).shouldLog());
+    assertTrue(helper.record("bar", LOG_PERIOD).shouldLog());
+  }
+
   @Test
   public void testMultipleLoggersWithValues() {
     helper = new LogThrottlingHelper(LOG_PERIOD, "foo", timer);
@@ -438,6 +438,8 @@ public class TestMetricsSystemImpl {
     r = recs.get(1);
     assertTrue("NumActiveSinks should be 3", Iterables.contains(r.metrics(),
         new MetricGaugeInt(MsInfo.NumActiveSinks, 3)));
+    assertTrue("NumAllSinks should be 3",
+        Iterables.contains(r.metrics(), new MetricGaugeInt(MsInfo.NumAllSinks, 3)));
   }
 
   @Test
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.metrics2.lib;
 
+import static org.apache.hadoop.metrics2.impl.MsInfo.Context;
 import static org.apache.hadoop.metrics2.lib.Interns.info;
 import static org.apache.hadoop.test.MetricsAsserts.*;
 import static org.mockito.AdditionalMatchers.eq;
@@ -290,6 +291,27 @@ public class TestMutableMetrics {
     }
   }
 
+  /**
+   * MutableStat should output 0 instead of the previous state when there is no change.
+   */
+  @Test public void testMutableWithoutChanged() {
+    MetricsRecordBuilder builderWithChange = mockMetricsRecordBuilder();
+    MetricsRecordBuilder builderWithoutChange = mockMetricsRecordBuilder();
+    MetricsRegistry registry = new MetricsRegistry("test");
+    MutableStat stat = registry.newStat("Test", "Test", "Ops", "Val", true);
+    stat.add(1000, 1000);
+    stat.add(1000, 2000);
+    registry.snapshot(builderWithChange, true);
+
+    assertCounter("TestNumOps", 2000L, builderWithChange);
+    assertGauge("TestINumOps", 2000L, builderWithChange);
+    assertGauge("TestAvgVal", 1.5, builderWithChange);
+
+    registry.snapshot(builderWithoutChange, true);
+    assertGauge("TestINumOps", 0L, builderWithoutChange);
+    assertGauge("TestAvgVal", 0.0, builderWithoutChange);
+  }
+
   @Test
   public void testDuplicateMetrics() {
     MutableRatesWithAggregation rates = new MutableRatesWithAggregation();
@@ -479,4 +501,15 @@ public class TestMutableMetrics {
     verify(mb, times(2)).addGauge(
         info("FooNumOps", "Number of ops for stat with 5s interval"), (long) 0);
   }
+
+  /**
+   * Test {@link MutableGaugeFloat#incr()}.
+   */
+  @Test(timeout = 30000)
+  public void testMutableGaugeFloat() {
+    MutableGaugeFloat mgf = new MutableGaugeFloat(Context, 3.2f);
+    assertEquals(3.2f, mgf.value(), 0.0);
+    mgf.incr();
+    assertEquals(4.2f, mgf.value(), 0.0);
+  }
 }
@@ -233,4 +233,111 @@ public class TestInstrumentedReadWriteLock {
     assertEquals(2, wlogged.get());
     assertEquals(1, wsuppresed.get());
   }
+
+
+  /**
+   * Tests the warning when the write lock is held longer than threshold.
+   */
+  @Test(timeout=10000)
+  public void testWriteLockLongHoldingReportWithReentrant() {
+    String testname = name.getMethodName();
+    final AtomicLong time = new AtomicLong(0);
+    Timer mclock = new Timer() {
+      @Override
+      public long monotonicNow() {
+        return time.get();
+      }
+    };
+
+    final AtomicLong wlogged = new AtomicLong(0);
+    final AtomicLong wsuppresed = new AtomicLong(0);
+    final AtomicLong totalHeldTime = new AtomicLong(0);
+    ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock(true);
+    InstrumentedWriteLock writeLock = new InstrumentedWriteLock(testname, LOG,
+        readWriteLock, 2000, 300, mclock) {
+      @Override
+      protected void logWarning(long lockHeldTime, SuppressedSnapshot stats) {
+        totalHeldTime.addAndGet(lockHeldTime);
+        wlogged.incrementAndGet();
+        wsuppresed.set(stats.getSuppressedCount());
+      }
+    };
+
+    InstrumentedReadLock readLock = new InstrumentedReadLock(testname, LOG,
+        readWriteLock, 2000, 300, mclock) {
+      @Override
+      protected void logWarning(long lockHeldTime, SuppressedSnapshot stats) {
+        totalHeldTime.addAndGet(lockHeldTime);
+        wlogged.incrementAndGet();
+        wsuppresed.set(stats.getSuppressedCount());
+      }
+    };
+
+    writeLock.lock(); // t = 0
+    time.set(100);
+
+    writeLock.lock(); // t = 100
+    time.set(500);
+
+    writeLock.lock(); // t = 500
+    time.set(2900);
+    writeLock.unlock(); // t = 2900
+
+    readLock.lock(); // t = 2900
+    time.set(3000);
+    readLock.unlock(); // t = 3000
+
+    writeLock.unlock(); // t = 3000
+
+    writeLock.unlock(); // t = 3000
+    assertEquals(1, wlogged.get());
+    assertEquals(0, wsuppresed.get());
+    assertEquals(3000, totalHeldTime.get());
+  }
+
+  /**
+   * Tests the warning when the read lock is held longer than threshold.
+   */
+  @Test(timeout=10000)
+  public void testReadLockLongHoldingReportWithReentrant() {
+    String testname = name.getMethodName();
+    final AtomicLong time = new AtomicLong(0);
+    Timer mclock = new Timer() {
+      @Override
+      public long monotonicNow() {
+        return time.get();
+      }
+    };
+
+    final AtomicLong wlogged = new AtomicLong(0);
+    final AtomicLong wsuppresed = new AtomicLong(0);
+    final AtomicLong totalHelpTime = new AtomicLong(0);
+    ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock(true);
+    InstrumentedReadLock readLock = new InstrumentedReadLock(testname, LOG,
+        readWriteLock, 2000, 300, mclock) {
+      @Override
+      protected void logWarning(long lockHeldTime, SuppressedSnapshot stats) {
+        totalHelpTime.addAndGet(lockHeldTime);
+        wlogged.incrementAndGet();
+        wsuppresed.set(stats.getSuppressedCount());
+      }
+    };
+
+    readLock.lock(); // t = 0
+    time.set(100);
+
+    readLock.lock(); // t = 100
+    time.set(500);
+
+    readLock.lock(); // t = 500
+    time.set(3000);
+    readLock.unlock(); // t = 3000
+
+    readLock.unlock(); // t = 3000
+
+    readLock.unlock(); // t = 3000
+    assertEquals(1, wlogged.get());
+    assertEquals(0, wsuppresed.get());
+    assertEquals(3000, totalHelpTime.get());
+  }
 }
@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import java.io.InputStream;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.util.concurrent.atomic.AtomicBoolean;
+import javax.xml.XMLConstants;
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.SAXParser;
+import javax.xml.transform.Transformer;
+import javax.xml.transform.TransformerException;
+import javax.xml.transform.TransformerFactory;
+import javax.xml.transform.dom.DOMSource;
+import javax.xml.transform.stream.StreamResult;
+import javax.xml.transform.stream.StreamSource;
+
+import org.apache.hadoop.test.AbstractHadoopTestBase;
+
+import org.assertj.core.api.Assertions;
+import org.junit.Assert;
+import org.junit.Test;
+import org.w3c.dom.Document;
+import org.xml.sax.InputSource;
+import org.xml.sax.SAXException;
+import org.xml.sax.helpers.DefaultHandler;
+
+public class TestXMLUtils extends AbstractHadoopTestBase {
+
+  @Test
+  public void testSecureDocumentBuilderFactory() throws Exception {
+    DocumentBuilder db = XMLUtils.newSecureDocumentBuilderFactory().newDocumentBuilder();
+    Document doc = db.parse(new InputSource(new StringReader("<root/>")));
+    Assertions.assertThat(doc).describedAs("parsed document").isNotNull();
+  }
+
+  @Test(expected = SAXException.class)
+  public void testExternalDtdWithSecureDocumentBuilderFactory() throws Exception {
+    DocumentBuilder db = XMLUtils.newSecureDocumentBuilderFactory().newDocumentBuilder();
+    try (InputStream stream = getResourceStream("/xml/external-dtd.xml")) {
+      Document doc = db.parse(stream);
+    }
+  }
+
+  @Test(expected = SAXException.class)
+  public void testEntityDtdWithSecureDocumentBuilderFactory() throws Exception {
+    DocumentBuilder db = XMLUtils.newSecureDocumentBuilderFactory().newDocumentBuilder();
+    try (InputStream stream = getResourceStream("/xml/entity-dtd.xml")) {
+      Document doc = db.parse(stream);
+    }
+  }
+
+  @Test
+  public void testSecureSAXParserFactory() throws Exception {
+    SAXParser parser = XMLUtils.newSecureSAXParserFactory().newSAXParser();
+    parser.parse(new InputSource(new StringReader("<root/>")), new DefaultHandler());
+  }
+
+  @Test(expected = SAXException.class)
+  public void testExternalDtdWithSecureSAXParserFactory() throws Exception {
+    SAXParser parser = XMLUtils.newSecureSAXParserFactory().newSAXParser();
+    try (InputStream stream = getResourceStream("/xml/external-dtd.xml")) {
+      parser.parse(stream, new DefaultHandler());
+    }
+  }
+
+  @Test(expected = SAXException.class)
+  public void testEntityDtdWithSecureSAXParserFactory() throws Exception {
+    SAXParser parser = XMLUtils.newSecureSAXParserFactory().newSAXParser();
+    try (InputStream stream = getResourceStream("/xml/entity-dtd.xml")) {
+      parser.parse(stream, new DefaultHandler());
+    }
+  }
+
+  @Test
+  public void testSecureTransformerFactory() throws Exception {
+    Transformer transformer = XMLUtils.newSecureTransformerFactory().newTransformer();
+    DocumentBuilder db = XMLUtils.newSecureDocumentBuilderFactory().newDocumentBuilder();
+    Document doc = db.parse(new InputSource(new StringReader("<root/>")));
+    try (StringWriter stringWriter = new StringWriter()) {
+      transformer.transform(new DOMSource(doc), new StreamResult(stringWriter));
+      Assertions.assertThat(stringWriter.toString()).contains("<root");
+    }
+  }
+
+  @Test(expected = TransformerException.class)
+  public void testExternalDtdWithSecureTransformerFactory() throws Exception {
+    Transformer transformer = XMLUtils.newSecureTransformerFactory().newTransformer();
+    try (
+        InputStream stream = getResourceStream("/xml/external-dtd.xml");
+        StringWriter stringWriter = new StringWriter()
+    ) {
+      transformer.transform(new StreamSource(stream), new StreamResult(stringWriter));
+    }
+  }
+
+  @Test
+  public void testSecureSAXTransformerFactory() throws Exception {
+    Transformer transformer = XMLUtils.newSecureSAXTransformerFactory().newTransformer();
+    DocumentBuilder db = XMLUtils.newSecureDocumentBuilderFactory().newDocumentBuilder();
+    Document doc = db.parse(new InputSource(new StringReader("<root/>")));
+    try (StringWriter stringWriter = new StringWriter()) {
+      transformer.transform(new DOMSource(doc), new StreamResult(stringWriter));
+      Assertions.assertThat(stringWriter.toString()).contains("<root");
+    }
+  }
+
+  @Test(expected = TransformerException.class)
+  public void testExternalDtdWithSecureSAXTransformerFactory() throws Exception {
+    Transformer transformer = XMLUtils.newSecureSAXTransformerFactory().newTransformer();
+    try (
+        InputStream stream = getResourceStream("/xml/external-dtd.xml");
+        StringWriter stringWriter = new StringWriter()
+    ) {
+      transformer.transform(new StreamSource(stream), new StreamResult(stringWriter));
+    }
+  }
+
+  @Test
+  public void testBestEffortSetAttribute() throws Exception {
+    TransformerFactory factory = TransformerFactory.newInstance();
+    AtomicBoolean flag1 = new AtomicBoolean(true);
+    XMLUtils.bestEffortSetAttribute(factory, flag1, "unsupportedAttribute false", "abc");
+    Assert.assertFalse("unexpected attribute results in return of false?", flag1.get());
+    AtomicBoolean flag2 = new AtomicBoolean(true);
+    XMLUtils.bestEffortSetAttribute(factory, flag2, XMLConstants.ACCESS_EXTERNAL_DTD, "");
+    Assert.assertTrue("expected attribute results in return of true?", flag2.get());
+    AtomicBoolean flag3 = new AtomicBoolean(false);
+    XMLUtils.bestEffortSetAttribute(factory, flag3, XMLConstants.ACCESS_EXTERNAL_DTD, "");
+    Assert.assertFalse("expected attribute results in return of false if input flag is false?",
+        flag3.get());
+  }
+
+  private static InputStream getResourceStream(final String filename) {
+    return TestXMLUtils.class.getResourceAsStream(filename);
+  }
+}
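The TestXMLUtils suite added above checks that the secure factories reject external and inline DTDs. A generic JAXP hardening sketch of that kind of configuration, offered only as an assumption about the approach rather than the actual XMLUtils implementation:

import javax.xml.XMLConstants;
import javax.xml.parsers.DocumentBuilderFactory;

public final class SecureDbfSketch {
  public static DocumentBuilderFactory newFactory() throws Exception {
    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
    // Reject DOCTYPE declarations outright (Xerces feature).
    dbf.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true);
    // Forbid fetching external DTDs and schemas.
    dbf.setAttribute(XMLConstants.ACCESS_EXTERNAL_DTD, "");
    dbf.setAttribute(XMLConstants.ACCESS_EXTERNAL_SCHEMA, "");
    dbf.setXIncludeAware(false);
    dbf.setExpandEntityReferences(false);
    return dbf;
  }
}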
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<!DOCTYPE lolz [
+ <!ENTITY lol "lol">
+ <!ELEMENT lolz (#PCDATA)>
+]>
+<lolz>&lol;</lolz>
@@ -0,0 +1,23 @@
+<?xml version = "1.0" encoding = "UTF-8" standalone = "no" ?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+<!DOCTYPE address SYSTEM "address.dtd">
+<address>
+  <name>First Last</name>
+  <company>Acme</company>
+  <phone>(555) 123-4567</phone>
+</address>
@@ -38,8 +38,35 @@ import java.util.HashMap;
 import java.util.Arrays;
 
 public class TestMiniKdc extends KerberosSecurityTestcase {
-  private static final boolean IBM_JAVA = System.getProperty("java.vendor")
-      .contains("IBM");
+  private static final boolean IBM_JAVA = shouldUseIbmPackages();
+  // duplicated to avoid cycles in the build
+  private static boolean shouldUseIbmPackages() {
+    final List<String> ibmTechnologyEditionSecurityModules = Arrays.asList(
+        "com.ibm.security.auth.module.JAASLoginModule",
+        "com.ibm.security.auth.module.Win64LoginModule",
+        "com.ibm.security.auth.module.NTLoginModule",
+        "com.ibm.security.auth.module.AIX64LoginModule",
+        "com.ibm.security.auth.module.LinuxLoginModule",
+        "com.ibm.security.auth.module.Krb5LoginModule"
+    );
+
+    if (System.getProperty("java.vendor").contains("IBM")) {
+      return ibmTechnologyEditionSecurityModules
+          .stream().anyMatch((module) -> isSystemClassAvailable(module));
+    }
+
+    return false;
+  }
+
+  private static boolean isSystemClassAvailable(String className) {
+    try {
+      Class.forName(className);
+      return true;
+    } catch (Exception ignored) {
+      return false;
+    }
+  }
+
   @Test
   public void testMiniKdcStart() {
     MiniKdc kdc = getKdc();
@@ -117,9 +144,9 @@ public class TestMiniKdc extends KerberosSecurityTestcase {
       options.put("debug", "true");
 
       return new AppConfigurationEntry[]{
          new AppConfigurationEntry(getKrb5LoginModuleName(),
              AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
              options)};
     }
   }
 
@@ -26,6 +26,7 @@ import io.netty.buffer.ByteBuf;
 import io.netty.buffer.Unpooled;
 import io.netty.channel.ChannelHandlerContext;
 import io.netty.channel.ChannelInboundHandlerAdapter;
+import io.netty.util.ReferenceCountUtil;
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState;
 import org.apache.hadoop.oncrpc.security.VerifierNone;
@@ -163,8 +164,16 @@ public abstract class RpcProgram extends ChannelInboundHandlerAdapter {
   public void channelRead(ChannelHandlerContext ctx, Object msg)
       throws Exception {
     RpcInfo info = (RpcInfo) msg;
+    try {
+      channelRead(ctx, info);
+    } finally {
+      ReferenceCountUtil.release(info.data());
+    }
+  }
+
+  private void channelRead(ChannelHandlerContext ctx, RpcInfo info)
+      throws Exception {
     RpcCall call = (RpcCall) info.header();
 
     SocketAddress remoteAddress = info.remoteAddress();
     if (LOG.isTraceEnabled()) {
       LOG.trace(program + " procedure #" + call.getProcedure());
@@ -256,4 +265,4 @@ public abstract class RpcProgram extends ChannelInboundHandlerAdapter {
   public int getPortmapUdpTimeoutMillis() {
     return portmapUdpTimeoutMillis;
   }
 }
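The RpcProgram change above wraps message handling so the request buffer is released exactly once, even when processing throws. The same pattern in a minimal Netty handler (class and method names here are illustrative, not Hadoop code):

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.util.ReferenceCountUtil;

public class ReleasingHandler extends ChannelInboundHandlerAdapter {
  @Override
  public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
    try {
      process(msg);                    // hypothetical processing step
    } finally {
      ReferenceCountUtil.release(msg); // decrement the refcount, avoiding a buffer leak
    }
  }

  private void process(Object msg) {
    // handle the decoded message here
  }
}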
Some files were not shown because too many files have changed in this diff.