Compare commits: trunk...feature-HA (10 commits)

Author | SHA1 | Date |
---|---|---|
Steve Loughran | 36bbde2fda | |
Steve Loughran | e23f70a03c | |
ahmarsuhail | a9dbd7d62f | |
ahmarsuhail | 6a3b9f1723 | |
ahmarsuhail | 515cba7d2e | |
Ahmar Suhail | 3c06960a31 | |
monthonk | 9abc77b19e | |
ahmarsuhail | 538ddf8532 | |
PJ Fanning | 5a1f4dd5c1 | |
Steve Loughran | fd24290aa4 | |
@@ -14,8 +14,6 @@
# limitations under the License.

github:
  ghp_path: /
  ghp_branch: gh-pages
  enabled_merge_buttons:
    squash: true
    merge: false

@@ -24,4 +22,4 @@ notifications:
  commits: common-commits@hadoop.apache.org
  issues: common-issues@hadoop.apache.org
  pullrequests: common-issues@hadoop.apache.org
  jira_options: comment link label
  jira_options: link label worklog
@@ -1,59 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: website

# Controls when the action will run.
on:
  push:
    branches: [ trunk ]

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Hadoop trunk
        uses: actions/checkout@v3
        with:
          repository: apache/hadoop
      - name: Set up JDK 8
        uses: actions/setup-java@v3
        with:
          java-version: '8'
          distribution: 'temurin'
      - name: Cache local Maven repository
        uses: actions/cache@v3
        with:
          path: ~/.m2/repository
          key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
          restore-keys: |
            ${{ runner.os }}-maven-
      - name: Build Hadoop maven plugins
        run: cd hadoop-maven-plugins && mvn --batch-mode install
      - name: Build Hadoop
        run: mvn clean install -DskipTests -DskipShade
      - name: Build document
        run: mvn clean site
      - name: Stage document
        run: mvn site:stage -DstagingDirectory=${GITHUB_WORKSPACE}/staging/
      - name: Deploy to GitHub Pages
        uses: peaceiris/actions-gh-pages@v3
        with:
          github_token: ${{ secrets.GITHUB_TOKEN }}
          publish_dir: ./staging/hadoop-project
          user_name: 'github-actions[bot]'
          user_email: 'github-actions[bot]@users.noreply.github.com'
@@ -1,17 +0,0 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

dev-support/docker/Dockerfile_windows_10

BUILDING.txt (113 changed lines)
@@ -492,66 +492,39 @@ Building on CentOS 8

----------------------------------------------------------------------------------

Building on Windows 10
Building on Windows

----------------------------------------------------------------------------------
Requirements:

* Windows 10
* Windows System
* JDK 1.8
* Maven 3.0 or later (maven.apache.org)
* Boost 1.72 (boost.org)
* Protocol Buffers 3.7.1 (https://github.com/protocolbuffers/protobuf/releases)
* CMake 3.19 or newer (cmake.org)
* Visual Studio 2019 (visualstudio.com)
* Windows SDK 8.1 (optional, if building CPU rate control for the container executor. Get this from
  http://msdn.microsoft.com/en-us/windows/bg162891.aspx)
* Zlib (zlib.net, if building native code bindings for zlib)
* Git (preferably, get this from https://git-scm.com/download/win since the package also contains
  Unix command-line tools that are needed during packaging).
* Python (python.org, for generation of docs using 'mvn site')
* Maven 3.0 or later
* Boost 1.72
* Protocol Buffers 3.7.1
* CMake 3.19 or newer
* Visual Studio 2010 Professional or Higher
* Windows SDK 8.1 (if building CPU rate control for the container executor)
* zlib headers (if building native code bindings for zlib)
* Internet connection for first build (to fetch all Maven and Hadoop dependencies)
* Unix command-line tools from GnuWin32: sh, mkdir, rm, cp, tar, gzip. These
  tools must be present on your PATH.
* Python ( for generation of docs using 'mvn site')

Unix command-line tools are also included with the Windows Git package which
can be downloaded from http://git-scm.com/downloads

If using Visual Studio, it must be Professional level or higher.
Do not use Visual Studio Express. It does not support compiling for 64-bit,
which is problematic if running a 64-bit system.

The Windows SDK 8.1 is available to download at:

http://msdn.microsoft.com/en-us/windows/bg162891.aspx

Cygwin is not required.

----------------------------------------------------------------------------------

Building guidelines:

Hadoop repository provides the Dockerfile for building Hadoop on Windows 10, located at
dev-support/docker/Dockerfile_windows_10. It is highly recommended to use this and create the
Docker image for building Hadoop on Windows 10, since you don't have to install anything else
other than Docker and no additional steps are required in terms of aligning the environment with
the necessary paths etc.

However, if you still prefer taking the route of not using Docker, this Dockerfile_windows_10 will
still be immensely useful as a raw guide for all the steps involved in creating the environment
needed to build Hadoop on Windows 10.

Building using the Docker:
We first need to build the Docker image for building Hadoop on Windows 10. Run this command from
the root of the Hadoop repository.
> docker build -t hadoop-windows-10-builder -f .\dev-support\docker\Dockerfile_windows_10 .\dev-support\docker\

Start the container with the image that we just built.
> docker run --rm -it hadoop-windows-10-builder

You can now clone the Hadoop repo inside this container and proceed with the build.

NOTE:
While one may perceive the idea of mounting the locally cloned (on the host filesystem) Hadoop
repository into the container (using the -v option), we have seen the build to fail owing to some
files not being able to be located by Maven. Thus, we suggest cloning the Hadoop repository to a
non-mounted folder inside the container and proceed with the build. When the build is completed,
you may use the "docker cp" command to copy the built Hadoop tar.gz file from the docker container
to the host filesystem. If you still would like to mount the Hadoop codebase, a workaround would
be to copy the mounted Hadoop codebase into another folder (which doesn't point to a mount) in the
container's filesystem and use this for building.

However, we noticed no build issues when the Maven repository from the host filesystem was mounted
into the container. One may use this to greatly reduce the build time. Assuming that the Maven
repository is located at D:\Maven\Repository in the host filesystem, one can use the following
command to mount the same onto the default Maven repository location while launching the container.
> docker run --rm -v D:\Maven\Repository:C:\Users\ContainerAdministrator\.m2\repository -it hadoop-windows-10-builder

Building:

Keep the source code tree in a short path to avoid running into problems related
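The "docker cp" step mentioned in the NOTE above can look like the following minimal sketch. The
container name (hadoop-build), the in-container clone path (C:\hadoop) and the <version>
placeholder are illustrative assumptions, not values taken from this repository:

> docker run --name hadoop-build -it hadoop-windows-10-builder
  (clone the repository to C:\hadoop inside the container and run the build there, then from a
   host prompt:)
> docker cp hadoop-build:C:\hadoop\hadoop-dist\target\hadoop-<version>.tar.gz D:\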
@@ -567,24 +540,6 @@ configure the bit-ness of the build, and set several optional components.
Several tests require that the user must have the Create Symbolic Links
privilege.

To simplify the installation of Boost, Protocol buffers, OpenSSL and Zlib dependencies we can use
vcpkg (https://github.com/Microsoft/vcpkg.git). Upon cloning the vcpkg repo, checkout the commit
7ffa425e1db8b0c3edf9c50f2f3a0f25a324541d to get the required versions of the dependencies
mentioned above.
> git clone https://github.com/Microsoft/vcpkg.git
> cd vcpkg
> git checkout 7ffa425e1db8b0c3edf9c50f2f3a0f25a324541d
> .\bootstrap-vcpkg.bat
> .\vcpkg.exe install boost:x64-windows
> .\vcpkg.exe install protobuf:x64-windows
> .\vcpkg.exe install openssl:x64-windows
> .\vcpkg.exe install zlib:x64-windows

Set the following environment variables -
(Assuming that vcpkg was checked out at C:\vcpkg)
> set PROTOBUF_HOME=C:\vcpkg\installed\x64-windows
> set MAVEN_OPTS=-Xmx2048M -Xss128M

All Maven goals are the same as described above with the exception that
native code is built by enabling the 'native-win' Maven profile. -Pnative-win
is enabled by default when building on Windows since the native components
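Before starting the build it can be worth confirming that all four dependencies were installed for
the x64-windows triplet. This optional check is not part of the instructions above and assumes
vcpkg was checked out at C:\vcpkg:

> C:\vcpkg\vcpkg.exe list
  (expect boost, protobuf, openssl and zlib entries ending in ":x64-windows")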
@@ -602,24 +557,6 @@ the zlib 1.2.7 source tree.

http://www.zlib.net/


Build command:
The following command builds all the modules in the Hadoop project and generates the tar.gz file in
hadoop-dist/target upon successful build. Run these commands from an
"x64 Native Tools Command Prompt for VS 2019" which can be found under "Visual Studio 2019" in the
Windows start menu. If you're using the Docker image from Dockerfile_windows_10, you'll be
logged into "x64 Native Tools Command Prompt for VS 2019" automatically when you start the
container.

> set classpath=
> set PROTOBUF_HOME=C:\vcpkg\installed\x64-windows
> mvn clean package -Dhttps.protocols=TLSv1.2 -DskipTests -DskipDocs -Pnative-win,dist^
  -Drequire.openssl -Drequire.test.libhadoop -Pyarn-ui -Dshell-executable=C:\Git\bin\bash.exe^
  -Dtar -Dopenssl.prefix=C:\vcpkg\installed\x64-windows^
  -Dcmake.prefix.path=C:\vcpkg\installed\x64-windows^
  -Dwindows.cmake.toolchain.file=C:\vcpkg\scripts\buildsystems\vcpkg.cmake -Dwindows.cmake.build.type=RelWithDebInfo^
  -Dwindows.build.hdfspp.dll=off -Dwindows.no.sasl=on -Duse.platformToolsetVersion=v142

----------------------------------------------------------------------------------
Building distributions:

LICENSE-binary (112 changed lines)

@@ -210,22 +210,22 @@ hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/nvd3-1.8.5.* (css and js
|
|||
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java
|
||||
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java
|
||||
|
||||
com.aliyun:aliyun-java-sdk-core:4.5.10
|
||||
com.aliyun:aliyun-java-sdk-kms:2.11.0
|
||||
com.aliyun:aliyun-java-sdk-ram:3.1.0
|
||||
com.aliyun:aliyun-java-sdk-core:3.4.0
|
||||
com.aliyun:aliyun-java-sdk-ecs:4.2.0
|
||||
com.aliyun:aliyun-java-sdk-ram:3.0.0
|
||||
com.aliyun:aliyun-java-sdk-sts:3.0.0
|
||||
com.aliyun.oss:aliyun-sdk-oss:3.13.2
|
||||
com.amazonaws:aws-java-sdk-bundle:1.12.316
|
||||
com.amazonaws:aws-java-sdk-bundle:1.12.262
|
||||
com.cedarsoftware:java-util:1.9.0
|
||||
com.cedarsoftware:json-io:2.5.1
|
||||
com.fasterxml.jackson.core:jackson-annotations:2.12.7
|
||||
com.fasterxml.jackson.core:jackson-core:2.12.7
|
||||
com.fasterxml.jackson.core:jackson-databind:2.12.7.1
|
||||
com.fasterxml.jackson.core:jackson-databind:2.12.7
|
||||
com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:2.12.7
|
||||
com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:2.12.7
|
||||
com.fasterxml.jackson.module:jackson-module-jaxb-annotations:2.12.7
|
||||
com.fasterxml.uuid:java-uuid-generator:3.1.4
|
||||
com.fasterxml.woodstox:woodstox-core:5.4.0
|
||||
com.fasterxml.woodstox:woodstox-core:5.3.0
|
||||
com.github.davidmoten:rxjava-extras:0.8.0.17
|
||||
com.github.stephenc.jcip:jcip-annotations:1.0-1
|
||||
com.google:guice:4.0
|
||||
|
@ -240,17 +240,18 @@ com.google.guava:guava:20.0
|
|||
com.google.guava:guava:27.0-jre
|
||||
com.google.guava:listenablefuture:9999.0-empty-to-avoid-conflict-with-guava
|
||||
com.microsoft.azure:azure-storage:7.0.0
|
||||
com.nimbusds:nimbus-jose-jwt:9.31
|
||||
com.squareup.okhttp3:okhttp:4.10.0
|
||||
com.squareup.okio:okio:3.2.0
|
||||
com.nimbusds:nimbus-jose-jwt:9.8.1
|
||||
com.squareup.okhttp3:okhttp:4.9.3
|
||||
com.squareup.okio:okio:1.6.0
|
||||
com.zaxxer:HikariCP:4.0.3
|
||||
commons-beanutils:commons-beanutils:1.9.4
|
||||
commons-cli:commons-cli:1.5.0
|
||||
commons-beanutils:commons-beanutils:1.9.3
|
||||
commons-cli:commons-cli:1.2
|
||||
commons-codec:commons-codec:1.11
|
||||
commons-collections:commons-collections:3.2.2
|
||||
commons-daemon:commons-daemon:1.0.13
|
||||
commons-io:commons-io:2.8.0
|
||||
commons-net:commons-net:3.9.0
|
||||
commons-logging:commons-logging:1.1.3
|
||||
commons-net:commons-net:3.6
|
||||
de.ruedigermoeller:fst:2.50
|
||||
io.grpc:grpc-api:1.26.0
|
||||
io.grpc:grpc-context:1.26.0
|
||||
|
@ -259,6 +260,7 @@ io.grpc:grpc-netty:1.26.0
|
|||
io.grpc:grpc-protobuf:1.26.0
|
||||
io.grpc:grpc-protobuf-lite:1.26.0
|
||||
io.grpc:grpc-stub:1.26.0
|
||||
io.netty:netty:3.10.6.Final
|
||||
io.netty:netty-all:4.1.77.Final
|
||||
io.netty:netty-buffer:4.1.77.Final
|
||||
io.netty:netty-codec:4.1.77.Final
|
||||
|
@ -299,15 +301,16 @@ javax.inject:javax.inject:1
|
|||
log4j:log4j:1.2.17
|
||||
net.java.dev.jna:jna:5.2.0
|
||||
net.minidev:accessors-smart:1.2
|
||||
net.minidev:json-smart:2.4.7
|
||||
org.apache.avro:avro:1.9.2
|
||||
org.apache.commons:commons-collections4:4.2
|
||||
org.apache.commons:commons-compress:1.21
|
||||
org.apache.commons:commons-configuration2:2.8.0
|
||||
org.apache.commons:commons-csv:1.9.0
|
||||
org.apache.commons:commons-configuration2:2.1.1
|
||||
org.apache.commons:commons-csv:1.0
|
||||
org.apache.commons:commons-digester:1.8.1
|
||||
org.apache.commons:commons-lang3:3.12.0
|
||||
org.apache.commons:commons-math3:3.6.1
|
||||
org.apache.commons:commons-text:1.10.0
|
||||
org.apache.commons:commons-math3:3.1.1
|
||||
org.apache.commons:commons-text:1.4
|
||||
org.apache.commons:commons-validator:1.6
|
||||
org.apache.curator:curator-client:5.2.0
|
||||
org.apache.curator:curator-framework:5.2.0
|
||||
|
@ -321,49 +324,46 @@ org.apache.htrace:htrace-core:3.1.0-incubating
|
|||
org.apache.htrace:htrace-core4:4.1.0-incubating
|
||||
org.apache.httpcomponents:httpclient:4.5.6
|
||||
org.apache.httpcomponents:httpcore:4.4.10
|
||||
org.apache.kafka:kafka-clients:2.8.2
|
||||
org.apache.kerby:kerb-admin:2.0.3
|
||||
org.apache.kerby:kerb-client:2.0.3
|
||||
org.apache.kerby:kerb-common:2.0.3
|
||||
org.apache.kerby:kerb-core:2.0.3
|
||||
org.apache.kerby:kerb-crypto:2.0.3
|
||||
org.apache.kerby:kerb-identity:2.0.3
|
||||
org.apache.kerby:kerb-server:2.0.3
|
||||
org.apache.kerby:kerb-simplekdc:2.0.3
|
||||
org.apache.kerby:kerb-util:2.0.3
|
||||
org.apache.kerby:kerby-asn1:2.0.3
|
||||
org.apache.kerby:kerby-config:2.0.3
|
||||
org.apache.kerby:kerby-pkix:2.0.3
|
||||
org.apache.kerby:kerby-util:2.0.3
|
||||
org.apache.kerby:kerby-xdr:2.0.3
|
||||
org.apache.kerby:token-provider:2.0.3
|
||||
org.apache.kafka:kafka-clients:2.8.1
|
||||
org.apache.kerby:kerb-admin:1.0.1
|
||||
org.apache.kerby:kerb-client:1.0.1
|
||||
org.apache.kerby:kerb-common:1.0.1
|
||||
org.apache.kerby:kerb-core:1.0.1
|
||||
org.apache.kerby:kerb-crypto:1.0.1
|
||||
org.apache.kerby:kerb-identity:1.0.1
|
||||
org.apache.kerby:kerb-server:1.0.1
|
||||
org.apache.kerby:kerb-simplekdc:1.0.1
|
||||
org.apache.kerby:kerb-util:1.0.1
|
||||
org.apache.kerby:kerby-asn1:1.0.1
|
||||
org.apache.kerby:kerby-config:1.0.1
|
||||
org.apache.kerby:kerby-pkix:1.0.1
|
||||
org.apache.kerby:kerby-util:1.0.1
|
||||
org.apache.kerby:kerby-xdr:1.0.1
|
||||
org.apache.kerby:token-provider:1.0.1
|
||||
org.apache.solr:solr-solrj:8.8.2
|
||||
org.apache.yetus:audience-annotations:0.5.0
|
||||
org.apache.zookeeper:zookeeper:3.6.3
|
||||
org.codehaus.jettison:jettison:1.5.4
|
||||
org.eclipse.jetty:jetty-annotations:9.4.51.v20230217
|
||||
org.eclipse.jetty:jetty-http:9.4.51.v20230217
|
||||
org.eclipse.jetty:jetty-io:9.4.51.v20230217
|
||||
org.eclipse.jetty:jetty-jndi:9.4.51.v20230217
|
||||
org.eclipse.jetty:jetty-plus:9.4.51.v20230217
|
||||
org.eclipse.jetty:jetty-security:9.4.51.v20230217
|
||||
org.eclipse.jetty:jetty-server:9.4.51.v20230217
|
||||
org.eclipse.jetty:jetty-servlet:9.4.51.v20230217
|
||||
org.eclipse.jetty:jetty-util:9.4.51.v20230217
|
||||
org.eclipse.jetty:jetty-util-ajax:9.4.51.v20230217
|
||||
org.eclipse.jetty:jetty-webapp:9.4.51.v20230217
|
||||
org.eclipse.jetty:jetty-xml:9.4.51.v20230217
|
||||
org.eclipse.jetty.websocket:javax-websocket-client-impl:9.4.51.v20230217
|
||||
org.eclipse.jetty.websocket:javax-websocket-server-impl:9.4.51.v20230217
|
||||
org.codehaus.jettison:jettison:1.1
|
||||
org.eclipse.jetty:jetty-annotations:9.4.48.v20220622
|
||||
org.eclipse.jetty:jetty-http:9.4.48.v20220622
|
||||
org.eclipse.jetty:jetty-io:9.4.48.v20220622
|
||||
org.eclipse.jetty:jetty-jndi:9.4.48.v20220622
|
||||
org.eclipse.jetty:jetty-plus:9.4.48.v20220622
|
||||
org.eclipse.jetty:jetty-security:9.4.48.v20220622
|
||||
org.eclipse.jetty:jetty-server:9.4.48.v20220622
|
||||
org.eclipse.jetty:jetty-servlet:9.4.48.v20220622
|
||||
org.eclipse.jetty:jetty-util:9.4.48.v20220622
|
||||
org.eclipse.jetty:jetty-util-ajax:9.4.48.v20220622
|
||||
org.eclipse.jetty:jetty-webapp:9.4.48.v20220622
|
||||
org.eclipse.jetty:jetty-xml:9.4.48.v20220622
|
||||
org.eclipse.jetty.websocket:javax-websocket-client-impl:9.4.48.v20220622
|
||||
org.eclipse.jetty.websocket:javax-websocket-server-impl:9.4.48.v20220622
|
||||
org.ehcache:ehcache:3.3.1
|
||||
org.ini4j:ini4j:0.5.4
|
||||
org.jetbrains.kotlin:kotlin-stdlib:1.4.10
|
||||
org.jetbrains.kotlin:kotlin-stdlib-common:1.4.10
|
||||
org.lz4:lz4-java:1.7.1
|
||||
org.objenesis:objenesis:2.6
|
||||
org.xerial.snappy:snappy-java:1.0.5
|
||||
org.yaml:snakeyaml:2.0
|
||||
org.wildfly.openssl:wildfly-openssl:1.1.3.Final
|
||||
org.yaml:snakeyaml:1.16:
|
||||
org.wildfly.openssl:wildfly-openssl:1.0.7.Final
|
||||
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
|
@ -427,7 +427,7 @@ hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/bootstrap.min.js
|
|||
hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js
|
||||
hadoop-tools/hadoop-sls/src/main/html/css/bootstrap.min.css
|
||||
hadoop-tools/hadoop-sls/src/main/html/css/bootstrap-responsive.min.css
|
||||
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.11.5/*
|
||||
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/*
|
||||
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery
|
||||
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jt/jquery.jstree.js
|
||||
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/TERMINAL
|
||||
|
@ -435,7 +435,7 @@ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanage
|
|||
bootstrap v3.3.6
|
||||
broccoli-asset-rev v2.4.2
|
||||
broccoli-funnel v1.0.1
|
||||
datatables v1.11.5
|
||||
datatables v1.10.19
|
||||
em-helpers v0.5.13
|
||||
em-table v0.1.6
|
||||
ember v2.2.0
|
||||
|
@ -518,14 +518,12 @@ Eclipse Public License 1.0
|
|||
--------------------------
|
||||
|
||||
junit:junit:4.13.2
|
||||
org.jacoco:org.jacoco.agent:0.8.5
|
||||
|
||||
|
||||
|
||||
HSQL License
|
||||
------------
|
||||
|
||||
org.hsqldb:hsqldb:2.7.1
|
||||
org.hsqldb:hsqldb:2.3.4
|
||||
|
||||
|
||||
JDOM License
|
||||
|
|
|
@ -252,7 +252,7 @@ hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/bootstrap.min.js
|
|||
hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js
|
||||
hadoop-tools/hadoop-sls/src/main/html/css/bootstrap.min.css
|
||||
hadoop-tools/hadoop-sls/src/main/html/css/bootstrap-responsive.min.css
|
||||
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.11.5/*
|
||||
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.10.18/*
|
||||
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery
|
||||
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jt/jquery.jstree.js
|
||||
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/TERMINAL
|
||||
|
|
|
@@ -20,20 +20,6 @@
# Override these to match Apache Hadoop's requirements
personality_plugins "all,-ant,-gradle,-scalac,-scaladoc"

# These flags are needed to run Yetus against Hadoop on Windows.
WINDOWS_FLAGS="-Pnative-win
  -Dhttps.protocols=TLSv1.2
  -Drequire.openssl
  -Drequire.test.libhadoop
  -Dshell-executable=${BASH_EXECUTABLE}
  -Dopenssl.prefix=${VCPKG_INSTALLED_PACKAGES}
  -Dcmake.prefix.path=${VCPKG_INSTALLED_PACKAGES}
  -Dwindows.cmake.toolchain.file=${CMAKE_TOOLCHAIN_FILE}
  -Dwindows.cmake.build.type=RelWithDebInfo
  -Dwindows.build.hdfspp.dll=off
  -Dwindows.no.sasl=on
  -Duse.platformToolsetVersion=v142"

## @description Globals specific to this personality
## @audience private
## @stability evolving
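The hunks that follow show how WINDOWS_FLAGS is consumed: when the run is detected as a Windows
one, the flags are spliced into the extra Maven arguments. A minimal sketch of that pattern, using
the variable names from this script (the surrounding context is illustrative only):

  # append the Windows-specific Maven flags only on Windows CI runs
  if [[ "$IS_WINDOWS" && "$IS_WINDOWS" == 1 ]]; then
    extra="-Ptest-patch -Pdist -Dtar ${WINDOWS_FLAGS} ${extra}"
  fi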
|
@ -101,30 +87,17 @@ function hadoop_order
|
|||
echo "${hadoopm}"
|
||||
}
|
||||
|
||||
## @description Retrieves the Hadoop project version defined in the root pom.xml
|
||||
## @audience private
|
||||
## @stability evolving
|
||||
## @returns 0 on success, 1 on failure
|
||||
function load_hadoop_version
|
||||
{
|
||||
if [[ -f "${BASEDIR}/pom.xml" ]]; then
|
||||
HADOOP_VERSION=$(grep '<version>' "${BASEDIR}/pom.xml" \
|
||||
| head -1 \
|
||||
| "${SED}" -e 's|^ *<version>||' -e 's|</version>.*$||' \
|
||||
| cut -f1 -d- )
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
## @description Determine if it is safe to run parallel tests
|
||||
## @audience private
|
||||
## @stability evolving
|
||||
## @param ordering
|
||||
function hadoop_test_parallel
|
||||
{
|
||||
if load_hadoop_version; then
|
||||
if [[ -f "${BASEDIR}/pom.xml" ]]; then
|
||||
HADOOP_VERSION=$(grep '<version>' "${BASEDIR}/pom.xml" \
|
||||
| head -1 \
|
||||
| "${SED}" -e 's|^ *<version>||' -e 's|</version>.*$||' \
|
||||
| cut -f1 -d- )
|
||||
export HADOOP_VERSION
|
||||
else
|
||||
return 1
|
||||
|
@ -289,10 +262,7 @@ function hadoop_native_flags
|
|||
Windows_NT|CYGWIN*|MINGW*|MSYS*)
|
||||
echo \
|
||||
"${args[@]}" \
|
||||
-Drequire.snappy \
|
||||
-Pdist \
|
||||
-Dtar \
|
||||
"${WINDOWS_FLAGS}"
|
||||
-Drequire.snappy -Drequire.openssl -Pnative-win
|
||||
;;
|
||||
*)
|
||||
echo \
|
||||
|
@ -435,10 +405,7 @@ function personality_modules
|
|||
extra="${extra} ${flags}"
|
||||
fi
|
||||
|
||||
if [[ "$IS_WINDOWS" && "$IS_WINDOWS" == 1 ]]; then
|
||||
extra="-Ptest-patch -Pdist -Dtar ${WINDOWS_FLAGS} ${extra}"
|
||||
fi
|
||||
|
||||
extra="-Ptest-patch ${extra}"
|
||||
for module in $(hadoop_order ${ordering}); do
|
||||
# shellcheck disable=SC2086
|
||||
personality_enqueue_module ${module} ${extra}
|
||||
|
@ -581,28 +548,17 @@ function shadedclient_rebuild
|
|||
|
||||
big_console_header "Checking client artifacts on ${repostatus} with shaded clients"
|
||||
|
||||
extra="-Dtest=NoUnitTests -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true -Dspotbugs.skip=true"
|
||||
|
||||
if [[ "$IS_WINDOWS" && "$IS_WINDOWS" == 1 ]]; then
|
||||
if load_hadoop_version; then
|
||||
export HADOOP_HOME="${SOURCEDIR}/hadoop-dist/target/hadoop-${HADOOP_VERSION}-SNAPSHOT"
|
||||
else
|
||||
yetus_error "[WARNING] Unable to extract the Hadoop version and thus HADOOP_HOME is not set. Some tests may fail."
|
||||
fi
|
||||
|
||||
extra="${WINDOWS_FLAGS} ${extra}"
|
||||
fi
|
||||
|
||||
echo_and_redirect "${logfile}" \
|
||||
"${MAVEN}" "${MAVEN_ARGS[@]}" verify -fae --batch-mode -am "${modules[@]}" "${extra}"
|
||||
"${MAVEN}" "${MAVEN_ARGS[@]}" verify -fae --batch-mode -am \
|
||||
"${modules[@]}" \
|
||||
-Dtest=NoUnitTests -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true -Dspotbugs.skip=true
|
||||
|
||||
big_console_header "Checking client artifacts on ${repostatus} with non-shaded clients"
|
||||
|
||||
echo_and_redirect "${logfile}" \
|
||||
"${MAVEN}" "${MAVEN_ARGS[@]}" verify -fae --batch-mode -am \
|
||||
"${modules[@]}" \
|
||||
-DskipShade -Dtest=NoUnitTests -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true \
|
||||
-Dspotbugs.skip=true "${extra}"
|
||||
-DskipShade -Dtest=NoUnitTests -Dmaven.javadoc.skip=true -Dcheckstyle.skip=true -Dspotbugs.skip=true
|
||||
|
||||
count=$("${GREP}" -c '\[ERROR\]' "${logfile}")
|
||||
if [[ ${count} -gt 0 ]]; then
|
||||
|
|
|
@@ -171,17 +171,7 @@ if [[ -n "${GPGBIN}" && ! "${HADOOP_SKIP_YETUS_VERIFICATION}" = true ]]; then
  fi
fi

if [[ "$IS_WINDOWS" && "$IS_WINDOWS" == 1 ]]; then
  gunzip -c "${TARBALL}.gz" | tar xpf -

  # One of the entries in the Yetus tarball unzips a symlink qbt.sh.
  # The symlink creation fails on Windows, unless this CI is run as Admin or Developer mode is
  # enabled.
  # Thus, we create the qbt.sh symlink ourselves and move it to the target.
  YETUS_PRECOMMIT_DIR="${YETUS_PREFIX}-${HADOOP_YETUS_VERSION}/lib/precommit"
  ln -s "${YETUS_PRECOMMIT_DIR}/test-patch.sh" qbt.sh
  mv qbt.sh "${YETUS_PRECOMMIT_DIR}"
elif ! (gunzip -c "${TARBALL}.gz" | tar xpf -); then
if ! (gunzip -c "${TARBALL}.gz" | tar xpf -); then
  yetus_error "ERROR: ${TARBALL}.gz is corrupt. Investigate and then remove ${HADOOP_PATCHPROCESS} to try again."
  exit 1
fi
|
|
|
@@ -74,7 +74,7 @@ ENV PATH "${PATH}:/opt/protobuf/bin"
###
# Avoid out of memory errors in builds
###
ENV MAVEN_OPTS -Xms256m -Xmx3072m
ENV MAVEN_OPTS -Xms256m -Xmx1536m

# Skip gpg verification when downloading Yetus via yetus-wrapper
ENV HADOOP_SKIP_YETUS_VERIFICATION true
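The hunk above raises the image-wide Maven heap from -Xmx1536m to -Xmx3072m. If an individual run
needs a different value, the environment variable can also be overridden when the container is
started, without editing the Dockerfile; the image name below is a placeholder:

  docker run --rm -e "MAVEN_OPTS=-Xms256m -Xmx3072m" -it <hadoop-ci-image>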
|
|
@ -1,124 +0,0 @@
|
|||
# Licensed to the Apache Software Foundation (ASF) under one
|
||||
# or more contributor license agreements. See the NOTICE file
|
||||
# distributed with this work for additional information
|
||||
# regarding copyright ownership. The ASF licenses this file
|
||||
# to you under the Apache License, Version 2.0 (the
|
||||
# "License"); you may not use this file except in compliance
|
||||
# with the License. You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Dockerfile for installing the necessary dependencies for building Hadoop.
|
||||
# See BUILDING.txt.
|
||||
|
||||
FROM mcr.microsoft.com/windows:ltsc2019
|
||||
|
||||
# Need to disable the progress bar for speeding up the downloads.
|
||||
# hadolint ignore=SC2086
|
||||
RUN powershell $Global:ProgressPreference = 'SilentlyContinue'
|
||||
|
||||
# Restore the default Windows shell for correct batch processing.
|
||||
SHELL ["cmd", "/S", "/C"]
|
||||
|
||||
# Install Visual Studio 2019 Build Tools.
|
||||
RUN curl -SL --output vs_buildtools.exe https://aka.ms/vs/16/release/vs_buildtools.exe \
|
||||
&& (start /w vs_buildtools.exe --quiet --wait --norestart --nocache \
|
||||
--installPath "%ProgramFiles(x86)%\Microsoft Visual Studio\2019\BuildTools" \
|
||||
--add Microsoft.VisualStudio.Workload.VCTools \
|
||||
--add Microsoft.VisualStudio.Component.VC.ASAN \
|
||||
--add Microsoft.VisualStudio.Component.VC.Tools.x86.x64 \
|
||||
--add Microsoft.VisualStudio.Component.Windows10SDK.19041 \
|
||||
|| IF "%ERRORLEVEL%"=="3010" EXIT 0) \
|
||||
&& del /q vs_buildtools.exe
|
||||
|
||||
# Install Chocolatey.
|
||||
RUN powershell -NoProfile -ExecutionPolicy Bypass -Command "iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))"
|
||||
RUN setx PATH "%PATH%;%ALLUSERSPROFILE%\chocolatey\bin"
|
||||
|
||||
# Install git.
|
||||
RUN choco install git.install -y
|
||||
RUN powershell Copy-Item -Recurse -Path 'C:\Program Files\Git' -Destination C:\Git
|
||||
|
||||
# Install vcpkg.
|
||||
# hadolint ignore=DL3003
|
||||
RUN powershell git clone https://github.com/microsoft/vcpkg.git \
|
||||
&& cd vcpkg \
|
||||
&& git checkout 7ffa425e1db8b0c3edf9c50f2f3a0f25a324541d \
|
||||
&& .\bootstrap-vcpkg.bat
|
||||
RUN powershell .\vcpkg\vcpkg.exe install boost:x64-windows
|
||||
RUN powershell .\vcpkg\vcpkg.exe install protobuf:x64-windows
|
||||
RUN powershell .\vcpkg\vcpkg.exe install openssl:x64-windows
|
||||
RUN powershell .\vcpkg\vcpkg.exe install zlib:x64-windows
|
||||
ENV PROTOBUF_HOME "C:\vcpkg\installed\x64-windows"
|
||||
|
||||
# Install Azul Java 8 JDK.
|
||||
RUN powershell Invoke-WebRequest -URI https://cdn.azul.com/zulu/bin/zulu8.62.0.19-ca-jdk8.0.332-win_x64.zip -OutFile $Env:TEMP\zulu8.62.0.19-ca-jdk8.0.332-win_x64.zip
|
||||
RUN powershell Expand-Archive -Path $Env:TEMP\zulu8.62.0.19-ca-jdk8.0.332-win_x64.zip -DestinationPath "C:\Java"
|
||||
ENV JAVA_HOME "C:\Java\zulu8.62.0.19-ca-jdk8.0.332-win_x64"
|
||||
RUN setx PATH "%PATH%;%JAVA_HOME%\bin"
|
||||
|
||||
# Install Apache Maven.
|
||||
RUN powershell Invoke-WebRequest -URI https://archive.apache.org/dist/maven/maven-3/3.8.6/binaries/apache-maven-3.8.6-bin.zip -OutFile $Env:TEMP\apache-maven-3.8.6-bin.zip
|
||||
RUN powershell Expand-Archive -Path $Env:TEMP\apache-maven-3.8.6-bin.zip -DestinationPath "C:\Maven"
|
||||
RUN setx PATH "%PATH%;C:\Maven\apache-maven-3.8.6\bin"
|
||||
ENV MAVEN_OPTS '-Xmx2048M -Xss128M'
|
||||
|
||||
# Install CMake 3.19.0.
|
||||
RUN powershell Invoke-WebRequest -URI https://cmake.org/files/v3.19/cmake-3.19.0-win64-x64.zip -OutFile $Env:TEMP\cmake-3.19.0-win64-x64.zip
|
||||
RUN powershell Expand-Archive -Path $Env:TEMP\cmake-3.19.0-win64-x64.zip -DestinationPath "C:\CMake"
|
||||
RUN setx PATH "%PATH%;C:\CMake\cmake-3.19.0-win64-x64\bin"
|
||||
|
||||
# Install zstd 1.5.4.
|
||||
RUN powershell Invoke-WebRequest -Uri https://github.com/facebook/zstd/releases/download/v1.5.4/zstd-v1.5.4-win64.zip -OutFile $Env:TEMP\zstd-v1.5.4-win64.zip
|
||||
RUN powershell Expand-Archive -Path $Env:TEMP\zstd-v1.5.4-win64.zip -DestinationPath "C:\ZStd"
|
||||
RUN setx PATH "%PATH%;C:\ZStd"
|
||||
|
||||
# Install libopenssl 3.1.0 needed for rsync 3.2.7.
|
||||
RUN powershell Invoke-WebRequest -Uri https://repo.msys2.org/msys/x86_64/libopenssl-3.1.0-1-x86_64.pkg.tar.zst -OutFile $Env:TEMP\libopenssl-3.1.0-1-x86_64.pkg.tar.zst
|
||||
RUN powershell zstd -d $Env:TEMP\libopenssl-3.1.0-1-x86_64.pkg.tar.zst -o $Env:TEMP\libopenssl-3.1.0-1-x86_64.pkg.tar
|
||||
RUN powershell mkdir "C:\LibOpenSSL"
|
||||
RUN powershell tar -xvf $Env:TEMP\libopenssl-3.1.0-1-x86_64.pkg.tar -C "C:\LibOpenSSL"
|
||||
|
||||
# Install libxxhash 0.8.1 needed for rsync 3.2.7.
|
||||
RUN powershell Invoke-WebRequest -Uri https://repo.msys2.org/msys/x86_64/libxxhash-0.8.1-1-x86_64.pkg.tar.zst -OutFile $Env:TEMP\libxxhash-0.8.1-1-x86_64.pkg.tar.zst
|
||||
RUN powershell zstd -d $Env:TEMP\libxxhash-0.8.1-1-x86_64.pkg.tar.zst -o $Env:TEMP\libxxhash-0.8.1-1-x86_64.pkg.tar
|
||||
RUN powershell mkdir "C:\LibXXHash"
|
||||
RUN powershell tar -xvf $Env:TEMP\libxxhash-0.8.1-1-x86_64.pkg.tar -C "C:\LibXXHash"
|
||||
|
||||
# Install libzstd 1.5.4 needed for rsync 3.2.7.
|
||||
RUN powershell Invoke-WebRequest -Uri https://repo.msys2.org/msys/x86_64/libzstd-1.5.4-1-x86_64.pkg.tar.zst -OutFile $Env:TEMP\libzstd-1.5.4-1-x86_64.pkg.tar.zst
|
||||
RUN powershell zstd -d $Env:TEMP\libzstd-1.5.4-1-x86_64.pkg.tar.zst -o $Env:TEMP\libzstd-1.5.4-1-x86_64.pkg.tar
|
||||
RUN powershell mkdir "C:\LibZStd"
|
||||
RUN powershell tar -xvf $Env:TEMP\libzstd-1.5.4-1-x86_64.pkg.tar -C "C:\LibZStd"
|
||||
|
||||
# Install rsync 3.2.7.
|
||||
RUN powershell Invoke-WebRequest -Uri https://repo.msys2.org/msys/x86_64/rsync-3.2.7-2-x86_64.pkg.tar.zst -OutFile $Env:TEMP\rsync-3.2.7-2-x86_64.pkg.tar.zst
|
||||
RUN powershell zstd -d $Env:TEMP\rsync-3.2.7-2-x86_64.pkg.tar.zst -o $Env:TEMP\rsync-3.2.7-2-x86_64.pkg.tar
|
||||
RUN powershell mkdir "C:\RSync"
|
||||
RUN powershell tar -xvf $Env:TEMP\rsync-3.2.7-2-x86_64.pkg.tar -C "C:\RSync"
|
||||
# Copy the dependencies of rsync 3.2.7.
|
||||
RUN powershell Copy-Item -Path "C:\LibOpenSSL\usr\bin\*.dll" -Destination "C:\Program` Files\Git\usr\bin"
|
||||
RUN powershell Copy-Item -Path "C:\LibXXHash\usr\bin\*.dll" -Destination "C:\Program` Files\Git\usr\bin"
|
||||
RUN powershell Copy-Item -Path "C:\LibZStd\usr\bin\*.dll" -Destination "C:\Program` Files\Git\usr\bin"
|
||||
RUN powershell Copy-Item -Path "C:\RSync\usr\bin\*" -Destination "C:\Program` Files\Git\usr\bin"
|
||||
|
||||
# Install Python 3.10.11.
|
||||
RUN powershell Invoke-WebRequest -Uri https://www.python.org/ftp/python/3.10.11/python-3.10.11-embed-amd64.zip -OutFile $Env:TEMP\python-3.10.11-embed-amd64.zip
|
||||
RUN powershell Expand-Archive -Path $Env:TEMP\python-3.10.11-embed-amd64.zip -DestinationPath "C:\Python3"
|
||||
RUN powershell New-Item -ItemType HardLink -Value "C:\Python3\python.exe" -Path "C:\Python3\python3.exe"
|
||||
RUN setx path "%PATH%;C:\Python3"
|
||||
|
||||
# We get strange Javadoc errors without this.
|
||||
RUN setx classpath ""
|
||||
|
||||
RUN git config --global core.longpaths true
|
||||
RUN setx PATH "%PATH%;C:\Program Files\Git\usr\bin"
|
||||
|
||||
# Define the entry point for the docker container.
|
||||
ENTRYPOINT ["C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\BuildTools\\VC\\Auxiliary\\Build\\vcvars64.bat", "&&", "cmd.exe"]
|
|
@ -48,7 +48,7 @@ is_platform_change() {
|
|||
declare in_path
|
||||
in_path="${SOURCEDIR}"/"${1}"
|
||||
|
||||
for path in "${DOCKERFILE}" "${SOURCEDIR}"/dev-support/docker/pkg-resolver/*.json; do
|
||||
for path in "${SOURCEDIR}"/dev-support/docker/Dockerfile* "${SOURCEDIR}"/dev-support/docker/pkg-resolver/*.json; do
|
||||
if [ "${in_path}" == "${path}" ]; then
|
||||
echo "Found C/C++ platform related changes in ${in_path}"
|
||||
return 0
|
||||
|
@ -114,47 +114,22 @@ function check_ci_run() {
|
|||
function run_ci() {
|
||||
TESTPATCHBIN="${WORKSPACE}/${YETUS}/precommit/src/main/shell/test-patch.sh"
|
||||
|
||||
if [[ "$IS_WINDOWS" && "$IS_WINDOWS" == 1 ]]; then
|
||||
echo "Building in a Windows environment, skipping some Yetus related settings"
|
||||
else
|
||||
# run in docker mode and specifically point to our
|
||||
# Dockerfile since we don't want to use the auto-pulled version.
|
||||
YETUS_ARGS+=("--docker")
|
||||
YETUS_ARGS+=("--dockerfile=${DOCKERFILE}")
|
||||
YETUS_ARGS+=("--mvn-custom-repos")
|
||||
YETUS_ARGS+=("--dockermemlimit=22g")
|
||||
|
||||
# test with Java 8 and 11
|
||||
YETUS_ARGS+=("--java-home=/usr/lib/jvm/java-8-openjdk-amd64")
|
||||
YETUS_ARGS+=("--multijdkdirs=/usr/lib/jvm/java-11-openjdk-amd64")
|
||||
YETUS_ARGS+=("--multijdktests=compile")
|
||||
# this must be clean for every run
|
||||
if [[ -d "${PATCHDIR}" ]]; then
|
||||
rm -rf "${PATCHDIR:?}"
|
||||
fi
|
||||
mkdir -p "${PATCHDIR}"
|
||||
|
||||
if [[ "$IS_NIGHTLY_BUILD" && "$IS_NIGHTLY_BUILD" == 1 ]]; then
|
||||
YETUS_ARGS+=("--empty-patch")
|
||||
YETUS_ARGS+=("--branch=${BRANCH_NAME}")
|
||||
else
|
||||
# this must be clean for every run
|
||||
if [[ -d "${PATCHDIR}" ]]; then
|
||||
rm -rf "${PATCHDIR:?}"
|
||||
fi
|
||||
mkdir -p "${PATCHDIR}"
|
||||
|
||||
# if given a JIRA issue, process it. If CHANGE_URL is set
|
||||
# (e.g., Github Branch Source plugin), process it.
|
||||
# otherwise exit, because we don't want Hadoop to do a
|
||||
# full build. We wouldn't normally do this check for smaller
|
||||
# projects. :)
|
||||
if [[ -n "${JIRA_ISSUE_KEY}" ]]; then
|
||||
YETUS_ARGS+=("${JIRA_ISSUE_KEY}")
|
||||
elif [[ -z "${CHANGE_URL}" ]]; then
|
||||
echo "Full build skipped" >"${PATCHDIR}/report.html"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# write Yetus report as GitHub comment (YETUS-1102)
|
||||
YETUS_ARGS+=("--github-write-comment")
|
||||
YETUS_ARGS+=("--github-use-emoji-vote")
|
||||
# if given a JIRA issue, process it. If CHANGE_URL is set
|
||||
# (e.g., Github Branch Source plugin), process it.
|
||||
# otherwise exit, because we don't want Hadoop to do a
|
||||
# full build. We wouldn't normally do this check for smaller
|
||||
# projects. :)
|
||||
if [[ -n "${JIRA_ISSUE_KEY}" ]]; then
|
||||
YETUS_ARGS+=("${JIRA_ISSUE_KEY}")
|
||||
elif [[ -z "${CHANGE_URL}" ]]; then
|
||||
echo "Full build skipped" >"${PATCHDIR}/report.html"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
YETUS_ARGS+=("--patch-dir=${PATCHDIR}")
|
||||
|
@ -181,6 +156,7 @@ function run_ci() {
|
|||
# changing these to higher values may cause problems
|
||||
# with other jobs on systemd-enabled machines
|
||||
YETUS_ARGS+=("--proclimit=5500")
|
||||
YETUS_ARGS+=("--dockermemlimit=22g")
|
||||
|
||||
# -1 spotbugs issues that show up prior to the patch being applied
|
||||
YETUS_ARGS+=("--spotbugs-strict-precheck")
|
||||
|
@ -199,15 +175,30 @@ function run_ci() {
|
|||
# much attention to them
|
||||
YETUS_ARGS+=("--tests-filter=checkstyle")
|
||||
|
||||
# run in docker mode and specifically point to our
|
||||
# Dockerfile since we don't want to use the auto-pulled version.
|
||||
YETUS_ARGS+=("--docker")
|
||||
YETUS_ARGS+=("--dockerfile=${DOCKERFILE}")
|
||||
YETUS_ARGS+=("--mvn-custom-repos")
|
||||
|
||||
# effectively treat dev-suport as a custom maven module
|
||||
YETUS_ARGS+=("--skip-dirs=dev-support")
|
||||
|
||||
# help keep the ASF boxes clean
|
||||
YETUS_ARGS+=("--sentinel")
|
||||
|
||||
# test with Java 8 and 11
|
||||
YETUS_ARGS+=("--java-home=/usr/lib/jvm/java-8-openjdk-amd64")
|
||||
YETUS_ARGS+=("--multijdkdirs=/usr/lib/jvm/java-11-openjdk-amd64")
|
||||
YETUS_ARGS+=("--multijdktests=compile")
|
||||
|
||||
# custom javadoc goals
|
||||
YETUS_ARGS+=("--mvn-javadoc-goals=process-sources,javadoc:javadoc-no-fork")
|
||||
|
||||
# write Yetus report as GitHub comment (YETUS-1102)
|
||||
YETUS_ARGS+=("--github-write-comment")
|
||||
YETUS_ARGS+=("--github-use-emoji-vote")
|
||||
|
||||
"${TESTPATCHBIN}" "${YETUS_ARGS[@]}"
|
||||
}
|
||||
|
||||
|
|
|
@ -98,6 +98,13 @@
|
|||
<createSourcesJar>true</createSourcesJar>
|
||||
<shadeSourcesContent>true</shadeSourcesContent>
|
||||
</configuration>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-maven-plugins</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
<executions>
|
||||
<execution>
|
||||
<phase>package</phase>
|
||||
|
@ -247,7 +254,8 @@
|
|||
</relocation>
|
||||
</relocations>
|
||||
<transformers>
|
||||
<transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
|
||||
<!-- Needed until MSHADE-182 -->
|
||||
<transformer implementation="org.apache.hadoop.maven.plugin.shade.resource.ServicesResourceTransformer"/>
|
||||
<transformer implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer"/>
|
||||
<transformer implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
|
||||
<resource>NOTICE.txt</resource>
|
||||
|
|
|
@ -671,6 +671,13 @@
|
|||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-shade-plugin</artifactId>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-maven-plugins</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
<executions>
|
||||
<execution>
|
||||
<phase>package</phase>
|
||||
|
@ -1045,7 +1052,8 @@
|
|||
</relocation>
|
||||
</relocations>
|
||||
<transformers>
|
||||
<transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
|
||||
<!-- Needed until MSHADE-182 -->
|
||||
<transformer implementation="org.apache.hadoop.maven.plugin.shade.resource.ServicesResourceTransformer"/>
|
||||
<transformer implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer"/>
|
||||
<transformer implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
|
||||
<resources>
|
||||
|
|
|
@ -128,6 +128,13 @@
|
|||
<plugin>
|
||||
<groupId>org.apache.maven.plugins</groupId>
|
||||
<artifactId>maven-shade-plugin</artifactId>
|
||||
<dependencies>
|
||||
<dependency>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-maven-plugins</artifactId>
|
||||
<version>${project.version}</version>
|
||||
</dependency>
|
||||
</dependencies>
|
||||
<executions>
|
||||
<execution>
|
||||
<phase>package</phase>
|
||||
|
@ -148,7 +155,6 @@
|
|||
<!-- Leave javax APIs that are stable -->
|
||||
<!-- the jdk ships part of the javax.annotation namespace, so if we want to relocate this we'll have to care it out by class :( -->
|
||||
<exclude>com.google.code.findbugs:jsr305</exclude>
|
||||
<exclude>io.netty:*</exclude>
|
||||
<exclude>io.dropwizard.metrics:metrics-core</exclude>
|
||||
<exclude>org.eclipse.jetty:jetty-servlet</exclude>
|
||||
<exclude>org.eclipse.jetty:jetty-security</exclude>
|
||||
|
@ -157,8 +163,6 @@
|
|||
<exclude>org.bouncycastle:*</exclude>
|
||||
<!-- Leave snappy that includes native methods which cannot be relocated. -->
|
||||
<exclude>org.xerial.snappy:*</exclude>
|
||||
<!-- leave out kotlin classes -->
|
||||
<exclude>org.jetbrains.kotlin:*</exclude>
|
||||
</excludes>
|
||||
</artifactSet>
|
||||
<filters>
|
||||
|
@ -393,7 +397,8 @@
|
|||
-->
|
||||
</relocations>
|
||||
<transformers>
|
||||
<transformer implementation="org.apache.maven.plugins.shade.resource.ServicesResourceTransformer"/>
|
||||
<!-- Needed until MSHADE-182 -->
|
||||
<transformer implementation="org.apache.hadoop.maven.plugin.shade.resource.ServicesResourceTransformer"/>
|
||||
<transformer implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer"/>
|
||||
<transformer implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
|
||||
<resources>
|
||||
|
|
|
@ -69,10 +69,6 @@
|
|||
<groupId>com.github.pjfanning</groupId>
|
||||
<artifactId>jersey-json</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.codehaus.jettison</groupId>
|
||||
<artifactId>jettison</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>com.sun.jersey</groupId>
|
||||
<artifactId>jersey-server</artifactId>
|
||||
|
@ -186,10 +182,6 @@
|
|||
<groupId>com.github.pjfanning</groupId>
|
||||
<artifactId>jersey-json</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.codehaus.jettison</groupId>
|
||||
<artifactId>jettison</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>io.netty</groupId>
|
||||
<artifactId>netty</artifactId>
|
||||
|
@ -241,10 +233,6 @@
|
|||
<groupId>com.github.pjfanning</groupId>
|
||||
<artifactId>jersey-json</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.codehaus.jettison</groupId>
|
||||
<artifactId>jettison</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>com.sun.jersey</groupId>
|
||||
<artifactId>jersey-servlet</artifactId>
|
||||
|
@ -302,10 +290,6 @@
|
|||
<groupId>com.github.pjfanning</groupId>
|
||||
<artifactId>jersey-json</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>org.codehaus.jettison</groupId>
|
||||
<artifactId>jettison</artifactId>
|
||||
</exclusion>
|
||||
<exclusion>
|
||||
<groupId>io.netty</groupId>
|
||||
<artifactId>netty</artifactId>
|
||||
|
|
|
@ -127,6 +127,11 @@
|
|||
<artifactId>hadoop-azure-datalake</artifactId>
|
||||
<scope>compile</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-openstack</artifactId>
|
||||
<scope>compile</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.hadoop</groupId>
|
||||
<artifactId>hadoop-cos</artifactId>
|
||||
|
|
|
@ -110,8 +110,20 @@
|
|||
<groupId>org.bouncycastle</groupId>
|
||||
<artifactId>bcprov-jdk15on</artifactId>
|
||||
</exclusion>
|
||||
<!-- HACK. Transitive dependency for nimbus-jose-jwt. Needed for
|
||||
packaging. Please re-check this version when updating
|
||||
nimbus-jose-jwt. Please read HADOOP-14903 for more details.
|
||||
-->
|
||||
<exclusion>
|
||||
<groupId>net.minidev</groupId>
|
||||
<artifactId>json-smart</artifactId>
|
||||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>net.minidev</groupId>
|
||||
<artifactId>json-smart</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.apache.zookeeper</groupId>
|
||||
<artifactId>zookeeper</artifactId>
|
||||
|
|
|
@@ -18,10 +18,6 @@

package org.apache.hadoop.util;

import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.Arrays;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

@@ -37,71 +33,21 @@ public class PlatformName {
   * per the java-vm.
   */
  public static final String PLATFORM_NAME =
      (System.getProperty("os.name").startsWith("Windows") ?
      System.getenv("os") : System.getProperty("os.name"))
      + "-" + System.getProperty("os.arch") + "-"
      + System.getProperty("sun.arch.data.model");
      (System.getProperty("os.name").startsWith("Windows")
      ? System.getenv("os") : System.getProperty("os.name"))
      + "-" + System.getProperty("os.arch")
      + "-" + System.getProperty("sun.arch.data.model");

  /**
   * The java vendor name used in this platform.
   */
  public static final String JAVA_VENDOR_NAME = System.getProperty("java.vendor");

  /**
   * Define a system class accessor that is open to changes in underlying implementations
   * of the system class loader modules.
   */
  private static final class SystemClassAccessor extends ClassLoader {
    public Class<?> getSystemClass(String className) throws ClassNotFoundException {
      return findSystemClass(className);
    }
  }

  /**
   * A public static variable to indicate the current java vendor is
   * IBM and the type is Java Technology Edition which provides its
   * own implementations of many security packages and Cipher suites.
   * Note that these are not provided in Semeru runtimes:
   * See https://developer.ibm.com/languages/java/semeru-runtimes for details.
   * IBM java or not.
   */
  public static final boolean IBM_JAVA = JAVA_VENDOR_NAME.contains("IBM") &&
      hasIbmTechnologyEditionModules();

  private static boolean hasIbmTechnologyEditionModules() {
    return Arrays.asList(
        "com.ibm.security.auth.module.JAASLoginModule",
        "com.ibm.security.auth.module.Win64LoginModule",
        "com.ibm.security.auth.module.NTLoginModule",
        "com.ibm.security.auth.module.AIX64LoginModule",
        "com.ibm.security.auth.module.LinuxLoginModule",
        "com.ibm.security.auth.module.Krb5LoginModule"
    ).stream().anyMatch((module) -> isSystemClassAvailable(module));
  }

  /**
   * In rare cases where different behaviour is performed based on the JVM vendor
   * this method should be used to test for a unique JVM class provided by the
   * vendor rather than using the vendor method. For example if on JVM provides a
   * different Kerberos login module testing for that login module being loadable
   * before configuring to use it is preferable to using the vendor data.
   *
   * @param className the name of a class in the JVM to test for
   * @return true if the class is available, false otherwise.
   */
  private static boolean isSystemClassAvailable(String className) {
    return AccessController.doPrivileged((PrivilegedAction<Boolean>) () -> {
      try {
        // Using ClassLoader.findSystemClass() instead of
        // Class.forName(className, false, null) because Class.forName with a null
        // ClassLoader only looks at the boot ClassLoader with Java 9 and above
        // which doesn't look at all the modules available to the findSystemClass.
        new SystemClassAccessor().getSystemClass(className);
        return true;
      } catch (Exception ignored) {
        return false;
      }
    });
  }
  public static final boolean IBM_JAVA = JAVA_VENDOR_NAME.contains("IBM");

  public static void main(String[] args) {
    System.out.println(PLATFORM_NAME);
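The main method at the end of this hunk prints the computed PLATFORM_NAME, which gives a quick way
to check what the class resolves to on a given JVM. The classpath below is a placeholder for
wherever the compiled Hadoop classes live, not a path from this change:

  java -cp <path-to-hadoop-classes> org.apache.hadoop.util.PlatformName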
|
|
|
@@ -24,7 +24,7 @@ This filter must be configured in front of all the web application resources tha

The Hadoop Auth and dependent JAR files must be in the web application classpath (commonly the `WEB-INF/lib` directory).

Hadoop Auth uses SLF4J-API for logging. Auth Maven POM dependencies define the SLF4J API dependency but it does not define the dependency on a concrete logging implementation, this must be addded explicitly to the web application. For example, if the web applicationan uses Log4j, the SLF4J-LOG4J12 and LOG4J jar files must be part of the web application classpath as well as the Log4j configuration file.
Hadoop Auth uses SLF4J-API for logging. Auth Maven POM dependencies define the SLF4J API dependency but it does not define the dependency on a concrete logging implementation, this must be addded explicitly to the web application. For example, if the web applicationan uses Log4j, the SLF4J-LOG4J12 and LOG4J jar files must be part part of the web application classpath as well as the Log4j configuration file.

### Common Configuration parameters
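Since the paragraph above requires the web application to supply a concrete SLF4J binding, one
quick way to see which binding (if any) a Maven-built application will put on its classpath is the
dependency tree; this is a general hint rather than a step from the document:

  mvn dependency:tree -Dincludes=org.slf4j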
|
||||
|
|
|
@@ -379,6 +379,21 @@
     <Bug code="JLM" />
   </Match>

   <!--
     OpenStack Swift FS module -closes streams in a different method
     from where they are opened.
   -->
   <Match>
     <Class name="org.apache.hadoop.fs.swift.snative.SwiftNativeOutputStream"/>
     <Method name="uploadFileAttempt"/>
     <Bug pattern="OBL_UNSATISFIED_OBLIGATION"/>
   </Match>
   <Match>
     <Class name="org.apache.hadoop.fs.swift.snative.SwiftNativeOutputStream"/>
     <Method name="uploadFilePartAttempt"/>
     <Bug pattern="OBL_UNSATISFIED_OBLIGATION"/>
   </Match>

   <!-- code from maven source, null value is checked at callee side. -->
   <Match>
     <Class name="org.apache.hadoop.util.ComparableVersion$ListItem" />
|
|
File diff suppressed because one or more lines are too long
|
@ -175,19 +175,16 @@
|
|||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<!--
|
||||
adding jettison as direct dependency (as jersey-json's jettison dependency is vulnerable with verison 1.1),
|
||||
so those who depends on hadoop-common externally will get the non-vulnerable jettison
|
||||
-->
|
||||
<groupId>org.codehaus.jettison</groupId>
|
||||
<artifactId>jettison</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.sun.jersey</groupId>
|
||||
<artifactId>jersey-server</artifactId>
|
||||
<scope>compile</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>commons-logging</groupId>
|
||||
<artifactId>commons-logging</artifactId>
|
||||
<scope>compile</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>log4j</groupId>
|
||||
<artifactId>log4j</artifactId>
|
||||
|
@ -203,6 +200,11 @@
|
|||
<artifactId>assertj-core</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>org.glassfish.grizzly</groupId>
|
||||
<artifactId>grizzly-http-servlet</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>commons-beanutils</groupId>
|
||||
<artifactId>commons-beanutils</artifactId>
|
||||
|
@ -340,14 +342,6 @@
|
|||
</exclusion>
|
||||
</exclusions>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.netty</groupId>
|
||||
<artifactId>netty-handler</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.netty</groupId>
|
||||
<artifactId>netty-transport-native-epoll</artifactId>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.dropwizard.metrics</groupId>
|
||||
<artifactId>metrics-core</artifactId>
|
||||
|
@ -389,11 +383,6 @@
|
|||
<artifactId>mockwebserver</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>com.squareup.okio</groupId>
|
||||
<artifactId>okio-jvm</artifactId>
|
||||
<scope>test</scope>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>dnsjava</groupId>
|
||||
<artifactId>dnsjava</artifactId>
|
||||
|
@ -660,10 +649,9 @@
|
|||
<goal>exec</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<executable>${shell-executable}</executable>
|
||||
<executable>${basedir}/../../dev-support/bin/shelldocs</executable>
|
||||
<workingDirectory>src/site/markdown</workingDirectory>
|
||||
<arguments>
|
||||
<argument>${basedir}/../../dev-support/bin/shelldocs</argument>
|
||||
<argument>--skipprnorep</argument>
|
||||
<argument>--output</argument>
|
||||
<argument>${basedir}/src/site/markdown/UnixShellAPI.md</argument>
|
||||
|
@@ -853,36 +841,6 @@
          </execution>
        </executions>
      </plugin>
      <plugin>
        <!--Sets the skip.platformToolsetDetection to true if use.platformToolsetVersion is specified.
        This implies that the automatic detection of which platform toolset to use will be skipped
        and the one specified with use.platformToolsetVersion will be used.-->
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-antrun-plugin</artifactId>
        <version>1.8</version>
        <executions>
          <execution>
            <phase>validate</phase>
            <goals>
              <goal>run</goal>
            </goals>
            <configuration>
              <exportAntProperties>true</exportAntProperties>
              <target>
                <condition property="skip.platformToolsetDetection" value="true" else="false">
                  <isset property="use.platformToolsetVersion"/>
                </condition>
                <!--Unfortunately, Maven doesn't have a way to negate a flag, thus we declare a
                property which holds the negated value of skip.platformToolsetDetection.-->
                <condition property="skip.platformToolsetDetection.negated" value="false" else="true">
                  <isset property="use.platformToolsetVersion"/>
                </condition>
                <echo>Skip platform toolset version detection = ${skip.platformToolsetDetection}</echo>
              </target>
            </configuration>
          </execution>
        </executions>
      </plugin>
      <plugin>
        <groupId>org.codehaus.mojo</groupId>
        <artifactId>exec-maven-plugin</artifactId>
@ -894,7 +852,6 @@
|
|||
<goal>exec</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<skip>${skip.platformToolsetDetection}</skip>
|
||||
<executable>${basedir}\..\..\dev-support\bin\win-vs-upgrade.cmd</executable>
|
||||
<arguments>
|
||||
<argument>${basedir}\src\main\winutils</argument>
|
||||
|
@ -909,7 +866,6 @@
|
|||
<goal>exec</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<skip>${skip.platformToolsetDetection}</skip>
|
||||
<executable>msbuild</executable>
|
||||
<arguments>
|
||||
<argument>${basedir}/src/main/winutils/winutils.sln</argument>
|
||||
|
@ -922,27 +878,6 @@
|
|||
</arguments>
|
||||
</configuration>
|
||||
</execution>
|
||||
<execution>
|
||||
<id>compile-ms-winutils-using-build-tools</id>
|
||||
<phase>compile</phase>
|
||||
<goals>
|
||||
<goal>exec</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<skip>${skip.platformToolsetDetection.negated}</skip>
|
||||
<executable>msbuild</executable>
|
||||
<arguments>
|
||||
<argument>${basedir}/src/main/winutils/winutils.sln</argument>
|
||||
<argument>/nologo</argument>
|
||||
<argument>/p:Configuration=Release</argument>
|
||||
<argument>/p:OutDir=${project.build.directory}/bin/</argument>
|
||||
<argument>/p:IntermediateOutputPath=${project.build.directory}/winutils/</argument>
|
||||
<argument>/p:WsceConfigDir=${wsce.config.dir}</argument>
|
||||
<argument>/p:WsceConfigFile=${wsce.config.file}</argument>
|
||||
<argument>/p:PlatformToolset=${use.platformToolsetVersion}</argument>
|
||||
</arguments>
|
||||
</configuration>
|
||||
</execution>
|
||||
<execution>
|
||||
<id>convert-ms-native-dll</id>
|
||||
<phase>generate-sources</phase>
|
||||
|
@ -950,7 +885,6 @@
|
|||
<goal>exec</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<skip>${skip.platformToolsetDetection}</skip>
|
||||
<executable>${basedir}\..\..\dev-support\bin\win-vs-upgrade.cmd</executable>
|
||||
<arguments>
|
||||
<argument>${basedir}\src\main\native</argument>
|
||||
|
@ -965,7 +899,6 @@
|
|||
<goal>exec</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<skip>${skip.platformToolsetDetection}</skip>
|
||||
<executable>msbuild</executable>
|
||||
<arguments>
|
||||
<argument>${basedir}/src/main/native/native.sln</argument>
|
||||
|
@ -986,35 +919,6 @@
|
|||
</arguments>
|
||||
</configuration>
|
||||
</execution>
|
||||
<execution>
|
||||
<id>compile-ms-native-dll-using-build-tools</id>
|
||||
<phase>compile</phase>
|
||||
<goals>
|
||||
<goal>exec</goal>
|
||||
</goals>
|
||||
<configuration>
|
||||
<skip>${skip.platformToolsetDetection.negated}</skip>
|
||||
<executable>msbuild</executable>
|
||||
<arguments>
|
||||
<argument>${basedir}/src/main/native/native.sln</argument>
|
||||
<argument>/nologo</argument>
|
||||
<argument>/p:Configuration=Release</argument>
|
||||
<argument>/p:OutDir=${project.build.directory}/bin/</argument>
|
||||
<argument>/p:CustomZstdPrefix=${zstd.prefix}</argument>
|
||||
<argument>/p:CustomZstdLib=${zstd.lib}</argument>
|
||||
<argument>/p:CustomZstdInclude=${zstd.include}</argument>
|
||||
<argument>/p:RequireZstd=${require.zstd}</argument>
|
||||
<argument>/p:CustomOpensslPrefix=${openssl.prefix}</argument>
|
||||
<argument>/p:CustomOpensslLib=${openssl.lib}</argument>
|
||||
<argument>/p:CustomOpensslInclude=${openssl.include}</argument>
|
||||
<argument>/p:RequireOpenssl=${require.openssl}</argument>
|
||||
<argument>/p:RequireIsal=${require.isal}</argument>
|
||||
<argument>/p:CustomIsalPrefix=${isal.prefix}</argument>
|
||||
<argument>/p:CustomIsalLib=${isal.lib}</argument>
|
||||
<argument>/p:PlatformToolset=${use.platformToolsetVersion}</argument>
|
||||
</arguments>
|
||||
</configuration>
|
||||
</execution>
|
||||
</executions>
|
||||
</plugin>
|
||||
</plugins>
|
||||
|
@@ -1247,7 +1151,7 @@
            <id>src-test-compile-protoc-legacy</id>
            <phase>generate-test-sources</phase>
            <goals>
              <goal>test-compile</goal>
              <goal>compile</goal>
            </goals>
            <configuration>
              <skip>false</skip>
@@ -1256,7 +1160,7 @@
                com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier}
              </protocArtifact>
              <includeDependenciesInDescriptorSet>false</includeDependenciesInDescriptorSet>
              <protoTestSourceRoot>${basedir}/src/test/proto</protoTestSourceRoot>
              <protoSourceRoot>${basedir}/src/test/proto</protoSourceRoot>
              <outputDirectory>${project.build.directory}/generated-test-sources/java</outputDirectory>
              <clearOutputDirectory>false</clearOutputDirectory>
              <includes>

@@ -26,9 +26,9 @@ MYNAME="${BASH_SOURCE-$0}"
function hadoop_usage
{
  hadoop_add_option "buildpaths" "attempt to add class files from build tree"
  hadoop_add_option "hostnames list[,of,host,names]" "hosts to use in worker mode"
  hadoop_add_option "hostnames list[,of,host,names]" "hosts to use in slave mode"
  hadoop_add_option "loglevel level" "set the log4j level for this command"
  hadoop_add_option "hosts filename" "list of hosts to use in worker mode"
  hadoop_add_option "hosts filename" "list of hosts to use in slave mode"
  hadoop_add_option "workers" "turn on worker mode"

  hadoop_add_subcommand "checknative" client "check native Hadoop and compression libraries availability"

@@ -16,7 +16,7 @@
# limitations under the License.


# Run a Hadoop command on all worker hosts.
# Run a Hadoop command on all slave hosts.

function hadoop_usage
{

@@ -53,10 +53,6 @@
# variable is REQUIRED on ALL platforms except OS X!
# export JAVA_HOME=

# The language environment in which Hadoop runs. Use the English
# environment to ensure that logs are printed as expected.
export LANG=en_US.UTF-8

# Location of Hadoop. By default, Hadoop will attempt to determine
# this location based upon its execution path.
# export HADOOP_HOME=

@@ -75,6 +75,14 @@ log4j.appender.console.target=System.err
log4j.appender.console.layout=org.apache.log4j.PatternLayout
log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n

#
# TaskLog Appender
#
log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender

log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n

#
# HDFS block state change log from block manager
#

@@ -98,7 +98,7 @@ public class ConfServlet extends HttpServlet {
    if (FORMAT_JSON.equals(format)) {
      Configuration.dumpConfiguration(conf, propertyName, out);
    } else if (FORMAT_XML.equals(format)) {
      conf.writeXml(propertyName, out, conf);
      conf.writeXml(propertyName, out);
    } else {
      throw new BadFormatException("Bad format: " + format);
    }

@@ -37,7 +37,6 @@ import org.apache.hadoop.util.StringUtils;
public class ConfigRedactor {

  private static final String REDACTED_TEXT = "<redacted>";
  private static final String REDACTED_XML = "******";

  private List<Pattern> compiledPatterns;

@@ -85,19 +84,4 @@ public class ConfigRedactor {
    }
    return false;
  }

  /**
   * Given a key / value pair, decides whether or not to redact and returns
   * either the original value or text indicating it has been redacted.
   *
   * @param key param key.
   * @param value param value, will return if conditions permit.
   * @return Original value, or text indicating it has been redacted
   */
  public String redactXml(String key, String value) {
    if (configIsSensitive(key)) {
      return REDACTED_XML;
    }
    return value;
  }
}

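The trunk side of this hunk adds a redactXml() companion to the existing redact() path so that XML dumps can mask sensitive values. A minimal usage sketch, assuming the usual single-argument ConfigRedactor(Configuration) constructor and the default hadoop.security.sensitive-config-keys patterns; the key names are illustrative:

    Configuration conf = new Configuration();
    ConfigRedactor redactor = new ConfigRedactor(conf);

    // A key matching the sensitive patterns (e.g. one ending in "password") is masked.
    String masked = redactor.redactXml("mydb.connection.password", "hunter2");  // "******"
    // Non-sensitive keys pass through unchanged.
    String plain = redactor.redactXml("fs.defaultFS", "hdfs://nn:8020");        // value unchanged
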
@@ -24,6 +24,7 @@ import com.ctc.wstx.io.SystemId;
import com.ctc.wstx.stax.WstxInputFactory;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import org.apache.hadoop.classification.VisibleForTesting;

import java.io.BufferedInputStream;
import java.io.DataInput;
@@ -86,7 +87,6 @@ import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
import org.apache.commons.collections.map.UnmodifiableMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -98,19 +98,18 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProvider.CredentialEntry;
import org.apache.hadoop.security.alias.CredentialProviderFactory;
import org.apache.hadoop.thirdparty.com.google.common.base.Strings;
import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringInterner;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.XMLUtils;

import org.codehaus.stax2.XMLStreamReader2;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;

import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.base.Strings;

import static org.apache.commons.lang3.StringUtils.isBlank;
import static org.apache.commons.lang3.StringUtils.isNotBlank;

@@ -3594,18 +3593,16 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   * </ul>
   * @param propertyName xml property name.
   * @param out the writer to write to.
   * @param config configuration.
   * @throws IOException raised on errors performing I/O.
   */
  public void writeXml(@Nullable String propertyName, Writer out, Configuration config)
  public void writeXml(@Nullable String propertyName, Writer out)
      throws IOException, IllegalArgumentException {
    ConfigRedactor redactor = config != null ? new ConfigRedactor(this) : null;
    Document doc = asXmlDocument(propertyName, redactor);
    Document doc = asXmlDocument(propertyName);

    try {
      DOMSource source = new DOMSource(doc);
      StreamResult result = new StreamResult(out);
      TransformerFactory transFactory = XMLUtils.newSecureTransformerFactory();
      TransformerFactory transFactory = TransformerFactory.newInstance();
      Transformer transformer = transFactory.newTransformer();

      // Important to not hold Configuration log while writing result, since
@@ -3617,16 +3614,11 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    }
  }

  public void writeXml(@Nullable String propertyName, Writer out)
      throws IOException, IllegalArgumentException {
    writeXml(propertyName, out, null);
  }

  /**
   * Return the XML DOM corresponding to this Configuration.
   */
  private synchronized Document asXmlDocument(@Nullable String propertyName,
      ConfigRedactor redactor) throws IOException, IllegalArgumentException {
  private synchronized Document asXmlDocument(@Nullable String propertyName)
      throws IOException, IllegalArgumentException {
    Document doc;
    try {
      doc = DocumentBuilderFactory
@@ -3649,13 +3641,13 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
            propertyName + " not found");
      } else {
        // given property is found, write single property
        appendXMLProperty(doc, conf, propertyName, redactor);
        appendXMLProperty(doc, conf, propertyName);
        conf.appendChild(doc.createTextNode("\n"));
      }
    } else {
      // append all elements
      for (Enumeration<Object> e = properties.keys(); e.hasMoreElements();) {
        appendXMLProperty(doc, conf, (String)e.nextElement(), redactor);
        appendXMLProperty(doc, conf, (String)e.nextElement());
        conf.appendChild(doc.createTextNode("\n"));
      }
    }
@@ -3671,7 +3663,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   * @param propertyName
   */
  private synchronized void appendXMLProperty(Document doc, Element conf,
      String propertyName, ConfigRedactor redactor) {
      String propertyName) {
    // skip writing if given property name is empty or null
    if (!Strings.isNullOrEmpty(propertyName)) {
      String value = properties.getProperty(propertyName);
@@ -3684,11 +3676,8 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
        propNode.appendChild(nameNode);

        Element valueNode = doc.createElement("value");
        String propertyValue = properties.getProperty(propertyName);
        if (redactor != null) {
          propertyValue = redactor.redactXml(propertyName, propertyValue);
        }
        valueNode.appendChild(doc.createTextNode(propertyValue));
        valueNode.appendChild(doc.createTextNode(
            properties.getProperty(propertyName)));
        propNode.appendChild(valueNode);

        Element finalNode = doc.createElement("final");

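Taken together with the ConfServlet hunk earlier, the trunk-side change threads a ConfigRedactor through writeXml() so that XML configuration dumps no longer leak secrets. A hedged sketch of calling the three-argument variant shown above; imports are omitted and the key name is illustrative:

    Configuration conf = new Configuration();
    conf.set("ssl.server.keystore.password", "changeit");   // illustrative sensitive key

    StringWriter out = new StringWriter();
    // Passing a non-null Configuration enables redaction: sensitive values are written as "******".
    conf.writeXml(null, out, conf);

    StringWriter raw = new StringWriter();
    // The two-argument overload keeps the old, unredacted behaviour.
    conf.writeXml(null, raw);
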
@@ -241,15 +241,12 @@ public class CryptoOutputStream extends FilterOutputStream implements
      return;
    }
    try {
      try {
        flush();
      } finally {
        if (closeOutputStream) {
          super.close();
          codec.close();
        }
        freeBuffers();
      flush();
      if (closeOutputStream) {
        super.close();
        codec.close();
      }
      freeBuffers();
    } finally {
      closed = true;
    }

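Both variants guard against double-close, but only the nested try/finally form guarantees that the buffers are freed and the closed flag is set when flush() throws. A generic sketch of that shape (not this class's exact code):

    try {
      try {
        flush();                 // may throw
      } finally {
        out.close();             // still runs if flush() failed
        freeBuffers();
      }
    } finally {
      closed = true;             // close() stays idempotent either way
    }
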
@@ -639,14 +639,13 @@ public abstract class KeyProvider implements Closeable {
  public abstract void flush() throws IOException;

  /**
   * Split the versionName in to a base name. Converts "/aaa/bbb@3" to
   * Split the versionName in to a base name. Converts "/aaa/bbb/3" to
   * "/aaa/bbb".
   * @param versionName the version name to split
   * @return the base name of the key
   * @throws IOException raised on errors performing I/O.
   */
  public static String getBaseName(String versionName) throws IOException {
    Objects.requireNonNull(versionName, "VersionName cannot be null");
    int div = versionName.lastIndexOf('@');
    if (div == -1) {
      throw new IOException("No version in key path " + versionName);

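The corrected javadoc matches the code, which splits on the last '@'. For example:

    // "/aaa/bbb@3"  ->  "/aaa/bbb"
    String base = KeyProvider.getBaseName("/aaa/bbb@3");

    // A name without '@' carries no version component, so
    // KeyProvider.getBaseName("/aaa/bbb/3") throws IOException("No version in key path ...").
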
@@ -60,6 +60,7 @@ public class AvroFSInput implements Closeable, SeekableInput {
            FS_OPTION_OPENFILE_READ_POLICY_SEQUENTIAL)
        .withFileStatus(status)
        .build());
    fc.open(p);
  }

  @Override

|
@ -174,7 +174,6 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
|
|||
private static final int HEADER_LENGTH = 8;
|
||||
|
||||
private int bytesPerSum = 1;
|
||||
private long fileLen = -1L;
|
||||
|
||||
public ChecksumFSInputChecker(ChecksumFileSystem fs, Path file)
|
||||
throws IOException {
|
||||
|
@ -321,18 +320,6 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
|
|||
return HEADER_LENGTH + (dataOffset/bytesPerSum) * FSInputChecker.CHECKSUM_SIZE;
|
||||
}
|
||||
|
||||
/**
|
||||
* Calculate length of file if not already cached.
|
||||
* @return file length.
|
||||
* @throws IOException any IOE.
|
||||
*/
|
||||
private long getFileLength() throws IOException {
|
||||
if (fileLen == -1L) {
|
||||
fileLen = fs.getFileStatus(file).getLen();
|
||||
}
|
||||
return fileLen;
|
||||
}
|
||||
|
||||
/**
|
||||
* Find the checksum ranges that correspond to the given data ranges.
|
||||
* @param dataRanges the input data ranges, which are assumed to be sorted
|
||||
|
@ -384,28 +371,13 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
|
|||
IntBuffer sums = sumsBytes.asIntBuffer();
|
||||
sums.position(offset / FSInputChecker.CHECKSUM_SIZE);
|
||||
ByteBuffer current = data.duplicate();
|
||||
int numFullChunks = data.remaining() / bytesPerSum;
|
||||
boolean partialChunk = ((data.remaining() % bytesPerSum) != 0);
|
||||
int totalChunks = numFullChunks;
|
||||
if (partialChunk) {
|
||||
totalChunks++;
|
||||
}
|
||||
int numChunks = data.remaining() / bytesPerSum;
|
||||
CRC32 crc = new CRC32();
|
||||
// check each chunk to ensure they match
|
||||
for(int c = 0; c < totalChunks; ++c) {
|
||||
// set the buffer position to the start of every chunk.
|
||||
for(int c = 0; c < numChunks; ++c) {
|
||||
// set the buffer position and the limit
|
||||
current.limit((c + 1) * bytesPerSum);
|
||||
current.position(c * bytesPerSum);
|
||||
|
||||
if (c == numFullChunks) {
|
||||
// During last chunk, there may be less than chunk size
|
||||
// data preset, so setting the limit accordingly.
|
||||
int lastIncompleteChunk = data.remaining() % bytesPerSum;
|
||||
current.limit((c * bytesPerSum) + lastIncompleteChunk);
|
||||
} else {
|
||||
// set the buffer limit to end of every chunk.
|
||||
current.limit((c + 1) * bytesPerSum);
|
||||
}
|
||||
|
||||
// compute the crc
|
||||
crc.reset();
|
||||
crc.update(current);
|
||||
|
@ -424,34 +396,11 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
|
|||
return data;
|
||||
}
|
||||
|
||||
/**
|
||||
* Validates range parameters.
|
||||
* In case of CheckSum FS, we already have calculated
|
||||
* fileLength so failing fast here.
|
||||
* @param ranges requested ranges.
|
||||
* @param fileLength length of file.
|
||||
* @throws EOFException end of file exception.
|
||||
*/
|
||||
private void validateRangeRequest(List<? extends FileRange> ranges,
|
||||
final long fileLength) throws EOFException {
|
||||
for (FileRange range : ranges) {
|
||||
VectoredReadUtils.validateRangeRequest(range);
|
||||
if (range.getOffset() + range.getLength() > fileLength) {
|
||||
final String errMsg = String.format("Requested range [%d, %d) is beyond EOF for path %s",
|
||||
range.getOffset(), range.getLength(), file);
|
||||
LOG.warn(errMsg);
|
||||
throw new EOFException(errMsg);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readVectored(List<? extends FileRange> ranges,
|
||||
IntFunction<ByteBuffer> allocate) throws IOException {
|
||||
final long length = getFileLength();
|
||||
validateRangeRequest(ranges, length);
|
||||
|
||||
// If the stream doesn't have checksums, just delegate.
|
||||
VectoredReadUtils.validateVectoredReadRanges(ranges);
|
||||
if (sums == null) {
|
||||
datas.readVectored(ranges, allocate);
|
||||
return;
|
||||
|
@ -461,18 +410,15 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
|
|||
List<CombinedFileRange> dataRanges =
|
||||
VectoredReadUtils.mergeSortedRanges(Arrays.asList(sortRanges(ranges)), bytesPerSum,
|
||||
minSeek, maxReadSizeForVectorReads());
|
||||
// While merging the ranges above, they are rounded up based on the value of bytesPerSum
|
||||
// which leads to some ranges crossing the EOF thus they need to be fixed else it will
|
||||
// cause EOFException during actual reads.
|
||||
for (CombinedFileRange range : dataRanges) {
|
||||
if (range.getOffset() + range.getLength() > length) {
|
||||
range.setLength((int) (length - range.getOffset()));
|
||||
}
|
||||
}
|
||||
List<CombinedFileRange> checksumRanges = findChecksumRanges(dataRanges,
|
||||
bytesPerSum, minSeek, maxSize);
|
||||
sums.readVectored(checksumRanges, allocate);
|
||||
datas.readVectored(dataRanges, allocate);
|
||||
// Data read is correct. I have verified content of dataRanges.
|
||||
// There is some bug below here as test (testVectoredReadMultipleRanges)
|
||||
// is failing, should be
|
||||
// somewhere while slicing the merged data into smaller user ranges.
|
||||
// Spend some time figuring out but it is a complex code.
|
||||
for(CombinedFileRange checksumRange: checksumRanges) {
|
||||
for(FileRange dataRange: checksumRange.getUnderlying()) {
|
||||
// when we have both the ranges, validate the checksum
|
||||
|
|
|
@@ -417,14 +417,6 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
  /** How often to retry a ZooKeeper operation in milliseconds. */
  public static final String ZK_RETRY_INTERVAL_MS =
      ZK_PREFIX + "retry-interval-ms";
  /** Keystore location for ZooKeeper client connection over SSL. */
  public static final String ZK_SSL_KEYSTORE_LOCATION = ZK_PREFIX + "ssl.keystore.location";
  /** Keystore password for ZooKeeper client connection over SSL. */
  public static final String ZK_SSL_KEYSTORE_PASSWORD = ZK_PREFIX + "ssl.keystore.password";
  /** Truststore location for ZooKeeper client connection over SSL. */
  public static final String ZK_SSL_TRUSTSTORE_LOCATION = ZK_PREFIX + "ssl.truststore.location";
  /** Truststore password for ZooKeeper client connection over SSL. */
  public static final String ZK_SSL_TRUSTSTORE_PASSWORD = ZK_PREFIX + "ssl.truststore.password";
  public static final int ZK_RETRY_INTERVAL_MS_DEFAULT = 1000;
  /** Default domain name resolver for hadoop to use. */
  public static final String HADOOP_DOMAINNAME_RESOLVER_IMPL =
@@ -488,16 +480,13 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   * Thread-level IOStats Support.
   * {@value}
   */
  public static final String IOSTATISTICS_THREAD_LEVEL_ENABLED =
      "fs.iostatistics.thread.level.enabled";
  public static final String THREAD_LEVEL_IOSTATISTICS_ENABLED =
      "fs.thread.level.iostatistics.enabled";

  /**
   * Default value for Thread-level IOStats Support is true.
   */
  public static final boolean IOSTATISTICS_THREAD_LEVEL_ENABLED_DEFAULT =
  public static final boolean THREAD_LEVEL_IOSTATISTICS_ENABLED_DEFAULT =
      true;

  public static final String HADOOP_SECURITY_RESOLVER_IMPL =
      "hadoop.security.resolver.impl";

}

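The two sides name the thread-level IOStatistics switch differently (fs.iostatistics.thread.level.enabled on trunk, fs.thread.level.iostatistics.enabled on the branch), so callers should read it through the constant rather than a hard-coded key. A sketch using the trunk-side names from the hunk above:

    Configuration conf = new Configuration();
    boolean threadIOStats = conf.getBoolean(
        CommonConfigurationKeys.IOSTATISTICS_THREAD_LEVEL_ENABLED,
        CommonConfigurationKeys.IOSTATISTICS_THREAD_LEVEL_ENABLED_DEFAULT);
    // Defaults to true; set the property to false to disable per-thread aggregation.
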
@@ -1000,7 +1000,6 @@ public class CommonConfigurationKeysPublic {
      String.join(",",
          "secret$",
          "password$",
          "username$",
          "ssl.keystore.pass$",
          "fs.s3.*[Ss]ecret.?[Kk]ey",
          "fs.s3a.*.server-side-encryption.key",

@@ -163,11 +163,5 @@ public final class CommonPathCapabilities {
  public static final String ETAGS_PRESERVED_IN_RENAME =
      "fs.capability.etags.preserved.in.rename";

  /**
   * Does this Filesystem support lease recovery operations such as
   * {@link LeaseRecoverable#recoverLease(Path)} and {@link LeaseRecoverable#isFileClosed(Path)}}?
   * Value: {@value}.
   */
  public static final String LEASE_RECOVERABLE = "fs.capability.lease.recoverable";

}

@@ -256,8 +256,9 @@ public class DelegationTokenRenewer
        try {
          action.cancel();
        } catch (InterruptedException ie) {
          LOG.error("Interrupted while canceling token for {} filesystem.", fs.getUri());
          LOG.debug("Exception in removeRenewAction.", ie);
          LOG.error("Interrupted while canceling token for " + fs.getUri()
              + "filesystem");
          LOG.debug("Exception in removeRenewAction: {}", ie);
        }
      }
    }

@ -28,34 +28,6 @@ import org.apache.hadoop.classification.InterfaceStability;
|
|||
* The base interface which various FileSystem FileContext Builder
|
||||
* interfaces can extend, and which underlying implementations
|
||||
* will then implement.
|
||||
* <p>
|
||||
* HADOOP-16202 expanded the opt() and must() arguments with
|
||||
* operator overloading, but HADOOP-18724 identified mapping problems:
|
||||
* passing a long value in to {@code opt()} could end up invoking
|
||||
* {@code opt(string, double)}, which could then trigger parse failures.
|
||||
* <p>
|
||||
* To fix this without forcing existing code to break/be recompiled.
|
||||
* <ol>
|
||||
* <li>A new method to explicitly set a long value is added:
|
||||
* {@link #optLong(String, long)}
|
||||
* </li>
|
||||
* <li>A new method to explicitly set a double value is added:
|
||||
* {@link #optLong(String, long)}
|
||||
* </li>
|
||||
* <li>
|
||||
* All of {@link #opt(String, long)}, {@link #opt(String, float)} and
|
||||
* {@link #opt(String, double)} invoke {@link #optLong(String, long)}.
|
||||
* </li>
|
||||
* <li>
|
||||
* The same changes have been applied to {@code must()} methods.
|
||||
* </li>
|
||||
* </ol>
|
||||
* The forwarding of existing double/float setters to the long setters ensure
|
||||
* that existing code will link, but are guaranteed to always set a long value.
|
||||
* If you need to write code which works correctly with all hadoop releases,
|
||||
* covert the option to a string explicitly and then call {@link #opt(String, String)}
|
||||
* or {@link #must(String, String)} as appropriate.
|
||||
*
|
||||
* @param <S> Return type on the {@link #build()} call.
|
||||
* @param <B> type of builder itself.
|
||||
*/
|
||||
|
@ -78,9 +50,7 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
|
|||
* @return generic type B.
|
||||
* @see #opt(String, String)
|
||||
*/
|
||||
default B opt(@Nonnull String key, boolean value) {
|
||||
return opt(key, Boolean.toString(value));
|
||||
}
|
||||
B opt(@Nonnull String key, boolean value);
|
||||
|
||||
/**
|
||||
* Set optional int parameter for the Builder.
|
||||
|
@ -90,25 +60,17 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
|
|||
* @return generic type B.
|
||||
* @see #opt(String, String)
|
||||
*/
|
||||
default B opt(@Nonnull String key, int value) {
|
||||
return optLong(key, value);
|
||||
}
|
||||
B opt(@Nonnull String key, int value);
|
||||
|
||||
/**
|
||||
* This parameter is converted to a long and passed
|
||||
* to {@link #optLong(String, long)} -all
|
||||
* decimal precision is lost.
|
||||
* Set optional float parameter for the Builder.
|
||||
*
|
||||
* @param key key.
|
||||
* @param value value.
|
||||
* @return generic type B.
|
||||
* @see #opt(String, String)
|
||||
* @deprecated use {@link #optDouble(String, double)}
|
||||
*/
|
||||
@Deprecated
|
||||
default B opt(@Nonnull String key, float value) {
|
||||
return optLong(key, (long) value);
|
||||
}
|
||||
B opt(@Nonnull String key, float value);
|
||||
|
||||
/**
|
||||
* Set optional long parameter for the Builder.
|
||||
|
@ -116,27 +78,19 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
|
|||
* @param key key.
|
||||
* @param value value.
|
||||
* @return generic type B.
|
||||
* @deprecated use {@link #optLong(String, long)} where possible.
|
||||
* @see #opt(String, String)
|
||||
*/
|
||||
default B opt(@Nonnull String key, long value) {
|
||||
return optLong(key, value);
|
||||
}
|
||||
B opt(@Nonnull String key, long value);
|
||||
|
||||
/**
|
||||
* Pass an optional double parameter for the Builder.
|
||||
* This parameter is converted to a long and passed
|
||||
* to {@link #optLong(String, long)} -all
|
||||
* decimal precision is lost.
|
||||
* Set optional double parameter for the Builder.
|
||||
*
|
||||
* @param key key.
|
||||
* @param value value.
|
||||
* @return generic type B.
|
||||
* @see #opt(String, String)
|
||||
* @deprecated use {@link #optDouble(String, double)}
|
||||
*/
|
||||
@Deprecated
|
||||
default B opt(@Nonnull String key, double value) {
|
||||
return optLong(key, (long) value);
|
||||
}
|
||||
B opt(@Nonnull String key, double value);
|
||||
|
||||
/**
|
||||
* Set an array of string values as optional parameter for the Builder.
|
||||
|
@ -148,30 +102,6 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
|
|||
*/
|
||||
B opt(@Nonnull String key, @Nonnull String... values);
|
||||
|
||||
/**
|
||||
* Set optional long parameter for the Builder.
|
||||
*
|
||||
* @param key key.
|
||||
* @param value value.
|
||||
* @return generic type B.
|
||||
* @see #opt(String, String)
|
||||
*/
|
||||
default B optLong(@Nonnull String key, long value) {
|
||||
return opt(key, Long.toString(value));
|
||||
}
|
||||
|
||||
/**
|
||||
* Set optional double parameter for the Builder.
|
||||
*
|
||||
* @param key key.
|
||||
* @param value value.
|
||||
* @return generic type B.
|
||||
* @see #opt(String, String)
|
||||
*/
|
||||
default B optDouble(@Nonnull String key, double value) {
|
||||
return opt(key, Double.toString(value));
|
||||
}
|
||||
|
||||
/**
|
||||
* Set mandatory option to the Builder.
|
||||
*
|
||||
|
@ -192,9 +122,7 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
|
|||
* @return generic type B.
|
||||
* @see #must(String, String)
|
||||
*/
|
||||
default B must(@Nonnull String key, boolean value) {
|
||||
return must(key, Boolean.toString(value));
|
||||
}
|
||||
B must(@Nonnull String key, boolean value);
|
||||
|
||||
/**
|
||||
* Set mandatory int option.
|
||||
|
@ -204,24 +132,17 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
|
|||
* @return generic type B.
|
||||
* @see #must(String, String)
|
||||
*/
|
||||
default B must(@Nonnull String key, int value) {
|
||||
return mustLong(key, value);
|
||||
}
|
||||
B must(@Nonnull String key, int value);
|
||||
|
||||
/**
|
||||
* This parameter is converted to a long and passed
|
||||
* to {@link #mustLong(String, long)} -all
|
||||
* decimal precision is lost.
|
||||
* Set mandatory float option.
|
||||
*
|
||||
* @param key key.
|
||||
* @param value value.
|
||||
* @return generic type B.
|
||||
* @deprecated use {@link #mustDouble(String, double)} to set floating point.
|
||||
* @see #must(String, String)
|
||||
*/
|
||||
@Deprecated
|
||||
default B must(@Nonnull String key, float value) {
|
||||
return mustLong(key, (long) value);
|
||||
}
|
||||
B must(@Nonnull String key, float value);
|
||||
|
||||
/**
|
||||
* Set mandatory long option.
|
||||
|
@ -231,24 +152,17 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
|
|||
* @return generic type B.
|
||||
* @see #must(String, String)
|
||||
*/
|
||||
@Deprecated
|
||||
default B must(@Nonnull String key, long value) {
|
||||
return mustLong(key, (long) value);
|
||||
}
|
||||
B must(@Nonnull String key, long value);
|
||||
|
||||
/**
|
||||
* Set mandatory long option, despite passing in a floating
|
||||
* point value.
|
||||
* Set mandatory double option.
|
||||
*
|
||||
* @param key key.
|
||||
* @param value value.
|
||||
* @return generic type B.
|
||||
* @see #must(String, String)
|
||||
*/
|
||||
@Deprecated
|
||||
default B must(@Nonnull String key, double value) {
|
||||
return mustLong(key, (long) value);
|
||||
}
|
||||
B must(@Nonnull String key, double value);
|
||||
|
||||
/**
|
||||
* Set a string array as mandatory option.
|
||||
|
@ -260,30 +174,6 @@ public interface FSBuilder<S, B extends FSBuilder<S, B>> {
|
|||
*/
|
||||
B must(@Nonnull String key, @Nonnull String... values);
|
||||
|
||||
/**
|
||||
* Set mandatory long parameter for the Builder.
|
||||
*
|
||||
* @param key key.
|
||||
* @param value value.
|
||||
* @return generic type B.
|
||||
* @see #opt(String, String)
|
||||
*/
|
||||
default B mustLong(@Nonnull String key, long value) {
|
||||
return must(key, Long.toString(value));
|
||||
}
|
||||
|
||||
/**
|
||||
* Set mandatory double parameter for the Builder.
|
||||
*
|
||||
* @param key key.
|
||||
* @param value value.
|
||||
* @return generic type B.
|
||||
* @see #opt(String, String)
|
||||
*/
|
||||
default B mustDouble(@Nonnull String key, double value) {
|
||||
return must(key, Double.toString(value));
|
||||
}
|
||||
|
||||
/**
|
||||
* Instantiate the object which was being built.
|
||||
*
|
||||
|
|
|
@@ -2231,7 +2231,7 @@ public class FileContext implements PathCapabilities {
      InputStream in = awaitFuture(openFile(qSrc)
          .opt(FS_OPTION_OPENFILE_READ_POLICY,
              FS_OPTION_OPENFILE_READ_POLICY_WHOLE_FILE)
          .optLong(FS_OPTION_OPENFILE_LENGTH,
          .opt(FS_OPTION_OPENFILE_LENGTH,
              fs.getLen()) // file length hint for object stores
          .build());
      try (OutputStream out = create(qDst, createFlag)) {

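This hunk (and the matching FileUtil one further down) moves the numeric openFile() option from opt() to optLong(), avoiding the long-to-double overload ambiguity that the FSBuilder changes describe. A hedged sketch of the same pattern against FileSystem.openFile(); the path is illustrative and the static imports of the FS_OPTION_OPENFILE_* constants and awaitFuture() are assumed:

    FileStatus st = fs.getFileStatus(new Path("/data/example.bin"));
    FSDataInputStream in = awaitFuture(fs.openFile(st.getPath())
        .opt(FS_OPTION_OPENFILE_READ_POLICY, FS_OPTION_OPENFILE_READ_POLICY_WHOLE_FILE)
        // optLong() keeps the hint as a long instead of routing through the double overload.
        .optLong(FS_OPTION_OPENFILE_LENGTH, st.getLen())
        .build());
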
@@ -55,15 +55,6 @@ public interface FileRange {
   */
  void setData(CompletableFuture<ByteBuffer> data);

  /**
   * Get any reference passed in to the file range constructor.
   * This is not used by any implementation code; it is to help
   * bind this API to libraries retrieving multiple stripes of
   * data in parallel.
   * @return a reference or null.
   */
  Object getReference();

  /**
   * Factory method to create a FileRange object.
   * @param offset starting offset of the range.
@@ -71,17 +62,6 @@ public interface FileRange {
   * @return a new instance of FileRangeImpl.
   */
  static FileRange createFileRange(long offset, int length) {
    return new FileRangeImpl(offset, length, null);
  }

  /**
   * Factory method to create a FileRange object.
   * @param offset starting offset of the range.
   * @param length length of the range.
   * @param reference nullable reference to store in the range.
   * @return a new instance of FileRangeImpl.
   */
  static FileRange createFileRange(long offset, int length, Object reference) {
    return new FileRangeImpl(offset, length, reference);
    return new FileRangeImpl(offset, length);
  }
}

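The trunk side keeps an optional caller-supplied reference on each range so that parallel readers can map completed ranges back to their own bookkeeping objects. A small sketch; the Stripe type is purely illustrative:

    final class Stripe {
      final int id;
      Stripe(int id) { this.id = id; }
    }

    Stripe stripe = new Stripe(7);
    FileRange range = FileRange.createFileRange(0, 1024, stripe);
    // Once the range's data future completes, recover the owner:
    Stripe owner = (Stripe) range.getReference();
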
@@ -402,8 +402,7 @@ public class FileStatus implements Writable, Comparable<Object>,
  }

  /**
   * Compare this FileStatus to another FileStatus based on lexicographical
   * order of path.
   * Compare this FileStatus to another FileStatus
   * @param o the FileStatus to be compared.
   * @return a negative integer, zero, or a positive integer as this object
   * is less than, equal to, or greater than the specified object.
@@ -413,8 +412,7 @@ public class FileStatus implements Writable, Comparable<Object>,
  }

  /**
   * Compare this FileStatus to another FileStatus based on lexicographical
   * order of path.
   * Compare this FileStatus to another FileStatus.
   * This method was added back by HADOOP-14683 to keep binary compatibility.
   *
   * @param o the FileStatus to be compared.

@ -21,6 +21,7 @@ import javax.annotation.Nonnull;
|
|||
import java.io.Closeable;
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.io.InterruptedIOException;
|
||||
import java.lang.ref.WeakReference;
|
||||
import java.lang.ref.ReferenceQueue;
|
||||
import java.net.URI;
|
||||
|
@ -1543,39 +1544,6 @@ public abstract class FileSystem extends Configured
|
|||
public abstract FSDataOutputStream append(Path f, int bufferSize,
|
||||
Progressable progress) throws IOException;
|
||||
|
||||
/**
|
||||
* Append to an existing file (optional operation).
|
||||
* @param f the existing file to be appended.
|
||||
* @param appendToNewBlock whether to append data to a new block
|
||||
* instead of the end of the last partial block
|
||||
* @throws IOException IO failure
|
||||
* @throws UnsupportedOperationException if the operation is unsupported
|
||||
* (default).
|
||||
* @return output stream.
|
||||
*/
|
||||
public FSDataOutputStream append(Path f, boolean appendToNewBlock) throws IOException {
|
||||
return append(f, getConf().getInt(IO_FILE_BUFFER_SIZE_KEY,
|
||||
IO_FILE_BUFFER_SIZE_DEFAULT), null, appendToNewBlock);
|
||||
}
|
||||
|
||||
/**
|
||||
* Append to an existing file (optional operation).
|
||||
* This function is used for being overridden by some FileSystem like DistributedFileSystem
|
||||
* @param f the existing file to be appended.
|
||||
* @param bufferSize the size of the buffer to be used.
|
||||
* @param progress for reporting progress if it is not null.
|
||||
* @param appendToNewBlock whether to append data to a new block
|
||||
* instead of the end of the last partial block
|
||||
* @throws IOException IO failure
|
||||
* @throws UnsupportedOperationException if the operation is unsupported
|
||||
* (default).
|
||||
* @return output stream.
|
||||
*/
|
||||
public FSDataOutputStream append(Path f, int bufferSize,
|
||||
Progressable progress, boolean appendToNewBlock) throws IOException {
|
||||
return append(f, bufferSize, progress);
|
||||
}
|
||||
|
||||
/**
|
||||
* Concat existing files together.
|
||||
* @param trg the path to the target destination.
|
||||
|
@ -2413,14 +2381,8 @@ public abstract class FileSystem extends Configured
|
|||
if (stat.isFile()) { // file
|
||||
curFile = stat;
|
||||
} else if (recursive) { // directory
|
||||
try {
|
||||
RemoteIterator<LocatedFileStatus> newDirItor = listLocatedStatus(stat.getPath());
|
||||
itors.push(curItor);
|
||||
curItor = newDirItor;
|
||||
} catch (FileNotFoundException ignored) {
|
||||
LOGGER.debug("Directory {} deleted while attempting for recursive listing",
|
||||
stat.getPath());
|
||||
}
|
||||
itors.push(curItor);
|
||||
curItor = listLocatedStatus(stat.getPath());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3602,9 +3564,9 @@ public abstract class FileSystem extends Configured
|
|||
} catch (IOException | RuntimeException e) {
|
||||
// exception raised during initialization.
|
||||
// log summary at warn and full stack at debug
|
||||
LOGGER.warn("Failed to initialize filesystem {}: {}",
|
||||
LOGGER.warn("Failed to initialize fileystem {}: {}",
|
||||
uri, e.toString());
|
||||
LOGGER.debug("Failed to initialize filesystem", e);
|
||||
LOGGER.debug("Failed to initialize fileystem", e);
|
||||
// then (robustly) close the FS, so as to invoke any
|
||||
// cleanup code.
|
||||
IOUtils.cleanupWithLogger(LOGGER, fs);
|
||||
|
@ -3685,7 +3647,11 @@ public abstract class FileSystem extends Configured
|
|||
// to construct an instance.
|
||||
try (DurationInfo d = new DurationInfo(LOGGER, false,
|
||||
"Acquiring creator semaphore for %s", uri)) {
|
||||
creatorPermits.acquireUninterruptibly();
|
||||
creatorPermits.acquire();
|
||||
} catch (InterruptedException e) {
|
||||
// acquisition was interrupted; convert to an IOE.
|
||||
throw (IOException)new InterruptedIOException(e.toString())
|
||||
.initCause(e);
|
||||
}
|
||||
FileSystem fsToClose = null;
|
||||
try {
|
||||
|
@ -3942,7 +3908,6 @@ public abstract class FileSystem extends Configured
|
|||
private volatile long bytesReadDistanceOfThreeOrFour;
|
||||
private volatile long bytesReadDistanceOfFiveOrLarger;
|
||||
private volatile long bytesReadErasureCoded;
|
||||
private volatile long remoteReadTimeMS;
|
||||
|
||||
/**
|
||||
* Add another StatisticsData object to this one.
|
||||
|
@ -3960,7 +3925,6 @@ public abstract class FileSystem extends Configured
|
|||
this.bytesReadDistanceOfFiveOrLarger +=
|
||||
other.bytesReadDistanceOfFiveOrLarger;
|
||||
this.bytesReadErasureCoded += other.bytesReadErasureCoded;
|
||||
this.remoteReadTimeMS += other.remoteReadTimeMS;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -3979,7 +3943,6 @@ public abstract class FileSystem extends Configured
|
|||
this.bytesReadDistanceOfFiveOrLarger =
|
||||
-this.bytesReadDistanceOfFiveOrLarger;
|
||||
this.bytesReadErasureCoded = -this.bytesReadErasureCoded;
|
||||
this.remoteReadTimeMS = -this.remoteReadTimeMS;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -4028,10 +3991,6 @@ public abstract class FileSystem extends Configured
|
|||
public long getBytesReadErasureCoded() {
|
||||
return bytesReadErasureCoded;
|
||||
}
|
||||
|
||||
public long getRemoteReadTimeMS() {
|
||||
return remoteReadTimeMS;
|
||||
}
|
||||
}
|
||||
|
||||
private interface StatisticsAggregator<T> {
|
||||
|
@ -4259,14 +4218,6 @@ public abstract class FileSystem extends Configured
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Increment the time taken to read bytes from remote in the statistics.
|
||||
* @param durationMS time taken in ms to read bytes from remote
|
||||
*/
|
||||
public void increaseRemoteReadTime(final long durationMS) {
|
||||
getThreadStatistics().remoteReadTimeMS += durationMS;
|
||||
}
|
||||
|
||||
/**
|
||||
* Apply the given aggregator to all StatisticsData objects associated with
|
||||
* this Statistics object.
|
||||
|
@ -4414,25 +4365,6 @@ public abstract class FileSystem extends Configured
|
|||
return bytesRead;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get total time taken in ms for bytes read from remote.
|
||||
* @return time taken in ms for remote bytes read.
|
||||
*/
|
||||
public long getRemoteReadTime() {
|
||||
return visitAll(new StatisticsAggregator<Long>() {
|
||||
private long remoteReadTimeMS = 0;
|
||||
|
||||
@Override
|
||||
public void accept(StatisticsData data) {
|
||||
remoteReadTimeMS += data.remoteReadTimeMS;
|
||||
}
|
||||
|
||||
public Long aggregate() {
|
||||
return remoteReadTimeMS;
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Get all statistics data.
|
||||
* MR or other frameworks can use the method to get all statistics at once.
|
||||
|
|
|
@@ -47,8 +47,7 @@ public class FileSystemStorageStatistics extends StorageStatistics {
      "bytesReadDistanceOfOneOrTwo",
      "bytesReadDistanceOfThreeOrFour",
      "bytesReadDistanceOfFiveOrLarger",
      "bytesReadErasureCoded",
      "remoteReadTimeMS"
      "bytesReadErasureCoded"
  };

  private static class LongStatisticIterator
@@ -108,8 +107,6 @@ public class FileSystemStorageStatistics extends StorageStatistics {
      return data.getBytesReadDistanceOfFiveOrLarger();
    case "bytesReadErasureCoded":
      return data.getBytesReadErasureCoded();
    case "remoteReadTimeMS":
      return data.getRemoteReadTimeMS();
    default:
      return null;
    }

@@ -484,7 +484,7 @@ public class FileUtil {
    in = awaitFuture(srcFS.openFile(src)
        .opt(FS_OPTION_OPENFILE_READ_POLICY,
            FS_OPTION_OPENFILE_READ_POLICY_WHOLE_FILE)
        .optLong(FS_OPTION_OPENFILE_LENGTH,
        .opt(FS_OPTION_OPENFILE_LENGTH,
            srcStatus.getLen()) // file length hint for object stores
        .build());
    out = dstFS.create(dst, overwrite);

@ -1,46 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.fs;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* Whether the given Path of the FileSystem has the capability to perform lease recovery.
|
||||
*/
|
||||
public interface LeaseRecoverable {
|
||||
|
||||
/**
|
||||
* Start the lease recovery of a file.
|
||||
*
|
||||
* @param file path to a file.
|
||||
* @return true if the file is already closed, and it does not require lease recovery.
|
||||
* @throws IOException if an error occurs during lease recovery.
|
||||
* @throws UnsupportedOperationException if lease recovery is not supported by this filesystem.
|
||||
*/
|
||||
boolean recoverLease(Path file) throws IOException;
|
||||
|
||||
/**
|
||||
* Get the close status of a file.
|
||||
* @param file The string representation of the path to the file
|
||||
* @return return true if file is closed
|
||||
* @throws IOException If an I/O error occurred
|
||||
* @throws UnsupportedOperationException if isFileClosed is not supported by this filesystem.
|
||||
*/
|
||||
boolean isFileClosed(Path file) throws IOException;
|
||||
}
|
|
@ -396,10 +396,6 @@ public class LocalDirAllocator {
|
|||
Context ctx = confChanged(conf);
|
||||
int numDirs = ctx.localDirs.length;
|
||||
int numDirsSearched = 0;
|
||||
// Max capacity in any directory
|
||||
long maxCapacity = 0;
|
||||
String errorText = null;
|
||||
IOException diskException = null;
|
||||
//remove the leading slash from the path (to make sure that the uri
|
||||
//resolution results in a valid path on the dir being checked)
|
||||
if (pathStr.startsWith("/")) {
|
||||
|
@ -414,14 +410,7 @@ public class LocalDirAllocator {
|
|||
|
||||
//build the "roulette wheel"
|
||||
for(int i =0; i < ctx.dirDF.length; ++i) {
|
||||
final DF target = ctx.dirDF[i];
|
||||
// attempt to recreate the dir so that getAvailable() is valid
|
||||
// if it fails, getAvailable() will return 0, so the dir will
|
||||
// be declared unavailable.
|
||||
// return value is logged at debug to keep spotbugs quiet.
|
||||
final boolean b = new File(target.getDirPath()).mkdirs();
|
||||
LOG.debug("mkdirs of {}={}", target, b);
|
||||
availableOnDisk[i] = target.getAvailable();
|
||||
availableOnDisk[i] = ctx.dirDF[i].getAvailable();
|
||||
totalAvailable += availableOnDisk[i];
|
||||
}
|
||||
|
||||
|
@ -455,18 +444,9 @@ public class LocalDirAllocator {
|
|||
int dirNum = ctx.getAndIncrDirNumLastAccessed(randomInc);
|
||||
while (numDirsSearched < numDirs) {
|
||||
long capacity = ctx.dirDF[dirNum].getAvailable();
|
||||
if (capacity > maxCapacity) {
|
||||
maxCapacity = capacity;
|
||||
}
|
||||
if (capacity > size) {
|
||||
try {
|
||||
returnPath = createPath(ctx.localDirs[dirNum], pathStr,
|
||||
checkWrite);
|
||||
} catch (IOException e) {
|
||||
errorText = e.getMessage();
|
||||
diskException = e;
|
||||
LOG.debug("DiskException caught for dir {}", ctx.localDirs[dirNum], e);
|
||||
}
|
||||
returnPath =
|
||||
createPath(ctx.localDirs[dirNum], pathStr, checkWrite);
|
||||
if (returnPath != null) {
|
||||
ctx.getAndIncrDirNumLastAccessed(numDirsSearched);
|
||||
break;
|
||||
|
@ -482,13 +462,8 @@ public class LocalDirAllocator {
|
|||
}
|
||||
|
||||
//no path found
|
||||
String newErrorText = "Could not find any valid local directory for " +
|
||||
pathStr + " with requested size " + size +
|
||||
" as the max capacity in any directory is " + maxCapacity;
|
||||
if (errorText != null) {
|
||||
newErrorText = newErrorText + " due to " + errorText;
|
||||
}
|
||||
throw new DiskErrorException(newErrorText, diskException);
|
||||
throw new DiskErrorException("Could not find any valid local " +
|
||||
"directory for " + pathStr);
|
||||
}
|
||||
|
||||
/** Creates a file on the local FS. Pass size as
|
||||
|
|
|
@@ -465,12 +465,7 @@ public class Path
   * @return a new path with the suffix added
   */
  public Path suffix(String suffix) {
    Path parent = getParent();
    if (parent == null) {
      return new Path("/", getName() + suffix);
    }

    return new Path(parent, getName() + suffix);
    return new Path(getParent(), getName()+suffix);
  }

  @Override

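The trunk-side version only differs when getParent() returns null, which happens for the root path. A quick illustration:

    // Normal case: both variants agree.
    new Path("/dir/file").suffix(".bak");   // -> /dir/file.bak

    // Corner case: "/" has no parent, so only the variant with the null check
    // avoids failing when suffix() is invoked on the root path.
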
@@ -114,16 +114,6 @@ public interface PositionedReadable {
   * As a result of the call, each range will have FileRange.setData(CompletableFuture)
   * called with a future that when complete will have a ByteBuffer with the
   * data from the file's range.
   * <p>
   * The position returned by getPos() after readVectored() is undefined.
   * </p>
   * <p>
   * If a file is changed while the readVectored() operation is in progress, the output is
   * undefined. Some ranges may have old data, some may have new and some may have both.
   * </p>
   * <p>
   * While a readVectored() operation is in progress, normal read api calls may block.
   * </p>
   * @param ranges the byte ranges to read
   * @param allocate the function to allocate ByteBuffer
   * @throws IOException any IOE.

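A hedged sketch of driving the vectored-read API that this javadoc describes: build the ranges, submit them, then wait on each range's future. The path and offsets are illustrative; FutureIO.awaitFuture() and the java.util imports are assumed:

    try (FSDataInputStream in = fs.open(new Path("/data/example.bin"))) {
      List<FileRange> ranges = Arrays.asList(
          FileRange.createFileRange(0, 4096),
          FileRange.createFileRange(1_048_576, 8192));

      // Ranges are filled asynchronously; ByteBuffer.allocate supplies the buffers.
      in.readVectored(ranges, ByteBuffer::allocate);

      for (FileRange range : ranges) {
        ByteBuffer data = FutureIO.awaitFuture(range.getData());
        // consume data; its contents correspond to the requested range
      }
    }
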
@@ -1326,9 +1326,4 @@ public class RawLocalFileSystem extends FileSystem {
      return super.hasPathCapability(path, capability);
    }
  }

  @VisibleForTesting
  static void setUseDeprecatedFileStatus(boolean useDeprecatedFileStatus) {
    RawLocalFileSystem.useDeprecatedFileStatus = useDeprecatedFileStatus;
  }
}

@ -1,50 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.fs;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
/**
|
||||
* Whether the given filesystem is in any status of safe mode.
|
||||
*/
|
||||
public interface SafeMode {
|
||||
|
||||
/**
|
||||
* Enter, leave, or get safe mode.
|
||||
*
|
||||
* @param action One of {@link SafeModeAction} LEAVE, ENTER, GET, FORCE_EXIT.
|
||||
* @throws IOException if set safe mode fails to proceed.
|
||||
* @return true if the action is successfully accepted, otherwise false means rejected.
|
||||
*/
|
||||
default boolean setSafeMode(SafeModeAction action) throws IOException {
|
||||
return setSafeMode(action, false);
|
||||
}
|
||||
|
||||
/**
|
||||
* Enter, leave, or get safe mode.
|
||||
*
|
||||
* @param action One of {@link SafeModeAction} LEAVE, ENTER, GET, FORCE_EXIT.
|
||||
* @param isChecked If true check only for Active metadata node / NameNode's status,
|
||||
* else check first metadata node / NameNode's status.
|
||||
* @throws IOException if set safe mode fails to proceed.
|
||||
* @return true if the action is successfully accepted, otherwise false means rejected.
|
||||
*/
|
||||
boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException;
|
||||
|
||||
}
|
|
@@ -84,7 +84,7 @@ public interface StreamCapabilities {
   * Support for vectored IO api.
   * See {@code PositionedReadable#readVectored(List, IntFunction)}.
   */
  String VECTOREDIO = "in:readvectored";
  String VECTOREDIO = "readvectored";

  /**
   * Stream abort() capability implemented by {@link Abortable#abort()}.

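Because the probe string differs between the two sides ("in:readvectored" versus "readvectored"), callers should test through the constant rather than a literal:

    FSDataInputStream in = fs.open(path);
    if (in.hasCapability(StreamCapabilities.VECTOREDIO)) {
      // safe to use in.readVectored(...)
    } else {
      // fall back to ordinary positioned reads
    }
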
@ -23,10 +23,8 @@ import org.apache.hadoop.classification.InterfaceAudience;
|
|||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.conf.Configured;
|
||||
import org.apache.hadoop.fs.viewfs.ViewFileSystem;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
import static org.apache.hadoop.fs.viewfs.Constants.*;
|
||||
|
||||
/**
|
||||
* Provides a trash facility which supports pluggable Trash policies.
|
||||
|
@ -69,7 +67,7 @@ public class Trash extends Configured {
|
|||
* Hence we get the file system of the fully-qualified resolved-path and
|
||||
* then move the path p to the trashbin in that volume,
|
||||
* @param fs - the filesystem of path p
|
||||
* @param p - the path being deleted - to be moved to trash
|
||||
* @param p - the path being deleted - to be moved to trasg
|
||||
* @param conf - configuration
|
||||
* @return false if the item is already in the trash or trash is disabled
|
||||
* @throws IOException on error
|
||||
|
@ -96,27 +94,6 @@ public class Trash extends Configured {
|
|||
LOG.warn("Failed to get server trash configuration", e);
|
||||
throw new IOException("Failed to get server trash configuration", e);
|
||||
}
|
||||
|
||||
/*
|
||||
* In HADOOP-18144, we changed getTrashRoot() in ViewFileSystem to return a
|
||||
* viewFS path, instead of a targetFS path. moveToTrash works for
|
||||
* ViewFileSystem now. ViewFileSystem will do path resolution internally by
|
||||
* itself.
|
||||
*
|
||||
* When localized trash flag is enabled:
|
||||
* 1). if fs is a ViewFileSystem, we can initialize Trash() with a
|
||||
* ViewFileSystem object;
|
||||
* 2). When fs is not a ViewFileSystem, the only place we would need to
|
||||
* resolve a path is for symbolic links. However, symlink is not
|
||||
* enabled in Hadoop due to the complexity to support it
|
||||
* (HADOOP-10019).
|
||||
*/
|
||||
if (conf.getBoolean(CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT,
|
||||
CONFIG_VIEWFS_TRASH_FORCE_INSIDE_MOUNT_POINT_DEFAULT)) {
|
||||
Trash trash = new Trash(fs, conf);
|
||||
return trash.moveToTrash(p);
|
||||
}
|
||||
|
||||
Trash trash = new Trash(fullyResolvedFs, conf);
|
||||
return trash.moveToTrash(fullyResolvedPath);
|
||||
}
|
||||
|
|
|
@@ -30,7 +30,6 @@ import java.util.function.IntFunction;

import org.apache.hadoop.fs.impl.CombinedFileRange;
import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.util.functional.Function4RaisingIOE;

/**
 * Utility class which implements helper methods used

@@ -38,8 +37,6 @@ import org.apache.hadoop.util.functional.Function4RaisingIOE;
 */
public final class VectoredReadUtils {

  private static final int TMP_BUFFER_MAX_SIZE = 64 * 1024;

  /**
   * Validate a single range.
   * @param range file range.

@@ -117,12 +114,7 @@ public final class VectoredReadUtils {
      FileRange range,
      ByteBuffer buffer) throws IOException {
    if (buffer.isDirect()) {
      readInDirectBuffer(range.getLength(),
          buffer,
          (position, buffer1, offset, length) -> {
            stream.readFully(position, buffer1, offset, length);
            return null;
          });
      buffer.put(readInDirectBuffer(stream, range));
      buffer.flip();
    } else {
      stream.readFully(range.getOffset(), buffer.array(),

@@ -130,34 +122,13 @@ public final class VectoredReadUtils {
    }
  }

  /**
   * Read bytes from stream into a byte buffer using an
   * intermediate byte array.
   * @param length number of bytes to read.
   * @param buffer buffer to fill.
   * @param operation operation to use for reading data.
   * @throws IOException any IOE.
   */
  public static void readInDirectBuffer(int length,
      ByteBuffer buffer,
      Function4RaisingIOE<Integer, byte[], Integer,
          Integer, Void> operation) throws IOException {
    if (length == 0) {
      return;
    }
    int readBytes = 0;
    int position = 0;
    int tmpBufferMaxSize = Math.min(TMP_BUFFER_MAX_SIZE, length);
    byte[] tmp = new byte[tmpBufferMaxSize];
    while (readBytes < length) {
      int currentLength = (readBytes + tmpBufferMaxSize) < length ?
          tmpBufferMaxSize
          : (length - readBytes);
      operation.apply(position, tmp, 0, currentLength);
      buffer.put(tmp, 0, currentLength);
      position = position + currentLength;
      readBytes = readBytes + currentLength;
    }
  private static byte[] readInDirectBuffer(PositionedReadable stream,
      FileRange range) throws IOException {
    // if we need to read data from a direct buffer and the stream doesn't
    // support it, we allocate a byte array to use.
    byte[] tmp = new byte[range.getLength()];
    stream.readFully(range.getOffset(), tmp, 0, tmp.length);
    return tmp;
  }

  /**

@@ -239,7 +210,6 @@ public final class VectoredReadUtils {
      if (sortedRanges[i].getOffset() < prev.getOffset() + prev.getLength()) {
        throw new UnsupportedOperationException("Overlapping ranges are not supported");
      }
      prev = sortedRanges[i];
    }
    return Arrays.asList(sortedRanges);
  }

@@ -307,16 +277,9 @@ public final class VectoredReadUtils {
      FileRange request) {
    int offsetChange = (int) (request.getOffset() - readOffset);
    int requestLength = request.getLength();
    // Create a new buffer that is backed by the original contents
    // The buffer will have position 0 and the same limit as the original one
    readData = readData.slice();
    // Change the offset and the limit of the buffer as the reader wants to see
    // only relevant data
    readData.position(offsetChange);
    readData.limit(offsetChange + requestLength);
    // Create a new buffer after the limit change so that only that portion of the data is
    // returned to the reader.
    readData = readData.slice();
    return readData;
  }
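A worked sketch of the slice arithmetic in the sliceTo() hunk above: one large read issued at readOffset serves a smaller request that starts inside it. The values are illustrative only; the buffer handling mirrors the code shown.

  static ByteBuffer sliceDemo() {
    ByteBuffer readData = ByteBuffer.allocate(1024 * 1024); // filled by one combined read
    long readOffset = 4096;      // where the combined read started
    long requestOffset = 5000;   // where the caller's range starts
    int requestLength = 100;
    int offsetChange = (int) (requestOffset - readOffset);  // 904
    ByteBuffer view = readData.slice();      // position 0, same backing contents
    view.position(offsetChange);
    view.limit(offsetChange + requestLength);
    return view.slice();                     // exactly the 100 requested bytes
  }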
@@ -90,11 +90,6 @@ public final class AuditConstants {
   */
  public static final String PARAM_PROCESS = "ps";

  /**
   * Header: Range for GET request data: {@value}.
   */
  public static final String PARAM_RANGE = "rg";

  /**
   * Task Attempt ID query header: {@value}.
   */

@@ -115,9 +110,4 @@ public final class AuditConstants {
   */
  public static final String PARAM_TIMESTAMP = "ts";

  /**
   * Num of files to be deleted as part of the bulk delete request.
   */
  public static final String DELETE_KEYS_SIZE = "ks";

}
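A hedged sketch of how these audit keys can be assembled into a key=value audit string. The auditor classes that actually consume the constants are not part of this diff, so only the constants themselves are exercised here.

  static String auditParamsDemo() {
    Map<String, String> params = new LinkedHashMap<>();            // java.util
    params.put(AuditConstants.PARAM_PROCESS, "4242");              // "ps"
    params.put(AuditConstants.PARAM_RANGE, "0-65536");             // "rg"
    params.put(AuditConstants.PARAM_TIMESTAMP,
        Long.toString(System.currentTimeMillis()));                // "ts"
    return params.entrySet().stream()
        .map(e -> e.getKey() + "=" + e.getValue())
        .collect(Collectors.joining("&"));                         // java.util.stream
  }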
@@ -44,13 +44,11 @@ import static org.apache.hadoop.util.Preconditions.checkNotNull;
 * with option support.
 *
 * <code>
 * .opt("fs.s3a.open.option.caching", true)
 * .opt("fs.option.openfile.read.policy", "random, adaptive")
 * .opt("foofs:option.a", true)
 * .opt("foofs:option.b", "value")
 * .opt("fs.s3a.open.option.etag", "9fe4c37c25b")
 * .optLong("fs.option.openfile.length", 1_500_000_000_000)
 * .must("fs.option.openfile.buffer.size", 256_000)
 * .mustLong("fs.option.openfile.split.start", 256_000_000)
 * .mustLong("fs.option.openfile.split.end", 512_000_000)
 * .must("foofs:cache", true)
 * .must("barfs:cache-size", 256 * 1024 * 1024)
 * .build();
 * </code>
 *

@@ -66,7 +64,6 @@ import static org.apache.hadoop.util.Preconditions.checkNotNull;
 */
@InterfaceAudience.Public
@InterfaceStability.Unstable
@SuppressWarnings({"deprecation", "unused"})
public abstract class
    AbstractFSBuilderImpl<S, B extends FSBuilder<S, B>>
    implements FSBuilder<S, B> {
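A minimal sketch of the opt()/must() chain documented in the javadoc above, applied to FileSystem.openFile(). The option keys are the ones named in that javadoc, and FutureIO.awaitFuture() is the helper used elsewhere in this change (see the PathData hunk further down); treat the surrounding method as illustrative.

  static void openFileDemo(FileSystem fs, Path path) throws IOException {
    FSDataInputStream in = FutureIO.awaitFuture(
        fs.openFile(path)
            .opt("fs.option.openfile.read.policy", "random, adaptive") // hint, safe to ignore
            .optLong("fs.option.openfile.length", 1_500_000L)          // length hint
            .must("fs.option.openfile.buffer.size", 256_000)           // must be understood
            .build());
    try {
      in.seek(1_000_000L);   // read as usual; unknown opt() keys are ignored,
      in.read();             // unknown must() keys fail the open
    } finally {
      in.close();
    }
  }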
@ -181,7 +178,10 @@ public abstract class
|
|||
*/
|
||||
@Override
|
||||
public B opt(@Nonnull final String key, boolean value) {
|
||||
return opt(key, Boolean.toString(value));
|
||||
mandatoryKeys.remove(key);
|
||||
optionalKeys.add(key);
|
||||
options.setBoolean(key, value);
|
||||
return getThisBuilder();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -191,17 +191,18 @@ public abstract class
|
|||
*/
|
||||
@Override
|
||||
public B opt(@Nonnull final String key, int value) {
|
||||
return optLong(key, value);
|
||||
mandatoryKeys.remove(key);
|
||||
optionalKeys.add(key);
|
||||
options.setInt(key, value);
|
||||
return getThisBuilder();
|
||||
}
|
||||
|
||||
@Override
|
||||
public B opt(@Nonnull final String key, final long value) {
|
||||
return optLong(key, value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public B optLong(@Nonnull final String key, final long value) {
|
||||
return opt(key, Long.toString(value));
|
||||
mandatoryKeys.remove(key);
|
||||
optionalKeys.add(key);
|
||||
options.setLong(key, value);
|
||||
return getThisBuilder();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -211,7 +212,10 @@ public abstract class
|
|||
*/
|
||||
@Override
|
||||
public B opt(@Nonnull final String key, float value) {
|
||||
return optLong(key, (long) value);
|
||||
mandatoryKeys.remove(key);
|
||||
optionalKeys.add(key);
|
||||
options.setFloat(key, value);
|
||||
return getThisBuilder();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -221,17 +225,10 @@ public abstract class
|
|||
*/
|
||||
@Override
|
||||
public B opt(@Nonnull final String key, double value) {
|
||||
return optLong(key, (long) value);
|
||||
}
|
||||
|
||||
/**
|
||||
* Set optional double parameter for the Builder.
|
||||
*
|
||||
* @see #opt(String, String)
|
||||
*/
|
||||
@Override
|
||||
public B optDouble(@Nonnull final String key, double value) {
|
||||
return opt(key, Double.toString(value));
|
||||
mandatoryKeys.remove(key);
|
||||
optionalKeys.add(key);
|
||||
options.setDouble(key, value);
|
||||
return getThisBuilder();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -267,22 +264,10 @@ public abstract class
|
|||
*/
|
||||
@Override
|
||||
public B must(@Nonnull final String key, boolean value) {
|
||||
return must(key, Boolean.toString(value));
|
||||
}
|
||||
|
||||
@Override
|
||||
public B mustLong(@Nonnull final String key, final long value) {
|
||||
return must(key, Long.toString(value));
|
||||
}
|
||||
|
||||
/**
|
||||
* Set optional double parameter for the Builder.
|
||||
*
|
||||
* @see #opt(String, String)
|
||||
*/
|
||||
@Override
|
||||
public B mustDouble(@Nonnull final String key, double value) {
|
||||
return must(key, Double.toString(value));
|
||||
mandatoryKeys.add(key);
|
||||
optionalKeys.remove(key);
|
||||
options.setBoolean(key, value);
|
||||
return getThisBuilder();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -292,22 +277,44 @@ public abstract class
|
|||
*/
|
||||
@Override
|
||||
public B must(@Nonnull final String key, int value) {
|
||||
return mustLong(key, value);
|
||||
mandatoryKeys.add(key);
|
||||
optionalKeys.remove(key);
|
||||
options.setInt(key, value);
|
||||
return getThisBuilder();
|
||||
}
|
||||
|
||||
@Override
|
||||
public B must(@Nonnull final String key, final long value) {
|
||||
return mustLong(key, value);
|
||||
mandatoryKeys.add(key);
|
||||
optionalKeys.remove(key);
|
||||
options.setLong(key, value);
|
||||
return getThisBuilder();
|
||||
}
|
||||
|
||||
/**
|
||||
* Set mandatory float option.
|
||||
*
|
||||
* @see #must(String, String)
|
||||
*/
|
||||
@Override
|
||||
public B must(@Nonnull final String key, final float value) {
|
||||
return mustLong(key, (long) value);
|
||||
public B must(@Nonnull final String key, float value) {
|
||||
mandatoryKeys.add(key);
|
||||
optionalKeys.remove(key);
|
||||
options.setFloat(key, value);
|
||||
return getThisBuilder();
|
||||
}
|
||||
|
||||
/**
|
||||
* Set mandatory double option.
|
||||
*
|
||||
* @see #must(String, String)
|
||||
*/
|
||||
@Override
|
||||
public B must(@Nonnull final String key, double value) {
|
||||
return mustLong(key, (long) value);
|
||||
mandatoryKeys.add(key);
|
||||
optionalKeys.remove(key);
|
||||
options.setDouble(key, value);
|
||||
return getThisBuilder();
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@@ -29,10 +29,10 @@ import java.util.List;
 * together into a single read for efficiency.
 */
public class CombinedFileRange extends FileRangeImpl {
  private List<FileRange> underlying = new ArrayList<>();
  private ArrayList<FileRange> underlying = new ArrayList<>();

  public CombinedFileRange(long offset, long end, FileRange original) {
    super(offset, (int) (end - offset), null);
    super(offset, (int) (end - offset));
    this.underlying.add(original);
  }

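A hedged sketch of the coalescing idea behind CombinedFileRange: two nearby requests are wrapped into one range spanning both so a single backing read can serve them. The two-argument FileRangeImpl constructor follows the form shown in this hunk, the getUnderlying() accessor is an assumption, and the small gap between the ranges is illustrative only.

  static CombinedFileRange coalesceDemo() {
    FileRange first = new FileRangeImpl(0, 100);       // bytes [0, 100)
    FileRange second = new FileRangeImpl(110, 200);    // bytes [110, 310): small gap
    CombinedFileRange combined = new CombinedFileRange(
        first.getOffset(),
        second.getOffset() + second.getLength(),       // end offset 310
        first);
    combined.getUnderlying().add(second);              // assumed accessor for the list above
    return combined;
  }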
@ -1,95 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.fs.impl;
|
||||
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.store.LogExactlyOnce;
|
||||
|
||||
/**
|
||||
* Class to help with use of FSBuilder.
|
||||
*/
|
||||
public class FSBuilderSupport {
|
||||
|
||||
private static final Logger LOG =
|
||||
LoggerFactory.getLogger(FSBuilderSupport.class);
|
||||
|
||||
public static final LogExactlyOnce LOG_PARSE_ERROR = new LogExactlyOnce(LOG);
|
||||
|
||||
/**
|
||||
* Options which are parsed.
|
||||
*/
|
||||
private final Configuration options;
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
* @param options the configuration options from the builder.
|
||||
*/
|
||||
public FSBuilderSupport(final Configuration options) {
|
||||
this.options = options;
|
||||
}
|
||||
|
||||
public Configuration getOptions() {
|
||||
return options;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a long value with resilience to unparseable values.
|
||||
* Negative values are replaced with the default.
|
||||
* @param key key to log
|
||||
* @param defVal default value
|
||||
* @return long value
|
||||
*/
|
||||
public long getPositiveLong(String key, long defVal) {
|
||||
long l = getLong(key, defVal);
|
||||
if (l < 0) {
|
||||
LOG.debug("The option {} has a negative value {}, replacing with the default {}",
|
||||
key, l, defVal);
|
||||
l = defVal;
|
||||
}
|
||||
return l;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get a long value with resilience to unparseable values.
|
||||
* @param key key to log
|
||||
* @param defVal default value
|
||||
* @return long value
|
||||
*/
|
||||
public long getLong(String key, long defVal) {
|
||||
final String v = options.getTrimmed(key, "");
|
||||
if (v.isEmpty()) {
|
||||
return defVal;
|
||||
}
|
||||
try {
|
||||
return options.getLong(key, defVal);
|
||||
} catch (NumberFormatException e) {
|
||||
final String msg = String.format(
|
||||
"The option %s value \"%s\" is not a long integer; using the default value %s",
|
||||
key, v, defVal);
|
||||
// not a long,
|
||||
LOG_PARSE_ERROR.warn(msg);
|
||||
LOG.debug("{}", msg, e);
|
||||
return defVal;
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@@ -34,21 +34,9 @@ public class FileRangeImpl implements FileRange {
  private int length;
  private CompletableFuture<ByteBuffer> reader;

  /**
   * nullable reference to store in the range.
   */
  private final Object reference;

  /**
   * Create.
   * @param offset offset in file
   * @param length length of data to read.
   * @param reference nullable reference to store in the range.
   */
  public FileRangeImpl(long offset, int length, Object reference) {
  public FileRangeImpl(long offset, int length) {
    this.offset = offset;
    this.length = length;
    this.reference = reference;
  }

  @Override

@@ -83,9 +71,4 @@ public class FileRangeImpl implements FileRange {
  public CompletableFuture<ByteBuffer> getData() {
    return reader;
  }

  @Override
  public Object getReference() {
    return reference;
  }
}
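A hedged sketch of the "reference" field above: a caller tags each range with its own context object and recovers it once the vectored read completes. It uses the three-argument constructor from this hunk; the local ChunkContext class is purely illustrative.

  static void referenceDemo() {
    final class ChunkContext {
      final int chunkId;
      ChunkContext(int chunkId) { this.chunkId = chunkId; }
    }
    FileRangeImpl range = new FileRangeImpl(4096, 1024, new ChunkContext(7));
    // ... submit the range to a vectored read and wait for completion ...
    ChunkContext ctx = (ChunkContext) range.getReference();  // the tag survives the read
    System.out.println("completed chunk " + ctx.chunkId);
  }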
@ -1,97 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.fs.impl;
|
||||
|
||||
import java.lang.ref.WeakReference;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.metrics2.MetricsCollector;
|
||||
import org.apache.hadoop.metrics2.MetricsSource;
|
||||
|
||||
import static java.util.Objects.requireNonNull;
|
||||
|
||||
/**
|
||||
* A weak referenced metrics source which avoids hanging on to large objects
|
||||
* if somehow they don't get fully closed/cleaned up.
|
||||
* The JVM may clean up all objects which are only weakly referenced whenever
|
||||
* it does a GC, <i>even if there is no memory pressure</i>.
|
||||
* To avoid these refs being removed, always keep a strong reference around
|
||||
* somewhere.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class WeakRefMetricsSource implements MetricsSource {
|
||||
|
||||
/**
|
||||
* Name to know when unregistering.
|
||||
*/
|
||||
private final String name;
|
||||
|
||||
/**
|
||||
* Underlying metrics source.
|
||||
*/
|
||||
private final WeakReference<MetricsSource> sourceWeakReference;
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
* @param name Name to know when unregistering.
|
||||
* @param source metrics source
|
||||
*/
|
||||
public WeakRefMetricsSource(final String name, final MetricsSource source) {
|
||||
this.name = name;
|
||||
this.sourceWeakReference = new WeakReference<>(requireNonNull(source));
|
||||
}
|
||||
|
||||
/**
|
||||
* If the weak reference is non null, update the metrics.
|
||||
* @param collector to contain the resulting metrics snapshot
|
||||
* @param all if true, return all metrics even if unchanged.
|
||||
*/
|
||||
@Override
|
||||
public void getMetrics(final MetricsCollector collector, final boolean all) {
|
||||
MetricsSource metricsSource = sourceWeakReference.get();
|
||||
if (metricsSource != null) {
|
||||
metricsSource.getMetrics(collector, all);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Name to know when unregistering.
|
||||
* @return the name passed in during construction.
|
||||
*/
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the source, will be null if the reference has been GC'd
|
||||
* @return the source reference
|
||||
*/
|
||||
public MetricsSource getSource() {
|
||||
return sourceWeakReference.get();
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "WeakRefMetricsSource{" +
|
||||
"name='" + name + '\'' +
|
||||
", sourceWeakReference is " +
|
||||
(sourceWeakReference.get() == null ? "unset" : "set") +
|
||||
'}';
|
||||
}
|
||||
}
|
|
@ -25,8 +25,6 @@ import javax.annotation.Nullable;
|
|||
|
||||
import org.apache.hadoop.util.WeakReferenceMap;
|
||||
|
||||
import static java.util.Objects.requireNonNull;
|
||||
|
||||
/**
|
||||
* A WeakReferenceMap for threads.
|
||||
* @param <V> value type of the map
|
||||
|
@ -38,55 +36,30 @@ public class WeakReferenceThreadMap<V> extends WeakReferenceMap<Long, V> {
|
|||
super(factory, referenceLost);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the value for the current thread, creating if needed.
|
||||
* @return an instance.
|
||||
*/
|
||||
public V getForCurrentThread() {
|
||||
return get(currentThreadId());
|
||||
}
|
||||
|
||||
/**
|
||||
* Remove the reference for the current thread.
|
||||
* @return any reference value which existed.
|
||||
*/
|
||||
public V removeForCurrentThread() {
|
||||
return remove(currentThreadId());
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the current thread ID.
|
||||
* @return thread ID.
|
||||
*/
|
||||
public long currentThreadId() {
|
||||
return Thread.currentThread().getId();
|
||||
}
|
||||
|
||||
/**
|
||||
* Set the new value for the current thread.
|
||||
* @param newVal new reference to set for the active thread.
|
||||
* @return the previously set value, possibly null
|
||||
*/
|
||||
public V setForCurrentThread(V newVal) {
|
||||
requireNonNull(newVal);
|
||||
long id = currentThreadId();
|
||||
|
||||
// if the same object is already in the map, just return it.
|
||||
WeakReference<V> existingWeakRef = lookup(id);
|
||||
|
||||
// The looked up reference could be one of
|
||||
// 1. null: nothing there
|
||||
// 2. valid but get() == null : reference lost by GC.
|
||||
// 3. different from the new value
|
||||
// 4. the same as the old value
|
||||
if (resolve(existingWeakRef) == newVal) {
|
||||
// case 4: do nothing, return the new value
|
||||
return newVal;
|
||||
} else {
|
||||
// cases 1, 2, 3: update the map and return the old value
|
||||
return put(id, newVal);
|
||||
WeakReference<V> ref = lookup(id);
|
||||
// Reference value could be set to null. Thus, ref.get() could return
|
||||
// null. Should be handled accordingly while using the returned value.
|
||||
if (ref != null && ref.get() == newVal) {
|
||||
return ref.get();
|
||||
}
|
||||
|
||||
return put(id, newVal);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -23,9 +23,6 @@ import java.io.Closeable;
|
|||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.LocalDirAllocator;
|
||||
|
||||
/**
|
||||
* Provides functionality necessary for caching blocks of data read from FileSystem.
|
||||
*/
|
||||
|
@ -67,10 +64,7 @@ public interface BlockCache extends Closeable {
|
|||
*
|
||||
* @param blockNumber the id of the given block.
|
||||
* @param buffer contents of the given block to be added to this cache.
|
||||
* @param conf the configuration.
|
||||
* @param localDirAllocator the local dir allocator instance.
|
||||
* @throws IOException if there is an error writing the given block.
|
||||
*/
|
||||
void put(int blockNumber, ByteBuffer buffer, Configuration conf,
|
||||
LocalDirAllocator localDirAllocator) throws IOException;
|
||||
void put(int blockNumber, ByteBuffer buffer) throws IOException;
|
||||
}
|
||||
|
|
|
@ -33,8 +33,6 @@ import java.util.function.Supplier;
|
|||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.LocalDirAllocator;
|
||||
import org.apache.hadoop.fs.statistics.DurationTracker;
|
||||
|
||||
import static java.util.Objects.requireNonNull;
|
||||
|
@ -97,10 +95,6 @@ public abstract class CachingBlockManager extends BlockManager {
|
|||
|
||||
private final PrefetchingStatistics prefetchingStatistics;
|
||||
|
||||
private final Configuration conf;
|
||||
|
||||
private final LocalDirAllocator localDirAllocator;
|
||||
|
||||
/**
|
||||
* Constructs an instance of a {@code CachingBlockManager}.
|
||||
*
|
||||
|
@ -108,17 +102,14 @@ public abstract class CachingBlockManager extends BlockManager {
|
|||
* @param blockData information about each block of the underlying file.
|
||||
* @param bufferPoolSize size of the in-memory cache in terms of number of blocks.
|
||||
* @param prefetchingStatistics statistics for this stream.
|
||||
* @param conf the configuration.
|
||||
* @param localDirAllocator the local dir allocator instance.
|
||||
*
|
||||
* @throws IllegalArgumentException if bufferPoolSize is zero or negative.
|
||||
*/
|
||||
public CachingBlockManager(
|
||||
ExecutorServiceFuturePool futurePool,
|
||||
BlockData blockData,
|
||||
int bufferPoolSize,
|
||||
PrefetchingStatistics prefetchingStatistics,
|
||||
Configuration conf,
|
||||
LocalDirAllocator localDirAllocator) {
|
||||
PrefetchingStatistics prefetchingStatistics) {
|
||||
super(blockData);
|
||||
|
||||
Validate.checkPositiveInteger(bufferPoolSize, "bufferPoolSize");
|
||||
|
@ -138,8 +129,6 @@ public abstract class CachingBlockManager extends BlockManager {
|
|||
|
||||
this.ops = new BlockOperations();
|
||||
this.ops.setDebug(false);
|
||||
this.conf = requireNonNull(conf);
|
||||
this.localDirAllocator = localDirAllocator;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -313,12 +302,7 @@ public abstract class CachingBlockManager extends BlockManager {
|
|||
|
||||
private void read(BufferData data) throws IOException {
|
||||
synchronized (data) {
|
||||
try {
|
||||
readBlock(data, false, BufferData.State.BLANK);
|
||||
} catch (IOException e) {
|
||||
LOG.error("error reading block {}", data.getBlockNumber(), e);
|
||||
throw e;
|
||||
}
|
||||
readBlock(data, false, BufferData.State.BLANK);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -378,6 +362,9 @@ public abstract class CachingBlockManager extends BlockManager {
|
|||
buffer.flip();
|
||||
data.setReady(expectedState);
|
||||
} catch (Exception e) {
|
||||
String message = String.format("error during readBlock(%s)", data.getBlockNumber());
|
||||
LOG.error(message, e);
|
||||
|
||||
if (isPrefetch && tracker != null) {
|
||||
tracker.failed();
|
||||
}
|
||||
|
@ -419,8 +406,7 @@ public abstract class CachingBlockManager extends BlockManager {
|
|||
try {
|
||||
blockManager.prefetch(data, taskQueuedStartTime);
|
||||
} catch (Exception e) {
|
||||
LOG.info("error prefetching block {}. {}", data.getBlockNumber(), e.getMessage());
|
||||
LOG.debug("error prefetching block {}", data.getBlockNumber(), e);
|
||||
LOG.error("error during prefetch", e);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
@ -479,8 +465,7 @@ public abstract class CachingBlockManager extends BlockManager {
|
|||
blockFuture = cf;
|
||||
}
|
||||
|
||||
CachePutTask task =
|
||||
new CachePutTask(data, blockFuture, this, Instant.now());
|
||||
CachePutTask task = new CachePutTask(data, blockFuture, this, Instant.now());
|
||||
Future<Void> actionFuture = futurePool.executeFunction(task);
|
||||
data.setCaching(actionFuture);
|
||||
ops.end(op);
|
||||
|
@ -508,8 +493,7 @@ public abstract class CachingBlockManager extends BlockManager {
|
|||
return;
|
||||
}
|
||||
} catch (Exception e) {
|
||||
LOG.info("error waiting on blockFuture: {}. {}", data, e.getMessage());
|
||||
LOG.debug("error waiting on blockFuture: {}", data, e);
|
||||
LOG.error("error waiting on blockFuture: {}", data, e);
|
||||
data.setDone();
|
||||
return;
|
||||
}
|
||||
|
@ -539,8 +523,8 @@ public abstract class CachingBlockManager extends BlockManager {
|
|||
data.setDone();
|
||||
} catch (Exception e) {
|
||||
numCachingErrors.incrementAndGet();
|
||||
LOG.info("error adding block to cache after wait: {}. {}", data, e.getMessage());
|
||||
LOG.debug("error adding block to cache after wait: {}", data, e);
|
||||
String message = String.format("error adding block to cache after wait: %s", data);
|
||||
LOG.error(message, e);
|
||||
data.setDone();
|
||||
}
|
||||
|
||||
|
@ -566,7 +550,7 @@ public abstract class CachingBlockManager extends BlockManager {
|
|||
return;
|
||||
}
|
||||
|
||||
cache.put(blockNumber, buffer, conf, localDirAllocator);
|
||||
cache.put(blockNumber, buffer);
|
||||
}
|
||||
|
||||
private static class CachePutTask implements Supplier<Void> {
|
||||
|
|
|
@@ -22,13 +22,8 @@ package org.apache.hadoop.fs.impl.prefetch;
import java.util.Locale;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

import org.slf4j.Logger;

import org.apache.hadoop.util.concurrent.HadoopExecutors;

/**
 * A FuturePool implementation backed by a java.util.concurrent.ExecutorService.
 *

@@ -42,8 +37,7 @@ import org.apache.hadoop.util.concurrent.HadoopExecutors;
 *
 */
public class ExecutorServiceFuturePool {

  private final ExecutorService executor;
  private ExecutorService executor;

  public ExecutorServiceFuturePool(ExecutorService executor) {
    this.executor = executor;

@@ -70,18 +64,6 @@ public class ExecutorServiceFuturePool {
    return (Future<Void>) executor.submit(r::run);
  }

  /**
   * Utility to shutdown the {@link ExecutorService} used by this class. Will wait up to a
   * certain timeout for the ExecutorService to gracefully shutdown.
   *
   * @param logger Logger
   * @param timeout the maximum time to wait
   * @param unit the time unit of the timeout argument
   */
  public void shutdown(Logger logger, long timeout, TimeUnit unit) {
    HadoopExecutors.shutdown(executor, logger, timeout, unit);
  }

  public String toString() {
    return String.format(Locale.ROOT, "ExecutorServiceFuturePool(executor=%s)", executor);
  }
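A hedged sketch of the pool's lifecycle using only methods visible in this change: executeFunction() (called from CachingBlockManager below) to submit work, then the shutdown() helper above with a grace period.

  static void futurePoolDemo() throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(4);     // java.util.concurrent
    ExecutorServiceFuturePool futurePool = new ExecutorServiceFuturePool(executor);
    Future<Void> f = futurePool.executeFunction(() -> {
      // pretend to prefetch a block here
      return null;
    });
    f.get();                                                        // wait for completion
    futurePool.shutdown(LoggerFactory.getLogger("prefetch"), 30, TimeUnit.SECONDS);
  }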
@@ -116,7 +116,7 @@ public final class FilePosition {
        readOffset,
        "readOffset",
        startOffset,
        startOffset + bufferData.getBuffer().limit());
        startOffset + bufferData.getBuffer().limit() - 1);

    data = bufferData;
    buffer = bufferData.getBuffer().duplicate();

@@ -182,7 +182,7 @@ public final class FilePosition {
   */
  public boolean isWithinCurrentBuffer(long pos) {
    throwIfInvalidBuffer();
    long bufferEndOffset = bufferStartOffset + buffer.limit();
    long bufferEndOffset = bufferStartOffset + buffer.limit() - 1;
    return (pos >= bufferStartOffset) && (pos <= bufferEndOffset);
  }

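A worked example of the end-offset arithmetic that differs between the two sides of this hunk. A buffer window covers the file offsets [bufferStartOffset, bufferStartOffset + limit).

  static void bufferWindowDemo() {
    long bufferStartOffset = 100;
    int limit = 10;                                     // buffer holds offsets 100..109
    long exclusiveEnd = bufferStartOffset + limit;      // 110: first offset NOT in the buffer
    long inclusiveEnd = bufferStartOffset + limit - 1;  // 109: last offset in the buffer
    long pos = 109;
    boolean inExclusiveForm = pos >= bufferStartOffset && pos < exclusiveEnd;   // true
    boolean inInclusiveForm = pos >= bufferStartOffset && pos <= inclusiveEnd;  // true
    // Both forms agree as long as the comparison matches the convention; mixing
    // "limit - 1" with "<" (or "limit" with "<=") is the off-by-one trap, which is
    // why isWithinCurrentBuffer() above pairs the inclusive end with "<=".
    assert inExclusiveForm && inInclusiveForm;
  }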
@ -27,9 +27,10 @@ import java.nio.channels.WritableByteChannel;
|
|||
import java.nio.file.Files;
|
||||
import java.nio.file.OpenOption;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.nio.file.StandardOpenOption;
|
||||
import java.nio.file.attribute.FileAttribute;
|
||||
import java.nio.file.attribute.PosixFilePermission;
|
||||
import java.nio.file.attribute.PosixFilePermissions;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.EnumSet;
|
||||
|
@ -37,16 +38,10 @@ import java.util.List;
|
|||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.locks.ReentrantReadWriteLock;
|
||||
|
||||
import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
|
||||
import org.slf4j.Logger;
|
||||
import org.slf4j.LoggerFactory;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.LocalDirAllocator;
|
||||
|
||||
import static java.util.Objects.requireNonNull;
|
||||
import static org.apache.hadoop.fs.impl.prefetch.Validate.checkNotNull;
|
||||
|
||||
|
@ -72,22 +67,6 @@ public class SingleFilePerBlockCache implements BlockCache {
|
|||
|
||||
private final PrefetchingStatistics prefetchingStatistics;
|
||||
|
||||
/**
|
||||
* Timeout to be used by close, while acquiring prefetch block write lock.
|
||||
*/
|
||||
private static final int PREFETCH_WRITE_LOCK_TIMEOUT = 5;
|
||||
|
||||
/**
|
||||
* Lock timeout unit to be used by the thread while acquiring prefetch block write lock.
|
||||
*/
|
||||
private static final TimeUnit PREFETCH_WRITE_LOCK_TIMEOUT_UNIT = TimeUnit.SECONDS;
|
||||
|
||||
/**
|
||||
* File attributes attached to any intermediate temporary file created during index creation.
|
||||
*/
|
||||
private static final Set<PosixFilePermission> TEMP_FILE_ATTRS =
|
||||
ImmutableSet.of(PosixFilePermission.OWNER_READ, PosixFilePermission.OWNER_WRITE);
|
||||
|
||||
/**
|
||||
* Cache entry.
|
||||
* Each block is stored as a separate file.
|
||||
|
@ -97,18 +76,12 @@ public class SingleFilePerBlockCache implements BlockCache {
|
|||
private final Path path;
|
||||
private final int size;
|
||||
private final long checksum;
|
||||
private final ReentrantReadWriteLock lock;
|
||||
private enum LockType {
|
||||
READ,
|
||||
WRITE
|
||||
}
|
||||
|
||||
Entry(int blockNumber, Path path, int size, long checksum) {
|
||||
this.blockNumber = blockNumber;
|
||||
this.path = path;
|
||||
this.size = size;
|
||||
this.checksum = checksum;
|
||||
this.lock = new ReentrantReadWriteLock();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -117,54 +90,6 @@ public class SingleFilePerBlockCache implements BlockCache {
|
|||
"([%03d] %s: size = %d, checksum = %d)",
|
||||
blockNumber, path, size, checksum);
|
||||
}
|
||||
|
||||
/**
|
||||
* Take the read or write lock.
|
||||
*
|
||||
* @param lockType type of the lock.
|
||||
*/
|
||||
private void takeLock(LockType lockType) {
|
||||
if (LockType.READ == lockType) {
|
||||
lock.readLock().lock();
|
||||
} else if (LockType.WRITE == lockType) {
|
||||
lock.writeLock().lock();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Release the read or write lock.
|
||||
*
|
||||
* @param lockType type of the lock.
|
||||
*/
|
||||
private void releaseLock(LockType lockType) {
|
||||
if (LockType.READ == lockType) {
|
||||
lock.readLock().unlock();
|
||||
} else if (LockType.WRITE == lockType) {
|
||||
lock.writeLock().unlock();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Try to take the read or write lock within the given timeout.
|
||||
*
|
||||
* @param lockType type of the lock.
|
||||
* @param timeout the time to wait for the given lock.
|
||||
* @param unit the time unit of the timeout argument.
|
||||
* @return true if the lock of the given lock type was acquired.
|
||||
*/
|
||||
private boolean takeLock(LockType lockType, long timeout, TimeUnit unit) {
|
||||
try {
|
||||
if (LockType.READ == lockType) {
|
||||
return lock.readLock().tryLock(timeout, unit);
|
||||
} else if (LockType.WRITE == lockType) {
|
||||
return lock.writeLock().tryLock(timeout, unit);
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
LOG.warn("Thread interrupted while trying to acquire {} lock", lockType, e);
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -214,15 +139,11 @@ public class SingleFilePerBlockCache implements BlockCache {
|
|||
checkNotNull(buffer, "buffer");
|
||||
|
||||
Entry entry = getEntry(blockNumber);
|
||||
entry.takeLock(Entry.LockType.READ);
|
||||
try {
|
||||
buffer.clear();
|
||||
readFile(entry.path, buffer);
|
||||
buffer.rewind();
|
||||
validateEntry(entry, buffer);
|
||||
} finally {
|
||||
entry.releaseLock(Entry.LockType.READ);
|
||||
}
|
||||
buffer.clear();
|
||||
readFile(entry.path, buffer);
|
||||
buffer.rewind();
|
||||
|
||||
validateEntry(entry, buffer);
|
||||
}
|
||||
|
||||
protected int readFile(Path path, ByteBuffer buffer) throws IOException {
|
||||
|
@ -251,17 +172,11 @@ public class SingleFilePerBlockCache implements BlockCache {
|
|||
/**
|
||||
* Puts the given block in this cache.
|
||||
*
|
||||
* @param blockNumber the block number, used as a key for blocks map.
|
||||
* @param buffer buffer contents of the given block to be added to this cache.
|
||||
* @param conf the configuration.
|
||||
* @param localDirAllocator the local dir allocator instance.
|
||||
* @throws IOException if either local dir allocator fails to allocate file or if IO error
|
||||
* occurs while writing the buffer content to the file.
|
||||
* @throws IllegalArgumentException if buffer is null, or if buffer.limit() is zero or negative.
|
||||
* @throws IllegalArgumentException if buffer is null.
|
||||
* @throws IllegalArgumentException if buffer.limit() is zero or negative.
|
||||
*/
|
||||
@Override
|
||||
public void put(int blockNumber, ByteBuffer buffer, Configuration conf,
|
||||
LocalDirAllocator localDirAllocator) throws IOException {
|
||||
public void put(int blockNumber, ByteBuffer buffer) throws IOException {
|
||||
if (closed) {
|
||||
return;
|
||||
}
|
||||
|
@ -270,18 +185,13 @@ public class SingleFilePerBlockCache implements BlockCache {
|
|||
|
||||
if (blocks.containsKey(blockNumber)) {
|
||||
Entry entry = blocks.get(blockNumber);
|
||||
entry.takeLock(Entry.LockType.READ);
|
||||
try {
|
||||
validateEntry(entry, buffer);
|
||||
} finally {
|
||||
entry.releaseLock(Entry.LockType.READ);
|
||||
}
|
||||
validateEntry(entry, buffer);
|
||||
return;
|
||||
}
|
||||
|
||||
Validate.checkPositiveInteger(buffer.limit(), "buffer.limit()");
|
||||
|
||||
Path blockFilePath = getCacheFilePath(conf, localDirAllocator);
|
||||
Path blockFilePath = getCacheFilePath();
|
||||
long size = Files.size(blockFilePath);
|
||||
if (size != 0) {
|
||||
String message =
|
||||
|
@ -291,15 +201,10 @@ public class SingleFilePerBlockCache implements BlockCache {
|
|||
}
|
||||
|
||||
writeFile(blockFilePath, buffer);
|
||||
prefetchingStatistics.blockAddedToFileCache();
|
||||
long checksum = BufferData.getChecksum(buffer);
|
||||
Entry entry = new Entry(blockNumber, blockFilePath, buffer.limit(), checksum);
|
||||
blocks.put(blockNumber, entry);
|
||||
// Update stream_read_blocks_in_cache stats only after blocks map is updated with new file
|
||||
// entry to avoid any discrepancy related to the value of stream_read_blocks_in_cache.
|
||||
// If stream_read_blocks_in_cache is updated before updating the blocks map here, closing of
|
||||
// the input stream can lead to the removal of the cache file even before blocks is added with
|
||||
// the new cache file, leading to incorrect value of stream_read_blocks_in_cache.
|
||||
prefetchingStatistics.blockAddedToFileCache();
|
||||
}
|
||||
|
||||
private static final Set<? extends OpenOption> CREATE_OPTIONS =
|
||||
|
@ -316,19 +221,8 @@ public class SingleFilePerBlockCache implements BlockCache {
|
|||
writeChannel.close();
|
||||
}
|
||||
|
||||
/**
|
||||
* Return temporary file created based on the file path retrieved from local dir allocator.
|
||||
*
|
||||
* @param conf The configuration object.
|
||||
* @param localDirAllocator Local dir allocator instance.
|
||||
* @return Path of the temporary file created.
|
||||
* @throws IOException if IO error occurs while local dir allocator tries to retrieve path
|
||||
* from local FS or file creation fails or permission set fails.
|
||||
*/
|
||||
protected Path getCacheFilePath(final Configuration conf,
|
||||
final LocalDirAllocator localDirAllocator)
|
||||
throws IOException {
|
||||
return getTempFilePath(conf, localDirAllocator);
|
||||
protected Path getCacheFilePath() throws IOException {
|
||||
return getTempFilePath();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -343,22 +237,12 @@ public class SingleFilePerBlockCache implements BlockCache {
|
|||
int numFilesDeleted = 0;
|
||||
|
||||
for (Entry entry : blocks.values()) {
|
||||
boolean lockAcquired = entry.takeLock(Entry.LockType.WRITE, PREFETCH_WRITE_LOCK_TIMEOUT,
|
||||
PREFETCH_WRITE_LOCK_TIMEOUT_UNIT);
|
||||
if (!lockAcquired) {
|
||||
LOG.error("Cache file {} deletion would not be attempted as write lock could not"
|
||||
+ " be acquired within {} {}", entry.path, PREFETCH_WRITE_LOCK_TIMEOUT,
|
||||
PREFETCH_WRITE_LOCK_TIMEOUT_UNIT);
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
Files.deleteIfExists(entry.path);
|
||||
prefetchingStatistics.blockRemovedFromFileCache();
|
||||
numFilesDeleted++;
|
||||
} catch (IOException e) {
|
||||
LOG.debug("Failed to delete cache file {}", entry.path, e);
|
||||
} finally {
|
||||
entry.releaseLock(Entry.LockType.WRITE);
|
||||
// Ignore while closing so that we can delete as many cache files as possible.
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -439,19 +323,9 @@ public class SingleFilePerBlockCache implements BlockCache {
|
|||
|
||||
private static final String CACHE_FILE_PREFIX = "fs-cache-";
|
||||
|
||||
/**
|
||||
* Determine if the cache space is available on the local FS.
|
||||
*
|
||||
* @param fileSize The size of the file.
|
||||
* @param conf The configuration.
|
||||
* @param localDirAllocator Local dir allocator instance.
|
||||
* @return True if the given file size is less than the available free space on local FS,
|
||||
* False otherwise.
|
||||
*/
|
||||
public static boolean isCacheSpaceAvailable(long fileSize, Configuration conf,
|
||||
LocalDirAllocator localDirAllocator) {
|
||||
public static boolean isCacheSpaceAvailable(long fileSize) {
|
||||
try {
|
||||
Path cacheFilePath = getTempFilePath(conf, localDirAllocator);
|
||||
Path cacheFilePath = getTempFilePath();
|
||||
long freeSpace = new File(cacheFilePath.toString()).getUsableSpace();
|
||||
LOG.info("fileSize = {}, freeSpace = {}", fileSize, freeSpace);
|
||||
Files.deleteIfExists(cacheFilePath);
|
||||
|
@ -465,25 +339,16 @@ public class SingleFilePerBlockCache implements BlockCache {
|
|||
// The suffix (file extension) of each serialized index file.
|
||||
private static final String BINARY_FILE_SUFFIX = ".bin";
|
||||
|
||||
/**
|
||||
* Create temporary file based on the file path retrieved from local dir allocator
|
||||
* instance. The file is created with .bin suffix. The created file has been granted
|
||||
* posix file permissions available in TEMP_FILE_ATTRS.
|
||||
*
|
||||
* @param conf the configuration.
|
||||
* @param localDirAllocator the local dir allocator instance.
|
||||
* @return path of the file created.
|
||||
* @throws IOException if IO error occurs while local dir allocator tries to retrieve path
|
||||
* from local FS or file creation fails or permission set fails.
|
||||
*/
|
||||
private static Path getTempFilePath(final Configuration conf,
|
||||
final LocalDirAllocator localDirAllocator) throws IOException {
|
||||
org.apache.hadoop.fs.Path path =
|
||||
localDirAllocator.getLocalPathForWrite(CACHE_FILE_PREFIX, conf);
|
||||
File dir = new File(path.getParent().toUri().getPath());
|
||||
String prefix = path.getName();
|
||||
File tmpFile = File.createTempFile(prefix, BINARY_FILE_SUFFIX, dir);
|
||||
Path tmpFilePath = Paths.get(tmpFile.toURI());
|
||||
return Files.setPosixFilePermissions(tmpFilePath, TEMP_FILE_ATTRS);
|
||||
// File attributes attached to any intermediate temporary file created during index creation.
|
||||
private static final FileAttribute<Set<PosixFilePermission>> TEMP_FILE_ATTRS =
|
||||
PosixFilePermissions.asFileAttribute(EnumSet.of(PosixFilePermission.OWNER_READ,
|
||||
PosixFilePermission.OWNER_WRITE));
|
||||
|
||||
private static Path getTempFilePath() throws IOException {
|
||||
return Files.createTempFile(
|
||||
CACHE_FILE_PREFIX,
|
||||
BINARY_FILE_SUFFIX,
|
||||
TEMP_FILE_ATTRS
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -15,11 +15,6 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Filesystem implementations that allow Hadoop to read directly from
|
||||
* the local file system.
|
||||
*/
|
||||
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
|
||||
@InterfaceStability.Unstable
|
||||
package org.apache.hadoop.fs.local;
|
||||
|
|
|
@@ -333,24 +333,15 @@ class CopyCommands {
   */
  public static class AppendToFile extends CommandWithDestination {
    public static final String NAME = "appendToFile";
    public static final String USAGE = "[-n] <localsrc> ... <dst>";
    public static final String USAGE = "<localsrc> ... <dst>";
    public static final String DESCRIPTION =
        "Appends the contents of all the given local files to the " +
            "given dst file. The dst file will be created if it does " +
            "not exist. If <localSrc> is -, then the input is read " +
            "from stdin. Option -n represents that use NEW_BLOCK create flag to append file.";
            "from stdin.";

    private static final int DEFAULT_IO_LENGTH = 1024 * 1024;
    boolean readStdin = false;
    private boolean appendToNewBlock = false;

    public boolean isAppendToNewBlock() {
      return appendToNewBlock;
    }

    public void setAppendToNewBlock(boolean appendToNewBlock) {
      this.appendToNewBlock = appendToNewBlock;
    }

    // commands operating on local paths have no need for glob expansion
    @Override

@@ -381,9 +372,6 @@ class CopyCommands {
        throw new IOException("missing destination argument");
      }

      CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "n");
      cf.parse(args);
      appendToNewBlock = cf.getOpt("n");
      getRemoteDestination(args);
      super.processOptions(args);
    }

@@ -397,8 +385,7 @@ class CopyCommands {
      }

      InputStream is = null;
      try (FSDataOutputStream fos = appendToNewBlock ?
          dst.fs.append(dst.path, true) : dst.fs.append(dst.path)) {
      try (FSDataOutputStream fos = dst.fs.append(dst.path)) {
        if (readStdin) {
          if (args.size() == 0) {
            IOUtils.copyBytes(System.in, fos, DEFAULT_IO_LENGTH);
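For the variant of the command that carries the -n option (usage string "[-n] <localsrc> ... <dst>" above), shell invocations would look roughly like:

  hadoop fs -appendToFile -n local1.log local2.log /user/hadoop/aggregate.log
  cat notes.txt | hadoop fs -appendToFile - /user/hadoop/aggregate.log

where -n asks for the NEW_BLOCK create flag when opening the append stream, and "-" reads from stdin, as the description string states; file names here are illustrative.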
@ -47,6 +47,7 @@ import org.apache.hadoop.io.DataInputBuffer;
|
|||
import org.apache.hadoop.io.DataOutputBuffer;
|
||||
import org.apache.hadoop.io.IOUtils;
|
||||
import org.apache.hadoop.io.SequenceFile;
|
||||
import org.apache.hadoop.io.Writable;
|
||||
import org.apache.hadoop.io.compress.CompressionCodec;
|
||||
import org.apache.hadoop.io.compress.CompressionCodecFactory;
|
||||
import org.apache.hadoop.util.ReflectionUtils;
|
||||
|
@ -216,8 +217,8 @@ class Display extends FsCommand {
|
|||
|
||||
protected class TextRecordInputStream extends InputStream {
|
||||
SequenceFile.Reader r;
|
||||
Object key;
|
||||
Object val;
|
||||
Writable key;
|
||||
Writable val;
|
||||
|
||||
DataInputBuffer inbuf;
|
||||
DataOutputBuffer outbuf;
|
||||
|
@ -227,8 +228,10 @@ class Display extends FsCommand {
|
|||
final Configuration lconf = getConf();
|
||||
r = new SequenceFile.Reader(lconf,
|
||||
SequenceFile.Reader.file(fpath));
|
||||
key = ReflectionUtils.newInstance(r.getKeyClass(), lconf);
|
||||
val = ReflectionUtils.newInstance(r.getValueClass(), lconf);
|
||||
key = ReflectionUtils.newInstance(
|
||||
r.getKeyClass().asSubclass(Writable.class), lconf);
|
||||
val = ReflectionUtils.newInstance(
|
||||
r.getValueClass().asSubclass(Writable.class), lconf);
|
||||
inbuf = new DataInputBuffer();
|
||||
outbuf = new DataOutputBuffer();
|
||||
}
|
||||
|
@ -237,11 +240,8 @@ class Display extends FsCommand {
|
|||
public int read() throws IOException {
|
||||
int ret;
|
||||
if (null == inbuf || -1 == (ret = inbuf.read())) {
|
||||
key = r.next(key);
|
||||
if (key == null) {
|
||||
if (!r.next(key, val)) {
|
||||
return -1;
|
||||
} else {
|
||||
val = r.getCurrentValue(val);
|
||||
}
|
||||
byte[] tmp = key.toString().getBytes(StandardCharsets.UTF_8);
|
||||
outbuf.write(tmp, 0, tmp.length);
|
||||
|
|
|
@ -633,7 +633,7 @@ public class PathData implements Comparable<PathData> {
|
|||
return awaitFuture(fs.openFile(path)
|
||||
.opt(FS_OPTION_OPENFILE_READ_POLICY,
|
||||
policy)
|
||||
.optLong(FS_OPTION_OPENFILE_LENGTH,
|
||||
.opt(FS_OPTION_OPENFILE_LENGTH,
|
||||
stat.getLen()) // file length hint for object stores
|
||||
.build());
|
||||
}
|
||||
|
|
|
@ -15,10 +15,6 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/**
|
||||
* Support for the execution of a file system command.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
@InterfaceStability.Unstable
|
||||
package org.apache.hadoop.fs.shell;
|
||||
|
|
|
@@ -20,8 +20,6 @@ package org.apache.hadoop.fs.statistics;

import org.apache.hadoop.fs.statistics.impl.IOStatisticsContextIntegration;

import static java.util.Objects.requireNonNull;

/**
 * An interface defined to capture thread-level IOStatistics by using per
 * thread context.

@@ -69,11 +67,7 @@ public interface IOStatisticsContext extends IOStatisticsSource {
   * @return instance of IOStatisticsContext for the context.
   */
  static IOStatisticsContext getCurrentIOStatisticsContext() {
    // the null check is just a safety check to highlight exactly where a null value would
    // be returned if HADOOP-18456 has resurfaced.
    return requireNonNull(
        IOStatisticsContextIntegration.getCurrentIOStatisticsContext(),
        "Null IOStatisticsContext");
    return IOStatisticsContextIntegration.getCurrentIOStatisticsContext();
  }

  /**

@@ -86,14 +80,4 @@ public interface IOStatisticsContext extends IOStatisticsSource {
    IOStatisticsContextIntegration.setThreadIOStatisticsContext(
        statisticsContext);
  }

  /**
   * Static probe to check if the thread-level IO statistics enabled.
   *
   * @return if the thread-level IO statistics enabled.
   */
  static boolean enabled() {
    return IOStatisticsContextIntegration.isIOStatisticsThreadLevelEnabled();
  }

}
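A hedged sketch of the thread-level statistics API above: probe enabled() (the static check added on one side of this hunk), fetch the current thread's context, do filesystem work, then inspect it. getIOStatistics() is assumed to come from the IOStatisticsSource parent interface.

  static void threadStatsDemo(FileSystem fs, Path path) throws IOException {
    if (IOStatisticsContext.enabled()) {
      IOStatisticsContext ctx = IOStatisticsContext.getCurrentIOStatisticsContext();
      try (FSDataInputStream in = fs.open(path)) {
        in.read();                                   // any I/O on this thread is captured
      }
      System.out.println(ctx.getIOStatistics());     // aggregated stats for this thread
    }
  }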
@ -1,75 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.fs.statistics;
|
||||
|
||||
import org.apache.hadoop.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.classification.InterfaceStability;
|
||||
|
||||
/**
|
||||
* Setter for IOStatistics entries.
|
||||
* These operations have been in the read/write API
|
||||
* {@code IOStatisticsStore} since IOStatistics
|
||||
* was added; extracting into its own interface allows for
|
||||
* {@link IOStatisticsSnapshot} to also support it.
|
||||
* These are the simple setters, they don't provide for increments,
|
||||
* decrements, calculation of min/max/mean etc.
|
||||
* @since The interface and IOStatisticsSnapshot support was added <i>after</i> Hadoop 3.3.5
|
||||
*/
|
||||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Evolving
|
||||
public interface IOStatisticsSetters extends IOStatistics {
|
||||
|
||||
/**
|
||||
* Set a counter.
|
||||
*
|
||||
* No-op if the counter is unknown.
|
||||
* @param key statistics key
|
||||
* @param value value to set
|
||||
*/
|
||||
void setCounter(String key, long value);
|
||||
|
||||
/**
|
||||
* Set a gauge.
|
||||
*
|
||||
* @param key statistics key
|
||||
* @param value value to set
|
||||
*/
|
||||
void setGauge(String key, long value);
|
||||
|
||||
/**
|
||||
* Set a maximum.
|
||||
* @param key statistics key
|
||||
* @param value value to set
|
||||
*/
|
||||
void setMaximum(String key, long value);
|
||||
|
||||
/**
|
||||
* Set a minimum.
|
||||
* @param key statistics key
|
||||
* @param value value to set
|
||||
*/
|
||||
void setMinimum(String key, long value);
|
||||
|
||||
/**
|
||||
* Set a mean statistic to a given value.
|
||||
* @param key statistic key
|
||||
* @param value new value.
|
||||
*/
|
||||
void setMeanStatistic(String key, MeanStatistic value);
|
||||
}
|
|
@ -62,8 +62,7 @@ import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.snapshotM
|
|||
@InterfaceAudience.Public
|
||||
@InterfaceStability.Evolving
|
||||
public final class IOStatisticsSnapshot
|
||||
implements IOStatistics, Serializable, IOStatisticsAggregator,
|
||||
IOStatisticsSetters {
|
||||
implements IOStatistics, Serializable, IOStatisticsAggregator {
|
||||
|
||||
private static final long serialVersionUID = -1762522703841538084L;
|
||||
|
||||
|
@ -223,33 +222,6 @@ public final class IOStatisticsSnapshot
|
|||
return meanStatistics;
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void setCounter(final String key, final long value) {
|
||||
counters().put(key, value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void setGauge(final String key, final long value) {
|
||||
gauges().put(key, value);
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void setMaximum(final String key, final long value) {
|
||||
maximums().put(key, value);
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void setMinimum(final String key, final long value) {
|
||||
minimums().put(key, value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setMeanStatistic(final String key, final MeanStatistic value) {
|
||||
meanStatistics().put(key, value);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return ioStatisticsToString(this);
|
||||
|
|
|
@ -47,7 +47,7 @@ public final class StreamStatisticNames {
|
|||
public static final String STREAM_READ_ABORTED = "stream_aborted";
|
||||
|
||||
/**
|
||||
* Bytes read from an input stream in read()/readVectored() calls.
|
||||
* Bytes read from an input stream in read() calls.
|
||||
* Does not include bytes read and then discarded in seek/close etc.
|
||||
* These are the bytes returned to the caller.
|
||||
* Value: {@value}.
|
||||
|
@ -110,34 +110,6 @@ public final class StreamStatisticNames {
|
|||
public static final String STREAM_READ_OPERATIONS =
|
||||
"stream_read_operations";
|
||||
|
||||
/**
|
||||
* Count of readVectored() operations in an input stream.
|
||||
* Value: {@value}.
|
||||
*/
|
||||
public static final String STREAM_READ_VECTORED_OPERATIONS =
|
||||
"stream_read_vectored_operations";
|
||||
|
||||
/**
|
||||
* Count of bytes discarded during readVectored() operation
|
||||
* in an input stream.
|
||||
* Value: {@value}.
|
||||
*/
|
||||
public static final String STREAM_READ_VECTORED_READ_BYTES_DISCARDED =
|
||||
"stream_read_vectored_read_bytes_discarded";
|
||||
|
||||
/**
|
||||
* Count of incoming file ranges during readVectored() operation.
|
||||
* Value: {@value}
|
||||
*/
|
||||
public static final String STREAM_READ_VECTORED_INCOMING_RANGES =
|
||||
"stream_read_vectored_incoming_ranges";
|
||||
/**
|
||||
* Count of combined file ranges during readVectored() operation.
|
||||
* Value: {@value}
|
||||
*/
|
||||
public static final String STREAM_READ_VECTORED_COMBINED_RANGES =
|
||||
"stream_read_vectored_combined_ranges";
|
||||
|
||||
/**
|
||||
* Count of incomplete read() operations in an input stream,
|
||||
* that is, when the bytes returned were less than that requested.
|
||||
|
|
|
@ -29,8 +29,8 @@ import org.apache.hadoop.conf.Configuration;
|
|||
import org.apache.hadoop.fs.impl.WeakReferenceThreadMap;
|
||||
import org.apache.hadoop.fs.statistics.IOStatisticsContext;
|
||||
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeys.IOSTATISTICS_THREAD_LEVEL_ENABLED;
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeys.IOSTATISTICS_THREAD_LEVEL_ENABLED_DEFAULT;
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeys.THREAD_LEVEL_IOSTATISTICS_ENABLED;
|
||||
import static org.apache.hadoop.fs.CommonConfigurationKeys.THREAD_LEVEL_IOSTATISTICS_ENABLED_DEFAULT;
|
||||
|
||||
/**
|
||||
* A Utility class for IOStatisticsContext, which helps in creating and
|
||||
|
@ -76,17 +76,8 @@ public final class IOStatisticsContextIntegration {
|
|||
// Work out if the current context has thread level IOStatistics enabled.
|
||||
final Configuration configuration = new Configuration();
|
||||
isThreadIOStatsEnabled =
|
||||
configuration.getBoolean(IOSTATISTICS_THREAD_LEVEL_ENABLED,
|
||||
IOSTATISTICS_THREAD_LEVEL_ENABLED_DEFAULT);
|
||||
}
|
||||
|
||||
/**
|
||||
* Static probe to check if the thread-level IO statistics enabled.
|
||||
*
|
||||
* @return if the thread-level IO statistics enabled.
|
||||
*/
|
||||
public static boolean isIOStatisticsThreadLevelEnabled() {
|
||||
return isThreadIOStatsEnabled;
|
||||
configuration.getBoolean(THREAD_LEVEL_IOSTATISTICS_ENABLED,
|
||||
THREAD_LEVEL_IOSTATISTICS_ENABLED_DEFAULT);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -100,10 +91,7 @@ public final class IOStatisticsContextIntegration {
|
|||
* @return an instance of IOStatisticsContext.
|
||||
*/
|
||||
private static IOStatisticsContext createNewInstance(Long key) {
|
||||
IOStatisticsContextImpl instance =
|
||||
new IOStatisticsContextImpl(key, INSTANCE_ID.getAndIncrement());
|
||||
LOG.debug("Created instance {}", instance);
|
||||
return instance;
|
||||
return new IOStatisticsContextImpl(key, INSTANCE_ID.getAndIncrement());
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -134,11 +122,9 @@ public final class IOStatisticsContextIntegration {
|
|||
IOStatisticsContext statisticsContext) {
|
||||
if (isThreadIOStatsEnabled) {
|
||||
if (statisticsContext == null) {
|
||||
// new value is null, so remove it
|
||||
ACTIVE_IOSTATS_CONTEXT.removeForCurrentThread();
|
||||
} else {
|
||||
// the setter is efficient in that it does not create a new
|
||||
// reference if the context is unchanged.
|
||||
}
|
||||
if (ACTIVE_IOSTATS_CONTEXT.getForCurrentThread() != statisticsContext) {
|
||||
ACTIVE_IOSTATS_CONTEXT.setForCurrentThread(statisticsContext);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -24,7 +24,6 @@ import java.util.concurrent.atomic.AtomicLong;
|
|||
import org.apache.hadoop.fs.statistics.IOStatistics;
|
||||
import org.apache.hadoop.fs.statistics.IOStatisticsAggregator;
|
||||
import org.apache.hadoop.fs.statistics.DurationTrackerFactory;
|
||||
import org.apache.hadoop.fs.statistics.IOStatisticsSetters;
|
||||
import org.apache.hadoop.fs.statistics.MeanStatistic;
|
||||
|
||||
/**
|
||||
|
@ -32,7 +31,6 @@ import org.apache.hadoop.fs.statistics.MeanStatistic;
|
|||
* use in classes which track statistics for reporting.
|
||||
*/
|
||||
public interface IOStatisticsStore extends IOStatistics,
|
||||
IOStatisticsSetters,
|
||||
IOStatisticsAggregator,
|
||||
DurationTrackerFactory {
|
||||
|
||||
|
@ -58,6 +56,24 @@ public interface IOStatisticsStore extends IOStatistics,
|
|||
*/
|
||||
long incrementCounter(String key, long value);
|
||||
|
||||
/**
|
||||
* Set a counter.
|
||||
*
|
||||
* No-op if the counter is unknown.
|
||||
* @param key statistics key
|
||||
* @param value value to set
|
||||
*/
|
||||
void setCounter(String key, long value);
|
||||
|
||||
/**
|
||||
* Set a gauge.
|
||||
*
|
||||
* No-op if the gauge is unknown.
|
||||
* @param key statistics key
|
||||
* @param value value to set
|
||||
*/
|
||||
void setGauge(String key, long value);
|
||||
|
||||
/**
|
||||
* Increment a gauge.
|
||||
* <p>
|
||||
|
@ -69,6 +85,14 @@ public interface IOStatisticsStore extends IOStatistics,
|
|||
*/
|
||||
long incrementGauge(String key, long value);
|
||||
|
||||
/**
|
||||
* Set a maximum.
|
||||
* No-op if the maximum is unknown.
|
||||
* @param key statistics key
|
||||
* @param value value to set
|
||||
*/
|
||||
void setMaximum(String key, long value);
|
||||
|
||||
/**
|
||||
* Increment a maximum.
|
||||
* <p>
|
||||
|
@ -80,6 +104,16 @@ public interface IOStatisticsStore extends IOStatistics,
|
|||
*/
|
||||
long incrementMaximum(String key, long value);
|
||||
|
||||
/**
|
||||
* Set a minimum.
|
||||
* <p>
|
||||
* No-op if the minimum is unknown.
|
||||
* </p>
|
||||
* @param key statistics key
|
||||
* @param value value to set
|
||||
*/
|
||||
void setMinimum(String key, long value);
|
||||
|
||||
/**
|
||||
* Increment a minimum.
|
||||
* <p>
|
||||
|
@ -113,6 +147,16 @@ public interface IOStatisticsStore extends IOStatistics,
|
|||
*/
|
||||
void addMaximumSample(String key, long value);
|
||||
|
||||
/**
|
||||
* Set a mean statistic to a given value.
|
||||
* <p>
|
||||
* No-op if the key is unknown.
|
||||
* </p>
|
||||
* @param key statistic key
|
||||
* @param value new value.
|
||||
*/
|
||||
void setMeanStatistic(String key, MeanStatistic value);
|
||||
|
||||
/**
|
||||
* Add a sample to the mean statistics.
|
||||
* <p>
|
||||
|
|
|
@@ -67,17 +67,6 @@ public interface IOStatisticsStoreBuilder {
  IOStatisticsStoreBuilder withDurationTracking(
      String... prefixes);

  /**
   * A value which is tracked with counter/min/max/mean.
   * Similar to {@link #withDurationTracking(String...)}
   * but without the failure option and with the same name
   * across all categories.
   * @param prefixes prefixes to add.
   * @return the builder
   */
  IOStatisticsStoreBuilder withSampleTracking(
      String... prefixes);

  /**
   * Build the collector.
   * @return a new collector.
@@ -92,18 +92,6 @@ final class IOStatisticsStoreBuilderImpl implements
    return this;
  }

  @Override
  public IOStatisticsStoreBuilderImpl withSampleTracking(
      final String... prefixes) {
    for (String p : prefixes) {
      withCounters(p);
      withMinimums(p);
      withMaximums(p);
      withMeanStatistics(p);
    }
    return this;
  }

  @Override
  public IOStatisticsStore build() {
    return new IOStatisticsStoreImpl(counters, gauges, minimums,
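A sketch of how these builder methods compose from calling code, assuming the usual `iostatisticsStore()` factory in `IOStatisticsBinding`; the prefix names are illustrative:

```java
import org.apache.hadoop.fs.statistics.impl.IOStatisticsStore;

import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.iostatisticsStore;

public class StoreBuilderExample {
  static IOStatisticsStore buildStore() {
    // withSampleTracking registers a counter, minimum, maximum and mean
    // under the same name, as the implementation above shows.
    return iostatisticsStore()
        .withCounters("op_open", "op_delete")
        .withSampleTracking("read_latency")
        .withDurationTracking("op_rename")
        .build();
  }
}
```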
@@ -190,7 +190,7 @@ final class IOStatisticsStoreImpl extends WrappedIOStatistics
      return counter.get();
    } else {
      long l = incAtomicLong(counter, value);
      LOG.trace("Incrementing counter {} by {} with final value {}",
      LOG.debug("Incrementing counter {} by {} with final value {}",
          key, value, l);
      return l;
    }
@@ -144,7 +144,7 @@ public final class HttpServer2 implements FilterContainer {

  public static final String HTTP_SOCKET_BACKLOG_SIZE_KEY =
      "hadoop.http.socket.backlog.size";
  public static final int HTTP_SOCKET_BACKLOG_SIZE_DEFAULT = 500;
  public static final int HTTP_SOCKET_BACKLOG_SIZE_DEFAULT = 128;
  public static final String HTTP_MAX_THREADS_KEY = "hadoop.http.max.threads";
  public static final String HTTP_ACCEPTOR_COUNT_KEY =
      "hadoop.http.acceptor.count";
@@ -497,12 +497,7 @@ public final class HttpServer2 implements FilterContainer {
          prefix -> this.conf.get(prefix + "type")
              .equals(PseudoAuthenticationHandler.TYPE))
          ) {
        server.initSpnego(
            conf,
            hostName,
            getFilterProperties(conf, authFilterConfigurationPrefixes),
            usernameConfKey,
            keytabConfKey);
        server.initSpnego(conf, hostName, usernameConfKey, keytabConfKey);
      }

      for (URI ep : endpoints) {
@@ -1345,12 +1340,8 @@ public final class HttpServer2 implements FilterContainer {
  }

  private void initSpnego(Configuration conf, String hostName,
      Properties authFilterConfigurationPrefixes, String usernameConfKey, String keytabConfKey)
      throws IOException {
      String usernameConfKey, String keytabConfKey) throws IOException {
    Map<String, String> params = new HashMap<>();
    for (Map.Entry<Object, Object> entry : authFilterConfigurationPrefixes.entrySet()) {
      params.put(String.valueOf(entry.getKey()), String.valueOf(entry.getValue()));
    }
    String principalInConf = conf.get(usernameConfKey);
    if (principalInConf != null && !principalInConf.isEmpty()) {
      params.put("kerberos.principal", SecurityUtil.getServerPrincipal(
@@ -1976,8 +1967,4 @@ public final class HttpServer2 implements FilterContainer {
    return metrics;
  }

  @VisibleForTesting
  List<ServerConnector> getListeners() {
    return listeners;
  }
}
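The first hunk only changes the compiled-in default backlog; the effective value still comes from configuration. A small sketch of reading it with the constants shown above:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer2;

public class BacklogConfigExample {
  static int effectiveBacklog(Configuration conf) {
    // Falls back to HTTP_SOCKET_BACKLOG_SIZE_DEFAULT when the key is unset.
    return conf.getInt(HttpServer2.HTTP_SOCKET_BACKLOG_SIZE_KEY,
        HttpServer2.HTTP_SOCKET_BACKLOG_SIZE_DEFAULT);
  }
}
```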
@@ -15,10 +15,6 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Support for embedded HTTP services.
 */
@InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
@InterfaceStability.Unstable
package org.apache.hadoop.http;
@@ -158,9 +158,6 @@ public class DefaultStringifier<T> implements Stringifier<T> {
  public static <K> void storeArray(Configuration conf, K[] items,
      String keyName) throws IOException {

    if (items.length == 0) {
      throw new IndexOutOfBoundsException();
    }
    DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf,
        GenericsUtil.getClass(items[0]));
    try {
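A sketch of the round trip that `storeArray` participates in; `loadArray` is its counterpart in the same class (signature assumed from the current API), and the key name is illustrative:

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DefaultStringifier;
import org.apache.hadoop.io.IntWritable;

public class StoreArrayExample {
  static void roundTrip(Configuration conf) throws IOException {
    IntWritable[] values = { new IntWritable(1), new IntWritable(2) };
    // Serializes the array into the configuration under the given key;
    // an empty array would trigger the IndexOutOfBoundsException above.
    DefaultStringifier.storeArray(conf, values, "example.ints");
    IntWritable[] back =
        DefaultStringifier.loadArray(conf, "example.ints", IntWritable.class);
    System.out.println(back.length);
  }
}
```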
@@ -32,6 +32,7 @@ import java.nio.file.StandardOpenOption;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -245,6 +246,30 @@ public class IOUtils {
    }
  }

  /**
   * Close the Closeable objects and <b>ignore</b> any {@link Throwable} or
   * null pointers. Must only be used for cleanup in exception handlers.
   *
   * @param log the log to record problems to at debug level. Can be null.
   * @param closeables the objects to close
   * @deprecated use {@link #cleanupWithLogger(Logger, java.io.Closeable...)}
   * instead
   */
  @Deprecated
  public static void cleanup(Log log, java.io.Closeable... closeables) {
    for (java.io.Closeable c : closeables) {
      if (c != null) {
        try {
          c.close();
        } catch(Throwable e) {
          if (log != null && log.isDebugEnabled()) {
            log.debug("Exception in closing " + c, e);
          }
        }
      }
    }
  }

  /**
   * Close the Closeable objects and <b>ignore</b> any {@link Throwable} or
   * null pointers. Must only be used for cleanup in exception handlers.
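The deprecation note above points at `cleanupWithLogger`; a minimal sketch of the replacement call with an SLF4J logger:

```java
import java.io.InputStream;

import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CleanupExample {
  private static final Logger LOG = LoggerFactory.getLogger(CleanupExample.class);

  static void closeQuietly(InputStream in, InputStream err) {
    // Preferred replacement for the deprecated cleanup(Log, Closeable...):
    // failures are logged at debug level and otherwise ignored.
    IOUtils.cleanupWithLogger(LOG, in, err);
  }
}
```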
@@ -2006,7 +2006,7 @@ public class SequenceFile {
              FS_OPTION_OPENFILE_READ_POLICY_SEQUENTIAL)
          .opt(FS_OPTION_OPENFILE_BUFFER_SIZE, bufferSize);
      if (length >= 0) {
        builder.optLong(FS_OPTION_OPENFILE_LENGTH, length);
        builder.opt(FS_OPTION_OPENFILE_LENGTH, length);
      }
      return awaitFuture(builder.build());
    }
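This hunk swaps `opt` for `optLong` on the length hint. A sketch of the same `openFile()` pattern from application code; the helper classes (`Options.OpenFileOptions`, `FutureIO`) are assumed from the current Hadoop APIs, and the path and length are illustrative:

```java
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.functional.FutureIO;

import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_LENGTH;
import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_READ_POLICY;
import static org.apache.hadoop.fs.Options.OpenFileOptions.FS_OPTION_OPENFILE_READ_POLICY_SEQUENTIAL;

public class OpenFileExample {
  static FSDataInputStream open(FileSystem fs, Path path, long knownLength)
      throws Exception {
    // optLong() passes the length hint with an explicit numeric type,
    // letting object stores skip a HEAD/getFileStatus call.
    return FutureIO.awaitFuture(
        fs.openFile(path)
            .opt(FS_OPTION_OPENFILE_READ_POLICY,
                FS_OPTION_OPENFILE_READ_POLICY_SEQUENTIAL)
            .optLong(FS_OPTION_OPENFILE_LENGTH, knownLength)
            .build());
  }
}
```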
@@ -92,7 +92,7 @@ public class WritableName {
  ) throws IOException {
    Class<?> writableClass = NAME_TO_CLASS.get(name);
    if (writableClass != null)
      return writableClass;
      return writableClass.asSubclass(Writable.class);
    try {
      return conf.getClassByName(name);
    } catch (ClassNotFoundException e) {
@@ -335,7 +335,6 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
    private boolean isSubHeaderStripped = false;
    private READ_MODE readMode = READ_MODE.CONTINUOUS;
    private long startingPos = 0L;
    private boolean didInitialRead;

    // Following state machine handles different states of compressed stream
    // position
@@ -481,42 +480,24 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
     */

    public int read(byte[] b, int off, int len) throws IOException {
      if (b == null) {
        throw new NullPointerException();
      }
      if (off < 0 || len < 0 || len > b.length - off) {
        throw new IndexOutOfBoundsException();
      }
      if (len == 0) {
        return 0;
      }
      if (needsReset) {
        internalReset();
      }
      // When startingPos > 0, the stream should be initialized at the end of
      // one block (which would correspond to be the start of another block).
      // Thus, the initial read would technically be reading one byte passed a
      // BZip2 end of block marker. To be consistent, we should also be
      // updating the position to be one byte after the end of an block on the
      // initial read.
      boolean initializedAtEndOfBlock =
          !didInitialRead && startingPos > 0 && readMode == READ_MODE.BYBLOCK;
      int result = initializedAtEndOfBlock
          ? BZip2Constants.END_OF_BLOCK
          : this.input.read(b, off, len);

      int result = 0;
      result = this.input.read(b, off, len);
      if (result == BZip2Constants.END_OF_BLOCK) {
        this.posSM = POS_ADVERTISEMENT_STATE_MACHINE.ADVERTISE;
      }

      if (this.posSM == POS_ADVERTISEMENT_STATE_MACHINE.ADVERTISE) {
        result = this.input.read(b, off, 1);
        result = this.input.read(b, off, off + 1);
        // This is the precise time to update compressed stream position
        // to the client of this code.
        this.updatePos(true);
        this.posSM = POS_ADVERTISEMENT_STATE_MACHINE.HOLD;
      }

      didInitialRead = true;
      return result;

    }
@@ -532,7 +513,6 @@ public class BZip2Codec implements Configurable, SplittableCompressionCodec {
        needsReset = false;
        BufferedInputStream bufferedIn = readStreamHeader();
        input = new CBZip2InputStream(bufferedIn, this.readMode);
        didInitialRead = false;
      }
    }
@@ -205,7 +205,6 @@ public class CodecPool {
    }
    // if the compressor can't be reused, don't pool it.
    if (compressor.getClass().isAnnotationPresent(DoNotPool.class)) {
      compressor.end();
      return;
    }
    compressor.reset();
@@ -226,7 +225,6 @@ public class CodecPool {
    }
    // if the decompressor can't be reused, don't pool it.
    if (decompressor.getClass().isAnnotationPresent(DoNotPool.class)) {
      decompressor.end();
      return;
    }
    decompressor.reset();
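For context, the borrow/return cycle that the `@DoNotPool` check above guards; the codec construction is illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class CodecPoolExample {
  static void useCompressor(Configuration conf) {
    CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
    Compressor compressor = CodecPool.getCompressor(codec);
    try {
      // ... feed data through codec.createOutputStream(out, compressor) ...
    } finally {
      // For compressors annotated @DoNotPool, the pool calls end() and drops
      // the instance instead of recycling it, as the hunk above shows.
      CodecPool.returnCompressor(compressor);
    }
  }
}
```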
@@ -15,11 +15,6 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Implementation of compression/decompression for the BZip2
 * compression algorithm.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.io.compress.bzip2;
@@ -15,13 +15,6 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Implementation of compression/decompression for the LZ4
 * compression algorithm.
 *
 * @see <a href="http://code.google.com/p/lz4/">LZ4</a>
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.io.compress.lz4;
@@ -15,13 +15,6 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Implementation of compression/decompression for the Snappy
 * compression algorithm.
 *
 * @see <a href="http://code.google.com/p/snappy/">Snappy</a>
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.io.compress.snappy;
@@ -24,7 +24,6 @@ import java.util.zip.Deflater;
import java.util.zip.GZIPOutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.AlreadyClosedException;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.DoNotPool;
import org.apache.hadoop.util.DataChecksum;
@@ -84,10 +83,6 @@ public class BuiltInGzipCompressor implements Compressor {
      throw new IOException("compress called on finished compressor");
    }

    if (state == BuiltInGzipDecompressor.GzipStateLabel.ENDED) {
      throw new AlreadyClosedException("compress called on closed compressor");
    }

    int compressedBytesWritten = 0;

    // If we are not within uncompressed data yet, output the header.
@@ -144,8 +139,6 @@ public class BuiltInGzipCompressor implements Compressor {
  @Override
  public void end() {
    deflater.end();

    state = BuiltInGzipDecompressor.GzipStateLabel.ENDED;
  }

  @Override
@@ -23,7 +23,6 @@ import java.util.zip.Checksum;
import java.util.zip.DataFormatException;
import java.util.zip.Inflater;

import org.apache.hadoop.io.compress.AlreadyClosedException;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DoNotPool;
import org.apache.hadoop.util.DataChecksum;
@@ -110,11 +109,7 @@ public class BuiltInGzipDecompressor implements Decompressor {
     * Immediately after the trailer (and potentially prior to the next gzip
     * member/substream header), without reset() having been called.
     */
    FINISHED,
    /**
     * Immediately after end() has been called.
     */
    ENDED;
    FINISHED;
  }

  /**
@@ -191,10 +186,6 @@ public class BuiltInGzipDecompressor implements Decompressor {
      throws IOException {
    int numAvailBytes = 0;

    if (state == GzipStateLabel.ENDED) {
      throw new AlreadyClosedException("decompress called on closed decompressor");
    }

    if (state != GzipStateLabel.DEFLATE_STREAM) {
      executeHeaderState();

@@ -485,8 +476,6 @@ public class BuiltInGzipDecompressor implements Decompressor {
  @Override
  public synchronized void end() {
    inflater.end();

    state = GzipStateLabel.ENDED;
  }

  /**
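The hunks above add an `ENDED` state so a (de)compressor cannot be reused after `end()`. A stripped-down sketch of that lifecycle guard, independent of the real classes and using `IllegalStateException` in place of `AlreadyClosedException`:

```java
public class LifecycleGuardExample {
  enum State { READY, FINISHED, ENDED }

  static class Resource {
    private State state = State.READY;

    void use() {
      // Mirrors the AlreadyClosedException guard added above.
      if (state == State.ENDED) {
        throw new IllegalStateException("use() called after end()");
      }
      // ... do work ...
    }

    void end() {
      // Release underlying resources exactly once; further use() calls fail.
      state = State.ENDED;
    }
  }

  public static void main(String[] args) {
    Resource r = new Resource();
    r.use();
    r.end();
    try {
      r.use();
    } catch (IllegalStateException expected) {
      System.out.println("rejected use after end()");
    }
  }
}
```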
@@ -15,13 +15,6 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Implementation of compression/decompression based on the popular
 * gzip compressed file format.
 *
 * @see <a href="http://www.gzip.org/">gzip</a>
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.io.compress.zlib;
@@ -15,13 +15,6 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Implementation of compression/decompression based on the zStandard
 * compression algorithm.
 *
 * @see <a href="https://github.com/facebook/zstd">zStandard</a>
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.io.compress.zstd;
@@ -78,11 +78,6 @@ public final class CodecUtil {
  public static final String IO_ERASURECODE_CODEC_XOR_RAWCODERS_KEY =
      IO_ERASURECODE_CODEC + "xor.rawcoders";

  public static final String IO_ERASURECODE_CODEC_NATIVE_ENABLED_KEY =
      IO_ERASURECODE_CODEC + "native.enabled";

  public static final boolean IO_ERASURECODE_CODEC_NATIVE_ENABLED_DEFAULT = true;

  private CodecUtil() { }

  /**
@@ -175,14 +170,8 @@ public final class CodecUtil {

  private static RawErasureEncoder createRawEncoderWithFallback(
      Configuration conf, String codecName, ErasureCoderOptions coderOptions) {
    boolean nativeEncoderEnabled = conf.getBoolean(IO_ERASURECODE_CODEC_NATIVE_ENABLED_KEY,
        IO_ERASURECODE_CODEC_NATIVE_ENABLED_DEFAULT);
    String[] rawCoderNames = getRawCoderNames(conf, codecName);
    for (String rawCoderName : rawCoderNames) {
      if (!nativeEncoderEnabled && rawCoderName.contains("native")) {
        LOG.debug("Disable the encoder with ISA-L.");
        continue;
      }
      try {
        if (rawCoderName != null) {
          RawErasureCoderFactory fact = createRawCoderFactory(
@@ -203,14 +192,8 @@ public final class CodecUtil {

  private static RawErasureDecoder createRawDecoderWithFallback(
      Configuration conf, String codecName, ErasureCoderOptions coderOptions) {
    boolean nativeDecoderEnabled = conf.getBoolean(IO_ERASURECODE_CODEC_NATIVE_ENABLED_KEY,
        IO_ERASURECODE_CODEC_NATIVE_ENABLED_DEFAULT);
    String[] coders = getRawCoderNames(conf, codecName);
    for (String rawCoderName : coders) {
      if (!nativeDecoderEnabled && rawCoderName.contains("native")) {
        LOG.debug("Disable the decoder with ISA-L.");
        continue;
      }
      try {
        if (rawCoderName != null) {
          RawErasureCoderFactory fact = createRawCoderFactory(
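The new key above gates the ISA-L backed raw coders. A sketch of turning them off in configuration, using the constant declared in the hunk:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.erasurecode.CodecUtil;

public class DisableNativeCodersExample {
  static Configuration withoutNativeCoders(Configuration conf) {
    // With the flag false, the fallback loops above skip any raw coder
    // whose name contains "native" and fall through to the pure-Java ones.
    conf.setBoolean(CodecUtil.IO_ERASURECODE_CODEC_NATIVE_ENABLED_KEY, false);
    return conf;
  }
}
```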
@@ -15,12 +15,6 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * Various native IO-related calls not available in Java. These
 * functions should generally be used alongside a fallback to another
 * more portable mechanism.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.io.nativeio;
@@ -46,7 +46,7 @@ public interface AlignmentContext {
  void updateResponseState(RpcResponseHeaderProto.Builder header);

  /**
   * This is the intended client method call to implement to receive state info
   * This is the intended client method call to implement to recieve state info
   * during RPC response processing.
   *
   * @param header The RPC response header.
@@ -49,8 +49,7 @@ public final class CallerContext {
  public static final String CLIENT_PORT_STR = "clientPort";
  public static final String CLIENT_ID_STR = "clientId";
  public static final String CLIENT_CALL_ID_STR = "clientCallId";
  public static final String REAL_USER_STR = "realUser";
  public static final String PROXY_USER_PORT = "proxyUserPort";

  /** The caller context.
   *
   * It will be truncated if it exceeds the maximum allowed length in
@@ -141,8 +140,12 @@ public final class CallerContext {
    }

    public Builder(String context, Configuration conf) {
      this(context, conf.get(HADOOP_CALLER_CONTEXT_SEPARATOR_KEY,
          HADOOP_CALLER_CONTEXT_SEPARATOR_DEFAULT));
      if (isValid(context)) {
        sb.append(context);
      }
      fieldSeparator = conf.get(HADOOP_CALLER_CONTEXT_SEPARATOR_KEY,
          HADOOP_CALLER_CONTEXT_SEPARATOR_DEFAULT);
      checkFieldSeparator(fieldSeparator);
    }

    public Builder(String context, String separator) {
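A sketch of how the `Builder(context, conf)` constructor shown above is typically driven; the context string is illustrative and the separator is resolved from `HADOOP_CALLER_CONTEXT_SEPARATOR_KEY` in the configuration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.CallerContext;

public class CallerContextExample {
  static void tagRpcs(Configuration conf) {
    // The field separator comes from the configuration, as the hunk shows.
    CallerContext context =
        new CallerContext.Builder("exampleApp_query42", conf).build();
    // Subsequent RPCs from this thread carry the context for auditing.
    CallerContext.setCurrent(context);
  }
}
```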
@@ -18,10 +18,10 @@

package org.apache.hadoop.ipc;

import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.classification.VisibleForTesting;
import org.apache.hadoop.util.Preconditions;
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability;
@@ -124,28 +124,12 @@ public class Client implements AutoCloseable {
    Preconditions.checkArgument(cid != RpcConstants.INVALID_CALL_ID);
    Preconditions.checkState(callId.get() == null);
    Preconditions.checkArgument(rc != RpcConstants.INVALID_RETRY_COUNT);
    setCallIdAndRetryCountUnprotected(cid, rc, externalHandler);
  }

  public static void setCallIdAndRetryCountUnprotected(Integer cid, int rc,
      Object externalHandler) {
    callId.set(cid);
    retryCount.set(rc);
    EXTERNAL_CALL_HANDLER.set(externalHandler);
  }

  public static int getCallId() {
    return callId.get() != null ? callId.get() : nextCallId();
  }

  public static int getRetryCount() {
    return retryCount.get() != null ? retryCount.get() : 0;
  }

  public static Object getExternalHandler() {
    return EXTERNAL_CALL_HANDLER.get();
  }

  private final ConcurrentMap<ConnectionId, Connection> connections =
      new ConcurrentHashMap<>();
  private final Object putLock = new Object();
@@ -166,6 +150,73 @@ public class Client implements AutoCloseable {
  private final int maxAsyncCalls;
  private final AtomicInteger asyncCallCounter = new AtomicInteger(0);

  /**
   * Executor on which IPC calls' parameters are sent.
   * Deferring the sending of parameters to a separate
   * thread isolates them from thread interruptions in the
   * calling code.
   */
  private final ExecutorService sendParamsExecutor;
  private final static ClientExecutorServiceFactory clientExcecutorFactory =
      new ClientExecutorServiceFactory();

  private static class ClientExecutorServiceFactory {
    private int executorRefCount = 0;
    private ExecutorService clientExecutor = null;

    /**
     * Get Executor on which IPC calls' parameters are sent.
     * If the internal reference counter is zero, this method
     * creates the instance of Executor. If not, this method
     * just returns the reference of clientExecutor.
     *
     * @return An ExecutorService instance
     */
    synchronized ExecutorService refAndGetInstance() {
      if (executorRefCount == 0) {
        clientExecutor = Executors.newCachedThreadPool(
            new ThreadFactoryBuilder()
                .setDaemon(true)
                .setNameFormat("IPC Parameter Sending Thread #%d")
                .build());
      }
      executorRefCount++;

      return clientExecutor;
    }

    /**
     * Cleanup Executor on which IPC calls' parameters are sent.
     * If reference counter is zero, this method discards the
     * instance of the Executor. If not, this method
     * just decrements the internal reference counter.
     *
     * @return An ExecutorService instance if it exists.
     *   Null is returned if not.
     */
    synchronized ExecutorService unrefAndCleanup() {
      executorRefCount--;
      assert(executorRefCount >= 0);

      if (executorRefCount == 0) {
        clientExecutor.shutdown();
        try {
          if (!clientExecutor.awaitTermination(1, TimeUnit.MINUTES)) {
            clientExecutor.shutdownNow();
          }
        } catch (InterruptedException e) {
          LOG.warn("Interrupted while waiting for clientExecutor" +
              " to stop");
          clientExecutor.shutdownNow();
          Thread.currentThread().interrupt();
        }
        clientExecutor = null;
      }

      return clientExecutor;
    }
  }

  /**
   * set the ping interval value in configuration
   *
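The factory above pairs `refAndGetInstance()` with `unrefAndCleanup()` so that many `Client` instances share one sending executor. A compact, standalone illustration of that reference-counting contract with a plain `ExecutorService`:

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class RefCountedExecutorExample {
  private static int refCount = 0;
  private static ExecutorService executor;

  static synchronized ExecutorService acquire() {
    // First caller creates the shared executor; later callers reuse it.
    if (refCount == 0) {
      executor = Executors.newCachedThreadPool();
    }
    refCount++;
    return executor;
  }

  static synchronized void release() {
    // Last caller shuts the executor down, mirroring unrefAndCleanup().
    refCount--;
    if (refCount == 0) {
      executor.shutdown();
      executor = null;
    }
  }

  public static void main(String[] args) {
    ExecutorService e = acquire();
    e.submit(() -> System.out.println("shared task"));
    release();
  }
}
```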
@@ -234,6 +285,11 @@ public class Client implements AutoCloseable {
    conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_KEY, timeout);
  }

  @VisibleForTesting
  public static final ExecutorService getClientExecutor() {
    return Client.clientExcecutorFactory.clientExecutor;
  }

  /**
   * Increment this client's reference count
   */
@@ -363,7 +419,7 @@ public class Client implements AutoCloseable {
   * socket: responses may be delivered out of order. */
  private class Connection extends Thread {
    private InetSocketAddress server; // server ip:port
    private final ConnectionId remoteId; // connection id
    private final ConnectionId remoteId; // connection id
    private AuthMethod authMethod; // authentication method
    private AuthProtocol authProtocol;
    private int serviceClass;
@@ -391,9 +447,7 @@ public class Client implements AutoCloseable {
    private AtomicBoolean shouldCloseConnection = new AtomicBoolean(); // indicate if the connection is closed
    private IOException closeException; // close reason

    private final Thread rpcRequestThread;
    private final SynchronousQueue<Pair<Call, ResponseBuffer>> rpcRequestQueue =
        new SynchronousQueue<>(true);
    private final Object sendRpcRequestLock = new Object();

    private AtomicReference<Thread> connectingThread = new AtomicReference<>();
    private final Consumer<Connection> removeMethod;
@@ -402,9 +456,6 @@ public class Client implements AutoCloseable {
        Consumer<Connection> removeMethod) {
      this.remoteId = remoteId;
      this.server = remoteId.getAddress();
      this.rpcRequestThread = new Thread(new RpcRequestSender(),
          "IPC Parameter Sending Thread for " + remoteId);
      this.rpcRequestThread.setDaemon(true);

      this.maxResponseLength = remoteId.conf.getInt(
          CommonConfigurationKeys.IPC_MAXIMUM_RESPONSE_LENGTH,
@@ -590,12 +641,10 @@ public class Client implements AutoCloseable {
      InetSocketAddress currentAddr = NetUtils.createSocketAddrForHost(
          server.getHostName(), server.getPort());

      if (!currentAddr.isUnresolved() && !server.equals(currentAddr)) {
        LOG.warn("Address change detected. Old: {} New: {}", server, currentAddr);
      if (!server.equals(currentAddr)) {
        LOG.warn("Address change detected. Old: " + server.toString() +
            " New: " + currentAddr.toString());
        server = currentAddr;
        // Update the remote address so that reconnections are with the updated address.
        // This avoids thrashing.
        remoteId.setAddress(currentAddr);
        UserGroupInformation ticket = remoteId.getTicket();
        this.setName("IPC Client (" + socketFactory.hashCode()
            + ") connection to " + server.toString() + " from "
@@ -703,7 +752,7 @@ public class Client implements AutoCloseable {
     * handle that, a relogin is attempted.
     */
    private synchronized void handleSaslConnectionFailure(
        final int currRetries, final int maxRetries, final IOException ex,
        final int currRetries, final int maxRetries, final Exception ex,
        final Random rand, final UserGroupInformation ugi) throws IOException,
        InterruptedException {
      ugi.doAs(new PrivilegedExceptionAction<Object>() {
@@ -714,7 +763,10 @@ public class Client implements AutoCloseable {
          disposeSasl();
          if (shouldAuthenticateOverKrb()) {
            if (currRetries < maxRetries) {
              LOG.debug("Exception encountered while connecting to the server {}", remoteId, ex);
              if(LOG.isDebugEnabled()) {
                LOG.debug("Exception encountered while connecting to "
                    + "the server : " + ex);
              }
              // try re-login
              if (UserGroupInformation.isLoginKeytabBased()) {
                UserGroupInformation.getLoginUser().reloginFromKeytab();
@@ -732,11 +784,7 @@ public class Client implements AutoCloseable {
                  + UserGroupInformation.getLoginUser().getUserName() + " to "
                  + remoteId;
              LOG.warn(msg, ex);
              throw NetUtils.wrapException(remoteId.getAddress().getHostName(),
                  remoteId.getAddress().getPort(),
                  NetUtils.getHostname(),
                  0,
                  ex);
              throw (IOException) new IOException(msg).initCause(ex);
            }
          } else {
            // With RequestHedgingProxyProvider, one rpc call will send multiple
@@ -744,9 +792,11 @@ public class Client implements AutoCloseable {
            // all other requests will be interrupted. It's not a big problem,
            // and should not print a warning log.
            if (ex instanceof InterruptedIOException) {
              LOG.debug("Exception encountered while connecting to the server {}", remoteId, ex);
              LOG.debug("Exception encountered while connecting to the server",
                  ex);
            } else {
              LOG.warn("Exception encountered while connecting to the server {}", remoteId, ex);
              LOG.warn("Exception encountered while connecting to the server ",
                  ex);
            }
          }
          if (ex instanceof RemoteException)
@@ -1081,10 +1131,6 @@ public class Client implements AutoCloseable {

    @Override
    public void run() {
      // Don't start the ipc parameter sending thread until we start this
      // thread, because the shutdown logic only gets triggered if this
      // thread is started.
      rpcRequestThread.start();
      if (LOG.isDebugEnabled())
        LOG.debug(getName() + ": starting, having connections "
            + connections.size());
@@ -1108,52 +1154,9 @@ public class Client implements AutoCloseable {
            + connections.size());
    }

    /**
     * A thread to write rpc requests to the socket.
     */
    private class RpcRequestSender implements Runnable {
      @Override
      public void run() {
        while (!shouldCloseConnection.get()) {
          ResponseBuffer buf = null;
          try {
            Pair<Call, ResponseBuffer> pair =
                rpcRequestQueue.poll(maxIdleTime, TimeUnit.MILLISECONDS);
            if (pair == null || shouldCloseConnection.get()) {
              continue;
            }
            buf = pair.getRight();
            synchronized (ipcStreams.out) {
              if (LOG.isDebugEnabled()) {
                Call call = pair.getLeft();
                LOG.debug(getName() + "{} sending #{} {}", getName(), call.id,
                    call.rpcRequest);
              }
              // RpcRequestHeader + RpcRequest
              ipcStreams.sendRequest(buf.toByteArray());
              ipcStreams.flush();
            }
          } catch (InterruptedException ie) {
            // stop this thread
            return;
          } catch (IOException e) {
            // exception at this point would leave the connection in an
            // unrecoverable state (eg half a call left on the wire).
            // So, close the connection, killing any outstanding calls
            markClosed(e);
          } finally {
            //the buffer is just an in-memory buffer, but it is still polite to
            // close early
            IOUtils.closeStream(buf);
          }
        }
      }
    }

    /** Initiates a rpc call by sending the rpc request to the remote server.
     * Note: this is not called from the current thread, but by another
     * thread, so that if the current thread is interrupted that the socket
     * state isn't corrupted with a partially written message.
     * Note: this is not called from the Connection thread, but by other
     * threads.
     * @param call - the rpc request
     */
    public void sendRpcRequest(final Call call)
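The `RpcRequestSender` above drains a `SynchronousQueue` that `sendRpcRequest` offers into, so the caller thread never touches the socket directly. A small self-contained version of that producer/consumer handoff with plain strings:

```java
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.TimeUnit;

public class HandoffExample {
  public static void main(String[] args) throws InterruptedException {
    SynchronousQueue<String> queue = new SynchronousQueue<>(true); // fair, as above

    Thread sender = new Thread(() -> {
      try {
        String request;
        // Poll with a timeout so the loop can notice a shutdown condition,
        // as the Connection code does with shouldCloseConnection.
        while ((request = queue.poll(1, TimeUnit.SECONDS)) != null) {
          System.out.println("sending " + request);
        }
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
      }
    });
    sender.start();

    // offer() blocks only until the consumer is ready or the timeout expires.
    queue.offer("request-1", 1, TimeUnit.SECONDS);
    sender.join(3000);
  }
}
```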
@@ -1163,7 +1166,8 @@ public class Client implements AutoCloseable {
      }

      // Serialize the call to be sent. This is done from the actual
      // caller thread, rather than the rpcRequestThread in the connection,
      // caller thread, rather than the sendParamsExecutor thread,

      // so that if the serialization throws an error, it is reported
      // properly. This also parallelizes the serialization.
      //
@@ -1180,12 +1184,49 @@ public class Client implements AutoCloseable {
      final ResponseBuffer buf = new ResponseBuffer();
      header.writeDelimitedTo(buf);
      RpcWritable.wrap(call.rpcRequest).writeTo(buf);
      // Wait for the message to be sent. We offer with timeout to
      // prevent a race condition between checking the shouldCloseConnection
      // and the stopping of the polling thread
      while (!shouldCloseConnection.get()) {
        if (rpcRequestQueue.offer(Pair.of(call, buf), 1, TimeUnit.SECONDS)) {
          break;

      synchronized (sendRpcRequestLock) {
        Future<?> senderFuture = sendParamsExecutor.submit(new Runnable() {
          @Override
          public void run() {
            try {
              synchronized (ipcStreams.out) {
                if (shouldCloseConnection.get()) {
                  return;
                }
                if (LOG.isDebugEnabled()) {
                  LOG.debug(getName() + " sending #" + call.id
                      + " " + call.rpcRequest);
                }
                // RpcRequestHeader + RpcRequest
                ipcStreams.sendRequest(buf.toByteArray());
                ipcStreams.flush();
              }
            } catch (IOException e) {
              // exception at this point would leave the connection in an
              // unrecoverable state (eg half a call left on the wire).
              // So, close the connection, killing any outstanding calls
              markClosed(e);
            } finally {
              //the buffer is just an in-memory buffer, but it is still polite to
              // close early
              IOUtils.closeStream(buf);
            }
          }
        });

        try {
          senderFuture.get();
        } catch (ExecutionException e) {
          Throwable cause = e.getCause();

          // cause should only be a RuntimeException as the Runnable above
          // catches IOException
          if (cause instanceof RuntimeException) {
            throw (RuntimeException) cause;
          } else {
            throw new RuntimeException("unexpected checked exception", cause);
          }
        }
      }
    }
@@ -1336,6 +1377,7 @@ public class Client implements AutoCloseable {
        CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_DEFAULT);

    this.clientId = ClientId.getClientId();
    this.sendParamsExecutor = clientExcecutorFactory.refAndGetInstance();
    this.maxAsyncCalls = conf.getInt(
        CommonConfigurationKeys.IPC_CLIENT_ASYNC_CALLS_MAX_KEY,
        CommonConfigurationKeys.IPC_CLIENT_ASYNC_CALLS_MAX_DEFAULT);
@@ -1379,7 +1421,6 @@ public class Client implements AutoCloseable {
      // wake up all connections
      for (Connection conn : connections.values()) {
        conn.interrupt();
        conn.rpcRequestThread.interrupt();
        conn.interruptConnectingThread();
      }

@@ -1396,6 +1437,7 @@ public class Client implements AutoCloseable {
        }
      }
    }
    clientExcecutorFactory.unrefAndCleanup();
  }

  /**
@@ -1658,9 +1700,9 @@ public class Client implements AutoCloseable {
  @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
  @InterfaceStability.Evolving
  public static class ConnectionId {
    private InetSocketAddress address;
    private final UserGroupInformation ticket;
    private final Class<?> protocol;
    InetSocketAddress address;
    UserGroupInformation ticket;
    final Class<?> protocol;
    private static final int PRIME = 16777619;
    private final int rpcTimeout;
    private final int maxIdleTime; //connections will be culled if it was idle for
@@ -1712,27 +1754,6 @@ public class Client implements AutoCloseable {
      return address;
    }

    /**
     * This is used to update the remote address when an address change is detected. This method
     * ensures that the {@link #hashCode()} won't change.
     *
     * @param address the updated address
     * @throws IllegalArgumentException if the hostname or port doesn't match
     * @see Connection#updateAddress()
     */
    void setAddress(InetSocketAddress address) {
      if (!Objects.equals(this.address.getHostName(), address.getHostName())) {
        throw new IllegalArgumentException("Hostname must match: " + this.address + " vs "
            + address);
      }
      if (this.address.getPort() != address.getPort()) {
        throw new IllegalArgumentException("Port must match: " + this.address + " vs " + address);
      }

      this.address = address;
    }

    Class<?> getProtocol() {
      return protocol;
    }
@@ -1843,11 +1864,7 @@ public class Client implements AutoCloseable {
    @Override
    public int hashCode() {
      int result = connectionRetryPolicy.hashCode();
      // We calculate based on the host name and port without the IP address, since the hashCode
      // must be stable even if the IP address is updated.
      result = PRIME * result + ((address == null || address.getHostName() == null) ? 0 :
          address.getHostName().hashCode());
      result = PRIME * result + ((address == null) ? 0 : address.getPort());
      result = PRIME * result + ((address == null) ? 0 : address.hashCode());
      result = PRIME * result + (doPing ? 1231 : 1237);
      result = PRIME * result + maxIdleTime;
      result = PRIME * result + pingInterval;
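The `setAddress`/`hashCode` changes above keep a `ConnectionId` usable as a map key while the resolved IP behind an unchanged host:port may move. A reduced sketch of that invariant, not the real class and with equality narrowed to host and port only:

```java
import java.net.InetSocketAddress;
import java.util.Objects;

public class StableKeyExample {
  static final class Key {
    private InetSocketAddress address;

    Key(InetSocketAddress address) {
      this.address = address;
    }

    // Hash only the host name and port, never the resolved IP, so
    // re-resolution does not move the key to a different hash bucket.
    @Override
    public int hashCode() {
      return 31 * address.getHostName().hashCode() + address.getPort();
    }

    @Override
    public boolean equals(Object o) {
      if (!(o instanceof Key)) {
        return false;
      }
      Key other = (Key) o;
      return Objects.equals(address.getHostName(), other.address.getHostName())
          && address.getPort() == other.address.getPort();
    }

    void setAddress(InetSocketAddress updated) {
      // Same guard as ConnectionId.setAddress: host and port must match,
      // only the resolved IP is allowed to change.
      if (!Objects.equals(address.getHostName(), updated.getHostName())
          || address.getPort() != updated.getPort()) {
        throw new IllegalArgumentException("Host/port must match: "
            + address + " vs " + updated);
      }
      this.address = updated;
    }
  }
}
```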