Merge branch 'trunk' into HDFS-7240

Anu Engineer 2017-03-03 14:03:35 -08:00
commit 0951726233
1868 changed files with 83828 additions and 31870 deletions

.gitignore

@@ -15,6 +15,11 @@
.settings
target
build
dependency-reduced-pom.xml
# Filesystem contract test options and credentials
auth-keys.xml
azure-auth-keys.xml
# External tool builders
*/.externalToolBuilders
@@ -23,8 +28,6 @@ build
hadoop-common-project/hadoop-kms/downloads/
hadoop-hdfs-project/hadoop-hdfs/downloads
hadoop-hdfs-project/hadoop-hdfs-httpfs/downloads
hadoop-common-project/hadoop-common/src/test/resources/contract-test-options.xml
hadoop-tools/hadoop-openstack/src/test/resources/contract-test-options.xml
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/tla/yarnregistry.toolbox
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/dist
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/tmp
@@ -40,10 +43,4 @@ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/testem.log
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/dist
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tmp
yarnregistry.pdf
hadoop-tools/hadoop-aws/src/test/resources/auth-keys.xml
hadoop-tools/hadoop-aws/src/test/resources/contract-test-options.xml
hadoop-tools/hadoop-azure/src/test/resources/azure-auth-keys.xml
hadoop-tools/hadoop-openstack/src/test/resources/auth-keys.xml
patchprocess/
hadoop-tools/hadoop-aliyun/src/test/resources/auth-keys.xml
hadoop-tools/hadoop-aliyun/src/test/resources/contract-test-options.xml

@@ -83,6 +83,8 @@ Optional packages:
$ sudo apt-get install libjansson-dev
* Linux FUSE
$ sudo apt-get install fuse libfuse-dev
* ZStandard compression
$ sudo apt-get install zstd
----------------------------------------------------------------------------------
Maven main modules:
@@ -131,6 +133,8 @@ Maven build goals:
* Use -Dtar to create a TAR with the distribution (using -Pdist)
* Use -Preleasedocs to include the changelog and release docs (requires Internet connectivity)
* Use -Pyarn-ui to build YARN UI v2. (Requires Internet connectivity)
* Use -DskipShade to disable client jar shading to speed up build times (in
development environments only, not to build release artifacts)
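
  For example, a development-only build that skips both tests and client jar
  shading might be invoked as (illustrative):

    $ mvn clean install -DskipTests -DskipShade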
Snappy build options:
@@ -155,6 +159,29 @@ Maven build goals:
and it ignores the -Dsnappy.prefix option. If -Dsnappy.lib isn't given, the
bundling and building will fail.
ZStandard build options:
ZStandard is a compression library that can be utilized by the native code.
It is currently an optional component, meaning that Hadoop can be built with
or without this dependency.
* Use -Drequire.zstd to fail the build if libzstd.so is not found.
If this option is not specified and the zstd library is missing,
we silently build a version of libhadoop.so that cannot make use of zstd.
* Use -Dzstd.prefix to specify a nonstandard location for the libzstd
header files and library files. You do not need this option if you have
installed zstandard using a package manager.
* Use -Dzstd.lib to specify a nonstandard location for the libzstd library
files. Similarly to zstd.prefix, you do not need this option if you have
installed using a package manager.
* Use -Dbundle.zstd to copy the contents of the zstd.lib directory into
the final tar file. This option requires that -Dzstd.lib is also given,
and it ignores the -Dzstd.prefix option. If -Dzstd.lib isn't given, the
bundling and building will fail.
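
  As an illustration (the library path below is only an example), a native build
  that requires ZStandard and bundles a locally installed libzstd could look like:

    $ mvn package -Pdist,native -DskipTests -Dtar \
        -Drequire.zstd -Dzstd.lib=/usr/local/lib -Dbundle.zstd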
OpenSSL build options:
OpenSSL includes a crypto library that can be utilized by the native code.
@@ -390,7 +417,7 @@ http://www.zlib.net/
----------------------------------------------------------------------------------
Building distributions:
* Build distribution with native code : mvn package [-Pdist][-Pdocs][-Psrc][-Dtar]
* Build distribution with native code : mvn package [-Pdist][-Pdocs][-Psrc][-Dtar][-Dmaven.javadoc.skip=true]
----------------------------------------------------------------------------------
Running compatibility checks with checkcompatibility.py
@@ -402,3 +429,12 @@ managers to compare the compatibility of a previous and current release.
As an example, this invocation will check the compatibility of interfaces annotated as Public or LimitedPrivate:
./dev-support/bin/checkcompatibility.py --annotation org.apache.hadoop.classification.InterfaceAudience.Public --annotation org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate --include "hadoop.*" branch-2.7.2 trunk
----------------------------------------------------------------------------------
Changing the Hadoop version returned by VersionInfo
If for compatibility reasons the version of Hadoop has to be declared as a 2.x release in the information returned by
org.apache.hadoop.util.VersionInfo, set the property declared.hadoop.version to the desired version.
For example: mvn package -Pdist -Ddeclared.hadoop.version=2.11
If unset, the project version declared in the POM file is used.

File diff suppressed because it is too large

@@ -17,7 +17,11 @@ which has the following notices:
* This product includes software developed by IBM Corporation and others.
The binary distribution of this product bundles binaries of
AWS Java SDK 1.10.6,
AWS SDK for Java - Core 1.11.45,
AWS Java SDK for AWS KMS 1.11.45,
AWS Java SDK for Amazon S3 1.11.45,
AWS Java SDK for AWS STS 1.11.45,
JMES Path Query library 1.0,
which has the following notices:
* This software includes third party software subject to the following
copyrights: - XML parsing and utility functions from JetS3t - Copyright
@@ -257,6 +261,13 @@ provides utilities for the java.lang API, which can be obtained at:
* HOMEPAGE:
* https://commons.apache.org/proper/commons-lang/
This product contains a modified portion of 'JDOM 1.1', which can be obtained at:
* LICENSE:
* https://github.com/hunterhacker/jdom/blob/jdom-1.1/core/LICENSE.txt
* HOMEPAGE:
* http://www.jdom.org/
The binary distribution of this product bundles binaries of
Commons Codec 1.4,
which has the following notices:
@@ -283,7 +294,7 @@ which has the following notices:
Copyright 2002-2012 Ramnivas Laddad, Juergen Hoeller, Chris Beams
The binary distribution of this product bundles binaries of
Java Concurrency in Practice book annotations 1.0,
"Java Concurrency in Practice" book annotations 1.0,
which has the following notices:
* Copyright (c) 2005 Brian Goetz and Tim Peierls Released under the Creative
Commons Attribution License (http://creativecommons.org/licenses/by/2.5)
@@ -292,7 +303,15 @@ which has the following notices:
notice.
The binary distribution of this product bundles binaries of
Jetty 6.1.26,
Jetty :: Http Utility 9.3.11.,
Jetty :: IO Utility 9.3.11.,
Jetty :: Security 9.3.11.,
Jetty :: Server Core 9.3.11.,
Jetty :: Servlet Handling 9.3.11.,
Jetty :: Utilities 9.3.11.,
Jetty :: Utilities :: Ajax,
Jetty :: Webapp Application Support 9.3.11.,
Jetty :: XML utilities 9.3.11.,
which has the following notices:
* ==============================================================
Jetty Web Container
@@ -453,3 +472,107 @@ which has the following notices:
- voluntary contributions made by Paul Eng on behalf of the
Apache Software Foundation that were originally developed at iClick, Inc.,
software copyright (c) 1999.
The binary distribution of this product bundles binaries of
Logback Classic Module 1.1.2,
Logback Core Module 1.1.2,
which has the following notices:
* Logback: the reliable, generic, fast and flexible logging framework.
Copyright (C) 1999-2012, QOS.ch. All rights reserved.
The binary distribution of this product bundles binaries of
Apache HBase - Annotations 1.2.4,
Apache HBase - Client 1.2.4,
Apache HBase - Common 1.2.4,
Apache HBase - Hadoop Compatibility 1.2.4,
Apache HBase - Hadoop Two Compatibility 1.2.4,
Apache HBase - Prefix Tree 1.2.4,
Apache HBase - Procedure 1.2.4,
Apache HBase - Protocol 1.2.4,
Apache HBase - Server 1.2.4,
which has the following notices:
* Apache HBase
Copyright 2007-2015 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
--
This product incorporates portions of the 'Hadoop' project
Copyright 2007-2009 The Apache Software Foundation
Licensed under the Apache License v2.0
--
Our Orca logo we got here: http://www.vectorfree.com/jumping-orca
It is licensed Creative Commons Attribution 3.0.
See https://creativecommons.org/licenses/by/3.0/us/
We changed the logo by stripping the colored background, inverting
it and then rotating it some.
Later we found that vectorfree.com image is not properly licensed.
The original is owned by vectorportal.com. The original was
relicensed so we could use it as Creative Commons Attribution 3.0.
The license is bundled with the download available here:
http://www.vectorportal.com/subcategory/205/KILLER-WHALE-FREE-VECTOR.eps/ifile/9136/detailtest.asp
--
This product includes portions of the Bootstrap project v3.0.0
Copyright 2013 Twitter, Inc.
Licensed under the Apache License v2.0
This product uses the Glyphicons Halflings icon set.
http://glyphicons.com/
Copyright Jan Kovařík
Licensed under the Apache License v2.0 as a part of the Bootstrap project.
--
This product includes portions of the Guava project v14, specifically
'hbase-common/src/main/java/org/apache/hadoop/hbase/io/LimitInputStream.java'
Copyright (C) 2007 The Guava Authors
Licensed under the Apache License, Version 2.0
The binary distribution of this product bundles binaries of
Phoenix Core 4.7.0,
which has the following notices:
Apache Phoenix
Copyright 2013-2016 The Apache Software Foundation
This product includes software developed by The Apache Software
Foundation (http://www.apache.org/).
This also includes:
The phoenix-spark module has been adapted from the phoenix-spark library
distributed under the terms of the Apache 2 license. Original source copyright:
Copyright 2014 Simply Measured, Inc.
Copyright 2015 Interset Software Inc.
The file bin/daemon.py is based on the file of the same name in python-daemon 2.0.5
(https://pypi.python.org/pypi/python-daemon/). Original source copyright:
# Copyright © 2008–2015 Ben Finney <ben+python@benfinney.id.au>
# Copyright © 2007–2008 Robert Niederreiter, Jens Klein
# Copyright © 2004–2005 Chad J. Schroeder
# Copyright © 2003 Clark Evans
# Copyright © 2002 Noah Spurrier
# Copyright © 2001 Jürgen Hermann
The binary distribution of this product bundles binaries of
Plexus Cipher: encryption/decryption Component 1.4,
which has the following notices:
* The code in this component contains a class - Base64 taken from http://juliusdavies.ca/svn/not-yet-commons-ssl/tags/commons-ssl-0.3.10/src/java/org/apache/commons/ssl/Base64.java
which is Apache license: http://www.apache.org/licenses/LICENSE-2.0
The PBE key processing routine PBECipher.createCipher() is adopted from http://juliusdavies.ca/svn/not-yet-commons-ssl/tags/commons-ssl-0.3.10/src/java/org/apache/commons/ssl/OpenSSL.java
which is also Apache APL-2.0 license: http://www.apache.org/licenses/LICENSE-2.0
The binary distribution of this product bundles binaries of
software.amazon.ion:ion-java 1.0.1,
which has the following notices:
* Amazon Ion Java Copyright 2007-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.

@@ -249,7 +249,7 @@ function startgpgagent
if [[ "${SIGN}" = true ]]; then
if [[ -n "${GPGAGENT}" && -z "${GPG_AGENT_INFO}" ]]; then
echo "starting gpg agent"
echo "default-cache-ttl 7200" > "${LOGDIR}/gpgagent.conf"
echo "default-cache-ttl 14400" > "${LOGDIR}/gpgagent.conf"
# shellcheck disable=2046
eval $("${GPGAGENT}" --daemon \
--options "${LOGDIR}/gpgagent.conf" \
@@ -506,6 +506,9 @@ function makearelease
mkdir -p "${LOGDIR}"
# Install the Hadoop maven plugins first
run_and_redirect "${LOGDIR}/mvn_install_maven_plugins.log" "${MVN}" "${MVN_ARGS[@]}" -pl hadoop-maven-plugins -am clean install
# mvn clean for sanity
run_and_redirect "${LOGDIR}/mvn_clean.log" "${MVN}" "${MVN_ARGS[@]}" clean

@@ -114,6 +114,15 @@ for i in "$@"; do
--snappylibbundle=*)
SNAPPYLIBBUNDLE=${i#*=}
;;
--zstdbinbundle=*)
ZSTDBINBUNDLE=${i#*=}
;;
--zstdlib=*)
ZSTDLIB=${i#*=}
;;
--zstdlibbundle=*)
ZSTDLIBBUNDLE=${i#*=}
;;
esac
done
@@ -139,6 +148,8 @@ if [[ -d "${LIB_DIR}" ]]; then
bundle_native_lib "${SNAPPYLIBBUNDLE}" "snappy.lib" "snappy" "${SNAPPYLIB}"
bundle_native_lib "${ZSTDLIBBUNDLE}" "zstd.lib" "zstd" "${ZSTDLIB}"
bundle_native_lib "${OPENSSLLIBBUNDLE}" "openssl.lib" "crypto" "${OPENSSLLIB}"
bundle_native_lib "${ISALBUNDLE}" "isal.lib" "isa" "${ISALLIB}"
@@ -159,6 +170,8 @@ if [[ -d "${BIN_DIR}" ]] ; then
bundle_native_bin "${SNAPPYBINBUNDLE}" "${SNAPPYLIBBUNDLE}" "snappy.lib" "snappy" "${SNAPPYLIB}"
bundle_native_bin "${ZSTDBINBUNDLE}" "${ZSTDLIBBUNDLE}" "zstd.lib" "zstd" "${ZSTDLIB}"
bundle_native_bin "${OPENSSLBINBUNDLE}" "${OPENSSLLIBBUNDLE}" "openssl.lib" "crypto" "${OPENSSLLIB}"
fi

@@ -137,6 +137,12 @@ run copy "${ROOT}/hadoop-tools/hadoop-tools-dist/target/hadoop-tools-dist-${VERS
run cp -pr "${ROOT}/hadoop-hdfs-project/hadoop-hdfs-httpfs/target/hadoop-hdfs-httpfs-${VERSION}"/* .
run cp -pr "${ROOT}/hadoop-common-project/hadoop-kms/target/hadoop-kms-${VERSION}"/* .
# copy client jars as-is
run mkdir -p "share/hadoop/client"
run cp -p "${ROOT}/hadoop-client-modules/hadoop-client-api/target/hadoop-client-api-${VERSION}.jar" share/hadoop/client/
run cp -p "${ROOT}/hadoop-client-modules/hadoop-client-runtime/target/hadoop-client-runtime-${VERSION}.jar" share/hadoop/client/
run cp -p "${ROOT}/hadoop-client-modules/hadoop-client-minicluster/target/hadoop-client-minicluster-${VERSION}.jar" share/hadoop/client/
echo
echo "Hadoop dist layout available at: ${BASEDIR}/hadoop-${VERSION}"
echo
echo

dev-support/bin/qbt Normal file → Executable file

@@ -138,6 +138,7 @@ ENV MAVEN_OPTS -Xms256m -Xmx512m
RUN apt-get -y install nodejs && \
ln -s /usr/bin/nodejs /usr/bin/node && \
apt-get -y install npm && \
npm install npm@latest -g && \
npm install -g bower && \
npm install -g ember-cli

@@ -23,12 +23,11 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>3.0.0-alpha2-SNAPSHOT</version>
<version>3.0.0-alpha3-SNAPSHOT</version>
<relativePath>../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-assemblies</artifactId>
<version>3.0.0-alpha2-SNAPSHOT</version>
<version>3.0.0-alpha3-SNAPSHOT</version>
<name>Apache Hadoop Assemblies</name>
<description>Apache Hadoop Assemblies</description>

@@ -93,20 +93,6 @@
<directory>${project.build.directory}/webapps</directory>
<outputDirectory>/share/hadoop/${hadoop.component}/webapps</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/src/main/conf</directory>
<outputDirectory>/share/hadoop/${hadoop.component}/templates</outputDirectory>
<includes>
<include>*-site.xml</include>
</includes>
</fileSet>
<fileSet>
<directory>${basedir}/src/main/packages/templates/conf</directory>
<outputDirectory>/share/hadoop/${hadoop.component}/templates/conf</outputDirectory>
<includes>
<include>*</include>
</includes>
</fileSet>
<fileSet>
<directory>${project.build.directory}</directory>
<outputDirectory>/share/hadoop/${hadoop.component}</outputDirectory>

@@ -21,6 +21,14 @@
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<fileSets>
<!-- Jar file -->
<fileSet>
<directory>target</directory>
<outputDirectory>/share/hadoop/hdfs</outputDirectory>
<includes>
<include>${project.artifactId}-${project.version}.jar</include>
</includes>
</fileSet>
<!-- Configuration files -->
<fileSet>
<directory>${basedir}/src/main/conf</directory>
@@ -41,7 +49,7 @@
<directory>${basedir}/src/main/libexec</directory>
<outputDirectory>/libexec</outputDirectory>
<includes>
<include>*</include>
<include>**/*</include>
</includes>
<fileMode>0755</fileMode>
</fileSet>
@@ -51,4 +59,19 @@
<outputDirectory>/share/doc/hadoop/httpfs</outputDirectory>
</fileSet>
</fileSets>
<dependencySets>
<dependencySet>
<useProjectArtifact>false</useProjectArtifact>
<outputDirectory>/share/hadoop/hdfs/lib</outputDirectory>
<!-- Exclude hadoop artifacts. They will be found via HADOOP* env -->
<excludes>
<exclude>org.apache.hadoop:hadoop-common</exclude>
<exclude>org.apache.hadoop:hadoop-hdfs</exclude>
<!-- use slf4j from common to avoid multiple binding warnings -->
<exclude>org.slf4j:slf4j-api</exclude>
<exclude>org.slf4j:slf4j-log4j12</exclude>
<exclude>org.hsqldb:hsqldb</exclude>
</excludes>
</dependencySet>
</dependencySets>
</assembly>

@@ -21,6 +21,14 @@
</formats>
<includeBaseDirectory>false</includeBaseDirectory>
<fileSets>
<!-- Jar file -->
<fileSet>
<directory>target</directory>
<outputDirectory>/share/hadoop/common</outputDirectory>
<includes>
<include>${project.artifactId}-${project.version}.jar</include>
</includes>
</fileSet>
<!-- Configuration files -->
<fileSet>
<directory>${basedir}/src/main/conf</directory>
@@ -41,7 +49,7 @@
<directory>${basedir}/src/main/libexec</directory>
<outputDirectory>/libexec</outputDirectory>
<includes>
<include>*</include>
<include>**/*</include>
</includes>
<fileMode>0755</fileMode>
</fileSet>
@@ -51,4 +59,19 @@
<outputDirectory>/share/doc/hadoop/kms</outputDirectory>
</fileSet>
</fileSets>
<dependencySets>
<dependencySet>
<useProjectArtifact>false</useProjectArtifact>
<outputDirectory>/share/hadoop/common/lib</outputDirectory>
<!-- Exclude hadoop artifacts. They will be found via HADOOP* env -->
<excludes>
<exclude>org.apache.hadoop:hadoop-common</exclude>
<exclude>org.apache.hadoop:hadoop-hdfs</exclude>
<!-- use slf4j from common to avoid multiple binding warnings -->
<exclude>org.slf4j:slf4j-api</exclude>
<exclude>org.slf4j:slf4j-log4j12</exclude>
<exclude>org.hsqldb:hsqldb</exclude>
</excludes>
</dependencySet>
</dependencySets>
</assembly>

@@ -72,20 +72,6 @@
<directory>${project.build.directory}/webapps</directory>
<outputDirectory>/share/hadoop/${hadoop.component}/webapps</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/src/main/conf</directory>
<outputDirectory>/share/hadoop/${hadoop.component}/templates</outputDirectory>
<includes>
<include>*-site.xml</include>
</includes>
</fileSet>
<fileSet>
<directory>${basedir}/src/main/packages/templates/conf</directory>
<outputDirectory>/share/hadoop/${hadoop.component}/templates/conf</outputDirectory>
<includes>
<include>*</include>
</includes>
</fileSet>
<fileSet>
<directory>${basedir}/dev-support/jdiff</directory>
<outputDirectory>/share/hadoop/${hadoop.component}/jdiff</outputDirectory>

@@ -166,20 +166,6 @@
<directory>${project.build.directory}/webapps</directory>
<outputDirectory>/share/hadoop/${hadoop.component}/webapps</outputDirectory>
</fileSet>
<fileSet>
<directory>${basedir}/src/main/conf</directory>
<outputDirectory>/share/hadoop/${hadoop.component}/templates</outputDirectory>
<includes>
<include>*-site.xml</include>
</includes>
</fileSet>
<fileSet>
<directory>${basedir}/src/main/packages/templates/conf</directory>
<outputDirectory>/share/hadoop/${hadoop.component}/templates/conf</outputDirectory>
<includes>
<include>*</include>
</includes>
</fileSet>
<fileSet>
<directory>${basedir}/dev-support/jdiff</directory>
<outputDirectory>/share/hadoop/${hadoop.component}/jdiff</outputDirectory>

@@ -18,7 +18,7 @@
<parent>
<artifactId>hadoop-main</artifactId>
<groupId>org.apache.hadoop</groupId>
<version>3.0.0-alpha2-SNAPSHOT</version>
<version>3.0.0-alpha3-SNAPSHOT</version>
</parent>
<modelVersion>4.0.0</modelVersion>
<artifactId>hadoop-build-tools</artifactId>
@@ -54,6 +54,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-resources-plugin</artifactId>
<version>${maven-resources-plugin.version}</version>
<executions>
<execution>
<id>copy-resources</id>
@@ -80,6 +81,7 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-remote-resources-plugin</artifactId>
<version>${maven-remote-resources-plugin.version}</version>
<executions>
<execution>
<phase>process-resources</phase>

@@ -123,9 +123,7 @@
<!-- Checks for Size Violations. -->
<!-- See http://checkstyle.sf.net/config_sizes.html -->
<module name="LineLength">
<property name="ignorePattern" value="^(package|import) .*"/>
</module>
<module name="LineLength"/>
<module name="MethodLength"/>
<module name="ParameterNumber"/>

@@ -0,0 +1,254 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>3.0.0-alpha3-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<artifactId>hadoop-client-api</artifactId>
<version>3.0.0-alpha3-SNAPSHOT</version>
<packaging>jar</packaging>
<description>Apache Hadoop Client</description>
<name>Apache Hadoop Client API</name>
<properties>
<shaded.dependency.prefix>org.apache.hadoop.shaded</shaded.dependency.prefix>
<!-- We contain no source -->
<maven.javadoc.skip>true</maven.javadoc.skip>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<!-- We list this as optional because as a type-pom it won't get included in the shading.
Marking it optional means it doesn't count as a transitive dependency of this artifact.
-->
<optional>true</optional>
<exclusions>
<!-- these APIs are a part of the SE JDK -->
<exclusion>
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
</exclusion>
<exclusion>
<groupId>xml-apis</groupId>
<artifactId>xml-apis</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- This comes from our parent pom. If we don't expressly change it here to get included,
downstream will get warnings at compile time. -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-annotations</artifactId>
<scope>compile</scope>
<exclusions>
<exclusion>
<groupId>jdk.tools</groupId>
<artifactId>jdk.tools</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
<profiles>
<profile>
<id>shade</id>
<activation>
<property><name>!skipShade</name></property>
</activation>
<build>
<plugins>
<!-- We contain no source -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<configuration>
<skipSource>true</skipSource>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-maven-plugins</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
<configuration>
<artifactSet>
<includes>
<include>org.apache.hadoop:*</include>
</includes>
</artifactSet>
<filters>
<!-- We get these package level classes from various yarn api jars -->
<filter>
<artifact>org.apache.hadoop:hadoop-yarn-common</artifact>
<excludes>
<exclude>org/apache/hadoop/yarn/factories/package-info.class</exclude>
<exclude>org/apache/hadoop/yarn/util/package-info.class</exclude>
<exclude>org/apache/hadoop/yarn/factory/providers/package-info.class</exclude>
<exclude>org/apache/hadoop/yarn/client/api/impl/package-info.class</exclude>
<exclude>org/apache/hadoop/yarn/client/api/package-info.class</exclude>
</excludes>
</filter>
</filters>
<relocations>
<relocation>
<pattern>org/</pattern>
<shadedPattern>${shaded.dependency.prefix}.org.</shadedPattern>
<excludes>
<exclude>org/apache/hadoop/*</exclude>
<exclude>org/apache/hadoop/**/*</exclude>
<!-- Our non-shaded htrace and logging libraries -->
<exclude>org/apache/htrace/*</exclude>
<exclude>org/apache/htrace/**/*</exclude>
<exclude>org/slf4j/*</exclude>
<exclude>org/slf4j/**/*</exclude>
<exclude>org/apache/commons/logging/*</exclude>
<exclude>org/apache/commons/logging/**/*</exclude>
<exclude>org/apache/log4j/*</exclude>
<exclude>org/apache/log4j/**/*</exclude>
<exclude>**/pom.xml</exclude>
<!-- Not the org/ packages that are a part of the jdk -->
<exclude>org/ietf/jgss/*</exclude>
<exclude>org/omg/**/*</exclude>
<exclude>org/w3c/dom/*</exclude>
<exclude>org/w3c/dom/**/*</exclude>
<exclude>org/xml/sax/*</exclude>
<exclude>org/xml/sax/**/*</exclude>
</excludes>
</relocation>
<relocation>
<pattern>com/</pattern>
<shadedPattern>${shaded.dependency.prefix}.com.</shadedPattern>
<excludes>
<exclude>**/pom.xml</exclude>
<!-- Not the com/ packages that are a part of particular jdk implementations -->
<exclude>com/sun/tools/*</exclude>
<exclude>com/sun/javadoc/*</exclude>
<exclude>com/sun/security/*</exclude>
<exclude>com/sun/jndi/*</exclude>
<exclude>com/sun/management/*</exclude>
<exclude>com/sun/tools/**/*</exclude>
<exclude>com/sun/javadoc/**/*</exclude>
<exclude>com/sun/security/**/*</exclude>
<exclude>com/sun/jndi/**/*</exclude>
<exclude>com/sun/management/**/*</exclude>
</excludes>
</relocation>
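<!-- For example, with the prefix configured above, references to a third-party class such as
     com.google.protobuf.Message are rewritten to org.apache.hadoop.shaded.com.google.protobuf.Message,
     while org.apache.hadoop classes keep their original names. -->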
<relocation>
<pattern>io/</pattern>
<shadedPattern>${shaded.dependency.prefix}.io.</shadedPattern>
<excludes>
<exclude>**/pom.xml</exclude>
<!-- Exclude config keys for Hadoop that look like package names -->
<exclude>io/compression/*</exclude>
<exclude>io/compression/**/*</exclude>
<exclude>io/mapfile/*</exclude>
<exclude>io/mapfile/**/*</exclude>
<exclude>io/map/index/*</exclude>
<exclude>io/seqfile/*</exclude>
<exclude>io/seqfile/**/*</exclude>
<exclude>io/file/buffer/size</exclude>
<exclude>io/skip/checksum/errors</exclude>
<exclude>io/sort/*</exclude>
<exclude>io/serializations</exclude>
</excludes>
</relocation>
<relocation>
<pattern>javax/servlet/</pattern>
<shadedPattern>${shaded.dependency.prefix}.javax.servlet.</shadedPattern>
<excludes>
<exclude>**/pom.xml</exclude>
</excludes>
</relocation>
<relocation>
<pattern>net/</pattern>
<shadedPattern>${shaded.dependency.prefix}.net.</shadedPattern>
<excludes>
<exclude>**/pom.xml</exclude>
<!-- Exclude config keys for Hadoop that look like package names -->
<exclude>net/topology/*</exclude>
<exclude>net/topology/**/*</exclude>
</excludes>
</relocation>
</relocations>
<transformers>
<!-- Needed until MSHADE-182 -->
<transformer implementation="org.apache.hadoop.maven.plugin.shade.resource.ServicesResourceTransformer"/>
<transformer implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer"/>
<transformer implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
<resource>NOTICE.txt</resource>
</transformer>
<transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
<resource>META-INF/LICENSE.txt</resource>
<file>${basedir}/../../LICENSE.txt</file>
</transformer>
<transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
<resource>META-INF/NOTICE.txt</resource>
<file>${basedir}/../../NOTICE.txt</file>
</transformer>
</transformers>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>license-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>noshade</id>
<activation>
<property><name>skipShade</name></property>
</activation>
<build>
<plugins>
<!-- We contain no source -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<configuration>
<skipSource>true</skipSource>
</configuration>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>license-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>

@@ -0,0 +1,124 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>3.0.0-alpha3-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<artifactId>hadoop-client-check-invariants</artifactId>
<version>3.0.0-alpha3-SNAPSHOT</version>
<packaging>pom</packaging>
<description>Enforces our invariants for the api and runtime client modules.</description>
<name>Apache Hadoop Client Packaging Invariants</name>
<properties>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client-api</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client-runtime</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
<version>1.4</version>
<dependencies>
<dependency>
<groupId>org.codehaus.mojo</groupId>
<artifactId>extra-enforcer-rules</artifactId>
<version>1.0-beta-3</version>
</dependency>
</dependencies>
<executions>
<execution>
<id>enforce-banned-dependencies</id>
<goals>
<goal>enforce</goal>
</goals>
<configuration>
<rules>
<banTransitiveDependencies>
<!--
<message>
Our client-facing artifacts are not supposed to have additional dependencies
and one or more of them do. The output from the enforcer plugin should give
specifics.
</message>
-->
<excludes>
<!-- annotations is provided, and both artifacts exclude the tools transitive,
but enforcer still sees it.
-->
<exclude>org.apache.hadoop:hadoop-annotations</exclude>
<!-- We leave HTrace as an unshaded dependency on purpose so that tracing within a JVM will work -->
<exclude>org.apache.htrace:htrace-core4</exclude>
<!-- Leave slf4j unshaded so downstream users can configure logging. -->
<exclude>org.slf4j:slf4j-api</exclude>
<!-- Leave commons-logging unshaded so downstream users can configure logging. -->
<exclude>commons-logging:commons-logging</exclude>
<!-- Leave log4j unshaded so downstream users can configure logging. -->
<exclude>log4j:log4j</exclude>
</excludes>
</banTransitiveDependencies>
<banDuplicateClasses>
<findAllDuplicates>true</findAllDuplicates>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-annotations</artifactId>
<ignoreClasses>
<ignoreClass>*</ignoreClass>
</ignoreClasses>
</dependency>
</dependencies>
</banDuplicateClasses>
</rules>
<!-- TODO we need a rule for "we don't have classes that are outside of the org.apache.hadoop package" -->
<!-- TODO we need a rule for "the constants in this set of classes haven't been shaded / don't have this prefix"
Manually checking the set of Keys that look like packages we relocate:
cat `find . \( -name '*Keys.java' -o -name '*KeysPublic.java' \) -a -path '*/src/main/*'` | grep -E "\"(io\.|org\.|com\.|net\.)" | grep -v "^package" | grep -v "^import" | grep -v "\"org.apache.hadoop"
Manually check the set of shaded artifacts to see if the Keys constants have been relocated:
for clazz in `find . \( -name '*Keys.java' -o -name '*KeysPublic.java' \) -a -path '*/src/main/*'`; do
clazz=${clazz#*src/main/java/}
clazz="${clazz%.java}"
javap -cp hadoop-client-modules/hadoop-client-api/target/hadoop-client-api-3.0.0-alpha2-SNAPSHOT.jar:hadoop-client-modules/hadoop-client-runtime/target/hadoop-client-runtime-3.0.0-alpha2-SNAPSHOT.jar:hadoop-client-modules/hadoop-client-minicluster/target/hadoop-client-minicluster-3.0.0-alpha2-SNAPSHOT.jar \
-constants "${clazz//\//.}" | grep "org.apache.hadoop.shaded"
done
-->
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>
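
A minimal sketch of exercising these checks (module path taken from this tree; the flags are
ordinary Maven options). Building the module should trigger the enforcer rules above and fail
on any banned transitive dependency or duplicate class:

  $ mvn -pl hadoop-client-modules/hadoop-client-check-invariants -am package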

@@ -0,0 +1,143 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>3.0.0-alpha3-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<artifactId>hadoop-client-check-test-invariants</artifactId>
<version>3.0.0-alpha3-SNAPSHOT</version>
<packaging>pom</packaging>
<description>Enforces our invariants for the testing client modules.</description>
<name>Apache Hadoop Client Packaging Invariants for Test</name>
<properties>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client-api</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client-runtime</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client-minicluster</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
<version>1.4</version>
<dependencies>
<dependency>
<groupId>org.codehaus.mojo</groupId>
<artifactId>extra-enforcer-rules</artifactId>
<version>1.0-beta-3</version>
</dependency>
</dependencies>
<executions>
<execution>
<id>enforce-banned-dependencies</id>
<goals>
<goal>enforce</goal>
</goals>
<configuration>
<rules>
<banTransitiveDependencies>
<!--
<message>
Our client-facing artifacts are not supposed to have additional dependencies
and one or more of them do. The output from the enforcer plugin should give
specifics.
</message>
-->
<excludes>
<!-- annotations is provided, and both artifacts exclude the tools transitive,
but enforcer still sees it.
-->
<exclude>org.apache.hadoop:hadoop-annotations</exclude>
<!-- We leave HTrace as an unshaded dependency on purpose so that tracing within a JVM will work -->
<exclude>org.apache.htrace:htrace-core4</exclude>
<!-- Leave slf4j unshaded so downstream users can configure logging. -->
<exclude>org.slf4j:slf4j-api</exclude>
<!-- Leave commons-logging unshaded so downstream users can configure logging. -->
<exclude>commons-logging:commons-logging</exclude>
<!-- Leave log4j unshaded so downstream users can configure logging. -->
<exclude>log4j:log4j</exclude>
<!-- Leave JUnit unshaded so downstream can use our test helper classes -->
<exclude>junit:junit</exclude>
<!-- JUnit brings in hamcrest -->
<exclude> org.hamcrest:hamcrest-core</exclude>
</excludes>
</banTransitiveDependencies>
<banDuplicateClasses>
<findAllDuplicates>true</findAllDuplicates>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-annotations</artifactId>
<ignoreClasses>
<ignoreClass>*</ignoreClass>
</ignoreClasses>
</dependency>
<dependency>
<!--Duplicate classes found:-->
<!--Found in:-->
<!--org.apache.hadoop:hadoop-client-runtime:jar:3.0.0-alpha3-SNAPSHOT:compile-->
<!--org.apache.hadoop:hadoop-client-minicluster:jar:3.0.0-alpha3-SNAPSHOT:compile-->
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
<ignoreClasses>
<ignoreClass>*</ignoreClass>
</ignoreClasses>
</dependency>
</dependencies>
</banDuplicateClasses>
</rules>
<!-- TODO we need a rule for "we don't have classes that are outside of the org.apache.hadoop package" -->
<!-- TODO we need a rule for "the constants in this set of classes haven't been shaded / don't have this prefix"
Manually checking the set of Keys that look like packages we relocate:
cat `find . \( -name '*Keys.java' -o -name '*KeysPublic.java' \) -a -path '*/src/main/*'` | grep -E "\"(io\.|org\.|com\.|net\.)" | grep -v "^package" | grep -v "^import" | grep -v "\"org.apache.hadoop"
Manually check the set of shaded artifacts to see if the Keys constants have been relocated:
for clazz in `find . \( -name '*Keys.java' -o -name '*KeysPublic.java' \) -a -path '*/src/main/*'`; do
clazz=${clazz#*src/main/java/}
clazz="${clazz%.java}"
javap -cp hadoop-client-modules/hadoop-client-api/target/hadoop-client-api-3.0.0-alpha2-SNAPSHOT.jar:hadoop-client-modules/hadoop-client-runtime/target/hadoop-client-runtime-3.0.0-alpha2-SNAPSHOT.jar:hadoop-client-modules/hadoop-client-minicluster/target/hadoop-client-minicluster-3.0.0-alpha2-SNAPSHOT.jar \
-constants "${clazz//\//.}" | grep "org.apache.hadoop.shaded"
done
-->
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

@@ -0,0 +1,159 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>3.0.0-alpha3-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<artifactId>hadoop-client-integration-tests</artifactId>
<version>3.0.0-alpha3-SNAPSHOT</version>
<description>Checks that we can use the generated artifacts</description>
<name>Apache Hadoop Client Packaging Integration Tests</name>
<properties>
<failsafe.timeout>400</failsafe.timeout>
</properties>
<dependencies>
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<profiles>
<profile>
<id>shade</id>
<activation>
<property><name>!skipShade</name></property>
</activation>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client-api</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client-runtime</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client-minicluster</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<!-- Because our tests rely on our shaded artifacts, we can't compile
them until after the package phase has run.
-->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<executions>
<!--
First, let's make sure the normal test-compile doesn't try to
compile our integration tests.
-->
<execution>
<id>default-testCompile</id>
<phase>test-compile</phase>
<configuration>
<testExcludes>
<testExclude>**/IT*</testExclude>
<testExclude>**/*IT</testExclude>
</testExcludes>
</configuration>
</execution>
<!--
Finally, let's make a 'just for integration tests'-compile that
fires off prior to our integration tests but after the package
phase has created our shaded artifacts.
-->
<execution>
<id>compile-integration-tests</id>
<phase>pre-integration-test</phase>
<goals>
<goal>testCompile</goal>
</goals>
<configuration>
<testIncludes>
<testInclude>**/IT*</testInclude>
<testInclude>**/*IT</testInclude>
</testIncludes>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-failsafe-plugin</artifactId>
<executions>
<execution>
<goals>
<goal>integration-test</goal>
<goal>verify</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>noshade</id>
<activation>
<property><name>skipShade</name></property>
</activation>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
</dependencies>
</profile>
</profiles>
</project>
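
A rough sketch of running these integration tests once the shaded client artifacts are available
in the local repository (for instance after a top-level mvn install -DskipTests). With the default
shade profile active, the ITs are compiled in the pre-integration-test phase and run by the
failsafe plugin during verify:

  $ mvn -pl hadoop-client-modules/hadoop-client-integration-tests verify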

@@ -0,0 +1,113 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.hadoop.example;
import java.io.IOException;
import java.net.URISyntaxException;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;
/**
* Ensure that we can perform operations against the shaded minicluster
* given the API and runtime jars by performing some simple smoke tests.
*/
public class ITUseMiniCluster {
private static final Logger LOG =
LoggerFactory.getLogger(ITUseMiniCluster.class);
private MiniDFSCluster cluster;
private static final String TEST_PATH = "/foo/bar/cats/dee";
private static final String FILENAME = "test.file";
private static final String TEXT = "Lorem ipsum dolor sit amet, consectetur "
+ "adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore "
+ "magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation "
+ "ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute "
+ "irure dolor in reprehenderit in voluptate velit esse cillum dolore eu "
+ "fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident,"
+ " sunt in culpa qui officia deserunt mollit anim id est laborum.";
@Before
public void clusterUp() throws IOException {
final Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(3)
.build();
cluster.waitActive();
}
@After
public void clusterDown() {
cluster.close();
}
@Test
public void useHdfsFileSystem() throws IOException {
try (final FileSystem fs = cluster.getFileSystem()) {
simpleReadAfterWrite(fs);
}
}
public void simpleReadAfterWrite(final FileSystem fs) throws IOException {
LOG.info("Testing read-after-write with FS implementation: {}", fs);
final Path path = new Path(TEST_PATH, FILENAME);
if (!fs.mkdirs(path.getParent())) {
throw new IOException("Mkdirs failed to create " +
TEST_PATH);
}
try (final FSDataOutputStream out = fs.create(path)) {
out.writeUTF(TEXT);
}
try (final FSDataInputStream in = fs.open(path)) {
final String result = in.readUTF();
Assert.assertEquals("Didn't read back text we wrote.", TEXT, result);
}
}
@Test
public void useWebHDFS() throws IOException, URISyntaxException {
try (final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(
cluster.getConfiguration(0), WebHdfsConstants.WEBHDFS_SCHEME)) {
simpleReadAfterWrite(fs);
}
}
}

@@ -1,3 +1,5 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
@@ -14,11 +16,19 @@
See the License for the specific language governing permissions and
limitations under the License.
-->
<FindBugsFilter>
<!-- Buffer object is accessed within trusted code and intentionally assigned instead of array copy -->
<Match>
<Class name="org.apache.hadoop.hdfs.web.PrivateAzureDataLakeFileSystem$BatchAppendOutputStream$CommitTask"/>
<Bug pattern="EI_EXPOSE_REP2"/>
<Priority value="2"/>
</Match>
</FindBugsFilter>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<!-- Turn security off for tests by default -->
<property>
<name>hadoop.security.authentication</name>
<value>simple</value>
</property>
<!-- Disable min block size since most tests use tiny blocks -->
<property>
<name>dfs.namenode.fs-limits.min-block-size</name>
<value>0</value>
</property>
</configuration>

@@ -0,0 +1,24 @@
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# log4j configuration used during build and unit tests
log4j.rootLogger=info,stdout
log4j.threshold=ALL
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n

@@ -0,0 +1,792 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>3.0.0-alpha3-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<artifactId>hadoop-client-minicluster</artifactId>
<version>3.0.0-alpha3-SNAPSHOT</version>
<packaging>jar</packaging>
<description>Apache Hadoop Minicluster for Clients</description>
<name>Apache Hadoop Client Test Minicluster</name>
<properties>
<shaded.dependency.prefix>org.apache.hadoop.shaded</shaded.dependency.prefix>
<!-- We contain no source -->
<maven.javadoc.skip>true</maven.javadoc.skip>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client-api</artifactId>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client-runtime</artifactId>
<scope>runtime</scope>
</dependency>
<!-- Leave JUnit as a direct dependency -->
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>runtime</scope>
</dependency>
<!-- Adding hadoop-annotations so we can make it optional to remove from our transitive tree -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-annotations</artifactId>
<scope>compile</scope>
<optional>true</optional>
<exclusions>
<exclusion>
<groupId>jdk.tools</groupId>
<artifactId>jdk.tools</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- uncomment this dependency if you need to use
`mvn dependency:tree -Dverbose` to determine if a dependency shows up
in both the hadoop-client-* artifacts and something under minicluster.
-->
<!--
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<scope>provided</scope>
</dependency>
-->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minicluster</artifactId>
<optional>true</optional>
<exclusions>
<!-- Exclude the in-development timeline service and
add it as an optional runtime dependency
-->
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-timelineservice</artifactId>
</exclusion>
<!-- exclude everything that comes in via the shaded runtime and api TODO remove once we have a filter for "is in these artifacts" -->
<!-- Skip jersey, since we need it again here. -->
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs-client</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-app</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-core</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-jobclient</artifactId>
</exclusion>
<!-- exclude things that came in via transitive in shaded runtime and api -->
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
</exclusion>
<exclusion>
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.avro</groupId>
<artifactId>avro</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.curator</groupId>
<artifactId>curator-client</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-common</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-common</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
<exclusion>
<groupId>org.fusesource.leveldbjni</groupId>
<artifactId>leveldbjni-all</artifactId>
</exclusion>
<exclusion>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-util</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</exclusion>
<exclusion>
<groupId>commons-collections</groupId>
<artifactId>commons-collections</artifactId>
</exclusion>
<exclusion>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
</exclusion>
<exclusion>
<groupId>commons-lang</groupId>
<artifactId>commons-lang</artifactId>
</exclusion>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
<exclusion>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-annotations</artifactId>
</exclusion>
<exclusion>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</exclusion>
<exclusion>
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>
</exclusion>
<exclusion>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
</exclusion>
<exclusion>
<groupId>xmlenc</groupId>
<artifactId>xmlenc</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- Add optional runtime dependency on the in-development timeline server module
to indicate that downstream folks interested in turning it on need that dep.
-->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-timelineservice</artifactId>
<scope>runtime</scope>
<optional>true</optional>
<exclusions>
<exclusion>
<groupId>*</groupId>
<artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- Add back in transitive dependencies of hadoop-minicluster that are test-jar artifacts excluded as a side effect of excluding the jar
Note that all of these must be marked "optional" because they won't be removed from the reduced-dependencies pom after they're included.
-->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<scope>compile</scope>
<type>test-jar</type>
<optional>true</optional>
<exclusions>
<exclusion>
<groupId>*</groupId>
<artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<scope>compile</scope>
<type>test-jar</type>
<optional>true</optional>
<exclusions>
<exclusion>
<groupId>*</groupId>
<artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-jobclient</artifactId>
<scope>compile</scope>
<type>test-jar</type>
<optional>true</optional>
<exclusions>
<exclusion>
<groupId>*</groupId>
<artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- Add back in the transitive dependencies excluded from hadoop-common in client TODO remove once we have a filter for "is in these artifacts" -->
<!-- skip javax.servlet:servlet-api because it's in client -->
<!-- Skip commons-logging:commons-logging-api because it looks like nothing actually included it -->
<!-- Skip jetty-util because it's in client -->
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-core</artifactId>
<optional>true</optional>
<exclusions>
<exclusion>
<groupId>javax.ws.rs</groupId>
<artifactId>jsr311-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-client</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-json</artifactId>
<optional>true</optional>
<exclusions>
<exclusion>
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-jaxrs</artifactId>
</exclusion>
<exclusion>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-xc</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-servlet</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.eclipse.jdt</groupId>
<artifactId>core</artifactId>
<optional>true</optional>
</dependency>
<!-- skip org.apache.avro:avro-ipc because it doesn't look like hadoop-common actually uses it -->
<dependency>
<groupId>net.sf.kosmosfs</groupId>
<artifactId>kfs</artifactId>
<optional>true</optional>
</dependency>
<dependency>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
<optional>true</optional>
<exclusions>
<exclusion>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</exclusion>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpclient</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.httpcomponents</groupId>
<artifactId>httpcore</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.jcraft</groupId>
<artifactId>jsch</artifactId>
<optional>true</optional>
</dependency>
<!-- add back in transitive dependencies of hadoop-mapreduce-client-app removed in client -->
<!-- Skipping javax.servlet:servlet-api because it's in client -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-nodemanager</artifactId>
<optional>true</optional>
<exclusions>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-common</artifactId>
</exclusion>
<exclusion>
<groupId>org.fusesource.leveldbjni</groupId>
<artifactId>leveldbjni-all</artifactId>
</exclusion>
<exclusion>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-util</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</exclusion>
<exclusion>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
</exclusion>
<exclusion>
<groupId>commons-lang</groupId>
<artifactId>commons-lang</artifactId>
</exclusion>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</exclusion>
<exclusion>
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-web-proxy</artifactId>
<optional>true</optional>
<exclusions>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-common</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-yarn-server-common</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</exclusion>
<exclusion>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
</exclusion>
<exclusion>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
</exclusion>
<exclusion>
<groupId>org.eclipse.jetty</groupId>
<artifactId>jetty-util</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- skipping hadoop-annotations -->
<dependency>
<groupId>com.google.inject.extensions</groupId>
<artifactId>guice-servlet</artifactId>
<optional>true</optional>
</dependency>
<!-- skipping junit:junit because it is test scope -->
<!-- skipping avro because it is in client via hadoop-common -->
<!-- skipping jline:jline because it is only present at test scope in the original -->
<!-- skipping io.netty:netty because it's in client -->
<!-- add back in transitive dependencies of hadoop-yarn-api removed in client -->
<!-- skipping hadoop-annotations -->
<dependency>
<groupId>com.google.inject</groupId>
<artifactId>guice</artifactId>
<optional>true</optional>
<exclusions>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.sun.jersey.jersey-test-framework</groupId>
<artifactId>jersey-test-framework-grizzly2</artifactId>
<optional>true</optional>
<exclusions>
<!-- excluding because client already has the tomcat version -->
<exclusion>
<groupId>org.glassfish</groupId>
<artifactId>javax.servlet</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- skipping jersey-server because it's above -->
<dependency>
<groupId>com.sun.jersey.contribs</groupId>
<artifactId>jersey-guice</artifactId>
<optional>true</optional>
</dependency>
<!-- skipping guice-servlet because it's above -->
<!-- skipping avro because it is in client via hadoop-common -->
<!-- skipping jersey-core because it's above -->
<!-- skipping jersey-json because it's above. -->
<!-- skipping io.netty:netty because it's in client -->
<!-- Add back in transitive dependencies from hadoop-mapreduce-client-core that were excluded by client -->
<!-- skipping junit:junit because it is test scope -->
<!-- skipping guice because it's above -->
<!-- skipping jersey-test-framework-grizzly2 because it's above -->
<!-- skipping jersey-server because it's above -->
<!-- skipping jersey-guice because it's above -->
<!-- skipping avro because it is in client via hadoop-common -->
<!-- skipping hadoop-annotations -->
<!-- skipping guice-servlet because it's above -->
<!-- skipping jersey-json because it's above. -->
<!-- skipping io.netty:netty because it's in client -->
<!-- add back in transitive dependencies of hadoop-mapreduce-client-jobclient that were excluded from client -->
<!-- skipping junit:junit because it is test scope -->
<!-- skipping avro because it is in client via hadoop-common -->
<!-- skipping hadoop-annotations -->
<!-- skipping guice-servlet because it's above -->
<!-- skipping io.netty:netty because it's in client -->
</dependencies>
<profiles>
<profile>
<id>shade</id>
<activation>
<property><name>!skipShade</name></property>
</activation>
<build>
<plugins>
<!-- We contain no source -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<configuration>
<skipSource>true</skipSource>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-maven-plugins</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
<configuration>
<artifactSet>
<excludes>
<!-- Fine to expose our already-shaded deps as dependencies -->
<exclude>org.apache.hadoop:hadoop-annotations</exclude>
<exclude>org.apache.hadoop:hadoop-client-api</exclude>
<exclude>org.apache.hadoop:hadoop-client-runtime</exclude>
<!-- Fine to expose our purposefully not-shaded deps as dependencies -->
<exclude>org.apache.htrace:htrace-core4</exclude>
<exclude>org.slf4j:slf4j-api</exclude>
<exclude>commons-logging:commons-logging</exclude>
<exclude>junit:junit</exclude>
<!-- Keep optional runtime deps out of the shading -->
<exclude>org.apache.hadoop:hadoop-yarn-server-timelineservice</exclude>
<exclude>log4j:log4j</exclude>
<!-- We need a filter that matches just those things that are included in the above artifacts -->
</excludes>
</artifactSet>
<filters>
<!-- Some of our dependencies include source, so remove it. -->
<filter>
<artifact>*:*</artifact>
<excludes>
<exclude>**/*.java</exclude>
</excludes>
</filter>
<!-- We pull in several test jars; keep out the actual test classes -->
<filter>
<artifact>*:*</artifact>
<excludes>
<exclude>**/Test*.class</exclude>
</excludes>
</filter>
<!-- Since runtime has classes for these jars, we exclude them.
We still want the java services api files, since those were excluded in runtime
-->
<filter>
<artifact>com.sun.jersey:jersey-client</artifact>
<excludes>
<exclude>**/*.class</exclude>
</excludes>
</filter>
<filter>
<artifact>com.sun.jersey:jersey-core</artifact>
<excludes>
<exclude>**/*.class</exclude>
</excludes>
</filter>
<filter>
<artifact>com.sun.jersey:jersey-servlet</artifact>
<excludes>
<exclude>**/*.class</exclude>
</excludes>
</filter>
</filters>
<relocations>
<relocation>
<pattern>org/</pattern>
<shadedPattern>${shaded.dependency.prefix}.org.</shadedPattern>
<excludes>
<exclude>org/apache/hadoop/*</exclude>
<exclude>org/apache/hadoop/**/*</exclude>
<!-- Our non-shaded htrace and logging libraries -->
<exclude>org/apache/htrace/*</exclude>
<exclude>org/apache/htrace/**/*</exclude>
<exclude>org/slf4j/*</exclude>
<exclude>org/slf4j/**/*</exclude>
<exclude>org/apache/commons/logging/*</exclude>
<exclude>org/apache/commons/logging/**/*</exclude>
<exclude>org/apache/log4j/*</exclude>
<exclude>org/apache/log4j/**/*</exclude>
<exclude>**/pom.xml</exclude>
<!-- Our non-shaded JUnit library -->
<exclude>org/junit/*</exclude>
<exclude>org/junit/**/*</exclude>
<!-- Not the org/ packages that are a part of the jdk -->
<exclude>org/ietf/jgss/*</exclude>
<exclude>org/omg/**/*</exclude>
<exclude>org/w3c/dom/*</exclude>
<exclude>org/w3c/dom/**/*</exclude>
<exclude>org/xml/sax/*</exclude>
<exclude>org/xml/sax/**/*</exclude>
</excludes>
</relocation>
<relocation>
<pattern>com/</pattern>
<shadedPattern>${shaded.dependency.prefix}.com.</shadedPattern>
<excludes>
<exclude>**/pom.xml</exclude>
<!-- Not the com/ packages that are a part of particular jdk implementations -->
<exclude>com/sun/tools/*</exclude>
<exclude>com/sun/javadoc/*</exclude>
<exclude>com/sun/security/*</exclude>
<exclude>com/sun/jndi/*</exclude>
<exclude>com/sun/management/*</exclude>
<exclude>com/sun/tools/**/*</exclude>
<exclude>com/sun/javadoc/**/*</exclude>
<exclude>com/sun/security/**/*</exclude>
<exclude>com/sun/jndi/**/*</exclude>
<exclude>com/sun/management/**/*</exclude>
</excludes>
</relocation>
<relocation>
<pattern>io/</pattern>
<shadedPattern>${shaded.dependency.prefix}.io.</shadedPattern>
<excludes>
<exclude>**/pom.xml</exclude>
<!-- Exclude config keys for Hadoop that look like package names -->
<exclude>io/compression/*</exclude>
<exclude>io/compression/**/*</exclude>
<exclude>io/mapfile/*</exclude>
<exclude>io/mapfile/**/*</exclude>
<exclude>io/map/index/*</exclude>
<exclude>io/seqfile/*</exclude>
<exclude>io/seqfile/**/*</exclude>
<exclude>io/file/buffer/size</exclude>
<exclude>io/skip/checksum/errors</exclude>
<exclude>io/sort/*</exclude>
<exclude>io/serializations</exclude>
</excludes>
</relocation>
<relocation>
<pattern>javax/el/</pattern>
<shadedPattern>${shaded.dependency.prefix}.javax.el.</shadedPattern>
<excludes>
<exclude>**/pom.xml</exclude>
</excludes>
</relocation>
<relocation>
<pattern>javax/inject/</pattern>
<shadedPattern>${shaded.dependency.prefix}.javax.inject.</shadedPattern>
<excludes>
<exclude>**/pom.xml</exclude>
</excludes>
</relocation>
<relocation>
<pattern>javax/servlet/</pattern>
<shadedPattern>${shaded.dependency.prefix}.javax.servlet.</shadedPattern>
<excludes>
<exclude>**/pom.xml</exclude>
</excludes>
</relocation>
<relocation>
<pattern>net/</pattern>
<shadedPattern>${shaded.dependency.prefix}.net.</shadedPattern>
<excludes>
<exclude>**/pom.xml</exclude>
<!-- Exclude config keys for Hadoop that look like package names -->
<exclude>net/topology/*</exclude>
<exclude>net/topology/**/*</exclude>
</excludes>
</relocation>
</relocations>
<transformers>
<!-- Needed until MSHADE-182 -->
<transformer implementation="org.apache.hadoop.maven.plugin.shade.resource.ServicesResourceTransformer"/>
<transformer implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer"/>
<transformer implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
<resources>
<resource>LICENSE</resource>
<resource>LICENSE.txt</resource>
<resource>NOTICE</resource>
<resource>NOTICE.txt</resource>
<resource>Grizzly_THIRDPARTYLICENSEREADME.txt</resource>
<resource>LICENSE.dom-documentation.txt</resource>
<resource>LICENSE.dom-software.txt</resource>
<resource>LICENSE.sax.txt</resource>
</resources>
</transformer>
<transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
<resource>META-INF/LICENSE.txt</resource>
<file>${basedir}/../../LICENSE.txt</file>
</transformer>
<transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
<resource>META-INF/NOTICE.txt</resource>
<file>${basedir}/../../NOTICE.txt</file>
</transformer>
</transformers>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>license-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>noshade</id>
<activation>
<property><name>skipShade</name></property>
</activation>
<build>
<plugins>
<!-- We contain no source -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<configuration>
<skipSource>true</skipSource>
</configuration>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>license-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>
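
A quick, hedged way to spot-check the filters above after a build: the Test*.class filter should keep test classes out of the published jar while the java services entries survive. The module path and jar name below are assumptions based on the version used in this tree; adjust them to your own build output.

$ cd hadoop-client-modules/hadoop-client-minicluster/target
$ jar tf hadoop-client-minicluster-3.0.0-alpha3-SNAPSHOT.jar | grep 'META-INF/services' | head
$ jar tf hadoop-client-minicluster-3.0.0-alpha3-SNAPSHOT.jar | grep '/Test[^/]*\.class$'   # expect no output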

View File

@ -0,0 +1,359 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>3.0.0-alpha3-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<artifactId>hadoop-client-runtime</artifactId>
<version>3.0.0-alpha3-SNAPSHOT</version>
<packaging>jar</packaging>
<description>Apache Hadoop Client</description>
<name>Apache Hadoop Client Runtime</name>
<properties>
<shaded.dependency.prefix>org.apache.hadoop.shaded</shaded.dependency.prefix>
<!-- We contain no source -->
<maven.javadoc.skip>true</maven.javadoc.skip>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<!-- We list this as optional because as a type-pom it won't get included in the shading.
Marking it optional means it doesn't count as a transitive dependency of this artifact.
-->
<optional>true</optional>
<exclusions>
<!-- these APIs are a part of the SE JDK -->
<exclusion>
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
</exclusion>
<exclusion>
<groupId>xml-apis</groupId>
<artifactId>xml-apis</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- At runtime anyone using us must have the api present -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client-api</artifactId>
<scope>runtime</scope>
</dependency>
<!-- This comes from our parent pom. If we don't expressly change it here to get included,
downstream will get warnings at compile time. -->
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-annotations</artifactId>
<scope>compile</scope>
<exclusions>
<exclusion>
<groupId>jdk.tools</groupId>
<artifactId>jdk.tools</artifactId>
</exclusion>
</exclusions>
</dependency>
<!-- Since hadoop-client is listed as optional, we have to list transitive
dependencies that we still want to show up.
* HTrace
* Slf4j API
* commons-logging
-->
<dependency>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core4</artifactId>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<scope>runtime</scope>
</dependency>
<!-- Move log4j to optional, since it is needed for some pieces folks might not use:
* one of the three custom log4j appenders we have
* JobConf (?!) (so essentially any user of MapReduce)
-->
<dependency>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
<scope>runtime</scope>
<optional>true</optional>
</dependency>
</dependencies>
<profiles>
<profile>
<id>shade</id>
<activation>
<property><name>!skipShade</name></property>
</activation>
<build>
<plugins>
<!-- We contain no source -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<configuration>
<skipSource>true</skipSource>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-shade-plugin</artifactId>
<dependencies>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-maven-plugins</artifactId>
<version>${project.version}</version>
</dependency>
</dependencies>
<executions>
<execution>
<phase>package</phase>
<goals>
<goal>shade</goal>
</goals>
<configuration>
<artifactSet>
<excludes>
<!-- We need a filter that matches just those things that are included in the api jar -->
<exclude>org.apache.hadoop:hadoop-client-api</exclude>
<!-- Leave HTrace as an unshaded dependency on purpose, since a static class member is used to trace within a given JVM instance -->
<exclude>org.apache.htrace:htrace-core4</exclude>
<!-- Leave slf4j unshaded so downstream users can configure logging. -->
<exclude>org.slf4j:slf4j-api</exclude>
<!-- Leave commons-logging unshaded so downstream users can configure logging. -->
<exclude>commons-logging:commons-logging</exclude>
<!-- Leave log4j unshaded so downstream users can configure logging. -->
<exclude>log4j:log4j</exclude>
</excludes>
</artifactSet>
<filters>
<!-- We need a filter that matches just those things that are included in the api jar -->
<filter>
<artifact>org.apache.hadoop:*</artifact>
<excludes>
<exclude>**/*</exclude>
<exclude>*</exclude>
</excludes>
</filter>
<!-- Some of our dependencies include source, so remove it. -->
<filter>
<artifact>*:*</artifact>
<excludes>
<exclude>**/*.java</exclude>
</excludes>
</filter>
<!-- We only want one copy of the Localizer class. So long as we keep jasper compiler and runtime on the same version, which one doesn't matter -->
<filter>
<artifact>tomcat:jasper-compiler</artifact>
<excludes>
<exclude>org/apache/jasper/compiler/Localizer.class</exclude>
</excludes>
</filter>
<!-- We only have xerces as a dependency for XML output for the fsimage edits, we don't need anything specific to it for javax xml support -->
<filter>
<artifact>xerces:xercesImpl</artifact>
<excludes>
<exclude>META-INF/services/*</exclude>
</excludes>
</filter>
<!-- We rely on jersey for our web interfaces. We want to use its java services stuff only internal to jersey -->
<filter>
<artifact>com.sun.jersey:*</artifact>
<excludes>
<exclude>META-INF/services/javax.*</exclude>
</excludes>
</filter>
</filters>
<relocations>
<relocation>
<pattern>org/</pattern>
<shadedPattern>${shaded.dependency.prefix}.org.</shadedPattern>
<excludes>
<exclude>org/apache/hadoop/*</exclude>
<exclude>org/apache/hadoop/**/*</exclude>
<!-- Our non-shaded htrace and logging libraries -->
<exclude>org/apache/htrace/*</exclude>
<exclude>org/apache/htrace/**/*</exclude>
<exclude>org/slf4j/*</exclude>
<exclude>org/slf4j/**/*</exclude>
<exclude>org/apache/commons/logging/*</exclude>
<exclude>org/apache/commons/logging/**/*</exclude>
<exclude>org/apache/log4j/*</exclude>
<exclude>org/apache/log4j/**/*</exclude>
<exclude>**/pom.xml</exclude>
<!-- Not the org/ packages that are a part of the jdk -->
<exclude>org/ietf/jgss/*</exclude>
<exclude>org/omg/**/*</exclude>
<exclude>org/w3c/dom/*</exclude>
<exclude>org/w3c/dom/**/*</exclude>
<exclude>org/xml/sax/*</exclude>
<exclude>org/xml/sax/**/*</exclude>
</excludes>
</relocation>
<relocation>
<pattern>com/</pattern>
<shadedPattern>${shaded.dependency.prefix}.com.</shadedPattern>
<excludes>
<exclude>**/pom.xml</exclude>
<!-- Not the com/ packages that are a part of particular jdk implementations -->
<exclude>com/sun/tools/*</exclude>
<exclude>com/sun/javadoc/*</exclude>
<exclude>com/sun/security/*</exclude>
<exclude>com/sun/jndi/*</exclude>
<exclude>com/sun/management/*</exclude>
<exclude>com/sun/tools/**/*</exclude>
<exclude>com/sun/javadoc/**/*</exclude>
<exclude>com/sun/security/**/*</exclude>
<exclude>com/sun/jndi/**/*</exclude>
<exclude>com/sun/management/**/*</exclude>
</excludes>
</relocation>
<relocation>
<pattern>io/</pattern>
<shadedPattern>${shaded.dependency.prefix}.io.</shadedPattern>
<excludes>
<exclude>**/pom.xml</exclude>
<!-- Exclude config keys for Hadoop that look like package names -->
<exclude>io/compression/*</exclude>
<exclude>io/compression/**/*</exclude>
<exclude>io/mapfile/*</exclude>
<exclude>io/mapfile/**/*</exclude>
<exclude>io/map/index/*</exclude>
<exclude>io/seqfile/*</exclude>
<exclude>io/seqfile/**/*</exclude>
<exclude>io/file/buffer/size</exclude>
<exclude>io/skip/checksum/errors</exclude>
<exclude>io/sort/*</exclude>
<exclude>io/serializations</exclude>
</excludes>
</relocation>
<relocation>
<pattern>javax/el/</pattern>
<shadedPattern>${shaded.dependency.prefix}.javax.el.</shadedPattern>
<excludes>
<exclude>**/pom.xml</exclude>
</excludes>
</relocation>
<relocation>
<pattern>javax/servlet/</pattern>
<shadedPattern>${shaded.dependency.prefix}.javax.servlet.</shadedPattern>
<excludes>
<exclude>**/pom.xml</exclude>
</excludes>
</relocation>
<relocation>
<pattern>net/</pattern>
<shadedPattern>${shaded.dependency.prefix}.net.</shadedPattern>
<excludes>
<exclude>**/pom.xml</exclude>
<!-- Exclude config keys for Hadoop that look like package names -->
<exclude>net/topology/*</exclude>
<exclude>net/topology/**/*</exclude>
</excludes>
</relocation>
<!-- probably not. -->
<!--
<relocation>
<pattern>javax/</pattern>
<shadedPattern>${shaded.dependency.prefix}.javax.</shadedPattern>
<excludes>
<exclude>**/pom.xml</exclude>
</excludes>
</relocation>
-->
</relocations>
<transformers>
<!-- Needed until MSHADE-182 -->
<transformer implementation="org.apache.hadoop.maven.plugin.shade.resource.ServicesResourceTransformer"/>
<transformer implementation="org.apache.maven.plugins.shade.resource.ApacheLicenseResourceTransformer"/>
<transformer implementation="org.apache.maven.plugins.shade.resource.DontIncludeResourceTransformer">
<resources>
<resource>NOTICE.txt</resource>
<resource>NOTICE</resource>
<resource>LICENSE</resource>
</resources>
</transformer>
<transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
<resource>META-INF/LICENSE.txt</resource>
<file>${basedir}/../../LICENSE.txt</file>
</transformer>
<transformer implementation="org.apache.maven.plugins.shade.resource.IncludeResourceTransformer">
<resource>META-INF/NOTICE.txt</resource>
<file>${basedir}/../../NOTICE.txt</file>
</transformer>
<transformer implementation="org.apache.maven.plugins.shade.resource.XmlAppendingTransformer">
<resource>META-INF/jboss-beans.xml</resource>
<!-- Add this to enable loading of DTDs
<ignoreDtd>false</ignoreDtd>
-->
</transformer>
<transformer implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
<resource>META-INF/mailcap.default</resource>
</transformer>
<transformer implementation="org.apache.maven.plugins.shade.resource.AppendingTransformer">
<resource>META-INF/mimetypes.default</resource>
</transformer>
</transformers>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>license-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
</profile>
<profile>
<id>noshade</id>
<activation>
<property><name>skipShade</name></property>
</activation>
<build>
<plugins>
<!-- We contain no source -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>
<configuration>
<skipSource>true</skipSource>
</configuration>
</plugin>
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>license-maven-plugin</artifactId>
</plugin>
</plugins>
</build>
</profile>
</profiles>
</project>
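
As a rough sanity check of the relocation rules above, the third-party classes bundled into the runtime jar should end up under the org.apache.hadoop.shaded prefix configured in this pom. The path and jar name below are assumptions; point them at your actual build output.

$ cd hadoop-client-modules/hadoop-client-runtime/target
$ jar tf hadoop-client-runtime-3.0.0-alpha3-SNAPSHOT.jar | grep '^org/apache/hadoop/shaded/' | head
$ jar tf hadoop-client-runtime-3.0.0-alpha3-SNAPSHOT.jar | grep -c '^org/apache/hadoop/shaded/'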

View File

@ -18,16 +18,14 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project-dist</artifactId>
<version>3.0.0-alpha2-SNAPSHOT</version>
<relativePath>../hadoop-project-dist</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<version>3.0.0-alpha3-SNAPSHOT</version>
<relativePath>../../hadoop-project-dist</relativePath>
</parent>
<artifactId>hadoop-client</artifactId>
<version>3.0.0-alpha2-SNAPSHOT</version>
<packaging>jar</packaging>
<version>3.0.0-alpha3-SNAPSHOT</version>
<description>Apache Hadoop Client</description>
<name>Apache Hadoop Client</name>
<description>Apache Hadoop Client aggregation pom with dependencies exposed</description>
<name>Apache Hadoop Client Aggregator</name>
<properties>
<hadoop.component>client</hadoop.component>
@ -87,10 +85,6 @@
<groupId>net.sf.kosmosfs</groupId>
<artifactId>kfs</artifactId>
</exclusion>
<exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
</exclusion>
<exclusion>
<groupId>com.jcraft</groupId>
<artifactId>jsch</artifactId>
@ -99,6 +93,11 @@
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
</exclusion>
<!-- No slf4j backends for downstream clients -->
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
</exclusions>
</dependency>
@ -171,6 +170,11 @@
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
<!-- No slf4j backends for downstream clients -->
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.zookeeper</groupId>
<artifactId>zookeeper</artifactId>
@ -271,6 +275,11 @@
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
<!-- No slf4j backends for downstream clients -->
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
</exclusions>
</dependency>
@ -299,6 +308,11 @@
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
<!-- No slf4j backends for downstream clients -->
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
</exclusions>
</dependency>
@ -313,7 +327,6 @@
</exclusion>
</exclusions>
</dependency>
</dependencies>
</project>
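
The slf4j-log4j12 exclusions added above are meant to keep logging backends out of downstream builds. One hedged way to verify that, run from a hypothetical downstream project that depends on hadoop-client, is to ask Maven for any matching entries in the dependency tree; the output should show no slf4j-log4j12 hits.

$ mvn -q dependency:tree -Dincludes=org.slf4j:slf4j-log4j12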

View File

@ -0,0 +1,45 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>3.0.0-alpha3-SNAPSHOT</version>
<relativePath>../hadoop-project</relativePath>
</parent>
<artifactId>hadoop-client-modules</artifactId>
<packaging>pom</packaging>
<description>multi-module for Apache Hadoop client artifacts</description>
<name>Apache Hadoop Client Modules</name>
<modules>
<!-- Left as an empty artifact w/dep for compat -->
<module>hadoop-client</module>
<!-- Should be used at compile scope for access to IA.Public classes -->
<module>hadoop-client-api</module>
<!-- Should be used at runtime scope for remaining classes necessary for hadoop-client-api to function -->
<module>hadoop-client-runtime</module>
<!-- Should be used at test scope by those that need access to a mini cluster that works with the above api and runtime -->
<module>hadoop-client-minicluster</module>
<!-- Checks invariants above -->
<module>hadoop-client-check-invariants</module>
<module>hadoop-client-check-test-invariants</module>
<!-- Attempt to use the created libraries -->
<module>hadoop-client-integration-tests</module>
</modules>
</project>

View File

@ -18,12 +18,11 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>3.0.0-alpha2-SNAPSHOT</version>
<version>3.0.0-alpha3-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-cloud-storage</artifactId>
<version>3.0.0-alpha2-SNAPSHOT</version>
<version>3.0.0-alpha3-SNAPSHOT</version>
<packaging>jar</packaging>
<description>Apache Hadoop Cloud Storage</description>
@ -94,10 +93,6 @@
<groupId>net.sf.kosmosfs</groupId>
<artifactId>kfs</artifactId>
</exclusion>
<exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
</exclusion>
<exclusion>
<groupId>com.jcraft</groupId>
<artifactId>jsch</artifactId>

View File

@ -20,12 +20,11 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>3.0.0-alpha2-SNAPSHOT</version>
<version>3.0.0-alpha3-SNAPSHOT</version>
<relativePath>../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-cloud-storage-project</artifactId>
<version>3.0.0-alpha2-SNAPSHOT</version>
<version>3.0.0-alpha3-SNAPSHOT</version>
<description>Apache Hadoop Cloud Storage Project</description>
<name>Apache Hadoop Cloud Storage Project</name>
<packaging>pom</packaging>

View File

@ -20,12 +20,11 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>3.0.0-alpha2-SNAPSHOT</version>
<version>3.0.0-alpha3-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-annotations</artifactId>
<version>3.0.0-alpha2-SNAPSHOT</version>
<version>3.0.0-alpha3-SNAPSHOT</version>
<description>Apache Hadoop Annotations</description>
<name>Apache Hadoop Annotations</name>
<packaging>jar</packaging>

View File

@ -20,12 +20,11 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>3.0.0-alpha2-SNAPSHOT</version>
<version>3.0.0-alpha3-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-auth-examples</artifactId>
<version>3.0.0-alpha2-SNAPSHOT</version>
<version>3.0.0-alpha3-SNAPSHOT</version>
<packaging>war</packaging>
<name>Apache Hadoop Auth Examples</name>

View File

@ -20,12 +20,11 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project</artifactId>
<version>3.0.0-alpha2-SNAPSHOT</version>
<version>3.0.0-alpha3-SNAPSHOT</version>
<relativePath>../../hadoop-project</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-auth</artifactId>
<version>3.0.0-alpha2-SNAPSHOT</version>
<version>3.0.0-alpha3-SNAPSHOT</version>
<packaging>jar</packaging>
<name>Apache Hadoop Auth</name>

View File

@ -142,7 +142,7 @@ public class AuthenticationFilter implements Filter {
private String cookieDomain;
private String cookiePath;
private boolean isCookiePersistent;
private boolean isInitializedByTomcat;
private boolean destroySecretProvider;
/**
* <p>Initializes the authentication filter and signer secret provider.</p>
@ -209,7 +209,7 @@ protected void initializeSecretProvider(FilterConfig filterConfig)
secretProvider = constructSecretProvider(
filterConfig.getServletContext(),
config, false);
isInitializedByTomcat = true;
destroySecretProvider = true;
} catch (Exception ex) {
throw new ServletException(ex);
}
@ -356,7 +356,7 @@ public void destroy() {
authHandler.destroy();
authHandler = null;
}
if (secretProvider != null && isInitializedByTomcat) {
if (secretProvider != null && destroySecretProvider) {
secretProvider.destroy();
secretProvider = null;
}

View File

@ -18,6 +18,7 @@
import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.security.authentication.util.KerberosName;
import org.apache.hadoop.security.authentication.util.KerberosUtil;
import org.ietf.jgss.GSSException;
import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSCredential;
import org.ietf.jgss.GSSManager;
@ -48,25 +49,32 @@
import java.util.Set;
import java.util.regex.Pattern;
import com.google.common.collect.HashMultimap;
import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
/**
* The {@link KerberosAuthenticationHandler} implements the Kerberos SPNEGO authentication mechanism for HTTP.
* The {@link KerberosAuthenticationHandler} implements the Kerberos SPNEGO
* authentication mechanism for HTTP.
* <p>
* The supported configuration properties are:
* <ul>
* <li>kerberos.principal: the Kerberos principal to used by the server. As stated by the Kerberos SPNEGO
* specification, it should be <code>HTTP/${HOSTNAME}@{REALM}</code>. The realm can be omitted from the
* principal as the JDK GSS libraries will use the realm name of the configured default realm.
* <li>kerberos.principal: the Kerberos principal to be used by the server. As
* stated by the Kerberos SPNEGO specification, it should be
* <code>HTTP/${HOSTNAME}@{REALM}</code>. The realm can be omitted from the
* principal as the JDK GSS libraries will use the realm name of the configured
* default realm.
* It does not have a default value.</li>
* <li>kerberos.keytab: the keytab file containing the credentials for the Kerberos principal.
* <li>kerberos.keytab: the keytab file containing the credentials for the
* Kerberos principal.
* It does not have a default value.</li>
* <li>kerberos.name.rules: kerberos names rules to resolve principal names, see
* <li>kerberos.name.rules: kerberos name rules to resolve principal names, see
* {@link KerberosName#setRules(String)}</li>
* </ul>
*/
public class KerberosAuthenticationHandler implements AuthenticationHandler {
private static Logger LOG = LoggerFactory.getLogger(KerberosAuthenticationHandler.class);
public static final Logger LOG = LoggerFactory.getLogger(
KerberosAuthenticationHandler.class);
/**
* Kerberos context configuration for the JDK GSS library.
@ -117,8 +125,8 @@ public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
return new AppConfigurationEntry[]{
new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
options),};
AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
options), };
}
}
@ -128,12 +136,14 @@ public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
public static final String TYPE = "kerberos";
/**
* Constant for the configuration property that indicates the kerberos principal.
* Constant for the configuration property that indicates the kerberos
* principal.
*/
public static final String PRINCIPAL = TYPE + ".principal";
/**
* Constant for the configuration property that indicates the keytab file path.
* Constant for the configuration property that indicates the keytab
* file path.
*/
public static final String KEYTAB = TYPE + ".keytab";
@ -148,6 +158,42 @@ public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
private GSSManager gssManager;
private Subject serverSubject = new Subject();
private List<LoginContext> loginContexts = new ArrayList<LoginContext>();
/**
* HADOOP-10158 added support for running HTTP with multiple SPNs,
* but an implicit requirement is that they must come from the SAME local realm.
*
* This is a regression for use cases where the HTTP service needs to run
* with an SPN from a foreign realm, which is not supported after HADOOP-10158.
*
* HADOOP-13565 brings back support of SPNs from foreign realms
* without dependency on specific Kerberos domain_realm mapping mechanism.
*
* There are several reasons for not using native Kerberos domain_realm
* mapping:
* 1. As commented in KerberosUtil#getDomainRealm(), JDK's
* domain_realm mapping routines are private to the security.krb5
* package. As a result, KerberosUtil#getDomainRealm() always returns the
* local realm.
*
* 2. Server krb5.conf is not the only place that contains the domain_realm
* mapping in real deployment. Based on MIT KDC document here:
* https://web.mit.edu/kerberos/krb5-1.13/doc/admin/realm_config.html, the
* Kerberos domain_realm mapping can be implemented in one of the three
* mechanisms:
* 1) Server host-based krb5.conf on HTTP server
* 2) KDC-based krb5.conf on KDC server
* 3) DNS-based with TXT record with _kerberos prefix to the hostname.
*
* We choose to maintain domain_realm mapping based on HTTP principals
* from keytab. The mapping is built at login time with HTTP principals
* keyed by server name and is used later to
* look up SPNs based on the server name from the request for authentication.
* The multimap implementation allows SPNs of the same server from
* different realms.
*
*/
private HashMultimap<String, String> serverPrincipalMap =
HashMultimap.create();
/**
* Creates a Kerberos SPNEGO authentication handler with the default
@ -170,7 +216,8 @@ public KerberosAuthenticationHandler(String type) {
/**
* Initializes the authentication handler instance.
* <p>
* It creates a Kerberos context using the principal and keytab specified in the configuration.
* It creates a Kerberos context using the principal and keytab specified in
* the configuration.
* <p>
* This method is invoked by the {@link AuthenticationFilter#init} method.
*
@ -225,15 +272,27 @@ public void init(Properties config) throws ServletException {
throw new AuthenticationException(le);
}
loginContexts.add(loginContext);
KerberosName kerbName = new KerberosName(spnegoPrincipal);
if (kerbName.getHostName() != null
&& kerbName.getServiceName() != null
&& kerbName.getServiceName().equals("HTTP")) {
boolean added = serverPrincipalMap.put(kerbName.getHostName(),
spnegoPrincipal);
LOG.info("Map server: {} to principal: [{}], added = {}",
kerbName.getHostName(), spnegoPrincipal, added);
} else {
LOG.warn("HTTP principal: [{}] is invalid for SPNEGO!",
spnegoPrincipal);
}
}
try {
gssManager = Subject.doAs(serverSubject, new PrivilegedExceptionAction<GSSManager>() {
@Override
public GSSManager run() throws Exception {
return GSSManager.getInstance();
}
});
gssManager = Subject.doAs(serverSubject,
new PrivilegedExceptionAction<GSSManager>() {
@Override
public GSSManager run() throws Exception {
return GSSManager.getInstance();
}
});
} catch (PrivilegedActionException ex) {
throw ex.getException();
}
@ -312,91 +371,84 @@ public boolean managementOperation(AuthenticationToken token,
}
/**
* It enforces the the Kerberos SPNEGO authentication sequence returning an {@link AuthenticationToken} only
* after the Kerberos SPNEGO sequence has completed successfully.
* It enforces the Kerberos SPNEGO authentication sequence, returning an
* {@link AuthenticationToken} only after the Kerberos SPNEGO sequence has
* completed successfully.
*
* @param request the HTTP client request.
* @param response the HTTP client response.
*
* @return an authentication token if the Kerberos SPNEGO sequence is complete and valid,
* <code>null</code> if it is in progress (in this case the handler handles the response to the client).
* @return an authentication token if the Kerberos SPNEGO sequence is complete
* and valid, <code>null</code> if it is in progress (in this case the handler
* handles the response to the client).
*
* @throws IOException thrown if an IO error occurred.
* @throws AuthenticationException thrown if Kerberos SPNEGO sequence failed.
*/
@Override
public AuthenticationToken authenticate(HttpServletRequest request, final HttpServletResponse response)
throws IOException, AuthenticationException {
public AuthenticationToken authenticate(HttpServletRequest request,
final HttpServletResponse response)
throws IOException, AuthenticationException {
AuthenticationToken token = null;
String authorization = request.getHeader(KerberosAuthenticator.AUTHORIZATION);
String authorization = request.getHeader(
KerberosAuthenticator.AUTHORIZATION);
if (authorization == null || !authorization.startsWith(KerberosAuthenticator.NEGOTIATE)) {
if (authorization == null
|| !authorization.startsWith(KerberosAuthenticator.NEGOTIATE)) {
response.setHeader(WWW_AUTHENTICATE, KerberosAuthenticator.NEGOTIATE);
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
if (authorization == null) {
LOG.trace("SPNEGO starting");
LOG.trace("SPNEGO starting for url: {}", request.getRequestURL());
} else {
LOG.warn("'" + KerberosAuthenticator.AUTHORIZATION + "' does not start with '" +
LOG.warn("'" + KerberosAuthenticator.AUTHORIZATION +
"' does not start with '" +
KerberosAuthenticator.NEGOTIATE + "' : {}", authorization);
}
} else {
authorization = authorization.substring(KerberosAuthenticator.NEGOTIATE.length()).trim();
authorization = authorization.substring(
KerberosAuthenticator.NEGOTIATE.length()).trim();
final Base64 base64 = new Base64(0);
final byte[] clientToken = base64.decode(authorization);
final String serverName = InetAddress.getByName(request.getServerName())
.getCanonicalHostName();
try {
token = Subject.doAs(serverSubject, new PrivilegedExceptionAction<AuthenticationToken>() {
@Override
public AuthenticationToken run() throws Exception {
AuthenticationToken token = null;
GSSContext gssContext = null;
GSSCredential gssCreds = null;
try {
gssCreds = gssManager.createCredential(
gssManager.createName(
KerberosUtil.getServicePrincipal("HTTP", serverName),
KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL")),
GSSCredential.INDEFINITE_LIFETIME,
new Oid[]{
KerberosUtil.getOidInstance("GSS_SPNEGO_MECH_OID"),
KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID")},
GSSCredential.ACCEPT_ONLY);
gssContext = gssManager.createContext(gssCreds);
byte[] serverToken = gssContext.acceptSecContext(clientToken, 0, clientToken.length);
if (serverToken != null && serverToken.length > 0) {
String authenticate = base64.encodeToString(serverToken);
response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE,
KerberosAuthenticator.NEGOTIATE + " " + authenticate);
token = Subject.doAs(serverSubject,
new PrivilegedExceptionAction<AuthenticationToken>() {
private Set<String> serverPrincipals =
serverPrincipalMap.get(serverName);
@Override
public AuthenticationToken run() throws Exception {
if (LOG.isTraceEnabled()) {
LOG.trace("SPNEGO with server principals: {} for {}",
serverPrincipals.toString(), serverName);
}
AuthenticationToken token = null;
Exception lastException = null;
for (String serverPrincipal : serverPrincipals) {
try {
token = runWithPrincipal(serverPrincipal, clientToken,
base64, response);
} catch (Exception ex) {
lastException = ex;
LOG.trace("Auth {} failed with {}", serverPrincipal, ex);
} finally {
if (token != null) {
LOG.trace("Auth {} successfully", serverPrincipal);
break;
}
}
}
if (token != null) {
return token;
} else {
throw new AuthenticationException(lastException);
}
}
if (!gssContext.isEstablished()) {
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
LOG.trace("SPNEGO in progress");
} else {
String clientPrincipal = gssContext.getSrcName().toString();
KerberosName kerberosName = new KerberosName(clientPrincipal);
String userName = kerberosName.getShortName();
token = new AuthenticationToken(userName, clientPrincipal, getType());
response.setStatus(HttpServletResponse.SC_OK);
LOG.trace("SPNEGO completed for principal [{}]", clientPrincipal);
}
} finally {
if (gssContext != null) {
gssContext.dispose();
}
if (gssCreds != null) {
gssCreds.dispose();
}
}
return token;
}
});
});
} catch (PrivilegedActionException ex) {
if (ex.getException() instanceof IOException) {
throw (IOException) ex.getException();
}
else {
} else {
throw new AuthenticationException(ex.getException());
}
}
@ -404,4 +456,52 @@ public AuthenticationToken run() throws Exception {
return token;
}
private AuthenticationToken runWithPrincipal(String serverPrincipal,
byte[] clientToken, Base64 base64, HttpServletResponse response) throws
IOException, AuthenticationException, ClassNotFoundException,
GSSException, IllegalAccessException, NoSuchFieldException {
GSSContext gssContext = null;
GSSCredential gssCreds = null;
AuthenticationToken token = null;
try {
LOG.trace("SPNEGO initiated with server principal [{}]", serverPrincipal);
gssCreds = this.gssManager.createCredential(
this.gssManager.createName(serverPrincipal,
KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL")),
GSSCredential.INDEFINITE_LIFETIME,
new Oid[]{
KerberosUtil.getOidInstance("GSS_SPNEGO_MECH_OID"),
KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID")},
GSSCredential.ACCEPT_ONLY);
gssContext = this.gssManager.createContext(gssCreds);
byte[] serverToken = gssContext.acceptSecContext(clientToken, 0,
clientToken.length);
if (serverToken != null && serverToken.length > 0) {
String authenticate = base64.encodeToString(serverToken);
response.setHeader(KerberosAuthenticator.WWW_AUTHENTICATE,
KerberosAuthenticator.NEGOTIATE + " " +
authenticate);
}
if (!gssContext.isEstablished()) {
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED);
LOG.trace("SPNEGO in progress");
} else {
String clientPrincipal = gssContext.getSrcName().toString();
KerberosName kerberosName = new KerberosName(clientPrincipal);
String userName = kerberosName.getShortName();
token = new AuthenticationToken(userName, clientPrincipal, getType());
response.setStatus(HttpServletResponse.SC_OK);
LOG.trace("SPNEGO completed for client principal [{}]",
clientPrincipal);
}
} finally {
if (gssContext != null) {
gssContext.dispose();
}
if (gssCreds != null) {
gssCreds.dispose();
}
}
return token;
}
}
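
To see the handshake this handler drives (a 401 with WWW-Authenticate: Negotiate, then a Negotiate token on the retry), a Kerberos-enabled client such as curl can be pointed at any SPNEGO-protected Hadoop HTTP endpoint. The principal, hostname, and URL below are placeholders, not values from this commit.

$ kinit someuser@EXAMPLE.COM
$ curl --negotiate -u : -v 'http://host.example.com:9870/webhdfs/v1/tmp?op=GETFILESTATUS'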

View File

@ -54,7 +54,7 @@ public class KerberosName {
* A pattern that matches a Kerberos name with at most 2 components.
*/
private static final Pattern nameParser =
Pattern.compile("([^/@]*)(/([^/@]*))?@([^/@]*)");
Pattern.compile("([^/@]+)(/([^/@]+))?(@([^/@]+))?");
/**
* A pattern that matches a string with out '$' and then a single
@ -109,7 +109,7 @@ public KerberosName(String name) {
} else {
serviceName = match.group(1);
hostName = match.group(3);
realm = match.group(4);
realm = match.group(5);
}
}

View File

@ -258,7 +258,7 @@ private synchronized void pushToZK(byte[] newSecret, byte[] currentSecret,
} catch (KeeperException.BadVersionException bve) {
LOG.debug("Unable to push to znode; another server already did it");
} catch (Exception ex) {
LOG.error("An unexpected exception occured pushing data to ZooKeeper",
LOG.error("An unexpected exception occurred pushing data to ZooKeeper",
ex);
}
}

View File

@ -82,6 +82,28 @@ public void testAntiPatterns() throws Exception {
checkTranslation("root/joe@FOO.COM", "root/joe@FOO.COM");
}
@Test
public void testParsing() throws Exception {
final String principalNameFull = "HTTP/abc.com@EXAMPLE.COM";
final String principalNameWoRealm = "HTTP/abc.com";
final String principalNameWoHost = "HTTP@EXAMPLE.COM";
final KerberosName kerbNameFull = new KerberosName(principalNameFull);
Assert.assertEquals("HTTP", kerbNameFull.getServiceName());
Assert.assertEquals("abc.com", kerbNameFull.getHostName());
Assert.assertEquals("EXAMPLE.COM", kerbNameFull.getRealm());
final KerberosName kerbNamewoRealm = new KerberosName(principalNameWoRealm);
Assert.assertEquals("HTTP", kerbNamewoRealm.getServiceName());
Assert.assertEquals("abc.com", kerbNamewoRealm.getHostName());
Assert.assertEquals(null, kerbNamewoRealm.getRealm());
final KerberosName kerbNameWoHost = new KerberosName(principalNameWoHost);
Assert.assertEquals("HTTP", kerbNameWoHost.getServiceName());
Assert.assertEquals(null, kerbNameWoHost.getHostName());
Assert.assertEquals("EXAMPLE.COM", kerbNameWoHost.getRealm());
}
@Test
public void testToLowerCase() throws Exception {
String rules =

View File

@ -410,4 +410,10 @@
<Field name="done"/>
<Bug pattern="JLM_JSR166_UTILCONCURRENT_MONITORENTER"/>
</Match>
<Match>
<Class name="org.apache.hadoop.metrics2.impl.MetricsConfig"/>
<Method name="toString"/>
<Bug pattern="DM_DEFAULT_ENCODING"/>
</Match>
</FindBugsFilter>

View File

@ -20,12 +20,11 @@
<parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-project-dist</artifactId>
<version>3.0.0-alpha2-SNAPSHOT</version>
<version>3.0.0-alpha3-SNAPSHOT</version>
<relativePath>../../hadoop-project-dist</relativePath>
</parent>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>3.0.0-alpha2-SNAPSHOT</version>
<version>3.0.0-alpha3-SNAPSHOT</version>
<description>Apache Hadoop Common</description>
<name>Apache Hadoop Common</name>
<packaging>jar</packaging>
@ -46,6 +45,11 @@
<artifactId>hadoop-annotations</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minikdc</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@ -157,11 +161,6 @@
<artifactId>log4j</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
@ -173,8 +172,13 @@
<scope>compile</scope>
</dependency>
<dependency>
<groupId>commons-configuration</groupId>
<artifactId>commons-configuration</artifactId>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-configuration2</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
@ -187,16 +191,6 @@
<artifactId>slf4j-log4j12</artifactId>
<scope>runtime</scope>
</dependency>
<dependency>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-core-asl</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.codehaus.jackson</groupId>
<artifactId>jackson-mapper-asl</artifactId>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-all</artifactId>
@ -320,6 +314,10 @@
<groupId>org.apache.kerby</groupId>
<artifactId>kerb-simplekdc</artifactId>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.core</groupId>
<artifactId>jackson-databind</artifactId>
</dependency>
</dependencies>
<build>
@ -525,6 +523,7 @@
<exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.h</exclude>
<exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc.c</exclude>
<exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4hc_encoder.h</exclude>
<exclude>src/main/native/gtest/**/*</exclude>
<exclude>src/test/resources/test-untar.tgz</exclude>
<exclude>src/test/resources/test.har/_SUCCESS</exclude>
<exclude>src/test/resources/test.har/_index</exclude>
@ -596,6 +595,10 @@
<snappy.lib></snappy.lib>
<snappy.include></snappy.include>
<require.snappy>false</require.snappy>
<zstd.prefix></zstd.prefix>
<zstd.lib></zstd.lib>
<zstd.include></zstd.include>
<require.zstd>false</require.zstd>
<openssl.prefix></openssl.prefix>
<openssl.lib></openssl.lib>
<openssl.include></openssl.include>
@ -653,6 +656,8 @@
<javahClassName>org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMapping</javahClassName>
<javahClassName>org.apache.hadoop.io.compress.snappy.SnappyCompressor</javahClassName>
<javahClassName>org.apache.hadoop.io.compress.snappy.SnappyDecompressor</javahClassName>
<javahClassName>org.apache.hadoop.io.compress.zstd.ZStandardCompressor</javahClassName>
<javahClassName>org.apache.hadoop.io.compress.zstd.ZStandardDecompressor</javahClassName>
<javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Compressor</javahClassName>
<javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Decompressor</javahClassName>
<javahClassName>org.apache.hadoop.io.erasurecode.ErasureCodeNative</javahClassName>
@ -686,9 +691,13 @@
<JVM_ARCH_DATA_MODEL>${sun.arch.data.model}</JVM_ARCH_DATA_MODEL>
<REQUIRE_BZIP2>${require.bzip2}</REQUIRE_BZIP2>
<REQUIRE_SNAPPY>${require.snappy}</REQUIRE_SNAPPY>
<REQUIRE_ZSTD>${require.zstd}</REQUIRE_ZSTD>
<CUSTOM_SNAPPY_PREFIX>${snappy.prefix}</CUSTOM_SNAPPY_PREFIX>
<CUSTOM_SNAPPY_LIB>${snappy.lib} </CUSTOM_SNAPPY_LIB>
<CUSTOM_SNAPPY_INCLUDE>${snappy.include} </CUSTOM_SNAPPY_INCLUDE>
<CUSTOM_ZSTD_PREFIX>${zstd.prefix}</CUSTOM_ZSTD_PREFIX>
<CUSTOM_ZSTD_LIB>${zstd.lib} </CUSTOM_ZSTD_LIB>
<CUSTOM_ZSTD_INCLUDE>${zstd.include} </CUSTOM_ZSTD_INCLUDE>
<REQUIRE_ISAL>${require.isal} </REQUIRE_ISAL>
<CUSTOM_ISAL_PREFIX>${isal.prefix} </CUSTOM_ISAL_PREFIX>
<CUSTOM_ISAL_LIB>${isal.lib} </CUSTOM_ISAL_LIB>
@ -746,6 +755,11 @@
<isal.lib></isal.lib>
<require.snappy>false</require.snappy>
<bundle.snappy.in.bin>true</bundle.snappy.in.bin>
<zstd.prefix></zstd.prefix>
<zstd.lib></zstd.lib>
<zstd.include></zstd.include>
<require.zstd>false</require.zstd>
<bundle.zstd.in.bin>true</bundle.zstd.in.bin>
<openssl.prefix></openssl.prefix>
<openssl.lib></openssl.lib>
<openssl.include></openssl.include>
@ -795,6 +809,8 @@
<javahClassName>org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMapping</javahClassName>
<javahClassName>org.apache.hadoop.io.compress.snappy.SnappyCompressor</javahClassName>
<javahClassName>org.apache.hadoop.io.compress.snappy.SnappyDecompressor</javahClassName>
<javahClassName>org.apache.hadoop.io.compress.zstd.ZStandardCompressor</javahClassName>
<javahClassName>org.apache.hadoop.io.compress.zstd.ZStandardDecompressor</javahClassName>
<javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Compressor</javahClassName>
<javahClassName>org.apache.hadoop.io.compress.lz4.Lz4Decompressor</javahClassName>
<javahClassName>org.apache.hadoop.io.erasurecode.ErasureCodeNative</javahClassName>
@ -851,6 +867,10 @@
<argument>/p:CustomSnappyLib=${snappy.lib}</argument>
<argument>/p:CustomSnappyInclude=${snappy.include}</argument>
<argument>/p:RequireSnappy=${require.snappy}</argument>
<argument>/p:CustomZstdPrefix=${zstd.prefix}</argument>
<argument>/p:CustomZstdLib=${zstd.lib}</argument>
<argument>/p:CustomZstdInclude=${zstd.include}</argument>
<argument>/p:RequireZstd=${require.zstd}</argument>
<argument>/p:CustomOpensslPrefix=${openssl.prefix}</argument>
<argument>/p:CustomOpensslLib=${openssl.lib}</argument>
<argument>/p:CustomOpensslInclude=${openssl.include}</argument>

View File

@ -94,6 +94,33 @@ else()
endif()
endif()
# Require zstandard
SET(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
hadoop_set_find_shared_library_version("1")
find_library(ZSTD_LIBRARY
NAMES zstd
PATHS ${CUSTOM_ZSTD_PREFIX} ${CUSTOM_ZSTD_PREFIX}/lib
${CUSTOM_ZSTD_PREFIX}/lib64 ${CUSTOM_ZSTD_LIB})
SET(CMAKE_FIND_LIBRARY_SUFFIXES ${STORED_CMAKE_FIND_LIBRARY_SUFFIXES})
find_path(ZSTD_INCLUDE_DIR
NAMES zstd.h
PATHS ${CUSTOM_ZSTD_PREFIX} ${CUSTOM_ZSTD_PREFIX}/include
${CUSTOM_ZSTD_INCLUDE})
if (ZSTD_LIBRARY AND ZSTD_INCLUDE_DIR)
GET_FILENAME_COMPONENT(HADOOP_ZSTD_LIBRARY ${ZSTD_LIBRARY} NAME)
set(ZSTD_SOURCE_FILES
"${SRC}/io/compress/zstd/ZStandardCompressor.c"
"${SRC}/io/compress/zstd/ZStandardDecompressor.c")
set(REQUIRE_ZSTD ${REQUIRE_ZSTD}) # Stop warning about unused variable.
message(STATUS "Found ZStandard: ${ZSTD_LIBRARY}")
else ()
set(ZSTD_INCLUDE_DIR "")
set(ZSTD_SOURCE_FILES "")
IF(REQUIRE_ZSTD)
MESSAGE(FATAL_ERROR "Required zstandard library could not be found. ZSTD_LIBRARY=${ZSTD_LIBRARY}, ZSTD_INCLUDE_DIR=${ZSTD_INCLUDE_DIR}, CUSTOM_ZSTD_INCLUDE_DIR=${CUSTOM_ZSTD_INCLUDE_DIR}, CUSTOM_ZSTD_PREFIX=${CUSTOM_ZSTD_PREFIX}, CUSTOM_ZSTD_INCLUDE=${CUSTOM_ZSTD_INCLUDE}")
ENDIF(REQUIRE_ZSTD)
endif ()
set(STORED_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
hadoop_set_find_shared_library_version("2")
find_library(ISAL_LIBRARY
@ -208,6 +235,7 @@ include_directories(
${BZIP2_INCLUDE_DIR}
${SNAPPY_INCLUDE_DIR}
${ISAL_INCLUDE_DIR}
${ZSTD_INCLUDE_DIR}
${OPENSSL_INCLUDE_DIR}
${SRC}/util
)
@ -222,6 +250,7 @@ hadoop_add_dual_library(hadoop
${SRC}/io/compress/lz4/lz4hc.c
${ISAL_SOURCE_FILES}
${SNAPPY_SOURCE_FILES}
${ZSTD_SOURCE_FILES}
${OPENSSL_SOURCE_FILES}
${SRC}/io/compress/zlib/ZlibCompressor.c
${SRC}/io/compress/zlib/ZlibDecompressor.c

View File

@ -21,6 +21,7 @@
#cmakedefine HADOOP_ZLIB_LIBRARY "@HADOOP_ZLIB_LIBRARY@"
#cmakedefine HADOOP_BZIP2_LIBRARY "@HADOOP_BZIP2_LIBRARY@"
#cmakedefine HADOOP_SNAPPY_LIBRARY "@HADOOP_SNAPPY_LIBRARY@"
#cmakedefine HADOOP_ZSTD_LIBRARY "@HADOOP_ZSTD_LIBRARY@"
#cmakedefine HADOOP_OPENSSL_LIBRARY "@HADOOP_OPENSSL_LIBRARY@"
#cmakedefine HADOOP_ISAL_LIBRARY "@HADOOP_ISAL_LIBRARY@"
#cmakedefine HAVE_SYNC_FILE_RANGE

View File

@ -183,13 +183,24 @@ else
exit 1
fi
# now that we have support code, let's abs MYNAME so we can use it later
MYNAME=$(hadoop_abs "${MYNAME}")
if [[ $# = 0 ]]; then
hadoop_exit_with_usage 1
fi
HADOOP_SUBCMD=$1
shift
if hadoop_need_reexec hadoop "${HADOOP_SUBCMD}"; then
hadoop_uservar_su hadoop "${HADOOP_SUBCMD}" \
"${MYNAME}" \
"--reexec" \
"${HADOOP_USER_PARAMS[@]}"
exit $?
fi
hadoop_verify_user "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"
HADOOP_SUBCMD_ARGS=("$@")

View File

@ -41,6 +41,42 @@ function hadoop_debug
fi
}
## @description Given a filename or dir, return the absolute version of it
## @description This works as an alternative to readlink, which isn't
## @description portable.
## @audience public
## @stability stable
## @param fsobj
## @replaceable no
## @return 0 success
## @return 1 failure
## @return stdout abspath
function hadoop_abs
{
declare obj=$1
declare dir
declare fn
declare dirret
if [[ ! -e ${obj} ]]; then
return 1
elif [[ -d ${obj} ]]; then
dir=${obj}
else
dir=$(dirname -- "${obj}")
fn=$(basename -- "${obj}")
fn="/${fn}"
fi
dir=$(cd -P -- "${dir}" >/dev/null 2>/dev/null && pwd -P)
dirret=$?
if [[ ${dirret} = 0 ]]; then
echo "${dir}${fn}"
return 0
fi
return 1
}
## @description Given variable $1 delete $2 from it
## @audience public
## @stability stable
@ -79,6 +115,101 @@ function hadoop_verify_entry
[[ ${!1} =~ \ ${2}\ ]]
}
## @description Check if we are running with privilege
## @description by default, this implementation looks for
## @description EUID=0. For OSes that have true privilege
## @description separation, this should be something more complex
## @audience private
## @stability evolving
## @replaceable yes
## @return 1 = no priv
## @return 0 = priv
function hadoop_privilege_check
{
[[ "${EUID}" = 0 ]]
}
## @description When running as root, execute the given command
## @description via su as the given user if that user exists;
## @description exit with failure if the user does not exist.
## @description Otherwise just run the command. (This is intended
## @description to be used by the start-*/stop-* scripts.)
## @audience private
## @stability evolving
## @replaceable yes
## @param user
## @param commandstring
## @return exitstatus
function hadoop_su
{
declare user=$1
shift
declare idret
if hadoop_privilege_check; then
id -u "${user}" >/dev/null 2>&1
idret=$?
if [[ ${idret} != 0 ]]; then
hadoop_error "ERROR: Refusing to run as root: ${user} account is not found. Aborting."
return 1
else
su -l "${user}" -- "$@"
fi
else
"$@"
fi
}
## @description Execute a command via su when running as root
## @description with extra support for commands that might
## @description legitimately start as root (e.g., datanode)
## @description (This is intended to
## @description be used by the start-*/stop-* scripts.)
## @audience private
## @stability evolving
## @replaceable no
## @param user
## @param commandstring
## @return exitstatus
function hadoop_uservar_su
{
## startup matrix:
#
# if $EUID != 0, then exec
# if $EUID = 0 then
# if hdfs_subcmd_user is defined, call hadoop_su to exec
# if hdfs_subcmd_user is not defined, error
#
# For secure daemons, this means both the secure and insecure env vars need to be
# defined. e.g., HDFS_DATANODE_USER=root HDFS_DATANODE_SECURE_USER=hdfs
# This function will pick up the "normal" var, switch to that user, then
# execute the command which will then pick up the "secure" version.
#
declare program=$1
declare command=$2
shift 2
declare uprogram
declare ucommand
declare uvar
if hadoop_privilege_check; then
uvar=$(hadoop_get_verify_uservar "${program}" "${command}")
if [[ -n "${!uvar}" ]]; then
hadoop_su "${!uvar}" "$@"
else
hadoop_error "ERROR: Attempting to launch ${program} ${command} as root"
hadoop_error "ERROR: but there is no ${uvar} defined. Aborting launch."
return 1
fi
else
"$@"
fi
}
## @description Add a subcommand to the usage output
## @audience private
## @stability evolving
@ -262,6 +393,39 @@ function hadoop_deprecate_envvar
fi
}
## @description Declare that `var` is being used and print its value.
## @audience public
## @stability stable
## @replaceable yes
## @param var
function hadoop_using_envvar
{
local var=$1
local val=${!var}
if [[ -n "${val}" ]]; then
hadoop_debug "${var} = ${val}"
fi
}
## @description Create the directory 'dir'.
## @audience public
## @stability stable
## @replaceable yes
## @param dir
function hadoop_mkdir
{
local dir=$1
if [[ ! -w "${dir}" ]] && [[ ! -d "${dir}" ]]; then
hadoop_error "WARNING: ${dir} does not exist. Creating."
if ! mkdir -p "${dir}"; then
hadoop_error "ERROR: Unable to create ${dir}. Aborting."
exit 1
fi
fi
}
## @description Bootstraps the Hadoop shell environment
## @audience private
## @stability evolving
@ -310,6 +474,9 @@ function hadoop_bootstrap
# daemonization
HADOOP_SUBCMD_SUPPORTDAEMONIZATION=false
# by default, we have not been self-re-execed
HADOOP_REEXECED_CMD=false
# shellcheck disable=SC2034
HADOOP_SUBCMD_SECURESERVICE=false
@ -591,9 +758,10 @@ function hadoop_basic_init
fi
# if for some reason the shell doesn't have $USER defined
# (e.g., ssh'd in to execute a command)
# let's get the effective username and use that
USER=${USER:-$(id -nu)}
HADOOP_IDENT_STRING=${HADOOP_IDENT_STRING:-$USER}
HADOOP_LOG_DIR=${HADOOP_LOG_DIR:-"${HADOOP_HOME}/logs"}
HADOOP_LOGFILE=${HADOOP_LOGFILE:-hadoop.log}
HADOOP_LOGLEVEL=${HADOOP_LOGLEVEL:-INFO}
@ -1367,8 +1535,7 @@ function hadoop_verify_secure_prereq
# and you are using pfexec, you'll probably want to change
# this.
# ${EUID} comes from the shell itself!
if [[ "${EUID}" -ne 0 ]] && [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
if ! hadoop_privilege_check && [[ -z "${HADOOP_SECURE_COMMAND}" ]]; then
hadoop_error "ERROR: You must be a privileged user in order to run a secure service."
exit 1
else
@ -1396,14 +1563,7 @@ function hadoop_verify_piddir
hadoop_error "No pid directory defined."
exit 1
fi
if [[ ! -w "${HADOOP_PID_DIR}" ]] && [[ ! -d "${HADOOP_PID_DIR}" ]]; then
hadoop_error "WARNING: ${HADOOP_PID_DIR} does not exist. Creating."
mkdir -p "${HADOOP_PID_DIR}" > /dev/null 2>&1
if [[ $? -gt 0 ]]; then
hadoop_error "ERROR: Unable to create ${HADOOP_PID_DIR}. Aborting."
exit 1
fi
fi
hadoop_mkdir "${HADOOP_PID_DIR}"
touch "${HADOOP_PID_DIR}/$$" >/dev/null 2>&1
if [[ $? -gt 0 ]]; then
hadoop_error "ERROR: Unable to write in ${HADOOP_PID_DIR}. Aborting."
@ -1421,14 +1581,7 @@ function hadoop_verify_logdir
hadoop_error "No log directory defined."
exit 1
fi
if [[ ! -w "${HADOOP_LOG_DIR}" ]] && [[ ! -d "${HADOOP_LOG_DIR}" ]]; then
hadoop_error "WARNING: ${HADOOP_LOG_DIR} does not exist. Creating."
mkdir -p "${HADOOP_LOG_DIR}" > /dev/null 2>&1
if [[ $? -gt 0 ]]; then
hadoop_error "ERROR: Unable to create ${HADOOP_LOG_DIR}. Aborting."
exit 1
fi
fi
hadoop_mkdir "${HADOOP_LOG_DIR}"
touch "${HADOOP_LOG_DIR}/$$" >/dev/null 2>&1
if [[ $? -gt 0 ]]; then
hadoop_error "ERROR: Unable to write in ${HADOOP_LOG_DIR}. Aborting."
@ -1975,20 +2128,18 @@ function hadoop_secure_daemon_handler
esac
}
## @description Get the environment variable used to validate users
## @audience public
## @stability stable
## @replaceable yes
## @param subcommand
## @return string
function hadoop_get_verify_uservar
{
declare program=$1
declare command=$2
declare uprogram
declare ucommand
declare uvar
if [[ -z "${BASH_VERSINFO[0]}" ]] \
|| [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then
@ -1999,7 +2150,25 @@ function hadoop_verify_user
ucommand=${command^^}
fi
uvar="${uprogram}_${ucommand}_USER"
echo "${uprogram}_${ucommand}_USER"
}
## @description Verify that ${USER} is allowed to execute the
## @description given subcommand.
## @audience public
## @stability stable
## @replaceable yes
## @param command
## @param subcommand
## @return return 0 on success
## @return exit 1 on failure
function hadoop_verify_user
{
declare program=$1
declare command=$2
declare uvar
uvar=$(hadoop_get_verify_uservar "${program}" "${command}")
if [[ -n ${!uvar} ]]; then
if [[ ${!uvar} != "${USER}" ]]; then
@ -2007,6 +2176,42 @@ function hadoop_verify_user
exit 1
fi
fi
return 0
}
## @description Verify that ${USER} is allowed to execute the
## @description given subcommand.
## @audience public
## @stability stable
## @replaceable yes
## @param subcommand
## @return 1 on no re-exec needed
## @return 0 on need to re-exec
function hadoop_need_reexec
{
declare program=$1
declare command=$2
declare uvar
# we've already been re-execed, bail
if [[ "${HADOOP_REEXECED_CMD}" = true ]]; then
return 1
fi
# if we have privilege, and the _USER is defined, and _USER is
# set to someone who isn't us, then yes, we should re-exec.
# otherwise no, don't re-exec and let the system deal with it.
if hadoop_privilege_check; then
uvar=$(hadoop_get_verify_uservar "${program}" "${command}")
if [[ -n ${!uvar} ]]; then
if [[ ${!uvar} != "${USER}" ]]; then
return 0
fi
fi
fi
return 1
}
## @description Add custom (program)_(command)_OPTS to HADOOP_OPTS.
@ -2209,6 +2414,15 @@ function hadoop_parse_args
shift
((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
;;
--reexec)
shift
if [[ "${HADOOP_REEXECED_CMD}" = true ]]; then
hadoop_error "ERROR: re-exec fork bomb prevention: --reexec already called"
exit 1
fi
HADOOP_REEXECED_CMD=true
((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
;;
--workers)
shift
# shellcheck disable=SC2034

View File

@ -15,10 +15,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
echo "This script is deprecated. Use start-dfs.sh and start-yarn.sh instead."
exit 1
## @description catch the ctrl-c
## @audience private
## @stability evolving
## @replaceable no
function hadoop_abort_startall()
{
exit 1
}
# let's locate libexec...
if [[ -n "${HADOOP_HOME}" ]]; then
@ -38,6 +42,16 @@ else
echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/hadoop-config.sh." 2>&1
exit 1
fi
if ! hadoop_privilege_check; then
trap hadoop_abort_startall INT
hadoop_error "WARNING: Attempting to start all Apache Hadoop daemons as ${USER} in 10 seconds."
hadoop_error "WARNING: This is not a recommended production deployment configuration."
hadoop_error "WARNING: Use CTRL-C to abort."
sleep 10
trap - INT
fi
# start hdfs daemons if hdfs is present
if [[ -f "${HADOOP_HDFS_HOME}/sbin/start-dfs.sh" ]]; then
"${HADOOP_HDFS_HOME}/sbin/start-dfs.sh" --config "${HADOOP_CONF_DIR}"
@ -49,4 +63,3 @@ if [[ -f "${HADOOP_YARN_HOME}/sbin/start-yarn.sh" ]]; then
fi

View File

@ -15,12 +15,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
## @description catch the ctrl-c
## @audience private
## @stability evolving
## @replaceable no
function hadoop_abort_stopall()
{
exit 1
}
# Stop all hadoop daemons. Run this on master node.
echo "This script is deprecated. Use stop-dfs.sh and stop-yarn.sh instead."
exit 1
# let's locate libexec...
if [[ -n "${HADOOP_HOME}" ]]; then
HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
@ -40,6 +45,14 @@ else
exit 1
fi
if ! hadoop_privilege_check; then
trap hadoop_abort_stopall INT
hadoop_error "WARNING: Stopping all Apache Hadoop daemons as ${USER} in 10 seconds."
hadoop_error "WARNING: Use CTRL-C to abort."
sleep 10
trap - INT
fi
# stop hdfs daemons if hdfs is present
if [[ -f "${HADOOP_HDFS_HOME}/sbin/stop-dfs.sh" ]]; then
"${HADOOP_HDFS_HOME}/sbin/stop-dfs.sh" --config "${HADOOP_CONF_DIR}"

View File

@ -18,6 +18,8 @@
package org.apache.hadoop.conf;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;
import com.google.common.annotations.VisibleForTesting;
import java.io.BufferedInputStream;
@ -91,8 +93,6 @@
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringInterner;
import org.apache.hadoop.util.StringUtils;
import org.w3c.dom.Attr;
import org.w3c.dom.DOMException;
import org.w3c.dom.Document;
@ -1887,6 +1887,18 @@ public String toString() {
return result.toString();
}
/**
* Get range start for the first integer range.
* @return range start.
*/
public int getRangeStart() {
if (ranges == null || ranges.isEmpty()) {
return -1;
}
Range r = ranges.get(0);
return r.start;
}
@Override
public Iterator<Integer> iterator() {
return new RangeNumberIterator(ranges);
@ -3028,7 +3040,7 @@ public static void dumpConfiguration(Configuration config,
propertyName + " not found");
} else {
JsonFactory dumpFactory = new JsonFactory();
JsonGenerator dumpGenerator = dumpFactory.createGenerator(out);
dumpGenerator.writeStartObject();
dumpGenerator.writeFieldName("property");
appendJSONProperty(dumpGenerator, config, propertyName);
@ -3066,7 +3078,7 @@ public static void dumpConfiguration(Configuration config,
public static void dumpConfiguration(Configuration config,
Writer out) throws IOException {
JsonFactory dumpFactory = new JsonFactory();
JsonGenerator dumpGenerator = dumpFactory.createGenerator(out);
dumpGenerator.writeStartObject();
dumpGenerator.writeFieldName("properties");
dumpGenerator.writeStartArray();

View File

@ -0,0 +1,113 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.conf;
import org.apache.hadoop.classification.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Logs access to {@link Configuration}.
* Sensitive data will be redacted.
*/
@InterfaceAudience.Private
public class ConfigurationWithLogging extends Configuration {
private static final Logger LOG =
LoggerFactory.getLogger(ConfigurationWithLogging.class);
private final Logger log;
private final ConfigRedactor redactor;
public ConfigurationWithLogging(Configuration conf) {
super(conf);
log = LOG;
redactor = new ConfigRedactor(conf);
}
/**
* @see Configuration#get(String).
*/
@Override
public String get(String name) {
String value = super.get(name);
log.info("Got {} = '{}'", name, redactor.redact(name, value));
return value;
}
/**
* @see Configuration#get(String, String).
*/
@Override
public String get(String name, String defaultValue) {
String value = super.get(name, defaultValue);
log.info("Got {} = '{}' (default '{}')", name,
redactor.redact(name, value), redactor.redact(name, defaultValue));
return value;
}
/**
* @see Configuration#getBoolean(String, boolean).
*/
@Override
public boolean getBoolean(String name, boolean defaultValue) {
boolean value = super.getBoolean(name, defaultValue);
log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
return value;
}
/**
* @see Configuration#getFloat(String, float).
*/
@Override
public float getFloat(String name, float defaultValue) {
float value = super.getFloat(name, defaultValue);
log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
return value;
}
/**
* @see Configuration#getInt(String, int).
*/
@Override
public int getInt(String name, int defaultValue) {
int value = super.getInt(name, defaultValue);
log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
return value;
}
/**
* @see Configuration#getLong(String, long).
*/
@Override
public long getLong(String name, long defaultValue) {
long value = super.getLong(name, defaultValue);
log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
return value;
}
/**
* @see Configuration#set(String, String, String).
*/
@Override
public void set(String name, String value, String source) {
log.info("Set {} to '{}'{}", name, redactor.redact(name, value),
source == null ? "" : " from " + source);
super.set(name, value, source);
}
}
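A minimal usage sketch (illustrative, not from the patch): wrapping an existing Configuration so that reads go through ConfigurationWithLogging and are logged with sensitive values redacted. The key names and values below are placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.ConfigurationWithLogging;

    public class ConfLoggingExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000"); // illustrative value
        // Every get() on the wrapper is logged, with sensitive values redacted.
        Configuration logged = new ConfigurationWithLogging(conf);
        String fsUri = logged.get("fs.defaultFS");
        boolean flag = logged.getBoolean("hadoop.security.authorization", false);
        System.out.println(fsUri + " " + flag);
      }
    }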

View File

@ -141,8 +141,7 @@ public void deleteKey(String name) throws IOException {
public KeyVersion rollNewVersion(String name, byte[] material)
throws IOException {
KeyVersion key = getKeyProvider().rollNewVersion(name, material);
invalidateCache(name);
return key;
}
@ -150,9 +149,18 @@ public KeyVersion rollNewVersion(String name, byte[] material)
public KeyVersion rollNewVersion(String name)
throws NoSuchAlgorithmException, IOException {
KeyVersion key = getKeyProvider().rollNewVersion(name);
invalidateCache(name);
return key;
}
@Override
public void invalidateCache(String name) throws IOException {
getKeyProvider().invalidateCache(name);
getExtension().currentKeyCache.invalidate(name);
getExtension().keyMetadataCache.invalidate(name);
// invalidating all key versions as we don't know
// which ones belonged to the deleted key
getExtension().keyVersionCache.invalidateAll();
}
@Override

View File

@ -36,6 +36,7 @@
import javax.crypto.spec.SecretKeySpec;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
@ -167,9 +168,9 @@ private void locateKeystore() throws IOException {
// rewrite the keystore in flush()
permissions = perm;
} catch (KeyStoreException e) {
throw new IOException("Can't create keystore", e);
throw new IOException("Can't create keystore: " + e, e);
} catch (GeneralSecurityException e) {
throw new IOException("Can't load keystore " + path, e);
throw new IOException("Can't load keystore " + path + " : " + e , e);
}
}
@ -190,9 +191,7 @@ private FsPermission tryLoadFromPath(Path path, Path backupPath)
try {
perm = loadFromPath(path, password);
// Remove _OLD if exists
fs.delete(backupPath, true);
LOG.debug("KeyStore loaded successfully !!");
} catch (IOException ioe) {
// If file is corrupted for some reason other than
@ -260,9 +259,7 @@ private FsPermission loadAndReturnPerm(Path pathToLoad, Path pathToDelete)
LOG.debug(String.format("KeyStore loaded successfully from '%s'!!",
pathToLoad));
}
fs.delete(pathToDelete, true);
} catch (IOException e) {
// Check for password issue : don't want to trash file due
// to wrong password
@ -539,13 +536,15 @@ public void flush() throws IOException {
return;
}
// Might exist if a backup has been restored etc.
try {
renameOrFail(newPath, new Path(newPath.toString()
+ "_ORPHANED_" + System.currentTimeMillis()));
} catch (FileNotFoundException ignored) {
}
try {
renameOrFail(oldPath, new Path(oldPath.toString()
+ "_ORPHANED_" + System.currentTimeMillis()));
} catch (FileNotFoundException ignored) {
}
// put all of the updates into the keystore
for(Map.Entry<String, Metadata> entry: cache.entrySet()) {
@ -601,9 +600,7 @@ private void cleanupNewAndOld(Path newPath, Path oldPath) throws IOException {
// Rename _NEW to CURRENT
renameOrFail(newPath, path);
// Delete _OLD
fs.delete(oldPath, true);
}
protected void writeToNew(Path newPath) throws IOException {
@ -623,12 +620,12 @@ protected void writeToNew(Path newPath) throws IOException {
protected boolean backupToOld(Path oldPath)
throws IOException {
try {
renameOrFail(path, oldPath);
return true;
} catch (FileNotFoundException e) {
return false;
}
}
private void revertFromOld(Path oldPath, boolean fileExisted)

View File

@ -33,6 +33,8 @@
import com.google.gson.stream.JsonReader;
import com.google.gson.stream.JsonWriter;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -86,6 +88,7 @@ public byte[] getMaterial() {
return material;
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder();
buf.append("key(");
@ -105,6 +108,31 @@ public String toString() {
}
return buf.toString();
}
@Override
public boolean equals(Object rhs) {
if (this == rhs) {
return true;
}
if (rhs == null || getClass() != rhs.getClass()) {
return false;
}
final KeyVersion kv = (KeyVersion) rhs;
return new EqualsBuilder().
append(name, kv.name).
append(versionName, kv.versionName).
append(material, kv.material).
isEquals();
}
@Override
public int hashCode() {
return new HashCodeBuilder().
append(name).
append(versionName).
append(material).
toHashCode();
}
}
/**
@ -565,6 +593,18 @@ public KeyVersion rollNewVersion(String name) throws NoSuchAlgorithmException,
return rollNewVersion(name, material);
}
/**
* Can be used by implementing classes to invalidate their caches. This could
* be used after rollNewVersion to provide a strong guarantee that subsequent
* reads return the new version of the given key.
*
* @param name the basename of the key
* @throws IOException
*/
public void invalidateCache(String name) throws IOException {
// NOP
}
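A hedged sketch of the contract described above: roll a key, then invalidate caches so the next read observes the new version. The provider URI and key name are placeholders; any KeyProvider implementation is assumed to behave the same way.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderFactory;

    public class RollAndInvalidate {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder KMS URI; a JavaKeyStoreProvider URI would work the same way.
        KeyProvider provider =
            KeyProviderFactory.get(new URI("kms://http@localhost:9600/kms"), conf);
        KeyProvider.KeyVersion rolled = provider.rollNewVersion("myKey");
        // Drop cached versions of this key (local caches and, for KMS, the server cache).
        provider.invalidateCache("myKey");
        KeyProvider.KeyVersion current = provider.getCurrentKey("myKey");
        System.out.println(rolled.getVersionName() + " -> " + current.getVersionName());
      }
    }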
/**
* Ensures that any changes to the keys are written to persistent store.
* @throws IOException

View File

@ -188,8 +188,8 @@ public void warmUpEncryptedKeys(String... keyNames)
public void drain(String keyName);
/**
* Generates a key material and encrypts it using the given key name.
* The generated key material is of the same
* length as the <code>KeyVersion</code> material of the latest key version
* of the key and is encrypted using the same cipher.
* <p/>
@ -210,7 +210,7 @@ public EncryptedKeyVersion generateEncryptedKey(
GeneralSecurityException;
/**
* Decrypts an encrypted byte[] key material using the given key version
* name and initialization vector.
*
* @param encryptedKeyVersion
@ -227,6 +227,26 @@ public EncryptedKeyVersion generateEncryptedKey(
public KeyVersion decryptEncryptedKey(
EncryptedKeyVersion encryptedKeyVersion) throws IOException,
GeneralSecurityException;
/**
* Re-encrypts an encrypted key version, using its initialization vector
* and key material, but with the latest key version name of its key name
* in the key provider.
* <p>
* If the latest key version name in the provider is the
* same as the one that encrypted the passed-in encrypted key version, the same
* encrypted key version is returned.
* <p>
* NOTE: The generated key is not stored by the <code>KeyProvider</code>
*
* @param ekv The EncryptedKeyVersion containing keyVersionName and IV.
* @return The re-encrypted EncryptedKeyVersion.
* @throws IOException If the key material could not be re-encrypted.
* @throws GeneralSecurityException If the key material could not be
* re-encrypted because of a cryptographic issue.
*/
EncryptedKeyVersion reencryptEncryptedKey(EncryptedKeyVersion ekv)
throws IOException, GeneralSecurityException;
}
private static class DefaultCryptoExtension implements CryptoExtension {
@ -258,24 +278,55 @@ public EncryptedKeyVersion generateEncryptedKey(String encryptionKeyName)
cc.generateSecureRandom(newKey);
final byte[] iv = new byte[cc.getCipherSuite().getAlgorithmBlockSize()];
cc.generateSecureRandom(iv);
Encryptor encryptor = cc.createEncryptor();
return generateEncryptedKey(encryptor, encryptionKey, newKey, iv);
}
private EncryptedKeyVersion generateEncryptedKey(final Encryptor encryptor,
final KeyVersion encryptionKey, final byte[] key, final byte[] iv)
throws IOException, GeneralSecurityException {
// Encryption key IV is derived from new key's IV
final byte[] encryptionIV = EncryptedKeyVersion.deriveIV(iv);
encryptor.init(encryptionKey.getMaterial(), encryptionIV);
final int keyLen = key.length;
ByteBuffer bbIn = ByteBuffer.allocateDirect(keyLen);
ByteBuffer bbOut = ByteBuffer.allocateDirect(keyLen);
bbIn.put(key);
bbIn.flip();
encryptor.encrypt(bbIn, bbOut);
bbOut.flip();
byte[] encryptedKey = new byte[keyLen];
bbOut.get(encryptedKey);
return new EncryptedKeyVersion(encryptionKey.getName(),
encryptionKey.getVersionName(), iv,
new KeyVersion(encryptionKey.getName(), EEK, encryptedKey));
}
@Override
public EncryptedKeyVersion reencryptEncryptedKey(EncryptedKeyVersion ekv)
throws IOException, GeneralSecurityException {
final String ekName = ekv.getEncryptionKeyName();
final KeyVersion ekNow = keyProvider.getCurrentKey(ekName);
Preconditions
.checkNotNull(ekNow, "KeyVersion name '%s' does not exist", ekName);
Preconditions.checkArgument(ekv.getEncryptedKeyVersion().getVersionName()
.equals(KeyProviderCryptoExtension.EEK),
"encryptedKey version name must be '%s', is '%s'",
KeyProviderCryptoExtension.EEK,
ekv.getEncryptedKeyVersion().getVersionName());
if (ekv.getEncryptedKeyVersion().equals(ekNow)) {
// no-op if same key version
return ekv;
}
final KeyVersion dek = decryptEncryptedKey(ekv);
final CryptoCodec cc = CryptoCodec.getInstance(keyProvider.getConf());
final Encryptor encryptor = cc.createEncryptor();
return generateEncryptedKey(encryptor, ekNow, dek.getMaterial(),
ekv.getEncryptedKeyIv());
}
@Override
public KeyVersion decryptEncryptedKey(
EncryptedKeyVersion encryptedKeyVersion) throws IOException,
@ -388,6 +439,28 @@ public KeyVersion decryptEncryptedKey(EncryptedKeyVersion encryptedKey)
return getExtension().decryptEncryptedKey(encryptedKey);
}
/**
* Re-encrypts an encrypted key version, using its initialization vector
* and key material, but with the latest key version name of its key name
* in the key provider.
* <p>
* If the latest key version name in the provider is the
* same as the one that encrypted the passed-in encrypted key version, the same
* encrypted key version is returned.
* <p>
* NOTE: The generated key is not stored by the <code>KeyProvider</code>
*
* @param ekv The EncryptedKeyVersion containing keyVersionName and IV.
* @return The re-encrypted EncryptedKeyVersion.
* @throws IOException If the key material could not be re-encrypted
* @throws GeneralSecurityException If the key material could not be
* re-encrypted because of a cryptographic issue.
*/
public EncryptedKeyVersion reencryptEncryptedKey(EncryptedKeyVersion ekv)
throws IOException, GeneralSecurityException {
return getExtension().reencryptEncryptedKey(ekv);
}
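A hedged sketch of how re-encryption composes with the rest of the extension API: generate an EDEK, roll the encryption key, then re-encrypt the EDEK so it is wrapped with the latest key version. The URI and key name are placeholders.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.crypto.key.KeyProvider;
    import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
    import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
    import org.apache.hadoop.crypto.key.KeyProviderFactory;

    public class ReencryptSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        KeyProvider base = KeyProviderFactory.get(
            new URI("kms://http@localhost:9600/kms"), conf); // placeholder URI
        KeyProviderCryptoExtension kp =
            KeyProviderCryptoExtension.createKeyProviderCryptoExtension(base);
        // EDEK wrapped with the current version of "ezKey" (placeholder name).
        EncryptedKeyVersion edek = kp.generateEncryptedKey("ezKey");
        kp.rollNewVersion("ezKey");
        kp.invalidateCache("ezKey");
        // Same key material and IV, now wrapped with the latest version of "ezKey".
        EncryptedKeyVersion fresh = kp.reencryptEncryptedKey(edek);
        System.out.println(fresh.getEncryptionKeyVersionName());
      }
    }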
/**
* Creates a <code>KeyProviderCryptoExtension</code> using a given
* {@link KeyProvider}.
@ -427,8 +500,9 @@ public static KeyProviderCryptoExtension createKeyProviderCryptoExtension(
@Override
public void close() throws IOException {
KeyProvider provider = getKeyProvider();
if (provider != null && provider != this) {
provider.close();
}
}

View File

@ -117,6 +117,11 @@ public KeyVersion rollNewVersion(String name, byte[] material)
return keyProvider.rollNewVersion(name, material);
}
@Override
public void invalidateCache(String name) throws IOException {
keyProvider.invalidateCache(name);
}
@Override
public void flush() throws IOException {
keyProvider.flush();

View File

@ -46,7 +46,8 @@ public class KeyShell extends CommandShell {
" [" + CreateCommand.USAGE + "]\n" +
" [" + RollCommand.USAGE + "]\n" +
" [" + DeleteCommand.USAGE + "]\n" +
" [" + ListCommand.USAGE + "]\n";
" [" + ListCommand.USAGE + "]\n" +
" [" + InvalidateCacheCommand.USAGE + "]\n";
private static final String LIST_METADATA = "keyShell.list.metadata";
@VisibleForTesting
public static final String NO_VALID_PROVIDERS =
@ -70,6 +71,7 @@ public class KeyShell extends CommandShell {
* % hadoop key roll keyName [-provider providerPath]
* % hadoop key list [-provider providerPath]
* % hadoop key delete keyName [-provider providerPath] [-i]
* % hadoop key invalidateCache keyName [-provider providerPath]
* </pre>
* @param args Command line arguments.
* @return 0 on success, 1 on failure.
@ -111,6 +113,15 @@ protected int init(String[] args) throws IOException {
}
} else if ("list".equals(args[i])) {
setSubCommand(new ListCommand());
} else if ("invalidateCache".equals(args[i])) {
String keyName = "-help";
if (moreTokens) {
keyName = args[++i];
}
setSubCommand(new InvalidateCacheCommand(keyName));
if ("-help".equals(keyName)) {
return 1;
}
} else if ("-size".equals(args[i]) && moreTokens) {
options.setBitLength(Integer.parseInt(args[++i]));
} else if ("-cipher".equals(args[i]) && moreTokens) {
@ -168,6 +179,9 @@ public String getCommandUsage() {
sbuf.append(DeleteCommand.USAGE + ":\n\n" + DeleteCommand.DESC + "\n");
sbuf.append(banner + "\n");
sbuf.append(ListCommand.USAGE + ":\n\n" + ListCommand.DESC + "\n");
sbuf.append(banner + "\n");
sbuf.append(InvalidateCacheCommand.USAGE + ":\n\n"
+ InvalidateCacheCommand.DESC + "\n");
return sbuf.toString();
}
@ -466,6 +480,57 @@ public String getUsage() {
}
}
private class InvalidateCacheCommand extends Command {
public static final String USAGE =
"invalidateCache <keyname> [-provider <provider>] [-help]";
public static final String DESC =
"The invalidateCache subcommand invalidates the cached key versions\n"
+ "of the specified key, on the provider indicated using the"
+ " -provider argument.\n";
private String keyName = null;
InvalidateCacheCommand(String keyName) {
this.keyName = keyName;
}
public boolean validate() {
boolean rc = true;
provider = getKeyProvider();
if (provider == null) {
getOut().println("Invalid provider.");
rc = false;
}
if (keyName == null) {
getOut().println("Please provide a <keyname>.\n" +
"See the usage description by using -help.");
rc = false;
}
return rc;
}
public void execute() throws NoSuchAlgorithmException, IOException {
try {
warnIfTransientProvider();
getOut().println("Invalidating cache on KeyProvider: "
+ provider + "\n for key name: " + keyName);
provider.invalidateCache(keyName);
getOut().println("Cached keyversions of " + keyName
+ " has been successfully invalidated.");
printProviderWritten();
} catch (IOException e) {
getOut().println("Cannot invalidate cache for key: " + keyName +
" within KeyProvider: " + provider + ". " + e.toString());
throw e;
}
}
@Override
public String getUsage() {
return USAGE + ":\n\n" + DESC;
}
}
/**
* main() entry point for the KeyShell. While strictly speaking the
* return is void, it will System.exit() with a return code: 0 is for

View File

@ -44,7 +44,6 @@
import org.apache.hadoop.util.HttpExceptionUtils;
import org.apache.hadoop.util.KMSUtil;
import org.apache.http.client.utils.URIBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -80,6 +79,7 @@
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
@ -146,7 +146,7 @@ public void fillQueueForKey(String keyName,
List response = call(conn, null,
HttpURLConnection.HTTP_OK, List.class);
List<EncryptedKeyVersion> ekvs =
parseJSONEncKeyVersions(keyName, response);
keyQueue.addAll(ekvs);
}
}
@ -173,14 +173,20 @@ public long renew(Token<?> token, Configuration conf) throws IOException {
LOG.debug("Renewing delegation token {}", token);
KeyProvider keyProvider = KMSUtil.createKeyProvider(conf,
KeyProviderFactory.KEY_PROVIDER_PATH);
try {
if (!(keyProvider instanceof
KeyProviderDelegationTokenExtension.DelegationTokenExtension)) {
LOG.warn("keyProvider {} cannot renew dt.", keyProvider == null ?
"null" : keyProvider.getClass());
return 0;
}
return ((KeyProviderDelegationTokenExtension.DelegationTokenExtension)
keyProvider).renewDelegationToken(token);
} finally {
if (keyProvider != null) {
keyProvider.close();
}
}
}
@Override
@ -188,14 +194,20 @@ public void cancel(Token<?> token, Configuration conf) throws IOException {
LOG.debug("Canceling delegation token {}", token);
KeyProvider keyProvider = KMSUtil.createKeyProvider(conf,
KeyProviderFactory.KEY_PROVIDER_PATH);
try {
if (!(keyProvider instanceof
KeyProviderDelegationTokenExtension.DelegationTokenExtension)) {
LOG.warn("keyProvider {} cannot cancel dt.", keyProvider == null ?
"null" : keyProvider.getClass());
return;
}
((KeyProviderDelegationTokenExtension.DelegationTokenExtension)
keyProvider).cancelDelegationToken(token);
} finally {
if (keyProvider != null) {
keyProvider.close();
}
}
}
}
@ -209,39 +221,43 @@ public KMSEncryptedKeyVersion(String keyName, String keyVersionName,
@SuppressWarnings("rawtypes")
private static List<EncryptedKeyVersion>
parseJSONEncKeyVersions(String keyName, List valueList) {
List<EncryptedKeyVersion> ekvs = new LinkedList<EncryptedKeyVersion>();
if (!valueList.isEmpty()) {
for (Object values : valueList) {
Map valueMap = (Map) values;
ekvs.add(parseJSONEncKeyVersion(keyName, valueMap));
}
}
return ekvs;
}
private static EncryptedKeyVersion parseJSONEncKeyVersion(String keyName,
Map valueMap) {
String versionName = checkNotNull(
(String) valueMap.get(KMSRESTConstants.VERSION_NAME_FIELD),
KMSRESTConstants.VERSION_NAME_FIELD);
byte[] iv = Base64.decodeBase64(checkNotNull(
(String) valueMap.get(KMSRESTConstants.IV_FIELD),
KMSRESTConstants.IV_FIELD));
Map encValueMap = checkNotNull((Map)
valueMap.get(KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD),
KMSRESTConstants.ENCRYPTED_KEY_VERSION_FIELD);
String encVersionName = checkNotNull((String)
encValueMap.get(KMSRESTConstants.VERSION_NAME_FIELD),
KMSRESTConstants.VERSION_NAME_FIELD);
byte[] encKeyMaterial = Base64.decodeBase64(checkNotNull((String)
encValueMap.get(KMSRESTConstants.MATERIAL_FIELD),
KMSRESTConstants.MATERIAL_FIELD));
return new KMSEncryptedKeyVersion(keyName, versionName, iv,
encVersionName, encKeyMaterial);
}
private static KeyVersion parseJSONKeyVersion(Map valueMap) {
KeyVersion keyVersion = null;
if (!valueMap.isEmpty()) {
@ -741,6 +757,17 @@ public KeyVersion createKey(String name, byte[] material, Options options)
}
}
@Override
public void invalidateCache(String name) throws IOException {
checkNotEmpty(name, "name");
final URL url = createURL(KMSRESTConstants.KEY_RESOURCE, name,
KMSRESTConstants.INVALIDATECACHE_RESOURCE, null);
final HttpURLConnection conn = createConnection(url, HTTP_POST);
// invalidate the server cache first, then drain local cache.
call(conn, null, HttpURLConnection.HTTP_OK, null);
drain(name);
}
private KeyVersion rollNewVersionInternal(String name, byte[] material)
throws NoSuchAlgorithmException, IOException {
checkNotEmpty(name, "name");
@ -755,7 +782,7 @@ private KeyVersion rollNewVersionInternal(String name, byte[] material)
Map response = call(conn, jsonMaterial,
HttpURLConnection.HTTP_OK, Map.class);
KeyVersion keyVersion = parseJSONKeyVersion(response);
invalidateCache(name);
return keyVersion;
}
@ -825,6 +852,35 @@ public KeyVersion decryptEncryptedKey(
return parseJSONKeyVersion(response);
}
@Override
public EncryptedKeyVersion reencryptEncryptedKey(EncryptedKeyVersion ekv)
throws IOException, GeneralSecurityException {
checkNotNull(ekv.getEncryptionKeyVersionName(), "versionName");
checkNotNull(ekv.getEncryptedKeyIv(), "iv");
checkNotNull(ekv.getEncryptedKeyVersion(), "encryptedKey");
Preconditions.checkArgument(ekv.getEncryptedKeyVersion().getVersionName()
.equals(KeyProviderCryptoExtension.EEK),
"encryptedKey version name must be '%s', is '%s'",
KeyProviderCryptoExtension.EEK,
ekv.getEncryptedKeyVersion().getVersionName());
final Map<String, String> params = new HashMap<>();
params.put(KMSRESTConstants.EEK_OP, KMSRESTConstants.EEK_REENCRYPT);
final Map<String, Object> jsonPayload = new HashMap<>();
jsonPayload.put(KMSRESTConstants.NAME_FIELD, ekv.getEncryptionKeyName());
jsonPayload.put(KMSRESTConstants.IV_FIELD,
Base64.encodeBase64String(ekv.getEncryptedKeyIv()));
jsonPayload.put(KMSRESTConstants.MATERIAL_FIELD,
Base64.encodeBase64String(ekv.getEncryptedKeyVersion().getMaterial()));
final URL url = createURL(KMSRESTConstants.KEY_VERSION_RESOURCE,
ekv.getEncryptionKeyVersionName(), KMSRESTConstants.EEK_SUB_RESOURCE,
params);
final HttpURLConnection conn = createConnection(url, HTTP_POST);
conn.setRequestProperty(CONTENT_TYPE, APPLICATION_JSON_MIME);
final Map response =
call(conn, jsonPayload, HttpURLConnection.HTTP_OK, Map.class);
return parseJSONEncKeyVersion(ekv.getEncryptionKeyName(), response);
}
@Override
public List<KeyVersion> getKeyVersions(String name) throws IOException {
checkNotEmpty(name, "name");
@ -1026,10 +1082,9 @@ private Text getDelegationTokenService() throws IOException {
return dtService;
}
private boolean containsKmsDt(UserGroupInformation ugi) throws IOException {
// Add existing credentials from the UGI, since provider is cached.
Credentials creds = ugi.getCredentials();
if (!creds.getAllTokens().isEmpty()) {
org.apache.hadoop.security.token.Token<? extends TokenIdentifier>
dToken = creds.getToken(getDelegationTokenService());
@ -1051,11 +1106,16 @@ private UserGroupInformation getActualUgi() throws IOException {
if (currentUgi.getRealUser() != null) {
// Use real user for proxy user
actualUgi = currentUgi.getRealUser();
}
if (UserGroupInformation.isSecurityEnabled() &&
!containsKmsDt(actualUgi) &&
!actualUgi.hasKerberosCredentials()) {
// Using the login user is only necessary when Kerberos is enabled
// but the actual user does not have either
// Kerberos credential or KMS delegation token for KMS operations
LOG.debug("Using loginUser when Kerberos is enabled but the actual user" +
" does not have either KMS Delegation Token or Kerberos Credentials");
actualUgi = UserGroupInformation.getLoginUser();
}
return actualUgi;
}
@ -1072,6 +1132,7 @@ public void close() throws IOException {
} finally {
if (sslFactory != null) {
sslFactory.destroy();
sslFactory = null;
}
}
}

View File

@ -36,12 +36,14 @@ public class KMSRESTConstants {
public static final String VERSIONS_SUB_RESOURCE = "_versions";
public static final String EEK_SUB_RESOURCE = "_eek";
public static final String CURRENT_VERSION_SUB_RESOURCE = "_currentversion";
public static final String INVALIDATECACHE_RESOURCE = "_invalidatecache";
public static final String KEY = "key";
public static final String EEK_OP = "eek_op";
public static final String EEK_GENERATE = "generate";
public static final String EEK_DECRYPT = "decrypt";
public static final String EEK_NUM_KEYS = "num_keys";
public static final String EEK_REENCRYPT = "reencrypt";
public static final String IV_FIELD = "iv";
public static final String NAME_FIELD = "name";

View File

@ -178,6 +178,14 @@ public void drain(String keyName) {
}
}
// This request is sent to all providers in the load-balancing group
@Override
public void invalidateCache(String keyName) throws IOException {
for (KMSClientProvider provider : providers) {
provider.invalidateCache(keyName);
}
}
@Override
public EncryptedKeyVersion
generateEncryptedKey(final String encryptionKeyName)
@ -218,6 +226,24 @@ public KeyVersion call(KMSClientProvider provider)
}
}
public EncryptedKeyVersion reencryptEncryptedKey(EncryptedKeyVersion ekv)
throws IOException, GeneralSecurityException {
try {
return doOp(new ProviderCallable<EncryptedKeyVersion>() {
@Override
public EncryptedKeyVersion call(KMSClientProvider provider)
throws IOException, GeneralSecurityException {
return provider.reencryptEncryptedKey(ekv);
}
}, nextIdx());
} catch (WrapperException we) {
if (we.getCause() instanceof GeneralSecurityException) {
throw (GeneralSecurityException) we.getCause();
}
throw new IOException(we.getCause());
}
}
@Override
public KeyVersion getKeyVersion(final String versionName) throws IOException {
return doOp(new ProviderCallable<KeyVersion>() {
@ -307,6 +333,7 @@ public KeyVersion call(KMSClientProvider provider) throws IOException,
throw new IOException(e.getCause());
}
}
@Override
public void deleteKey(final String name) throws IOException {
doOp(new ProviderCallable<Void>() {
@ -317,28 +344,33 @@ public Void call(KMSClientProvider provider) throws IOException {
}
}, nextIdx());
}
@Override
public KeyVersion rollNewVersion(final String name, final byte[] material)
throws IOException {
final KeyVersion newVersion = doOp(new ProviderCallable<KeyVersion>() {
@Override
public KeyVersion call(KMSClientProvider provider) throws IOException {
return provider.rollNewVersion(name, material);
}
}, nextIdx());
invalidateCache(name);
return newVersion;
}
@Override
public KeyVersion rollNewVersion(final String name)
throws NoSuchAlgorithmException, IOException {
try {
final KeyVersion newVersion = doOp(new ProviderCallable<KeyVersion>() {
@Override
public KeyVersion call(KMSClientProvider provider) throws IOException,
NoSuchAlgorithmException {
return provider.rollNewVersion(name);
}
}, nextIdx());
invalidateCache(name);
return newVersion;
} catch (WrapperException e) {
if (e.getCause() instanceof GeneralSecurityException) {
throw (NoSuchAlgorithmException) e.getCause();

View File

@ -18,8 +18,9 @@
package org.apache.hadoop.crypto.key.kms;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
@ -28,6 +29,9 @@
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import com.google.common.base.Preconditions;
import com.google.common.cache.CacheBuilder;
@ -67,8 +71,17 @@ public void fillQueueForKey(String keyName,
private static final String REFILL_THREAD =
ValueQueue.class.getName() + "_thread";
private static final int LOCK_ARRAY_SIZE = 16;
// Using a mask, assuming the array size is a power of 2 (or Integer.MAX_VALUE).
private static final int MASK = LOCK_ARRAY_SIZE == Integer.MAX_VALUE ?
LOCK_ARRAY_SIZE :
LOCK_ARRAY_SIZE - 1;
private final LoadingCache<String, LinkedBlockingQueue<E>> keyQueues;
// Striped rwlocks based on key name to synchronize the queue between
// the sync'ed rw-threads and the background async refill thread.
private final List<ReadWriteLock> lockArray =
new ArrayList<>(LOCK_ARRAY_SIZE);
private final ThreadPoolExecutor executor;
private final UniqueKeyBlockingQueue queue = new UniqueKeyBlockingQueue();
private final QueueRefiller<E> refiller;
@ -84,9 +97,47 @@ public void fillQueueForKey(String keyName,
*/
private abstract static class NamedRunnable implements Runnable {
final String name;
private AtomicBoolean canceled = new AtomicBoolean(false);
private NamedRunnable(String keyName) {
this.name = keyName;
}
public void cancel() {
canceled.set(true);
}
public boolean isCanceled() {
return canceled.get();
}
}
private void readLock(String keyName) {
getLock(keyName).readLock().lock();
}
private void readUnlock(String keyName) {
getLock(keyName).readLock().unlock();
}
private void writeUnlock(String keyName) {
getLock(keyName).writeLock().unlock();
}
private void writeLock(String keyName) {
getLock(keyName).writeLock().lock();
}
/**
* Get the striped lock given a key name.
*
* @param keyName The key name.
*/
private ReadWriteLock getLock(String keyName) {
return lockArray.get(indexFor(keyName));
}
private static int indexFor(String keyName) {
return keyName.hashCode() & MASK;
}
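The striping scheme above maps each key name onto one of 16 read/write locks through a power-of-two mask. Below is a self-contained sketch of the same idea, independent of ValueQueue; the class and method names are made up for illustration.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class StripedLocks {
      private static final int LOCK_ARRAY_SIZE = 16;        // must be a power of two
      private static final int MASK = LOCK_ARRAY_SIZE - 1;  // 0x0F
      private final List<ReadWriteLock> locks = new ArrayList<>(LOCK_ARRAY_SIZE);

      public StripedLocks() {
        for (int i = 0; i < LOCK_ARRAY_SIZE; i++) {
          locks.add(new ReentrantReadWriteLock());
        }
      }

      // hashCode() & MASK always yields an index in 0..15, even for negative hash codes.
      private ReadWriteLock lockFor(String keyName) {
        return locks.get(keyName.hashCode() & MASK);
      }

      public void withWriteLock(String keyName, Runnable action) {
        ReadWriteLock lock = lockFor(keyName);
        lock.writeLock().lock();
        try {
          action.run();
        } finally {
          lock.writeLock().unlock();
        }
      }
    }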
/**
@ -103,11 +154,12 @@ private static class UniqueKeyBlockingQueue extends
LinkedBlockingQueue<Runnable> {
private static final long serialVersionUID = -2152747693695890371L;
private HashMap<String, Runnable> keysInProgress = new HashMap<>();
@Override
public synchronized void put(Runnable e) throws InterruptedException {
if (!keysInProgress.containsKey(((NamedRunnable)e).name)) {
keysInProgress.put(((NamedRunnable)e).name, e);
super.put(e);
}
}
@ -131,6 +183,14 @@ public Runnable poll(long timeout, TimeUnit unit)
return k;
}
public Runnable deleteByName(String name) {
NamedRunnable e = (NamedRunnable) keysInProgress.remove(name);
if (e != null) {
e.cancel();
super.remove(e);
}
return e;
}
}
/**
@ -172,6 +232,9 @@ public ValueQueue(final int numValues, final float lowWatermark,
this.policy = policy;
this.numValues = numValues;
this.lowWatermark = lowWatermark;
for (int i = 0; i < LOCK_ARRAY_SIZE; ++i) {
lockArray.add(i, new ReentrantReadWriteLock());
}
keyQueues = CacheBuilder.newBuilder()
.expireAfterAccess(expiry, TimeUnit.MILLISECONDS)
.build(new CacheLoader<String, LinkedBlockingQueue<E>>() {
@ -233,9 +296,18 @@ public E getNext(String keyName)
*
* @param keyName the key to drain the Queue for
*/
public void drain(String keyName) {
try {
Runnable e;
while ((e = queue.deleteByName(keyName)) != null) {
executor.remove(e);
}
writeLock(keyName);
try {
keyQueues.get(keyName).clear();
} finally {
writeUnlock(keyName);
}
} catch (ExecutionException ex) {
//NOP
}
@ -247,14 +319,19 @@ public void drain(String keyName ) {
* @return int queue size
*/
public int getSize(String keyName) {
readLock(keyName);
try {
// We can't do keyQueues.get(keyName).size() here,
// since that will have the side effect of populating the cache.
Map<String, LinkedBlockingQueue<E>> map =
keyQueues.getAllPresent(Arrays.asList(keyName));
if (map.get(keyName) == null) {
return 0;
}
return map.get(keyName).size();
} finally {
readUnlock(keyName);
}
}
/**
@ -276,7 +353,9 @@ public List<E> getAtMost(String keyName, int num) throws IOException,
LinkedList<E> ekvs = new LinkedList<E>();
try {
for (int i = 0; i < num; i++) {
readLock(keyName);
E val = keyQueue.poll();
readUnlock(keyName);
// If queue is empty now, Based on the provided SyncGenerationPolicy,
// figure out how many new values need to be generated synchronously
if (val == null) {
@ -336,9 +415,17 @@ public void run() {
int threshold = (int) (lowWatermark * (float) cacheSize);
// Need to ensure that only one refill task per key is executed
try {
writeLock(keyName);
try {
if (keyQueue.size() < threshold && !isCanceled()) {
refiller.fillQueueForKey(name, keyQueue,
cacheSize - keyQueue.size());
}
if (isCanceled()) {
keyQueue.clear();
}
} finally {
writeUnlock(keyName);
}
} catch (final Exception e) {
throw new RuntimeException(e);

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.fs;
import java.io.IOException;
import java.io.Serializable;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -29,7 +30,9 @@
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class BlockLocation implements Serializable {
private static final long serialVersionUID = 0x22986f6d;
private String[] hosts; // Datanode hostnames
private String[] cachedHosts; // Datanode hostnames with a cached replica
private String[] names; // Datanode IP:xferPort for accessing the block
@ -303,4 +306,4 @@ public String toString() {
}
return result.toString();
}
}
}

View File

@ -605,6 +605,7 @@ boolean apply(Path p) throws IOException {
* Rename files/dirs
*/
@Override
@SuppressWarnings("deprecation")
public boolean rename(Path src, Path dst) throws IOException {
if (fs.isDirectory(src)) {
return fs.rename(src, dst);
@ -721,6 +722,7 @@ public void copyToLocalFile(boolean delSrc, Path src, Path dst)
* If src and dst are directories, the copyCrc parameter
* determines whether to copy CRC files.
*/
@SuppressWarnings("deprecation")
public void copyToLocalFile(Path src, Path dst, boolean copyCrc)
throws IOException {
if (!fs.isDirectory(src)) { // source is a file

View File

@ -141,6 +141,22 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
public static final int IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT =
256 * 1024;
/** ZStandard compression level. */
public static final String IO_COMPRESSION_CODEC_ZSTD_LEVEL_KEY =
"io.compression.codec.zstd.level";
/** Default value for IO_COMPRESSION_CODEC_ZSTD_LEVEL_KEY. */
public static final int IO_COMPRESSION_CODEC_ZSTD_LEVEL_DEFAULT = 3;
/** ZStandard buffer size. */
public static final String IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_KEY =
"io.compression.codec.zstd.buffersize";
/** ZStandard buffer size. A value of 0 means use the buffer size
 * recommended by the zstd library. */
public static final int
IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_DEFAULT = 0;
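A hedged sketch of setting these keys on a client Configuration; the values are only examples, and zstd support must have been compiled into the native library for them to have any effect.

    import org.apache.hadoop.conf.Configuration;

    public class ZstdConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Level 3 is the documented default; higher levels trade speed for ratio.
        conf.setInt("io.compression.codec.zstd.level", 3);
        // 0 means use the buffer size recommended by the zstd library.
        conf.setInt("io.compression.codec.zstd.buffersize", 0);
        System.out.println(conf.getInt("io.compression.codec.zstd.level", 3));
      }
    }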
/** Internal buffer size for Lz4 compressor/decompressors */
public static final String IO_COMPRESSION_CODEC_LZ4_BUFFERSIZE_KEY =
"io.compression.codec.lz4.buffersize";
@ -337,6 +353,17 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
public static final String HADOOP_USER_GROUP_METRICS_PERCENTILES_INTERVALS =
"hadoop.user.group.metrics.percentiles.intervals";
/* When creating a UGI with UserGroupInformation(Subject), treat the passed-in
 * subject as external if set to true, and assume the owner of the subject
 * should do the credential renewal.
*
* This is a temporary config to solve the compatibility issue with
* HADOOP-13558 and HADOOP-13805 fix, see the jiras for discussions.
*/
public static final String HADOOP_TREAT_SUBJECT_EXTERNAL_KEY =
"hadoop.treat.subject.external";
public static final boolean HADOOP_TREAT_SUBJECT_EXTERNAL_DEFAULT = false;
public static final String RPC_METRICS_QUANTILE_ENABLE =
"rpc.metrics.quantile.enable";
public static final boolean RPC_METRICS_QUANTILE_ENABLE_DEFAULT = false;

View File

@ -250,18 +250,43 @@ public class CommonConfigurationKeysPublic {
* @deprecated Moved to mapreduce, see mapreduce.task.io.sort.mb
* in mapred-default.xml
* See https://issues.apache.org/jira/browse/HADOOP-6801
*
* For {@link org.apache.hadoop.io.SequenceFile.Sorter} control
* instead, see {@link #SEQ_IO_SORT_MB_KEY}.
*/
public static final String IO_SORT_MB_KEY = "io.sort.mb";
/** Default value for {@link #IO_SORT_MB_KEY}. */
public static final int IO_SORT_MB_DEFAULT = 100;
/**
* @deprecated Moved to mapreduce, see mapreduce.task.io.sort.factor
* in mapred-default.xml
* See https://issues.apache.org/jira/browse/HADOOP-6801
*
* For {@link org.apache.hadoop.io.SequenceFile.Sorter} control
* instead, see {@link #SEQ_IO_SORT_FACTOR_KEY}.
*/
public static final String IO_SORT_FACTOR_KEY = "io.sort.factor";
/** Default value for {@link #IO_SORT_FACTOR_KEY}. */
public static final int IO_SORT_FACTOR_DEFAULT = 100;
/**
* @see
* <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
* core-default.xml</a>
*/
public static final String SEQ_IO_SORT_MB_KEY = "seq.io.sort.mb";
/** Default value for {@link #SEQ_IO_SORT_MB_KEY}. */
public static final int SEQ_IO_SORT_MB_DEFAULT = 100;
/**
* @see
* <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
* core-default.xml</a>
*/
public static final String SEQ_IO_SORT_FACTOR_KEY = "seq.io.sort.factor";
/** Default value for {@link #SEQ_IO_SORT_FACTOR_KEY}. */
public static final int SEQ_IO_SORT_FACTOR_DEFAULT = 100;
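A hedged sketch of how the new SequenceFile.Sorter-specific keys might be set in place of the deprecated io.sort.* ones; the values shown are just the documented defaults.

    import org.apache.hadoop.conf.Configuration;

    public class SeqSortConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Buffer size (MB) and merge factor for SequenceFile.Sorter,
        // independent of the deprecated MapReduce io.sort.* settings.
        conf.setInt("seq.io.sort.mb", 100);
        conf.setInt("seq.io.sort.factor", 100);
        System.out.println(conf.getInt("seq.io.sort.factor", 100));
      }
    }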
/**
* @see
* <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
@ -517,6 +542,21 @@ public class CommonConfigurationKeysPublic {
* <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
* core-default.xml</a>
*/
public static final String HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS =
"hadoop.security.groups.shell.command.timeout";
/**
* @see
* <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
* core-default.xml</a>
*/
public static final long
HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT =
0L;
/**
* @see
* <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
* core-default.xml</a>
*/
public static final String HADOOP_SECURITY_AUTHENTICATION =
"hadoop.security.authentication";
/**

View File

@ -160,7 +160,7 @@ public Path getHomeDirectory() {
@Override
public int getUriDefaultPort() {
return getDefaultPortIfDefined(fsImpl);
}
@Override

View File

@ -48,4 +48,7 @@ public class FSExceptionMessages {
= "Requested more bytes than destination buffer size";
public static final String PERMISSION_DENIED = "Permission denied";
public static final String PERMISSION_DENIED_BY_STICKY_BIT =
"Permission denied by sticky bit";
}

View File

@ -20,6 +20,9 @@
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.InvalidObjectException;
import java.io.ObjectInputValidation;
import java.io.Serializable;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -31,11 +34,14 @@
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FileStatus implements Writable, Comparable<FileStatus> {
public class FileStatus implements Writable, Comparable<FileStatus>,
Serializable, ObjectInputValidation {
private static final long serialVersionUID = 0x13caeae8;
private Path path;
private long length;
private boolean isdir;
private Boolean isdir;
private short block_replication;
private long blocksize;
private long modification_time;
@ -387,4 +393,15 @@ public String toString() {
sb.append("}");
return sb.toString();
}
@Override
public void validateObject() throws InvalidObjectException {
if (null == path) {
throw new InvalidObjectException("No Path in deserialized FileStatus");
}
if (null == isdir) {
throw new InvalidObjectException("No type in deserialized FileStatus");
}
}
}
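
Since FileStatus is now Serializable and self-validating, a Java-serialization round trip can be checked explicitly. A minimal sketch; the status instance and stream plumbing are assumed:

    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
      oos.writeObject(status);                      // any FileStatus instance
    }
    try (ObjectInputStream ois =
        new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
      FileStatus copy = (FileStatus) ois.readObject();
      copy.validateObject();  // throws InvalidObjectException if path or type is missing
    }
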

View File

@ -22,6 +22,7 @@
import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
@ -326,14 +327,15 @@ public static boolean copy(FileSystem srcFS, Path[] srcs,
return copy(srcFS, srcs[0], dstFS, dst, deleteSource, overwrite, conf);
// Check if dest is directory
if (!dstFS.exists(dst)) {
throw new IOException("`" + dst +"': specified destination directory " +
"does not exist");
} else {
try {
FileStatus sdst = dstFS.getFileStatus(dst);
if (!sdst.isDirectory())
throw new IOException("copying multiple files, but last argument `" +
dst + "' is not a directory");
} catch (FileNotFoundException e) {
throw new IOException(
"`" + dst + "': specified destination directory " +
"does not exist", e);
}
for (Path src : srcs) {
@ -481,8 +483,13 @@ private static boolean copy(FileSystem srcFS, FileStatus srcStatus,
private static Path checkDest(String srcName, FileSystem dstFS, Path dst,
boolean overwrite) throws IOException {
if (dstFS.exists(dst)) {
FileStatus sdst = dstFS.getFileStatus(dst);
FileStatus sdst;
try {
sdst = dstFS.getFileStatus(dst);
} catch (FileNotFoundException e) {
sdst = null;
}
if (null != sdst) {
if (sdst.isDirectory()) {
if (null == srcName) {
throw new IOException("Target " + dst + " is a directory");

View File

@ -34,6 +34,7 @@
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.Progressable;
@ -234,6 +235,12 @@ public boolean rename(Path src, Path dst) throws IOException {
return fs.rename(src, dst);
}
@Override
protected void rename(Path src, Path dst, Rename... options)
throws IOException {
fs.rename(src, dst, options);
}
@Override
public boolean truncate(Path f, final long newLength) throws IOException {
return fs.truncate(f, newLength);

View File

@ -328,7 +328,12 @@ public int run(String argv[]) throws Exception {
scope.close();
}
} catch (IllegalArgumentException e) {
displayError(cmd, e.getLocalizedMessage());
if (e.getMessage() == null) {
displayError(cmd, "Null exception message");
e.printStackTrace(System.err);
} else {
displayError(cmd, e.getLocalizedMessage());
}
printUsage(System.err);
if (instance != null) {
printInstanceUsage(System.err, instance);

View File

@ -153,7 +153,7 @@ public void set(String glob) {
if (curlyOpen > 0) {
error("Unclosed group", glob, len);
}
compiled = Pattern.compile(regex.toString());
compiled = Pattern.compile(regex.toString(), Pattern.DOTALL);
}
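
Pattern.DOTALL matters here because a path component may legally contain a newline; without the flag, the '.' produced by the glob translation stops at the line break. A standalone java.util.regex illustration (not the GlobPattern API itself):

    String name = "weird\nname";
    Pattern plain  = Pattern.compile("weird.name");
    Pattern dotall = Pattern.compile("weird.name", Pattern.DOTALL);
    plain.matcher(name).matches();    // false: '.' does not cross the newline
    dotall.matcher(name).matches();   // true
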
/**

View File

@ -525,7 +525,7 @@ public Path next() {
try {
advance();
} catch (IOException ie) {
throw new RuntimeException("Can't check existance of " + next, ie);
throw new RuntimeException("Can't check existence of " + next, ie);
}
if (result == null) {
throw new NoSuchElementException();

View File

@ -208,9 +208,7 @@ public FileDescriptor getFileDescriptor() throws IOException {
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
if (!exists(f)) {
throw new FileNotFoundException(f.toString());
}
getFileStatus(f);
return new FSDataInputStream(new BufferedFSInputStream(
new LocalFSFileInputStream(f), bufferSize));
}
@ -278,9 +276,6 @@ public void write(int b) throws IOException {
@Override
public FSDataOutputStream append(Path f, int bufferSize,
Progressable progress) throws IOException {
if (!exists(f)) {
throw new FileNotFoundException("File " + f + " not found");
}
FileStatus status = getFileStatus(f);
if (status.isDirectory()) {
throw new IOException("Cannot append to a diretory (=" + f + " )");
@ -387,17 +382,18 @@ public final boolean handleEmptyDstDirectoryOnWindows(Path src, File srcFile,
// platforms (notably Windows) do not provide this behavior, so the Java API
// call renameTo(dstFile) fails. Delete destination and attempt rename
// again.
if (this.exists(dst)) {
try {
FileStatus sdst = this.getFileStatus(dst);
if (sdst.isDirectory() && dstFile.list().length == 0) {
if (LOG.isDebugEnabled()) {
LOG.debug("Deleting empty destination and renaming " + src + " to " +
dst);
dst);
}
if (this.delete(dst, false) && srcFile.renameTo(dstFile)) {
return true;
}
}
} catch (FileNotFoundException ignored) {
}
return false;
}
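
The pattern applied throughout this change, sketched from the caller's side: issue a single getFileStatus() and treat FileNotFoundException as the "does not exist" case, instead of the racier and more expensive exists()-then-stat sequence (fs and path are assumed):

    FileStatus st;
    try {
      st = fs.getFileStatus(path);     // one metadata call
    } catch (FileNotFoundException e) {
      st = null;                       // absence handled explicitly, no second lookup
    }
    if (st != null && st.isDirectory()) {
      // directory-specific handling goes here
    }
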

View File

@ -121,9 +121,8 @@ public boolean moveToTrash(Path path) throws IOException {
if (!path.isAbsolute()) // make path absolute
path = new Path(fs.getWorkingDirectory(), path);
if (!fs.exists(path)) // check that path exists
throw new FileNotFoundException(path.toString());
// check that path exists
fs.getFileStatus(path);
String qpath = fs.makeQualified(path).toString();
Path trashRoot = fs.getTrashRoot(path);

View File

@ -23,6 +23,7 @@
import java.net.ConnectException;
import java.net.URI;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -65,6 +66,9 @@ public class FTPFileSystem extends FileSystem {
public static final String FS_FTP_HOST = "fs.ftp.host";
public static final String FS_FTP_HOST_PORT = "fs.ftp.host.port";
public static final String FS_FTP_PASSWORD_PREFIX = "fs.ftp.password.";
public static final String FS_FTP_DATA_CONNECTION_MODE =
"fs.ftp.data.connection.mode";
public static final String FS_FTP_TRANSFER_MODE = "fs.ftp.transfer.mode";
public static final String E_SAME_DIRECTORY_ONLY =
"only same directory renames are supported";
@ -143,9 +147,10 @@ private FTPClient connect() throws IOException {
NetUtils.UNKNOWN_HOST, 0,
new ConnectException("Server response " + reply));
} else if (client.login(user, password)) {
client.setFileTransferMode(FTP.BLOCK_TRANSFER_MODE);
client.setFileTransferMode(getTransferMode(conf));
client.setFileType(FTP.BINARY_FILE_TYPE);
client.setBufferSize(DEFAULT_BUFFER_SIZE);
setDataConnectionMode(client, conf);
} else {
throw new IOException("Login failed on server - " + host + ", port - "
+ port + " as user '" + user + "'");
@ -154,6 +159,69 @@ private FTPClient connect() throws IOException {
return client;
}
/**
* Set FTP's transfer mode based on configuration. Valid values are
* STREAM_TRANSFER_MODE, BLOCK_TRANSFER_MODE and COMPRESSED_TRANSFER_MODE.
* <p/>
* Defaults to BLOCK_TRANSFER_MODE.
*
* @param conf the configuration to read the transfer mode from
* @return the transfer mode to set on the FTP client
*/
@VisibleForTesting
int getTransferMode(Configuration conf) {
final String mode = conf.get(FS_FTP_TRANSFER_MODE);
// FTP default is STREAM_TRANSFER_MODE, but Hadoop FTPFS's default is
// FTP.BLOCK_TRANSFER_MODE historically.
int ret = FTP.BLOCK_TRANSFER_MODE;
if (mode == null) {
return ret;
}
final String upper = mode.toUpperCase();
if (upper.equals("STREAM_TRANSFER_MODE")) {
ret = FTP.STREAM_TRANSFER_MODE;
} else if (upper.equals("COMPRESSED_TRANSFER_MODE")) {
ret = FTP.COMPRESSED_TRANSFER_MODE;
} else {
if (!upper.equals("BLOCK_TRANSFER_MODE")) {
LOG.warn("Cannot parse the value for " + FS_FTP_TRANSFER_MODE + ": "
+ mode + ". Using default.");
}
}
return ret;
}
/**
* Set the FTPClient's data connection mode based on configuration. Valid
* values are ACTIVE_LOCAL_DATA_CONNECTION_MODE,
* PASSIVE_LOCAL_DATA_CONNECTION_MODE and PASSIVE_REMOTE_DATA_CONNECTION_MODE.
* <p/>
* Defaults to ACTIVE_LOCAL_DATA_CONNECTION_MODE.
*
* @param client the FTPClient whose data connection mode is being set
* @param conf the configuration to read the data connection mode from
* @throws IOException if the mode cannot be applied to the client
*/
@VisibleForTesting
void setDataConnectionMode(FTPClient client, Configuration conf)
throws IOException {
final String mode = conf.get(FS_FTP_DATA_CONNECTION_MODE);
if (mode == null) {
return;
}
final String upper = mode.toUpperCase();
if (upper.equals("PASSIVE_LOCAL_DATA_CONNECTION_MODE")) {
client.enterLocalPassiveMode();
} else if (upper.equals("PASSIVE_REMOTE_DATA_CONNECTION_MODE")) {
client.enterRemotePassiveMode();
} else {
if (!upper.equals("ACTIVE_LOCAL_DATA_CONNECTION_MODE")) {
LOG.warn("Cannot parse the value for " + FS_FTP_DATA_CONNECTION_MODE
+ ": " + mode + ". Using default.");
}
}
}
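
For example, a client behind a firewall might select passive connections and stream transfers through the new keys; the URI and credentials below are placeholders:

    Configuration conf = new Configuration();
    conf.set(FTPFileSystem.FS_FTP_TRANSFER_MODE, "STREAM_TRANSFER_MODE");
    conf.set(FTPFileSystem.FS_FTP_DATA_CONNECTION_MODE,
        "PASSIVE_LOCAL_DATA_CONNECTION_MODE");
    FileSystem ftpFs = FileSystem.get(URI.create("ftp://user@ftp.example.com/"), conf);
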
/**
* Logout and disconnect the given FTPClient.
*
@ -576,6 +644,7 @@ private boolean isParentOf(Path parent, Path child) {
* @return
* @throws IOException
*/
@SuppressWarnings("deprecation")
private boolean rename(FTPClient client, Path src, Path dst)
throws IOException {
Path workDir = new Path(client.printWorkingDirectory());

View File

@ -29,6 +29,7 @@
@InterfaceAudience.Public
@InterfaceStability.Evolving
public final class FsCreateModes extends FsPermission {
private static final long serialVersionUID = 0x22986f6d;
private final FsPermission unmasked;
/**

View File

@ -20,6 +20,9 @@
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.io.InvalidObjectException;
import java.io.ObjectInputValidation;
import java.io.Serializable;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -36,8 +39,10 @@
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class FsPermission implements Writable {
public class FsPermission implements Writable, Serializable,
ObjectInputValidation {
private static final Log LOG = LogFactory.getLog(FsPermission.class);
private static final long serialVersionUID = 0x2fe08564;
static final WritableFactory FACTORY = new WritableFactory() {
@Override
@ -60,7 +65,7 @@ public static FsPermission createImmutable(short permission) {
private FsAction useraction = null;
private FsAction groupaction = null;
private FsAction otheraction = null;
private boolean stickyBit = false;
private Boolean stickyBit = false;
private FsPermission() {}
@ -202,7 +207,7 @@ public boolean equals(Object obj) {
return this.useraction == that.useraction
&& this.groupaction == that.groupaction
&& this.otheraction == that.otheraction
&& this.stickyBit == that.stickyBit;
&& this.stickyBit.booleanValue() == that.stickyBit.booleanValue();
}
return false;
}
@ -377,6 +382,7 @@ else if (unixSymbolicPermission.length() != MAX_PERMISSION_LENGTH) {
}
private static class ImmutableFsPermission extends FsPermission {
private static final long serialVersionUID = 0x1bab54bd;
public ImmutableFsPermission(short permission) {
super(permission);
}
@ -386,4 +392,14 @@ public void readFields(DataInput in) throws IOException {
throw new UnsupportedOperationException();
}
}
@Override
public void validateObject() throws InvalidObjectException {
if (null == useraction || null == groupaction || null == otheraction) {
throw new InvalidObjectException("Invalid mode in FsPermission");
}
if (null == stickyBit) {
throw new InvalidObjectException("No sticky bit in FsPermission");
}
}
}

View File

@ -101,7 +101,17 @@ protected int getDepth() {
* @throws IOException if any error occurs
*/
abstract protected void run(Path path) throws IOException;
/**
* Execute the command on the input path data. Commands can override to make
* use of the resolved filesystem.
* @param pathData The input path with resolved filesystem
* @throws IOException
*/
protected void run(PathData pathData) throws IOException {
run(pathData.path);
}
/**
* For each source path, execute the command
*
@ -113,7 +123,7 @@ public int runAll() {
try {
PathData[] srcs = PathData.expandAsGlob(src, getConf());
for (PathData s : srcs) {
run(s.path);
run(s);
}
} catch (IOException e) {
exitCode = -1;

View File

@ -248,7 +248,7 @@ public static class DuplicatedOptionException extends IllegalArgumentException {
private static final long serialVersionUID = 0L;
public DuplicatedOptionException(String duplicatedOption) {
super("option " + duplicatedOption + " already exsits!");
super("option " + duplicatedOption + " already exists!");
}
}
}

View File

@ -52,10 +52,6 @@
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.codehaus.jackson.JsonEncoding;
import org.codehaus.jackson.JsonFactory;
import org.codehaus.jackson.JsonGenerator;
import org.codehaus.jackson.util.MinimalPrettyPrinter;
/**
* Display contents or checksums of files
@ -277,12 +273,7 @@ public AvroFileInputStream(FileStatus status) throws IOException {
Schema schema = fileReader.getSchema();
writer = new GenericDatumWriter<Object>(schema);
output = new ByteArrayOutputStream();
JsonGenerator generator =
new JsonFactory().createJsonGenerator(output, JsonEncoding.UTF8);
MinimalPrettyPrinter prettyPrinter = new MinimalPrettyPrinter();
prettyPrinter.setRootValueSeparator(System.getProperty("line.separator"));
generator.setPrettyPrinter(prettyPrinter);
encoder = EncoderFactory.get().jsonEncoder(schema, generator);
encoder = EncoderFactory.get().jsonEncoder(schema, output);
}
/**

View File

@ -20,19 +20,24 @@
import java.io.IOException;
import java.io.PrintStream;
import java.net.URI;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ViewFileSystem;
import org.apache.hadoop.fs.viewfs.ViewFileSystemUtil;
import org.apache.hadoop.util.StringUtils;
/** Base class for commands related to viewing filesystem usage, such as
* du and df
/**
* Base class for commands related to viewing filesystem usage,
* such as du and df.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
@ -44,15 +49,27 @@ public static void registerCommands(CommandFactory factory) {
factory.addClass(Dus.class, "-dus");
}
protected boolean humanReadable = false;
protected TableBuilder usagesTable;
private boolean humanReadable = false;
private TableBuilder usagesTable;
protected String formatSize(long size) {
return humanReadable
? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
: String.valueOf(size);
}
public TableBuilder getUsagesTable() {
return usagesTable;
}
public void setUsagesTable(TableBuilder usagesTable) {
this.usagesTable = usagesTable;
}
public void setHumanReadable(boolean humanReadable) {
this.humanReadable = humanReadable;
}
/** Show the size of a partition in the filesystem */
public static class Df extends FsUsage {
public static final String NAME = "df";
@ -70,38 +87,74 @@ protected void processOptions(LinkedList<String> args)
throws IOException {
CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "h");
cf.parse(args);
humanReadable = cf.getOpt("h");
setHumanReadable(cf.getOpt("h"));
if (args.isEmpty()) args.add(Path.SEPARATOR);
}
@Override
protected void processArguments(LinkedList<PathData> args)
throws IOException {
usagesTable = new TableBuilder(
"Filesystem", "Size", "Used", "Available", "Use%");
usagesTable.setRightAlign(1, 2, 3, 4);
setUsagesTable(new TableBuilder(
"Filesystem", "Size", "Used", "Available", "Use%", "Mounted on"));
getUsagesTable().setRightAlign(1, 2, 3, 4);
super.processArguments(args);
if (!usagesTable.isEmpty()) {
usagesTable.printToStream(out);
if (!getUsagesTable().isEmpty()) {
getUsagesTable().printToStream(out);
}
}
/**
* Add a new row to the usages table for the given FileSystem URI.
*
* @param uri - FileSystem URI
* @param fsStatus - FileSystem status
* @param mountedOnPath - FileSystem mounted on path
*/
private void addToUsagesTable(URI uri, FsStatus fsStatus,
String mountedOnPath) {
long size = fsStatus.getCapacity();
long used = fsStatus.getUsed();
long free = fsStatus.getRemaining();
getUsagesTable().addRow(
uri,
formatSize(size),
formatSize(used),
formatSize(free),
StringUtils.formatPercent((double) used / (double) size, 0),
mountedOnPath
);
}
@Override
protected void processPath(PathData item) throws IOException {
FsStatus fsStats = item.fs.getStatus(item.path);
long size = fsStats.getCapacity();
long used = fsStats.getUsed();
long free = fsStats.getRemaining();
if (ViewFileSystemUtil.isViewFileSystem(item.fs)) {
ViewFileSystem viewFileSystem = (ViewFileSystem) item.fs;
Map<ViewFileSystem.MountPoint, FsStatus> fsStatusMap =
ViewFileSystemUtil.getStatus(viewFileSystem, item.path);
usagesTable.addRow(
item.fs.getUri(),
formatSize(size),
formatSize(used),
formatSize(free),
StringUtils.formatPercent((double)used/(double)size, 0)
);
for (Map.Entry<ViewFileSystem.MountPoint, FsStatus> entry :
fsStatusMap.entrySet()) {
ViewFileSystem.MountPoint viewFsMountPoint = entry.getKey();
FsStatus fsStatus = entry.getValue();
// Add the viewfs mount point status to report
URI[] mountPointFileSystemURIs =
viewFsMountPoint.getTargetFileSystemURIs();
// Since LinkMerge is not supported yet, we
// should ideally see mountPointFileSystemURIs
// array with only one element.
addToUsagesTable(mountPointFileSystemURIs[0],
fsStatus, viewFsMountPoint.getMountedOnPath().toString());
}
} else {
// Hide the columns specific to ViewFileSystem
getUsagesTable().setColumnHide(5, true);
FsStatus fsStatus = item.fs.getStatus(item.path);
addToUsagesTable(item.fs.getUri(), fsStatus, "/");
}
}
}
/** show disk usage */
@ -128,7 +181,7 @@ public static class Du extends FsUsage {
protected void processOptions(LinkedList<String> args) throws IOException {
CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "h", "s", "x");
cf.parse(args);
humanReadable = cf.getOpt("h");
setHumanReadable(cf.getOpt("h"));
summary = cf.getOpt("s");
excludeSnapshots = cf.getOpt("x");
if (args.isEmpty()) args.add(Path.CUR_DIR);
@ -137,10 +190,10 @@ protected void processOptions(LinkedList<String> args) throws IOException {
@Override
protected void processArguments(LinkedList<PathData> args)
throws IOException {
usagesTable = new TableBuilder(3);
setUsagesTable(new TableBuilder(3));
super.processArguments(args);
if (!usagesTable.isEmpty()) {
usagesTable.printToStream(out);
if (!getUsagesTable().isEmpty()) {
getUsagesTable().printToStream(out);
}
}
@ -163,7 +216,8 @@ protected void processPath(PathData item) throws IOException {
length -= contentSummary.getSnapshotLength();
spaceConsumed -= contentSummary.getSnapshotSpaceConsumed();
}
usagesTable.addRow(formatSize(length), formatSize(spaceConsumed), item);
getUsagesTable().addRow(formatSize(length),
formatSize(spaceConsumed), item);
}
}
/** show disk usage summary */
@ -191,6 +245,7 @@ private static class TableBuilder {
protected List<String[]> rows;
protected int[] widths;
protected boolean[] rightAlign;
private boolean[] hide;
/**
* Create a table w/o headers
@ -200,6 +255,7 @@ public TableBuilder(int columns) {
rows = new ArrayList<String[]>();
widths = new int[columns];
rightAlign = new boolean[columns];
hide = new boolean[columns];
}
/**
@ -219,7 +275,14 @@ public TableBuilder(Object ... headers) {
public void setRightAlign(int ... indexes) {
for (int i : indexes) rightAlign[i] = true;
}
/**
* Hide the given column index
*/
public void setColumnHide(int columnIndex, boolean hideCol) {
hide[columnIndex] = hideCol;
}
/**
* Add a row of objects to the table
* @param objects the values
@ -234,7 +297,7 @@ public void addRow(Object ... objects) {
}
/**
* Render the table to a stream
* Render the table to a stream.
* @param out PrintStream for output
*/
public void printToStream(PrintStream out) {
@ -242,6 +305,9 @@ public void printToStream(PrintStream out) {
StringBuilder fmt = new StringBuilder();
for (int i=0; i < widths.length; i++) {
if (hide[i]) {
continue;
}
if (fmt.length() != 0) fmt.append(" ");
if (rightAlign[i]) {
fmt.append("%"+widths[i]+"s");

View File

@ -33,7 +33,7 @@
* Format sequences:<br>
* %a: Permissions in octal<br>
* %A: Permissions in symbolic style<br>
* %b: Size of file in blocks<br>
* %b: Size of file in bytes<br>
* %F: Type<br>
* %g: Group name of owner<br>
* %n: Filename<br>
@ -60,7 +60,7 @@ public static void registerCommands(CommandFactory factory) {
"Print statistics about the file/directory at <path>" + NEWLINE +
"in the specified format. Format accepts permissions in" + NEWLINE +
"octal (%a) and symbolic (%A), filesize in" + NEWLINE +
"blocks (%b), type (%F), group name of owner (%g)," + NEWLINE +
"bytes (%b), type (%F), group name of owner (%g)," + NEWLINE +
"name (%n), block size (%o), replication (%r), user name" + NEWLINE +
"of owner (%u), modification date (%y, %Y)." + NEWLINE +
"%y shows UTC date as \"yyyy-MM-dd HH:mm:ss\" and" + NEWLINE +

View File

@ -221,12 +221,23 @@ public FileChecksum getFileChecksum(final Path f)
return super.getFileChecksum(fullPath(f));
}
@Override
public FileChecksum getFileChecksum(final Path f, final long length)
throws IOException {
return super.getFileChecksum(fullPath(f), length);
}
@Override
public FileStatus getFileStatus(final Path f)
throws IOException {
return super.getFileStatus(fullPath(f));
}
@Override
public Path getLinkTarget(Path f) throws IOException {
return super.getLinkTarget(fullPath(f));
}
@Override
public void access(Path path, FsAction mode) throws AccessControlException,
FileNotFoundException, IOException {

View File

@ -23,6 +23,7 @@
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.EnumSet;
@ -32,7 +33,6 @@
import java.util.Set;
import java.util.Map.Entry;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -48,6 +48,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
@ -89,34 +90,35 @@ static AccessControlException readOnlyMountTable(final String operation,
return readOnlyMountTable(operation, p.toString());
}
static public class MountPoint {
/**
* The source of the mount.
*/
private Path src;
/**
* MountPoint representation built from the configuration.
*/
public static class MountPoint {
/**
* One or more targets of the mount.
* Multiple targets imply MergeMount.
* The mounted on path location.
*/
private URI[] targets;
private final Path mountedOnPath;
MountPoint(Path srcPath, URI[] targetURIs) {
src = srcPath;
targets = targetURIs;
/**
* Array of target FileSystem URIs.
*/
private final URI[] targetFileSystemURIs;
MountPoint(Path srcPath, URI[] targetFs) {
mountedOnPath = srcPath;
targetFileSystemURIs = targetFs;
}
@VisibleForTesting
Path getSrc() {
return src;
public Path getMountedOnPath() {
return mountedOnPath;
}
@VisibleForTesting
URI[] getTargets() {
return targets;
public URI[] getTargetFileSystemURIs() {
return targetFileSystemURIs;
}
}
final long creationTime; // of the mount table
final UserGroupInformation ugi; // the user/group of user who created mtable
URI myUri;
@ -133,7 +135,7 @@ URI[] getTargets() {
* @param p path
* @return path-part of the Path p
*/
private String getUriPath(final Path p) {
String getUriPath(final Path p) {
checkPath(p);
return makeAbsolute(p).toUri().getPath();
}
@ -348,6 +350,15 @@ public FileChecksum getFileChecksum(final Path f)
return res.targetFileSystem.getFileChecksum(res.remainingPath);
}
@Override
public FileChecksum getFileChecksum(final Path f, final long length)
throws AccessControlException, FileNotFoundException,
IOException {
InodeTree.ResolveResult<FileSystem> res =
fsState.resolve(getUriPath(f), true);
return res.targetFileSystem.getFileChecksum(res.remainingPath, length);
}
private static FileStatus fixFileStatus(FileStatus orig,
Path qualified) throws IOException {
// FileStatus#getPath is a fully qualified path relative to the root of
@ -731,8 +742,8 @@ public MountPoint[] getMountPoints() {
MountPoint[] result = new MountPoint[mountPoints.size()];
for ( int i = 0; i < mountPoints.size(); ++i ) {
result[i] = new MountPoint(new Path(mountPoints.get(i).src),
mountPoints.get(i).target.targetDirLinkList);
result[i] = new MountPoint(new Path(mountPoints.get(i).src),
mountPoints.get(i).target.targetDirLinkList);
}
return result;
}
@ -799,6 +810,83 @@ public Collection<? extends BlockStoragePolicySpi> getAllStoragePolicies()
return allPolicies;
}
/**
* Get the trash root directory for current user when the path
* specified is deleted.
*
* @param path the trash root of the path to be determined.
* @return the trash root path.
*/
@Override
public Path getTrashRoot(Path path) {
try {
InodeTree.ResolveResult<FileSystem> res =
fsState.resolve(getUriPath(path), true);
return res.targetFileSystem.getTrashRoot(res.remainingPath);
} catch (Exception e) {
throw new NotInMountpointException(path, "getTrashRoot");
}
}
/**
* Get all the trash roots for current user or all users.
*
* @param allUsers return trash roots for all users if true.
* @return all Trash root directories.
*/
@Override
public Collection<FileStatus> getTrashRoots(boolean allUsers) {
List<FileStatus> trashRoots = new ArrayList<>();
for (FileSystem fs : getChildFileSystems()) {
trashRoots.addAll(fs.getTrashRoots(allUsers));
}
return trashRoots;
}
@Override
public FsStatus getStatus() throws IOException {
return getStatus(null);
}
@Override
public FsStatus getStatus(Path p) throws IOException {
if (p == null) {
p = InodeTree.SlashPath;
}
InodeTree.ResolveResult<FileSystem> res = fsState.resolve(
getUriPath(p), true);
return res.targetFileSystem.getStatus(p);
}
/**
* Return the total size of all files under "/", if {@link
* Constants#CONFIG_VIEWFS_LINK_MERGE_SLASH} is supported and is a valid
* mount point. Else, throw NotInMountpointException.
*
* @throws IOException
*/
@Override
public long getUsed() throws IOException {
InodeTree.ResolveResult<FileSystem> res = fsState.resolve(
getUriPath(InodeTree.SlashPath), true);
if (res.isInternalDir()) {
throw new NotInMountpointException(InodeTree.SlashPath, "getUsed");
} else {
return res.targetFileSystem.getUsed();
}
}
@Override
public Path getLinkTarget(Path path) throws IOException {
InodeTree.ResolveResult<FileSystem> res;
try {
res = fsState.resolve(getUriPath(path), true);
} catch (FileNotFoundException e) {
throw new NotInMountpointException(path, "getLinkTarget");
}
return res.targetFileSystem.getLinkTarget(res.remainingPath);
}
/**
* An instance of this class represents an internal dir of the viewFs
* that is internal dir of the mount table.
@ -901,7 +989,7 @@ public FileChecksum getFileChecksum(final Path f)
public FileStatus getFileStatus(Path f) throws IOException {
checkPathIsSlash(f);
return new FileStatus(0, true, 0, 0, creationTime, creationTime,
PERMISSION_555, ugi.getUserName(), ugi.getPrimaryGroupName(),
PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(),
new Path(theInternalDir.fullPath).makeQualified(
myUri, ROOT_PATH));
@ -922,14 +1010,14 @@ public FileStatus[] listStatus(Path f) throws AccessControlException,
result[i++] = new FileStatus(0, false, 0, 0,
creationTime, creationTime, PERMISSION_555,
ugi.getUserName(), ugi.getPrimaryGroupName(),
ugi.getShortUserName(), ugi.getPrimaryGroupName(),
link.getTargetLink(),
new Path(inode.fullPath).makeQualified(
myUri, null));
} else {
result[i++] = new FileStatus(0, true, 0, 0,
creationTime, creationTime, PERMISSION_555,
ugi.getUserName(), ugi.getGroupNames()[0],
ugi.getShortUserName(), ugi.getGroupNames()[0],
new Path(inode.fullPath).makeQualified(
myUri, null));
}
@ -1053,7 +1141,7 @@ public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
@Override
public AclStatus getAclStatus(Path path) throws IOException {
checkPathIsSlash(path);
return new AclStatus.Builder().owner(ugi.getUserName())
return new AclStatus.Builder().owner(ugi.getShortUserName())
.group(ugi.getPrimaryGroupName())
.addEntries(AclUtil.getMinimalAcl(PERMISSION_555))
.stickyBit(false).build();

View File

@ -0,0 +1,164 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs.viewfs;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.fs.viewfs.ViewFileSystem.MountPoint;
/**
* Utility APIs for ViewFileSystem.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public final class ViewFileSystemUtil {
private ViewFileSystemUtil() {
// Private Constructor
}
/**
* Check if the FileSystem is a ViewFileSystem.
*
* @param fileSystem the FileSystem to check
* @return true if the fileSystem is ViewFileSystem
*/
public static boolean isViewFileSystem(final FileSystem fileSystem) {
return fileSystem.getScheme().equals(FsConstants.VIEWFS_SCHEME);
}
/**
* Get FsStatus for all ViewFsMountPoints matching path for the given
* ViewFileSystem.
*
* Say ViewFileSystem has following mount points configured
* (1) hdfs://NN0_host:port/sales mounted on /dept/sales
* (2) hdfs://NN1_host:port/marketing mounted on /dept/marketing
* (3) hdfs://NN2_host:port/eng_usa mounted on /dept/eng/usa
* (4) hdfs://NN3_host:port/eng_asia mounted on /dept/eng/asia
*
* For the above config, here is a sample list of paths and their matching
* mount points while getting FsStatus
*
* Path Description Matching MountPoint
*
* "/" Root ViewFileSystem lists all (1), (2), (3), (4)
* mount points.
*
* "/dept" Not a mount point, but a valid (1), (2), (3), (4)
* internal dir in the mount tree
* and resolved down to "/" path.
*
* "/dept/sales" Matches a mount point (1)
*
* "/dept/sales/india" Path is over a valid mount point (1)
* and resolved down to
* "/dept/sales"
*
* "/dept/eng" Not a mount point, but a valid (1), (2), (3), (4)
* internal dir in the mount tree
* and resolved down to "/" path.
*
* "/erp" Doesn't match or leads to or
* over any valid mount points None
*
*
* @param fileSystem - ViewFileSystem on which mount point exists
* @param path - URI for which FsStatus is requested
* @return Map of ViewFsMountPoint and FsStatus
*/
public static Map<MountPoint, FsStatus> getStatus(
FileSystem fileSystem, Path path) throws IOException {
if (!isViewFileSystem(fileSystem)) {
throw new UnsupportedFileSystemException("FileSystem '"
+ fileSystem.getUri() + "' is not a ViewFileSystem.");
}
ViewFileSystem viewFileSystem = (ViewFileSystem) fileSystem;
String viewFsUriPath = viewFileSystem.getUriPath(path);
boolean isPathOverMountPoint = false;
boolean isPathLeadingToMountPoint = false;
boolean isPathIncludesAllMountPoint = false;
Map<MountPoint, FsStatus> mountPointMap = new HashMap<>();
for (MountPoint mountPoint : viewFileSystem.getMountPoints()) {
String[] mountPointPathComponents = InodeTree.breakIntoPathComponents(
mountPoint.getMountedOnPath().toString());
String[] incomingPathComponents =
InodeTree.breakIntoPathComponents(viewFsUriPath);
int pathCompIndex;
for (pathCompIndex = 0; pathCompIndex < mountPointPathComponents.length &&
pathCompIndex < incomingPathComponents.length; pathCompIndex++) {
if (!mountPointPathComponents[pathCompIndex].equals(
incomingPathComponents[pathCompIndex])) {
break;
}
}
if (pathCompIndex >= mountPointPathComponents.length) {
// Path matches or is over a valid mount point
isPathOverMountPoint = true;
mountPointMap.clear();
updateMountPointFsStatus(viewFileSystem, mountPointMap, mountPoint,
new Path(viewFsUriPath));
break;
} else {
if (pathCompIndex > 1) {
// Path is in the mount tree
isPathLeadingToMountPoint = true;
} else if (incomingPathComponents.length <= 1) {
// Special case of "/" path
isPathIncludesAllMountPoint = true;
}
updateMountPointFsStatus(viewFileSystem, mountPointMap, mountPoint,
mountPoint.getMountedOnPath());
}
}
if (!isPathOverMountPoint && !isPathLeadingToMountPoint &&
!isPathIncludesAllMountPoint) {
throw new NotInMountpointException(path, "getStatus");
}
return mountPointMap;
}
/**
* Update FsStatus for the given mount point.
*
* @param viewFileSystem the ViewFileSystem that owns the mount point
* @param mountPointMap map from mount point to FsStatus being populated
* @param mountPoint the mount point whose status is requested
* @param path the path to query for FsStatus
*/
private static void updateMountPointFsStatus(
final ViewFileSystem viewFileSystem,
final Map<MountPoint, FsStatus> mountPointMap,
final MountPoint mountPoint, final Path path) throws IOException {
FsStatus fsStatus = viewFileSystem.getStatus(path);
mountPointMap.put(mountPoint, fsStatus);
}
}
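
A sketch of how a caller (the df command above is the in-tree consumer) might use this utility; MountPoint is ViewFileSystem.MountPoint, and the viewfs URI and Configuration are assumptions:

    FileSystem fs = FileSystem.get(URI.create("viewfs://cluster/"), conf);
    if (ViewFileSystemUtil.isViewFileSystem(fs)) {
      Map<MountPoint, FsStatus> statuses =
          ViewFileSystemUtil.getStatus(fs, new Path("/"));
      for (Map.Entry<MountPoint, FsStatus> e : statuses.entrySet()) {
        System.out.println(e.getKey().getMountedOnPath() + " -> "
            + e.getKey().getTargetFileSystemURIs()[0]
            + " used=" + e.getValue().getUsed());
      }
    }
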

View File

@ -845,7 +845,7 @@ public FileChecksum getFileChecksum(final Path f)
public FileStatus getFileStatus(final Path f) throws IOException {
checkPathIsSlash(f);
return new FileStatus(0, true, 0, 0, creationTime, creationTime,
PERMISSION_555, ugi.getUserName(), ugi.getPrimaryGroupName(),
PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(),
new Path(theInternalDir.fullPath).makeQualified(
myUri, null));
}
@ -865,13 +865,13 @@ public FileStatus getFileLinkStatus(final Path f)
INodeLink<AbstractFileSystem> inodelink =
(INodeLink<AbstractFileSystem>) inode;
result = new FileStatus(0, false, 0, 0, creationTime, creationTime,
PERMISSION_555, ugi.getUserName(), ugi.getPrimaryGroupName(),
PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(),
inodelink.getTargetLink(),
new Path(inode.fullPath).makeQualified(
myUri, null));
} else {
result = new FileStatus(0, true, 0, 0, creationTime, creationTime,
PERMISSION_555, ugi.getUserName(), ugi.getPrimaryGroupName(),
PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(),
new Path(inode.fullPath).makeQualified(
myUri, null));
}
@ -910,14 +910,14 @@ public FileStatus[] listStatus(final Path f) throws AccessControlException,
result[i++] = new FileStatus(0, false, 0, 0,
creationTime, creationTime,
PERMISSION_555, ugi.getUserName(), ugi.getPrimaryGroupName(),
PERMISSION_555, ugi.getShortUserName(), ugi.getPrimaryGroupName(),
link.getTargetLink(),
new Path(inode.fullPath).makeQualified(
myUri, null));
} else {
result[i++] = new FileStatus(0, true, 0, 0,
creationTime, creationTime,
PERMISSION_555, ugi.getUserName(), ugi.getGroupNames()[0],
PERMISSION_555, ugi.getShortUserName(), ugi.getGroupNames()[0],
new Path(inode.fullPath).makeQualified(
myUri, null));
}
@ -1043,7 +1043,7 @@ public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
@Override
public AclStatus getAclStatus(Path path) throws IOException {
checkPathIsSlash(path);
return new AclStatus.Builder().owner(ugi.getUserName())
return new AclStatus.Builder().owner(ugi.getShortUserName())
.group(ugi.getPrimaryGroupName())
.addEntries(AclUtil.getMinimalAcl(PERMISSION_555))
.stickyBit(false).build();

View File

@ -346,8 +346,13 @@ public synchronized void ensureParentZNode()
createWithRetries(prefixPath, new byte[]{}, zkAcl, CreateMode.PERSISTENT);
} catch (KeeperException e) {
if (isNodeExists(e.code())) {
// This is OK - just ensuring existence.
continue;
// Set ACLs for parent node, if they do not exist or are different
try {
setAclsWithRetries(prefixPath);
} catch (KeeperException e1) {
throw new IOException("Couldn't set ACLs on parent ZNode: " +
prefixPath, e1);
}
} else {
throw new IOException("Couldn't create " + prefixPath, e);
}
@ -1066,14 +1071,36 @@ public Void run() throws KeeperException, InterruptedException {
});
}
private void setAclsWithRetries(final String path)
throws KeeperException, InterruptedException {
Stat stat = new Stat();
zkDoWithRetries(new ZKAction<Void>() {
@Override
public Void run() throws KeeperException, InterruptedException {
List<ACL> acl = zkClient.getACL(path, stat);
if (acl == null || !acl.containsAll(zkAcl) ||
!zkAcl.containsAll(acl)) {
zkClient.setACL(path, zkAcl, stat.getVersion());
}
return null;
}
}, Code.BADVERSION);
}
private <T> T zkDoWithRetries(ZKAction<T> action) throws KeeperException,
InterruptedException {
return zkDoWithRetries(action, null);
}
private <T> T zkDoWithRetries(ZKAction<T> action, Code retryCode)
throws KeeperException, InterruptedException {
int retry = 0;
while (true) {
try {
return action.run();
} catch (KeeperException ke) {
if (shouldRetry(ke.code()) && ++retry < maxRetryNum) {
if ((shouldRetry(ke.code()) || shouldRetry(ke.code(), retryCode))
&& ++retry < maxRetryNum) {
continue;
}
throw ke;
@ -1189,6 +1216,10 @@ private static boolean isSessionExpired(Code code) {
private static boolean shouldRetry(Code code) {
return code == Code.CONNECTIONLOSS || code == Code.OPERATIONTIMEOUT;
}
private static boolean shouldRetry(Code code, Code retryIfCode) {
return (retryIfCode == null ? false : retryIfCode == code);
}
@Override
public String toString() {

View File

@ -80,6 +80,8 @@ public abstract class HAAdmin extends Configured implements Tool {
"--" + FORCEACTIVE + " option is used."))
.put("-getServiceState",
new UsageInfo("<serviceId>", "Returns the state of the service"))
.put("-getAllServiceState",
new UsageInfo(null, "Returns the state of all the services"))
.put("-checkHealth",
new UsageInfo("<serviceId>",
"Requests that the service perform a health check.\n" +
@ -119,7 +121,11 @@ protected void printUsage(PrintStream errOut) {
String cmd = e.getKey();
UsageInfo usage = e.getValue();
errOut.println(" [" + cmd + " " + usage.args + "]");
if (usage.args == null) {
errOut.println(" [" + cmd + "]");
} else {
errOut.println(" [" + cmd + " " + usage.args + "]");
}
}
errOut.println();
ToolRunner.printGenericCommandUsage(errOut);
@ -130,7 +136,11 @@ private void printUsage(PrintStream errOut, String cmd) {
if (usage == null) {
throw new RuntimeException("No usage for cmd " + cmd);
}
errOut.println(getUsageString() + " [" + cmd + " " + usage.args + "]");
if (usage.args == null) {
errOut.println(getUsageString() + " [" + cmd + "]");
} else {
errOut.println(getUsageString() + " [" + cmd + " " + usage.args + "]");
}
}
private int transitionToActive(final CommandLine cmd)
@ -455,6 +465,8 @@ protected int runCmd(String[] argv) throws Exception {
return failover(cmdLine);
} else if ("-getServiceState".equals(cmd)) {
return getServiceState(cmdLine);
} else if ("-getAllServiceState".equals(cmd)) {
return getAllServiceState();
} else if ("-checkHealth".equals(cmd)) {
return checkHealth(cmdLine);
} else if ("-help".equals(cmd)) {
@ -465,7 +477,30 @@ protected int runCmd(String[] argv) throws Exception {
throw new AssertionError("Should not get here, command: " + cmd);
}
}
protected int getAllServiceState() {
Collection<String> targetIds = getTargetIds(null);
if (targetIds.isEmpty()) {
errOut.println("Failed to get service IDs");
return -1;
}
for (String targetId : targetIds) {
HAServiceTarget target = resolveTarget(targetId);
String address = target.getAddress().getHostName() + ":"
+ target.getAddress().getPort();
try {
HAServiceProtocol proto = target.getProxy(getConf(),
rpcTimeoutForChecks);
out.println(String.format("%-50s %-10s", address, proto
.getServiceStatus().getState()));
} catch (IOException e) {
out.println(String.format("%-50s %-10s", address,
"Failed to connect: " + e.getMessage()));
}
}
return 0;
}
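
In practice this surfaces through the admin CLIs built on HAAdmin (for example, hdfs haadmin -getAllServiceState), which prints one address/state pair per configured service using the %-50s %-10s layout above. The hosts and states below are illustrative only:

    nn1.example.com:8020                               active
    nn2.example.com:8020                               standby
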
private boolean confirmForceManual() throws IOException {
return ToolRunner.confirmPrompt(
"You have specified the --" + FORCEMANUAL + " flag. This flag is " +
@ -532,7 +567,11 @@ private int help(String[] argv) {
return -1;
}
out.println(cmd + " [" + usageInfo.args + "]: " + usageInfo.help);
if (usageInfo.args == null) {
out.println(cmd + ": " + usageInfo.help);
} else {
out.println(cmd + " [" + usageInfo.args + "]: " + usageInfo.help);
}
return 0;
}

View File

@ -84,8 +84,11 @@ public abstract class ZKFailoverController {
ZK_AUTH_KEY
};
protected static final String USAGE =
"Usage: hdfs zkfc [ -formatZK [-force] [-nonInteractive] ]";
protected static final String USAGE =
"Usage: hdfs zkfc [ -formatZK [-force] [-nonInteractive] ]\n"
+ "\t-force: formats the znode if the znode exists.\n"
+ "\t-nonInteractive: formats the znode aborts if the znode exists,\n"
+ "\tunless -force option is specified.";
/** Unable to format the parent znode in ZK */
static final int ERR_CODE_FORMAT_DENIED = 2;

View File

@ -17,6 +17,10 @@
*/
package org.apache.hadoop.http;
import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InterruptedIOException;
@ -45,7 +49,10 @@
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.sun.jersey.spi.container.servlet.ServletContainer;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
@ -53,15 +60,17 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.ConfServlet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.IntegerRanges;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.security.AuthenticationFilterInitializer;
import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
import org.apache.hadoop.jmx.JMXJsonServlet;
import org.apache.hadoop.log.LogLevel;
import org.apache.hadoop.security.AuthenticationFilterInitializer;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.Shell;
import org.eclipse.jetty.http.HttpVersion;
@ -90,16 +99,9 @@
import org.eclipse.jetty.servlet.ServletMapping;
import org.eclipse.jetty.util.ArrayUtil;
import org.eclipse.jetty.util.MultiException;
import org.eclipse.jetty.webapp.WebAppContext;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.sun.jersey.spi.container.servlet.ServletContainer;
import org.eclipse.jetty.util.ssl.SslContextFactory;
import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.eclipse.jetty.webapp.WebAppContext;
/**
* Create a Jetty embedded server to answer http requests. The primary goal is
@ -116,9 +118,20 @@
public final class HttpServer2 implements FilterContainer {
public static final Log LOG = LogFactory.getLog(HttpServer2.class);
public static final String HTTP_SCHEME = "http";
public static final String HTTPS_SCHEME = "https";
public static final String HTTP_MAX_REQUEST_HEADER_SIZE_KEY =
"hadoop.http.max.request.header.size";
public static final int HTTP_MAX_REQUEST_HEADER_SIZE_DEFAULT = 65536;
public static final String HTTP_MAX_RESPONSE_HEADER_SIZE_KEY =
"hadoop.http.max.response.header.size";
public static final int HTTP_MAX_RESPONSE_HEADER_SIZE_DEFAULT = 65536;
public static final String HTTP_MAX_THREADS_KEY = "hadoop.http.max.threads";
public static final String HTTP_TEMP_DIR_KEY = "hadoop.http.temp.dir";
static final String FILTER_INITIALIZER_PROPERTY
= "hadoop.http.filter.initializers";
public static final String HTTP_MAX_THREADS = "hadoop.http.max.threads";
// The ServletContext attribute where the daemon Configuration
// gets stored.
@ -139,6 +152,7 @@ public final class HttpServer2 implements FilterContainer {
protected final WebAppContext webAppContext;
protected final boolean findPort;
protected final IntegerRanges portRanges;
private final Map<ServletContextHandler, Boolean> defaultContexts =
new HashMap<>();
protected final List<String> filterNames = new ArrayList<>();
@ -158,6 +172,7 @@ public static class Builder {
private ArrayList<URI> endpoints = Lists.newArrayList();
private String name;
private Configuration conf;
private Configuration sslConf;
private String[] pathSpecs;
private AccessControlList adminsAcl;
private boolean securityEnabled = false;
@ -176,6 +191,7 @@ public static class Builder {
private String keyPassword;
private boolean findPort;
private IntegerRanges portRanges = null;
private String hostName;
private boolean disallowFallbackToRandomSignerSecretProvider;
@ -248,11 +264,25 @@ public Builder setFindPort(boolean findPort) {
return this;
}
public Builder setPortRanges(IntegerRanges ranges) {
this.portRanges = ranges;
return this;
}
public Builder setConf(Configuration conf) {
this.conf = conf;
return this;
}
/**
* Specify the SSL configuration to load. This API provides an alternative
* to keyStore/keyPassword/trustStore.
*/
public Builder setSSLConf(Configuration sslCnf) {
this.sslConf = sslCnf;
return this;
}
public Builder setPathSpec(String[] pathSpec) {
this.pathSpecs = pathSpec;
return this;
@ -315,7 +345,45 @@ public Builder setXFrameOption(String option) {
return this;
}
/**
* A wrapper of {@link Configuration#getPassword(String)}. It returns
* <code>String</code> instead of <code>char[]</code> and throws
* {@link IOException} when the password not found.
*
* @param conf the configuration
* @param name the property name
* @return the password string
*/
private static String getPassword(Configuration conf, String name)
throws IOException {
char[] passchars = conf.getPassword(name);
if (passchars == null) {
throw new IOException("Password " + name + " not found");
}
return new String(passchars);
}
/**
* Load SSL properties from the SSL configuration.
*/
private void loadSSLConfiguration() throws IOException {
if (sslConf == null) {
return;
}
needsClientAuth(sslConf.getBoolean(
SSLFactory.SSL_SERVER_NEED_CLIENT_AUTH,
SSLFactory.SSL_SERVER_NEED_CLIENT_AUTH_DEFAULT));
keyStore(sslConf.get(SSLFactory.SSL_SERVER_KEYSTORE_LOCATION),
getPassword(sslConf, SSLFactory.SSL_SERVER_KEYSTORE_PASSWORD),
sslConf.get(SSLFactory.SSL_SERVER_KEYSTORE_TYPE,
SSLFactory.SSL_SERVER_KEYSTORE_TYPE_DEFAULT));
keyPassword(getPassword(sslConf,
SSLFactory.SSL_SERVER_KEYSTORE_KEYPASSWORD));
trustStore(sslConf.get(SSLFactory.SSL_SERVER_TRUSTSTORE_LOCATION),
getPassword(sslConf, SSLFactory.SSL_SERVER_TRUSTSTORE_PASSWORD),
sslConf.get(SSLFactory.SSL_SERVER_TRUSTSTORE_TYPE,
SSLFactory.SSL_SERVER_TRUSTSTORE_TYPE_DEFAULT));
}
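
With the new hook, a daemon can hand its ssl-server.xml resource to the builder instead of wiring keystore paths one by one. A sketch; the endpoint, port and server name are chosen only for illustration:

    Configuration sslConf = new Configuration(false);
    sslConf.addResource("ssl-server.xml");   // keystore/truststore entries live here
    HttpServer2 server = new HttpServer2.Builder()
        .setName("example")
        .addEndpoint(URI.create("https://0.0.0.0:9871"))
        .setConf(conf)
        .setSSLConf(sslConf)
        .build();
    server.start();
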
public HttpServer2 build() throws IOException {
Preconditions.checkNotNull(name, "name is not set");
@ -335,15 +403,33 @@ public HttpServer2 build() throws IOException {
server.initSpnego(conf, hostName, usernameConfKey, keytabConfKey);
}
for (URI ep : endpoints) {
if (HTTPS_SCHEME.equals(ep.getScheme())) {
loadSSLConfiguration();
break;
}
}
int requestHeaderSize = conf.getInt(
HTTP_MAX_REQUEST_HEADER_SIZE_KEY,
HTTP_MAX_REQUEST_HEADER_SIZE_DEFAULT);
int responseHeaderSize = conf.getInt(
HTTP_MAX_RESPONSE_HEADER_SIZE_KEY,
HTTP_MAX_RESPONSE_HEADER_SIZE_DEFAULT);
HttpConfiguration httpConfig = new HttpConfiguration();
httpConfig.setRequestHeaderSize(requestHeaderSize);
httpConfig.setResponseHeaderSize(responseHeaderSize);
for (URI ep : endpoints) {
final ServerConnector connector;
String scheme = ep.getScheme();
if ("http".equals(scheme)) {
connector =
HttpServer2.createDefaultChannelConnector(server.webServer);
} else if ("https".equals(scheme)) {
connector = createHttpsChannelConnector(server.webServer);
if (HTTP_SCHEME.equals(scheme)) {
connector = createHttpChannelConnector(server.webServer,
httpConfig);
} else if (HTTPS_SCHEME.equals(scheme)) {
connector = createHttpsChannelConnector(server.webServer,
httpConfig);
} else {
throw new HadoopIllegalArgumentException(
"unknown scheme for endpoint:" + ep);
@ -356,16 +442,20 @@ public HttpServer2 build() throws IOException {
return server;
}
private ServerConnector createHttpsChannelConnector(Server server) {
private ServerConnector createHttpChannelConnector(
Server server, HttpConfiguration httpConfig) {
ServerConnector conn = new ServerConnector(server);
HttpConfiguration httpConfig = new HttpConfiguration();
httpConfig.setRequestHeaderSize(JettyUtils.HEADER_SIZE);
httpConfig.setResponseHeaderSize(JettyUtils.HEADER_SIZE);
httpConfig.setSecureScheme("https");
httpConfig.addCustomizer(new SecureRequestCustomizer());
ConnectionFactory connFactory = new HttpConnectionFactory(httpConfig);
conn.addConnectionFactory(connFactory);
configureChannelConnector(conn);
return conn;
}
private ServerConnector createHttpsChannelConnector(
Server server, HttpConfiguration httpConfig) {
httpConfig.setSecureScheme(HTTPS_SCHEME);
httpConfig.addCustomizer(new SecureRequestCustomizer());
ServerConnector conn = createHttpChannelConnector(server, httpConfig);
SslContextFactory sslContextFactory = new SslContextFactory();
sslContextFactory.setNeedClientAuth(needsClientAuth);
@ -397,7 +487,7 @@ private HttpServer2(final Builder b) throws IOException {
this.webServer = new Server();
this.adminsAcl = b.adminsAcl;
this.handlers = new HandlerCollection();
this.webAppContext = createWebAppContext(b.name, b.conf, adminsAcl, appDir);
this.webAppContext = createWebAppContext(b, adminsAcl, appDir);
this.xFrameOptionIsEnabled = b.xFrameEnabled;
this.xFrameOption = b.xFrameOption;
@ -414,6 +504,7 @@ private HttpServer2(final Builder b) throws IOException {
}
this.findPort = b.findPort;
this.portRanges = b.portRanges;
initializeWebServer(b.name, b.hostName, b.conf, b.pathSpecs);
}
@ -423,7 +514,7 @@ private void initializeWebServer(String name, String hostName,
Preconditions.checkNotNull(webAppContext);
int maxThreads = conf.getInt(HTTP_MAX_THREADS, -1);
int maxThreads = conf.getInt(HTTP_MAX_THREADS_KEY, -1);
// If HTTP_MAX_THREADS_KEY is not configured, QueuedThreadPool() will use the
// default value (currently 250).
@ -482,8 +573,8 @@ private void addListener(ServerConnector connector) {
listeners.add(connector);
}
private static WebAppContext createWebAppContext(String name,
Configuration conf, AccessControlList adminsAcl, final String appDir) {
private static WebAppContext createWebAppContext(Builder b,
AccessControlList adminsAcl, final String appDir) {
WebAppContext ctx = new WebAppContext();
ctx.setDefaultsDescriptor(null);
ServletHolder holder = new ServletHolder(new DefaultServlet());
@ -496,10 +587,15 @@ private static WebAppContext createWebAppContext(String name,
holder.setInitParameters(params);
ctx.setWelcomeFiles(new String[] {"index.html"});
ctx.addServlet(holder, "/");
ctx.setDisplayName(name);
ctx.setDisplayName(b.name);
ctx.setContextPath("/");
ctx.setWar(appDir + "/" + name);
ctx.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf);
ctx.setWar(appDir + "/" + b.name);
String tempDirectory = b.conf.get(HTTP_TEMP_DIR_KEY);
if (tempDirectory != null && !tempDirectory.isEmpty()) {
ctx.setTempDirectory(new File(tempDirectory));
ctx.setAttribute("javax.servlet.context.tempdir", tempDirectory);
}
ctx.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, b.conf);
ctx.getServletContext().setAttribute(ADMINS_ACL, adminsAcl);
addNoCacheFilter(ctx);
return ctx;
@ -541,18 +637,6 @@ private static void configureChannelConnector(ServerConnector c) {
}
}
@InterfaceAudience.Private
public static ServerConnector createDefaultChannelConnector(Server server) {
ServerConnector conn = new ServerConnector(server);
HttpConfiguration httpConfig = new HttpConfiguration();
httpConfig.setRequestHeaderSize(JettyUtils.HEADER_SIZE);
httpConfig.setResponseHeaderSize(JettyUtils.HEADER_SIZE);
ConnectionFactory connFactory = new HttpConnectionFactory(httpConfig);
conn.addConnectionFactory(connFactory);
configureChannelConnector(conn);
return conn;
}
/** Get an array of FilterConfiguration specified in the conf */
private static FilterInitializer[] getFilterInitializers(Configuration conf) {
if (conf == null) {
@ -1004,6 +1088,93 @@ private void loadListeners() {
}
}
/**
* Bind listener by closing and opening the listener.
* @param listener the connector to close and reopen
* @throws Exception if the listener cannot be opened
*/
private static void bindListener(ServerConnector listener) throws Exception {
// jetty has a bug where you can't reopen a listener that previously
// failed to open w/o issuing a close first, even if the port is changed
listener.close();
listener.open();
LOG.info("Jetty bound to port " + listener.getLocalPort());
}
/**
* Create bind exception by wrapping the bind exception thrown.
* @param listener the connector whose address could not be bound
* @param ex the underlying BindException, may be null
* @return a BindException naming the host and port in use
*/
private static BindException constructBindException(ServerConnector listener,
BindException ex) {
BindException be = new BindException("Port in use: "
+ listener.getHost() + ":" + listener.getPort());
if (ex != null) {
be.initCause(ex);
}
return be;
}
/**
* Bind using a single configured port. If findPort is true, keep incrementing
* the port until a free port is found.
* @param listener jetty listener.
* @param port port which is set in the listener.
* @throws Exception
*/
private void bindForSinglePort(ServerConnector listener, int port)
throws Exception {
while (true) {
try {
bindListener(listener);
break;
} catch (BindException ex) {
if (port == 0 || !findPort) {
throw constructBindException(listener, ex);
}
}
// try the next port number
listener.setPort(++port);
Thread.sleep(100);
}
}
/**
* Bind using port ranges. Keep on looking for a free port in the port range
* and throw a bind exception if no port in the configured range binds.
* @param listener jetty listener.
* @param startPort initial port which is set in the listener.
* @throws Exception
*/
private void bindForPortRange(ServerConnector listener, int startPort)
throws Exception {
BindException bindException = null;
try {
bindListener(listener);
return;
} catch (BindException ex) {
// Ignore exception.
bindException = ex;
}
for(Integer port : portRanges) {
if (port == startPort) {
continue;
}
Thread.sleep(100);
listener.setPort(port);
try {
bindListener(listener);
return;
} catch (BindException ex) {
// Ignore exception. Move to next port.
bindException = ex;
}
}
throw constructBindException(listener, bindException);
}
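
The ranges come from Configuration.IntegerRanges, so a caller can confine the listener to an allowed window. A sketch with a made-up property name; builder is an HttpServer2.Builder:

    Configuration.IntegerRanges ranges =
        conf.getRange("example.http.port-range", "50070-50080");
    builder.setPortRanges(ranges);  // openListeners() then walks the range on BindException
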
/**
* Open the main listener for the server
* @throws Exception
@ -1016,25 +1187,10 @@ void openListeners() throws Exception {
continue;
}
int port = listener.getPort();
while (true) {
// jetty has a bug where you can't reopen a listener that previously
// failed to open w/o issuing a close first, even if the port is changed
try {
listener.close();
listener.open();
LOG.info("Jetty bound to port " + listener.getLocalPort());
break;
} catch (BindException ex) {
if (port == 0 || !findPort) {
BindException be = new BindException("Port in use: "
+ listener.getHost() + ":" + listener.getPort());
be.initCause(ex);
throw be;
}
}
// try the next port number
listener.setPort(++port);
Thread.sleep(100);
if (portRanges != null && port != 0) {
bindForPortRange(listener, port);
} else {
bindForSinglePort(listener, port);
}
}
}
@ -1056,7 +1212,7 @@ public void stop() throws Exception {
}
try {
// explicitly destroy the secrete provider
// explicitly destroy the secret provider
secretProvider.destroy();
// clear & stop webAppContext attributes to avoid memory leaks.
webAppContext.clearAttributes();

View File

@ -24,6 +24,7 @@
import java.rmi.server.UID;
import java.security.MessageDigest;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.*;
import org.apache.hadoop.util.Options;
import org.apache.hadoop.fs.*;
@ -146,7 +147,7 @@
* </ul>
* </li>
* <li>
* A sync-marker every few <code>100</code> bytes or so.
* A sync-marker every few <code>100</code> kilobytes or so.
* </li>
* </ul>
*
@ -165,7 +166,7 @@
* </ul>
* </li>
* <li>
* A sync-marker every few <code>100</code> bytes or so.
* A sync-marker every few <code>100</code> kilobytes or so.
* </li>
* </ul>
*
@ -217,8 +218,11 @@ private SequenceFile() {} // no public ctor
private static final int SYNC_HASH_SIZE = 16; // number of bytes in hash
private static final int SYNC_SIZE = 4+SYNC_HASH_SIZE; // escape + hash
/** The number of bytes between sync points.*/
public static final int SYNC_INTERVAL = 100*SYNC_SIZE;
/**
* The number of bytes between sync points. 100 KB, default.
* Computed as 5 KB * 20 = 100 KB
*/
public static final int SYNC_INTERVAL = 5 * 1024 * SYNC_SIZE; // 5KB*(16+4)
/**
* The compression type used to compress key/value pairs in the
@ -856,6 +860,9 @@ public static class Writer implements java.io.Closeable, Syncable {
// starts and ends by scanning for this value.
long lastSyncPos; // position of last sync
byte[] sync; // 16 random bytes
@VisibleForTesting
int syncInterval;
{
try {
MessageDigest digester = MessageDigest.getInstance("MD5");
@ -987,7 +994,16 @@ public static Option file(Path value) {
private static Option filesystem(FileSystem fs) {
return new SequenceFile.Writer.FileSystemOption(fs);
}
private static class SyncIntervalOption extends Options.IntegerOption
implements Option {
SyncIntervalOption(int val) {
// If a negative sync interval is provided,
// fall back to the default sync interval.
super(val < 0 ? SYNC_INTERVAL : val);
}
}
public static Option bufferSize(int value) {
return new BufferSizeOption(value);
}
@ -1032,11 +1048,15 @@ public static Option compression(CompressionType value,
CompressionCodec codec) {
return new CompressionOption(value, codec);
}
public static Option syncInterval(int value) {
return new SyncIntervalOption(value);
}
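A minimal usage sketch of the new option, assuming the usual Hadoop imports (Configuration, Path, SequenceFile, LongWritable, Text), that SequenceFile.createWriter(Configuration, Writer.Option...) is available as in existing releases, and that the snippet runs in a method that may throw IOException; the path and the 2 MB interval are placeholder values.

// Sketch: pass the new syncInterval option when creating a writer.
Configuration conf = new Configuration();
Path file = new Path("/tmp/example.seq");                    // placeholder path
try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
    SequenceFile.Writer.file(file),
    SequenceFile.Writer.keyClass(LongWritable.class),
    SequenceFile.Writer.valueClass(Text.class),
    SequenceFile.Writer.syncInterval(2 * 1024 * 1024))) {    // sync roughly every 2 MB
  writer.append(new LongWritable(1L), new Text("value"));
}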
/**
* Construct an uncompressed writer from a set of options.
* @param conf the configuration to use
* @param options the options used when creating the writer
* @param opts the options used when creating the writer
* @throws IOException if it fails
*/
Writer(Configuration conf,
@ -1062,6 +1082,8 @@ public static Option compression(CompressionType value,
Options.getOption(MetadataOption.class, opts);
CompressionOption compressionTypeOption =
Options.getOption(CompressionOption.class, opts);
SyncIntervalOption syncIntervalOption =
Options.getOption(SyncIntervalOption.class, opts);
// check consistency of options
if ((fileOption == null) == (streamOption == null)) {
throw new IllegalArgumentException("file or stream must be specified");
@ -1163,7 +1185,12 @@ public static Option compression(CompressionType value,
"GzipCodec without native-hadoop " +
"code!");
}
init(conf, out, ownStream, keyClass, valueClass, codec, metadata);
this.syncInterval = (syncIntervalOption == null) ?
SYNC_INTERVAL :
syncIntervalOption.getValue();
init(
conf, out, ownStream, keyClass, valueClass,
codec, metadata, syncInterval);
}
/** Create the named file.
@ -1176,7 +1203,7 @@ public Writer(FileSystem fs, Configuration conf, Path name,
Class keyClass, Class valClass) throws IOException {
this.compress = CompressionType.NONE;
init(conf, fs.create(name), true, keyClass, valClass, null,
new Metadata());
new Metadata(), SYNC_INTERVAL);
}
/** Create the named file with write-progress reporter.
@ -1190,7 +1217,7 @@ public Writer(FileSystem fs, Configuration conf, Path name,
Progressable progress, Metadata metadata) throws IOException {
this.compress = CompressionType.NONE;
init(conf, fs.create(name, progress), true, keyClass, valClass,
null, metadata);
null, metadata, SYNC_INTERVAL);
}
/** Create the named file with write-progress reporter.
@ -1206,7 +1233,7 @@ public Writer(FileSystem fs, Configuration conf, Path name,
this.compress = CompressionType.NONE;
init(conf,
fs.create(name, true, bufferSize, replication, blockSize, progress),
true, keyClass, valClass, null, metadata);
true, keyClass, valClass, null, metadata, SYNC_INTERVAL);
}
boolean isCompressed() { return compress != CompressionType.NONE; }
@ -1234,18 +1261,21 @@ private void writeFileHeader()
/** Initialize. */
@SuppressWarnings("unchecked")
void init(Configuration conf, FSDataOutputStream out, boolean ownStream,
Class keyClass, Class valClass,
CompressionCodec codec, Metadata metadata)
void init(Configuration config, FSDataOutputStream outStream,
boolean ownStream, Class key, Class val,
CompressionCodec compCodec, Metadata meta,
int syncIntervalVal)
throws IOException {
this.conf = conf;
this.out = out;
this.conf = config;
this.out = outStream;
this.ownOutputStream = ownStream;
this.keyClass = keyClass;
this.valClass = valClass;
this.codec = codec;
this.metadata = metadata;
SerializationFactory serializationFactory = new SerializationFactory(conf);
this.keyClass = key;
this.valClass = val;
this.codec = compCodec;
this.metadata = meta;
this.syncInterval = syncIntervalVal;
SerializationFactory serializationFactory =
new SerializationFactory(config);
this.keySerializer = serializationFactory.getSerializer(keyClass);
if (this.keySerializer == null) {
throw new IOException(
@ -1366,7 +1396,7 @@ public synchronized void close() throws IOException {
synchronized void checkAndWriteSync() throws IOException {
if (sync != null &&
out.getPos() >= lastSyncPos+SYNC_INTERVAL) { // time to emit sync
out.getPos() >= lastSyncPos+this.syncInterval) { // time to emit sync
sync();
}
}
@ -2786,14 +2816,30 @@ public Sorter(FileSystem fs, RawComparator comparator, Class keyClass,
}
/** Sort and merge using an arbitrary {@link RawComparator}. */
@SuppressWarnings("deprecation")
public Sorter(FileSystem fs, RawComparator comparator, Class keyClass,
Class valClass, Configuration conf, Metadata metadata) {
this.fs = fs;
this.comparator = comparator;
this.keyClass = keyClass;
this.valClass = valClass;
this.memory = conf.getInt("io.sort.mb", 100) * 1024 * 1024;
this.factor = conf.getInt("io.sort.factor", 100);
// Fall back on the deprecated MB and Factor keys
// until they are permanently removed.
if (conf.get(CommonConfigurationKeys.IO_SORT_MB_KEY) != null) {
this.memory = conf.getInt(CommonConfigurationKeys.IO_SORT_MB_KEY,
CommonConfigurationKeys.SEQ_IO_SORT_MB_DEFAULT) * 1024 * 1024;
} else {
this.memory = conf.getInt(CommonConfigurationKeys.SEQ_IO_SORT_MB_KEY,
CommonConfigurationKeys.SEQ_IO_SORT_MB_DEFAULT) * 1024 * 1024;
}
if (conf.get(CommonConfigurationKeys.IO_SORT_FACTOR_KEY) != null) {
this.factor = conf.getInt(CommonConfigurationKeys.IO_SORT_FACTOR_KEY,
CommonConfigurationKeys.SEQ_IO_SORT_FACTOR_DEFAULT);
} else {
this.factor = conf.getInt(
CommonConfigurationKeys.SEQ_IO_SORT_FACTOR_KEY,
CommonConfigurationKeys.SEQ_IO_SORT_FACTOR_DEFAULT);
}
this.conf = conf;
this.metadata = metadata;
}
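As a hedged configuration sketch, the new keys can be set before constructing a Sorter; fs and comparator are assumed to exist, the numeric values are placeholders, and the key constants are the same ones referenced in the constructor above.

// Sketch: configure the sorter via the new SEQ_* keys.
Configuration conf = new Configuration();
conf.setInt(CommonConfigurationKeys.SEQ_IO_SORT_MB_KEY, 200);      // 200 MB sort buffer
conf.setInt(CommonConfigurationKeys.SEQ_IO_SORT_FACTOR_KEY, 50);   // 50-way merge factor
// The deprecated io.sort.mb / io.sort.factor keys, if present, still take precedence.
SequenceFile.Sorter sorter = new SequenceFile.Sorter(
    fs, comparator, LongWritable.class, Text.class, conf, new SequenceFile.Metadata());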

View File

@ -95,7 +95,7 @@ public interface Decompressor {
* @param b Buffer for the compressed data
* @param off Start offset of the data
* @param len Size of the buffer
* @return The actual number of bytes of compressed data.
* @return The actual number of bytes of uncompressed data.
* @throws IOException
*/
public int decompress(byte[] b, int off, int len) throws IOException;

View File

@ -18,18 +18,21 @@
package org.apache.hadoop.io.compress;
import java.io.*;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.util.zip.GZIPOutputStream;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.DefaultCodec;
import org.apache.hadoop.io.compress.zlib.*;
import org.apache.hadoop.io.compress.zlib.ZlibDecompressor.ZlibDirectDecompressor;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
import org.apache.hadoop.io.compress.zlib.BuiltInGzipDecompressor;
import org.apache.hadoop.io.compress.zlib.ZlibCompressor;
import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;
import org.apache.hadoop.io.compress.zlib.ZlibFactory;
/**
* This class creates gzip compressors/decompressors.
@ -45,10 +48,6 @@ public class GzipCodec extends DefaultCodec {
protected static class GzipOutputStream extends CompressorStream {
private static class ResetableGZIPOutputStream extends GZIPOutputStream {
private static final int TRAILER_SIZE = 8;
public static final String JVMVersion= System.getProperty("java.version");
private static final boolean HAS_BROKEN_FINISH =
(IBM_JAVA && JVMVersion.contains("1.6.0"));
public ResetableGZIPOutputStream(OutputStream out) throws IOException {
super(out);
@ -57,61 +56,6 @@ public ResetableGZIPOutputStream(OutputStream out) throws IOException {
public void resetState() throws IOException {
def.reset();
}
/**
* Override this method for HADOOP-8419.
* Override because IBM implementation calls def.end() which
* causes problem when reseting the stream for reuse.
*
*/
@Override
public void finish() throws IOException {
if (HAS_BROKEN_FINISH) {
if (!def.finished()) {
def.finish();
while (!def.finished()) {
int i = def.deflate(this.buf, 0, this.buf.length);
if ((def.finished()) && (i <= this.buf.length - TRAILER_SIZE)) {
writeTrailer(this.buf, i);
i += TRAILER_SIZE;
out.write(this.buf, 0, i);
return;
}
if (i > 0) {
out.write(this.buf, 0, i);
}
}
byte[] arrayOfByte = new byte[TRAILER_SIZE];
writeTrailer(arrayOfByte, 0);
out.write(arrayOfByte);
}
} else {
super.finish();
}
}
/** re-implement for HADOOP-8419 because the relative method in jdk is invisible */
private void writeTrailer(byte[] paramArrayOfByte, int paramInt)
throws IOException {
writeInt((int)this.crc.getValue(), paramArrayOfByte, paramInt);
writeInt(this.def.getTotalIn(), paramArrayOfByte, paramInt + 4);
}
/** re-implement for HADOOP-8419 because the relative method in jdk is invisible */
private void writeInt(int paramInt1, byte[] paramArrayOfByte, int paramInt2)
throws IOException {
writeShort(paramInt1 & 0xFFFF, paramArrayOfByte, paramInt2);
writeShort(paramInt1 >> 16 & 0xFFFF, paramArrayOfByte, paramInt2 + 2);
}
/** re-implement for HADOOP-8419 because the relative method in jdk is invisible */
private void writeShort(int paramInt1, byte[] paramArrayOfByte, int paramInt2)
throws IOException {
paramArrayOfByte[paramInt2] = (byte)(paramInt1 & 0xFF);
paramArrayOfByte[(paramInt2 + 1)] = (byte)(paramInt1 >> 8 & 0xFF);
}
}
public GzipOutputStream(OutputStream out) throws IOException {

View File

@ -0,0 +1,242 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.compress.zstd.ZStandardCompressor;
import org.apache.hadoop.io.compress.zstd.ZStandardDecompressor;
import org.apache.hadoop.util.NativeCodeLoader;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_KEY;
/**
* This class creates zstd compressors/decompressors.
*/
public class ZStandardCodec implements
Configurable, CompressionCodec, DirectDecompressionCodec {
private Configuration conf;
/**
* Set the configuration to be used by this object.
*
* @param conf the configuration object.
*/
@Override
public void setConf(Configuration conf) {
this.conf = conf;
}
/**
* Return the configuration used by this object.
*
* @return the configuration object used by this object.
*/
@Override
public Configuration getConf() {
return conf;
}
public static void checkNativeCodeLoaded() {
if (!NativeCodeLoader.isNativeCodeLoaded() ||
!NativeCodeLoader.buildSupportsZstd()) {
throw new RuntimeException("native zStandard library "
+ "not available: this version of libhadoop was built "
+ "without zstd support.");
}
if (!ZStandardCompressor.isNativeCodeLoaded()) {
throw new RuntimeException("native zStandard library not "
+ "available: ZStandardCompressor has not been loaded.");
}
if (!ZStandardDecompressor.isNativeCodeLoaded()) {
throw new RuntimeException("native zStandard library not "
+ "available: ZStandardDecompressor has not been loaded.");
}
}
public static boolean isNativeCodeLoaded() {
return ZStandardCompressor.isNativeCodeLoaded()
&& ZStandardDecompressor.isNativeCodeLoaded();
}
public static String getLibraryName() {
return ZStandardCompressor.getLibraryName();
}
public static int getCompressionLevel(Configuration conf) {
return conf.getInt(
CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_KEY,
CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_DEFAULT);
}
public static int getCompressionBufferSize(Configuration conf) {
int bufferSize = getBufferSize(conf);
return bufferSize == 0 ?
ZStandardCompressor.getRecommendedBufferSize() :
bufferSize;
}
public static int getDecompressionBufferSize(Configuration conf) {
int bufferSize = getBufferSize(conf);
return bufferSize == 0 ?
ZStandardDecompressor.getRecommendedBufferSize() :
bufferSize;
}
private static int getBufferSize(Configuration conf) {
return conf.getInt(IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_KEY,
IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_DEFAULT);
}
/**
* Create a {@link CompressionOutputStream} that will write to the given
* {@link OutputStream}.
*
* @param out the location for the final output stream
* @return a stream the user can write uncompressed data to have compressed
* @throws IOException
*/
@Override
public CompressionOutputStream createOutputStream(OutputStream out)
throws IOException {
return Util.
createOutputStreamWithCodecPool(this, conf, out);
}
/**
* Create a {@link CompressionOutputStream} that will write to the given
* {@link OutputStream} with the given {@link Compressor}.
*
* @param out the location for the final output stream
* @param compressor compressor to use
* @return a stream the user can write uncompressed data to have compressed
* @throws IOException
*/
@Override
public CompressionOutputStream createOutputStream(OutputStream out,
Compressor compressor)
throws IOException {
checkNativeCodeLoaded();
return new CompressorStream(out, compressor,
getCompressionBufferSize(conf));
}
/**
* Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
*
* @return the type of compressor needed by this codec.
*/
@Override
public Class<? extends Compressor> getCompressorType() {
checkNativeCodeLoaded();
return ZStandardCompressor.class;
}
/**
* Create a new {@link Compressor} for use by this {@link CompressionCodec}.
*
* @return a new compressor for use by this codec
*/
@Override
public Compressor createCompressor() {
checkNativeCodeLoaded();
return new ZStandardCompressor(
getCompressionLevel(conf), getCompressionBufferSize(conf));
}
/**
* Create a {@link CompressionInputStream} that will read from the given
* input stream.
*
* @param in the stream to read compressed bytes from
* @return a stream to read uncompressed bytes from
* @throws IOException
*/
@Override
public CompressionInputStream createInputStream(InputStream in)
throws IOException {
return Util.
createInputStreamWithCodecPool(this, conf, in);
}
/**
* Create a {@link CompressionInputStream} that will read from the given
* {@link InputStream} with the given {@link Decompressor}.
*
* @param in the stream to read compressed bytes from
* @param decompressor decompressor to use
* @return a stream to read uncompressed bytes from
* @throws IOException
*/
@Override
public CompressionInputStream createInputStream(InputStream in,
Decompressor decompressor)
throws IOException {
checkNativeCodeLoaded();
return new DecompressorStream(in, decompressor,
getDecompressionBufferSize(conf));
}
/**
* Get the type of {@link Decompressor} needed by
* this {@link CompressionCodec}.
*
* @return the type of decompressor needed by this codec.
*/
@Override
public Class<? extends Decompressor> getDecompressorType() {
checkNativeCodeLoaded();
return ZStandardDecompressor.class;
}
/**
* Create a new {@link Decompressor} for use by this {@link CompressionCodec}.
*
* @return a new decompressor for use by this codec
*/
@Override
public Decompressor createDecompressor() {
checkNativeCodeLoaded();
return new ZStandardDecompressor(getDecompressionBufferSize(conf));
}
/**
* Get the default filename extension for this kind of compression.
*
* @return <code>.zst</code>.
*/
@Override
public String getDefaultExtension() {
return ".zst";
}
@Override
public DirectDecompressor createDirectDecompressor() {
return new ZStandardDecompressor.ZStandardDirectDecompressor(
getDecompressionBufferSize(conf)
);
}
}
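A hedged usage sketch of the codec: it assumes a libhadoop build with zstd support, the usual java.io and Hadoop imports, and that the snippet runs in a method that may throw IOException. The compression level and output path are placeholder values, and the key constant is the one referenced above.

// Sketch: compress a small payload with ZStandardCodec.
Configuration conf = new Configuration();
conf.setInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_KEY, 3);  // placeholder level
ZStandardCodec codec = new ZStandardCodec();
codec.setConf(conf);
try (OutputStream raw = new FileOutputStream("/tmp/data.zst");                // placeholder path
     CompressionOutputStream out = codec.createOutputStream(raw)) {
  out.write("hello zstd".getBytes(StandardCharsets.UTF_8));
  out.finish();                        // flush the final compressed frame
}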

View File

@ -0,0 +1,305 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress.zstd;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.ZStandardCodec;
import org.apache.hadoop.util.NativeCodeLoader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.ByteBuffer;
/**
* A {@link Compressor} based on the zStandard compression algorithm.
* https://github.com/facebook/zstd
*/
public class ZStandardCompressor implements Compressor {
private static final Logger LOG =
LoggerFactory.getLogger(ZStandardCompressor.class);
private long stream;
private int level;
private int directBufferSize;
private byte[] userBuf = null;
private int userBufOff = 0, userBufLen = 0;
private ByteBuffer uncompressedDirectBuf = null;
private int uncompressedDirectBufOff = 0, uncompressedDirectBufLen = 0;
private boolean keepUncompressedBuf = false;
private ByteBuffer compressedDirectBuf = null;
private boolean finish, finished;
private long bytesRead = 0;
private long bytesWritten = 0;
private static boolean nativeZStandardLoaded = false;
static {
if (NativeCodeLoader.isNativeCodeLoaded()) {
try {
// Initialize the native library
initIDs();
nativeZStandardLoaded = true;
} catch (Throwable t) {
LOG.warn("Error loading zstandard native libraries: " + t);
}
}
}
public static boolean isNativeCodeLoaded() {
return nativeZStandardLoaded;
}
public static int getRecommendedBufferSize() {
return getStreamSize();
}
@VisibleForTesting
ZStandardCompressor() {
this(CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_DEFAULT,
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
}
/**
* Creates a new compressor with the given compression level and buffer size.
* Compressed data will be generated in the ZStandard format.
*/
public ZStandardCompressor(int level, int bufferSize) {
this(level, bufferSize, bufferSize);
}
@VisibleForTesting
ZStandardCompressor(int level, int inputBufferSize, int outputBufferSize) {
this.level = level;
stream = create();
this.directBufferSize = outputBufferSize;
uncompressedDirectBuf = ByteBuffer.allocateDirect(inputBufferSize);
compressedDirectBuf = ByteBuffer.allocateDirect(outputBufferSize);
compressedDirectBuf.position(outputBufferSize);
reset();
}
/**
* Prepare the compressor to be used in a new stream with settings defined in
* the given Configuration. It will reset the compressor's compression level
* and compression strategy.
*
* @param conf Configuration storing new settings
*/
@Override
public void reinit(Configuration conf) {
if (conf == null) {
return;
}
level = ZStandardCodec.getCompressionLevel(conf);
reset();
LOG.debug("Reinit compressor with new compression configuration");
}
@Override
public void setInput(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
this.userBuf = b;
this.userBufOff = off;
this.userBufLen = len;
uncompressedDirectBufOff = 0;
setInputFromSavedData();
compressedDirectBuf.limit(directBufferSize);
compressedDirectBuf.position(directBufferSize);
}
//copy enough data from userBuf to uncompressedDirectBuf
private void setInputFromSavedData() {
int len = Math.min(userBufLen, uncompressedDirectBuf.remaining());
uncompressedDirectBuf.put(userBuf, userBufOff, len);
userBufLen -= len;
userBufOff += len;
uncompressedDirectBufLen = uncompressedDirectBuf.position();
}
@Override
public void setDictionary(byte[] b, int off, int len) {
throw new UnsupportedOperationException(
"Dictionary support is not enabled");
}
@Override
public boolean needsInput() {
// Consume remaining compressed data?
if (compressedDirectBuf.remaining() > 0) {
return false;
}
// have we consumed all input
if (keepUncompressedBuf && uncompressedDirectBufLen > 0) {
return false;
}
if (uncompressedDirectBuf.remaining() > 0) {
// Check if we have consumed all user-input
if (userBufLen <= 0) {
return true;
} else {
// copy enough data from userBuf to uncompressedDirectBuf
setInputFromSavedData();
// uncompressedDirectBuf is not full
return uncompressedDirectBuf.remaining() > 0;
}
}
return false;
}
@Override
public void finish() {
finish = true;
}
@Override
public boolean finished() {
// Check if 'zstd' says it is 'finished' and all compressed
// data has been consumed
return (finished && compressedDirectBuf.remaining() == 0);
}
@Override
public int compress(byte[] b, int off, int len) throws IOException {
checkStream();
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
// Check if there is compressed data
int n = compressedDirectBuf.remaining();
if (n > 0) {
n = Math.min(n, len);
compressedDirectBuf.get(b, off, n);
return n;
}
// Re-initialize the output direct buffer
compressedDirectBuf.rewind();
compressedDirectBuf.limit(directBufferSize);
// Compress data
n = deflateBytesDirect(
uncompressedDirectBuf,
uncompressedDirectBufOff,
uncompressedDirectBufLen,
compressedDirectBuf,
directBufferSize
);
compressedDirectBuf.limit(n);
// Check if we have consumed all input buffer
if (uncompressedDirectBufLen <= 0) {
// consumed all input buffer
keepUncompressedBuf = false;
uncompressedDirectBuf.clear();
uncompressedDirectBufOff = 0;
uncompressedDirectBufLen = 0;
} else {
// did not consume all input buffer
keepUncompressedBuf = true;
}
// Get at most 'len' bytes
n = Math.min(n, len);
compressedDirectBuf.get(b, off, n);
return n;
}
/**
* Returns the total number of compressed bytes output so far.
*
* @return the total (non-negative) number of compressed bytes output so far
*/
@Override
public long getBytesWritten() {
checkStream();
return bytesWritten;
}
/**
* <p>Returns the total number of uncompressed bytes input so far.</p>
*
* @return the total (non-negative) number of uncompressed bytes input so far
*/
@Override
public long getBytesRead() {
checkStream();
return bytesRead;
}
@Override
public void reset() {
checkStream();
init(level, stream);
finish = false;
finished = false;
bytesRead = 0;
bytesWritten = 0;
uncompressedDirectBuf.rewind();
uncompressedDirectBufOff = 0;
uncompressedDirectBufLen = 0;
keepUncompressedBuf = false;
compressedDirectBuf.limit(directBufferSize);
compressedDirectBuf.position(directBufferSize);
userBufOff = 0;
userBufLen = 0;
}
@Override
public void end() {
if (stream != 0) {
end(stream);
stream = 0;
}
}
private void checkStream() {
if (stream == 0) {
throw new NullPointerException();
}
}
private native static long create();
private native static void init(int level, long stream);
private native int deflateBytesDirect(ByteBuffer src, int srcOffset,
int srcLen, ByteBuffer dst, int dstLen);
private static native int getStreamSize();
private native static void end(long strm);
private native static void initIDs();
public native static String getLibraryName();
}
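For reference, a minimal sketch of the standard Compressor driving loop (setInput, finish, then compress until finished) applied to this class; the payload, level, and buffer size are placeholders, compress() may throw IOException, and in real code the instance would normally be obtained from CodecPool.

// Sketch: drive a ZStandardCompressor by hand.
byte[] input = "example payload".getBytes(StandardCharsets.UTF_8);
byte[] buffer = new byte[64 * 1024];
ByteArrayOutputStream compressed = new ByteArrayOutputStream();
Compressor compressor = new ZStandardCompressor(3, 64 * 1024);   // level, buffer size
compressor.setInput(input, 0, input.length);
compressor.finish();                              // no further input will be provided
while (!compressor.finished()) {
  int n = compressor.compress(buffer, 0, buffer.length);
  compressed.write(buffer, 0, n);                 // collect the compressed bytes
}
compressor.end();                                 // release the native stream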

View File

@ -0,0 +1,323 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.io.compress.zstd;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DirectDecompressor;
import org.apache.hadoop.util.NativeCodeLoader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.ByteBuffer;
/**
* A {@link Decompressor} based on the zStandard compression algorithm.
* https://github.com/facebook/zstd
*/
public class ZStandardDecompressor implements Decompressor {
private static final Logger LOG =
LoggerFactory.getLogger(ZStandardDecompressor.class);
private long stream;
private int directBufferSize;
private ByteBuffer compressedDirectBuf = null;
private int compressedDirectBufOff, bytesInCompressedBuffer;
private ByteBuffer uncompressedDirectBuf = null;
private byte[] userBuf = null;
private int userBufOff = 0, userBufferBytesToConsume = 0;
private boolean finished;
private int remaining = 0;
private static boolean nativeZStandardLoaded = false;
static {
if (NativeCodeLoader.isNativeCodeLoaded()) {
try {
// Initialize the native library
initIDs();
nativeZStandardLoaded = true;
} catch (Throwable t) {
LOG.warn("Error loading zstandard native libraries: " + t);
}
}
}
public static boolean isNativeCodeLoaded() {
return nativeZStandardLoaded;
}
public static int getRecommendedBufferSize() {
return getStreamSize();
}
public ZStandardDecompressor() {
this(getStreamSize());
}
/**
* Creates a new decompressor with the given direct buffer size.
*/
public ZStandardDecompressor(int bufferSize) {
this.directBufferSize = bufferSize;
compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
uncompressedDirectBuf.position(directBufferSize);
stream = create();
reset();
}
@Override
public void setInput(byte[] b, int off, int len) {
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
this.userBuf = b;
this.userBufOff = off;
this.userBufferBytesToConsume = len;
setInputFromSavedData();
uncompressedDirectBuf.limit(directBufferSize);
uncompressedDirectBuf.position(directBufferSize);
}
private void setInputFromSavedData() {
compressedDirectBufOff = 0;
bytesInCompressedBuffer = userBufferBytesToConsume;
if (bytesInCompressedBuffer > directBufferSize) {
bytesInCompressedBuffer = directBufferSize;
}
compressedDirectBuf.rewind();
compressedDirectBuf.put(
userBuf, userBufOff, bytesInCompressedBuffer);
userBufOff += bytesInCompressedBuffer;
userBufferBytesToConsume -= bytesInCompressedBuffer;
}
// dictionary is not supported
@Override
public void setDictionary(byte[] b, int off, int len) {
throw new UnsupportedOperationException(
"Dictionary support is not enabled");
}
@Override
public boolean needsInput() {
// Is there uncompressed output still waiting to be consumed?
if (uncompressedDirectBuf.remaining() > 0) {
return false;
}
// Check if we have consumed all input
if (bytesInCompressedBuffer - compressedDirectBufOff <= 0) {
// Check if we have consumed all user-input
if (userBufferBytesToConsume <= 0) {
return true;
} else {
setInputFromSavedData();
}
}
return false;
}
// dictionary is not supported.
@Override
public boolean needsDictionary() {
return false;
}
@Override
public boolean finished() {
// finished == true if ZSTD_decompressStream() returns 0
// also check we have nothing left in our buffer
return (finished && uncompressedDirectBuf.remaining() == 0);
}
@Override
public int decompress(byte[] b, int off, int len)
throws IOException {
checkStream();
if (b == null) {
throw new NullPointerException();
}
if (off < 0 || len < 0 || off > b.length - len) {
throw new ArrayIndexOutOfBoundsException();
}
// Check if there is uncompressed data
int n = uncompressedDirectBuf.remaining();
if (n > 0) {
return populateUncompressedBuffer(b, off, len, n);
}
// Re-initialize the output direct buffer
uncompressedDirectBuf.rewind();
uncompressedDirectBuf.limit(directBufferSize);
// Decompress data
n = inflateBytesDirect(
compressedDirectBuf,
compressedDirectBufOff,
bytesInCompressedBuffer,
uncompressedDirectBuf,
0,
directBufferSize
);
uncompressedDirectBuf.limit(n);
// Get at most 'len' bytes
return populateUncompressedBuffer(b, off, len, n);
}
/**
* <p>Returns the number of bytes remaining in the input buffers;
* normally called when finished() is true to determine amount of post-stream
* data.</p>
*
* @return the total (non-negative) number of unprocessed bytes in input
*/
@Override
public int getRemaining() {
checkStream();
// userBuf + compressedDirectBuf
return userBufferBytesToConsume + remaining;
}
/**
* Resets everything including the input buffers (user and direct).
*/
@Override
public void reset() {
checkStream();
init(stream);
remaining = 0;
finished = false;
compressedDirectBufOff = 0;
bytesInCompressedBuffer = 0;
uncompressedDirectBuf.limit(directBufferSize);
uncompressedDirectBuf.position(directBufferSize);
userBufOff = 0;
userBufferBytesToConsume = 0;
}
@Override
public void end() {
if (stream != 0) {
free(stream);
stream = 0;
}
}
@Override
protected void finalize() {
reset();
}
private void checkStream() {
if (stream == 0) {
throw new NullPointerException("Stream not initialized");
}
}
private int populateUncompressedBuffer(byte[] b, int off, int len, int n) {
n = Math.min(n, len);
uncompressedDirectBuf.get(b, off, n);
return n;
}
private native static void initIDs();
private native static long create();
private native static void init(long stream);
private native int inflateBytesDirect(ByteBuffer src, int srcOffset,
int srcLen, ByteBuffer dst, int dstOffset, int dstLen);
private native static void free(long strm);
private native static int getStreamSize();
int inflateDirect(ByteBuffer src, ByteBuffer dst) throws IOException {
assert
(this instanceof ZStandardDecompressor.ZStandardDirectDecompressor);
int originalPosition = dst.position();
int n = inflateBytesDirect(
src, src.position(), src.remaining(), dst, dst.position(),
dst.remaining()
);
dst.position(originalPosition + n);
if (bytesInCompressedBuffer > 0) {
src.position(compressedDirectBufOff);
} else {
src.position(src.limit());
}
return n;
}
/**
* A {@link DirectDecompressor} for ZStandard
* https://github.com/facebook/zstd.
*/
public static class ZStandardDirectDecompressor
extends ZStandardDecompressor implements DirectDecompressor {
public ZStandardDirectDecompressor(int directBufferSize) {
super(directBufferSize);
}
@Override
public boolean finished() {
return (endOfInput && super.finished());
}
@Override
public void reset() {
super.reset();
endOfInput = true;
}
private boolean endOfInput;
@Override
public void decompress(ByteBuffer src, ByteBuffer dst)
throws IOException {
assert dst.isDirect() : "dst.isDirect()";
assert src.isDirect() : "src.isDirect()";
assert dst.remaining() > 0 : "dst.remaining() > 0";
this.inflateDirect(src, dst);
endOfInput = !src.hasRemaining();
}
@Override
public void setDictionary(byte[] b, int off, int len) {
throw new UnsupportedOperationException(
"byte[] arrays are not supported for DirectDecompressor");
}
@Override
public int decompress(byte[] b, int off, int len) {
throw new UnsupportedOperationException(
"byte[] arrays are not supported for DirectDecompressor");
}
}
}
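And a matching sketch for the direct path; it assumes src is a direct ByteBuffer that has already been filled with a complete zstd frame and flipped for reading, that the buffer sizes are placeholders, and that decompress() may throw IOException.

// Sketch: one-shot direct decompression with ZStandardDirectDecompressor.
ByteBuffer src = ByteBuffer.allocateDirect(64 * 1024);    // assumed filled and flipped elsewhere
ByteBuffer dst = ByteBuffer.allocateDirect(256 * 1024);
ZStandardDecompressor.ZStandardDirectDecompressor decompressor =
    new ZStandardDecompressor.ZStandardDirectDecompressor(64 * 1024);
while (src.hasRemaining() && !decompressor.finished()) {
  decompressor.decompress(src, dst);              // appends uncompressed bytes to dst
}
dst.flip();                                       // dst is now ready to be read
decompressor.end();                               // release the native stream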

View File

@ -0,0 +1,22 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
package org.apache.hadoop.io.compress.zstd;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

View File

@ -55,9 +55,9 @@ public final class CodecUtil {
public static final String IO_ERASURECODE_CODEC_XOR =
XORErasureCodec.class.getCanonicalName();
/** Erasure coder Reed-Solomon codec. */
public static final String IO_ERASURECODE_CODEC_RS_DEFAULT_KEY =
public static final String IO_ERASURECODE_CODEC_RS_KEY =
"io.erasurecode.codec.rs";
public static final String IO_ERASURECODE_CODEC_RS_DEFAULT =
public static final String IO_ERASURECODE_CODEC_RS =
RSErasureCodec.class.getCanonicalName();
/** Erasure coder hitch hiker XOR codec. */
public static final String IO_ERASURECODE_CODEC_HHXOR_KEY =
@ -67,10 +67,10 @@ public final class CodecUtil {
/** Supported erasure codec classes. */
/** Raw coder factory for the RS default codec. */
public static final String IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY =
"io.erasurecode.codec.rs-default.rawcoder";
public static final String IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_DEFAULT =
/** Raw coder factory for the RS codec. */
public static final String IO_ERASURECODE_CODEC_RS_RAWCODER_KEY =
"io.erasurecode.codec.rs.rawcoder";
public static final String IO_ERASURECODE_CODEC_RS_RAWCODER_DEFAULT =
RSRawErasureCoderFactory.class.getCanonicalName();
/** Raw coder factory for the RS legacy codec. */
@ -183,10 +183,10 @@ private static RawErasureCoderFactory createRawCoderFactory(
private static String getRawCoderFactNameFromCodec(Configuration conf,
String codec) {
switch (codec) {
case ErasureCodeConstants.RS_DEFAULT_CODEC_NAME:
case ErasureCodeConstants.RS_CODEC_NAME:
return conf.get(
IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_DEFAULT);
IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
IO_ERASURECODE_CODEC_RS_RAWCODER_DEFAULT);
case ErasureCodeConstants.RS_LEGACY_CODEC_NAME:
return conf.get(
IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODER_KEY,
@ -233,15 +233,15 @@ private static ErasureCodec createCodec(Configuration conf,
private static String getCodecClassName(Configuration conf, String codec) {
switch (codec) {
case ErasureCodeConstants.RS_DEFAULT_CODEC_NAME:
case ErasureCodeConstants.RS_CODEC_NAME:
return conf.get(
CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_KEY,
CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT);
CodecUtil.IO_ERASURECODE_CODEC_RS_KEY,
CodecUtil.IO_ERASURECODE_CODEC_RS);
case ErasureCodeConstants.RS_LEGACY_CODEC_NAME:
//TODO:rs-legacy should be handled differently.
return conf.get(
CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_KEY,
CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT);
CodecUtil.IO_ERASURECODE_CODEC_RS_KEY,
CodecUtil.IO_ERASURECODE_CODEC_RS);
case ErasureCodeConstants.XOR_CODEC_NAME:
return conf.get(
CodecUtil.IO_ERASURECODE_CODEC_XOR_KEY,

View File

@ -25,17 +25,23 @@ public final class ErasureCodeConstants {
private ErasureCodeConstants() {
}
public static final String RS_DEFAULT_CODEC_NAME = "rs-default";
public static final String RS_CODEC_NAME = "rs";
public static final String RS_LEGACY_CODEC_NAME = "rs-legacy";
public static final String XOR_CODEC_NAME = "xor";
public static final String HHXOR_CODEC_NAME = "hhxor";
public static final ECSchema RS_6_3_SCHEMA = new ECSchema(
RS_DEFAULT_CODEC_NAME, 6, 3);
RS_CODEC_NAME, 6, 3);
public static final ECSchema RS_3_2_SCHEMA = new ECSchema(
RS_DEFAULT_CODEC_NAME, 3, 2);
RS_CODEC_NAME, 3, 2);
public static final ECSchema RS_6_3_LEGACY_SCHEMA = new ECSchema(
RS_LEGACY_CODEC_NAME, 6, 3);
public static final ECSchema XOR_2_1_SCHEMA = new ECSchema(
XOR_CODEC_NAME, 2, 1);
public static final ECSchema RS_10_4_SCHEMA = new ECSchema(
RS_CODEC_NAME, 10, 4);
}
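To illustrate the renamed codec, a hedged sketch of wiring the "rs" raw coder through configuration; it assumes ErasureCoderOptions takes (data units, parity units) and that CodecUtil.createRawEncoder accepts (conf, codec name, options), as the calls elsewhere in this diff suggest.

// Sketch: select a raw coder factory for the renamed "rs" codec and build
// an RS(6,3) encoder. The factory shown is the default one from CodecUtil.
Configuration conf = new Configuration();
conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
    CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODER_DEFAULT);
ErasureCoderOptions options = new ErasureCoderOptions(6, 3);   // data, parity
RawErasureEncoder encoder = CodecUtil.createRawEncoder(
    conf, ErasureCodeConstants.RS_CODEC_NAME, options);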

View File

@ -67,7 +67,7 @@ protected ErasureCodingStep prepareDecodingStep(
private RawErasureDecoder checkCreateRSRawDecoder() {
if (rsRawDecoder == null) {
rsRawDecoder = CodecUtil.createRawDecoder(getConf(),
ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, getOptions());
ErasureCodeConstants.RS_CODEC_NAME, getOptions());
}
return rsRawDecoder;
}

View File

@ -61,7 +61,7 @@ protected ErasureCodingStep prepareEncodingStep(
private RawErasureEncoder checkCreateRSRawEncoder() {
if (rsRawEncoder == null) {
rsRawEncoder = CodecUtil.createRawEncoder(getConf(),
ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, getOptions());
ErasureCodeConstants.RS_CODEC_NAME, getOptions());
}
return rsRawEncoder;
}

Some files were not shown because too many files have changed in this diff.