Merge r1432789 through r1437840 from trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1437843 13f79535-47bb-0310-9956-ffa450edef68
commit 74d1e5c302
@ -980,12 +980,12 @@ fi
(( RESULT = RESULT + $JAVAC_RET ))
checkJavadocWarnings
(( RESULT = RESULT + $? ))
checkEclipseGeneration
(( RESULT = RESULT + $? ))
### Checkstyle not implemented yet
#checkStyle
#(( RESULT = RESULT + $? ))
buildAndInstall
checkEclipseGeneration
(( RESULT = RESULT + $? ))
checkFindbugsWarnings
(( RESULT = RESULT + $? ))
checkReleaseAuditWarnings
@ -146,6 +146,9 @@ Trunk (Unreleased)
HADOOP-9162. Add utility to check native library availability.
(Binglin Chang via suresh)
HADOOP-8924. Add maven plugin alternative to shell script to save
package-info.java. (Chris Nauroth via suresh)
BUG FIXES
HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. (Yu Li via eyang)
@ -308,10 +311,13 @@ Trunk (Unreleased)
HADOOP-9131. Turn off TestLocalFileSystem#testListStatusWithColons on
Windows. (Chris Nauroth via suresh)
HADOOP-8957 AbstractFileSystem#IsValidName should be overridden for
HADOOP-8957. AbstractFileSystem#IsValidName should be overridden for
embedded file systems like ViewFs (Chris Nauroth via Sanjay Radia)
HADOOP-9139 improve killKdc.sh (Ivan A. Veselovsky via bobby)
HADOOP-9139. improve killKdc.sh (Ivan A. Veselovsky via bobby)
HADOOP-9202. test-patch.sh fails during mvn eclipse:eclipse if patch adds
a new module to the build (Chris Nauroth via bobby)
OPTIMIZATIONS
@ -323,6 +329,8 @@ Release 2.0.3-alpha - Unreleased
INCOMPATIBLE CHANGES
HADOOP-8999. SASL negotiation is flawed (daryn)
NEW FEATURES
HADOOP-8597. Permit FsShell's text command to read Avro files.
@ -433,6 +441,18 @@ Release 2.0.3-alpha - Unreleased
HADOOP-9192. Move token related request/response messages to common.
(suresh)
HADOOP-8712. Change default hadoop.security.group.mapping to
JniBasedUnixGroupsNetgroupMappingWithFallback (Robert Parker via todd)
HADOOP-9106. Allow configuration of IPC connect timeout.
(Rober Parker via suresh)
HADOOP-9216. CompressionCodecFactory#getCodecClasses should trim the
result of parsing by Configuration. (Tsuyoshi Ozawa via todd)
HADOOP-9231. Parametrize staging URL for the uniformity of
distributionManagement. (Konstantin Boudnik via suresh)
OPTIMIZATIONS
HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang
@ -493,8 +513,6 @@ Release 2.0.3-alpha - Unreleased
HADOOP-7115. Add a cache for getpwuid_r and getpwgid_r calls (tucu)
HADOOP-8999. SASL negotiation is flawed (daryn)
HADOOP-6607. Add different variants of non caching HTTP headers. (tucu)
HADOOP-9049. DelegationTokenRenewer needs to be Singleton and FileSystems
@ -537,6 +555,23 @@ Release 2.0.3-alpha - Unreleased
HADOOP-9183. Potential deadlock in ActiveStandbyElector. (tomwhite)
HADOOP-9203. RPCCallBenchmark should find a random available port.
(Andrew Purtell via suresh)
HADOOP-9178. src/main/conf is missing hadoop-policy.xml.
(Sandy Ryza via eli)
HADOOP-8816. HTTP Error 413 full HEAD if using kerberos authentication.
(moritzmoeller via tucu)
HADOOP-9212. Potential deadlock in FileSystem.Cache/IPC/UGI. (tomwhite)
HADOOP-9193. hadoop script can inadvertently expand wildcard arguments
when delegating to hdfs script. (Andy Isaacson via todd)
HADOOP-9215. when using cmake-2.6, libhadoop.so doesn't get created
(only libhadoop.so.1.0.0) (Colin Patrick McCabe via todd)
Release 2.0.2-alpha - 2012-09-07
INCOMPATIBLE CHANGES
@ -1227,6 +1262,21 @@ Release 2.0.0-alpha - 05-23-2012
HADOOP-8655. Fix TextInputFormat for large deliminators. (Gelesh via
bobby)
Release 0.23.7 - UNRELEASED
INCOMPATIBLE CHANGES
NEW FEATURES
IMPROVEMENTS
HADOOP-8849. FileUtil#fullyDelete should grant the target directories +rwx
permissions (Ivan A. Veselovsky via bobby)
OPTIMIZATIONS
BUG FIXES
Release 0.23.6 - UNRELEASED
INCOMPATIBLE CHANGES
@ -1234,6 +1284,8 @@ Release 0.23.6 - UNRELEASED
NEW FEATURES
IMPROVEMENTS
HADOOP-9217. Print thread dumps when hadoop-common tests fail.
(Andrey Klochkov via suresh)
OPTIMIZATIONS
@ -1250,7 +1302,10 @@ Release 0.23.6 - UNRELEASED
HADOOP-9105. FsShell -moveFromLocal erroneously fails (daryn via bobby)
Release 0.23.5 - UNRELEASED
HADOOP-9097. Maven RAT plugin is not checking all source files (tgraves)
Release 0.23.5 - 2012-11-28
INCOMPATIBLE CHANGES
@ -1,67 +0,0 @@
#!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used to generate the package-info.java class that
# records the version, revision, branch, user, timestamp, and url
unset LANG
unset LC_CTYPE
unset LC_TIME
version=$1
build_dir=$2
user=`whoami | tr '\n\r' '\n'`
date=`date`
cwd=`pwd`
if git rev-parse HEAD 2>/dev/null > /dev/null ; then
revision=`git log -1 --pretty=format:"%H"`
hostname=`hostname`
branch=`git branch | sed -n -e 's/^* //p'`
url="git://${hostname}${cwd}"
elif [ -d .svn ]; then
revision=`svn info | sed -n -e 's/Last Changed Rev: \(.*\)/\1/p'`
url=`svn info | sed -n -e 's/^URL: \(.*\)/\1/p'`
# Get canonical branch (branches/X, tags/X, or trunk)
branch=`echo $url | sed -n -e 's,.*\(branches/.*\)$,\1,p' \
-e 's,.*\(tags/.*\)$,\1,p' \
-e 's,.*trunk$,trunk,p'`
else
revision="Unknown"
branch="Unknown"
url="file://$cwd"
fi
which md5sum > /dev/null
if [ "$?" = "0" ] ; then
srcChecksum=`find src/main/java -name '*.java' | LC_ALL=C sort | xargs md5sum | md5sum | cut -d ' ' -f 1`
else
srcChecksum="Not Available"
fi
mkdir -p $build_dir/org/apache/hadoop
cat << EOF | \
sed -e "s/VERSION/$version/" -e "s/USER/$user/" -e "s/DATE/$date/" \
-e "s|URL|$url|" -e "s/REV/$revision/" \
-e "s|BRANCH|$branch|" -e "s/SRCCHECKSUM/$srcChecksum/" \
> $build_dir/org/apache/hadoop/package-info.java
/*
* Generated by src/saveVersion.sh
*/
@HadoopVersionAnnotation(version="VERSION", revision="REV", branch="BRANCH",
user="USER", date="DATE", url="URL",
srcChecksum="SRCCHECKSUM")
package org.apache.hadoop;
EOF
@ -244,7 +244,51 @@
</dependencies>
<build>
<!--
Include all files in src/main/resources. By default, do not apply property
substitution (filtering=false), but do apply property substitution to
common-version-info.properties (filtering=true). This will substitute the
version information correctly, but prevent Maven from altering other files
like core-default.xml.
-->
<resources>
<resource>
<directory>${basedir}/src/main/resources</directory>
<excludes>
<exclude>common-version-info.properties</exclude>
</excludes>
<filtering>false</filtering>
</resource>
<resource>
<directory>${basedir}/src/main/resources</directory>
<includes>
<include>common-version-info.properties</include>
</includes>
<filtering>true</filtering>
</resource>
</resources>
<plugins>
<plugin>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-maven-plugins</artifactId>
<executions>
<execution>
<id>version-info</id>
<goals>
<goal>version-info</goal>
</goals>
<configuration>
<source>
<directory>${basedir}/src/main</directory>
<includes>
<include>java/**/*.java</include>
<include>proto/**/*.proto</include>
</includes>
</source>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
@ -288,22 +332,6 @@
</target>
</configuration>
</execution>
<execution>
<id>save-version</id>
<phase>generate-sources</phase>
<goals>
<goal>run</goal>
</goals>
<configuration>
<target>
<mkdir dir="${project.build.directory}/generated-sources/java"/>
<exec executable="sh">
<arg
line="${basedir}/dev-support/saveVersion.sh ${project.version} ${project.build.directory}/generated-sources/java"/>
</exec>
</target>
</configuration>
</execution>
<execution>
<id>generate-test-sources</id>
<phase>generate-test-sources</phase>
@ -445,13 +473,26 @@
<exclude>dev-support/jdiff/**</exclude>
<exclude>src/main/native/*</exclude>
<exclude>src/main/native/config/*</exclude>
<exclude>src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo</exclude>
<exclude>src/main/native/m4/*</exclude>
<exclude>src/test/empty-file</exclude>
<exclude>src/test/all-tests</exclude>
<exclude>src/test/resources/kdc/ldif/users.ldif</exclude>
<exclude>src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c</exclude>
</excludes>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<properties>
<property>
<name>listener</name>
<value>org.apache.hadoop.test.TimedOutTestsListener</value>
</property>
</properties>
</configuration>
</plugin>
</plugins>
</build>
@ -513,6 +554,9 @@
<exec executable="make" dir="${project.build.directory}/native" failonerror="true">
<arg line="VERBOSE=1"/>
</exec>
<!-- The second make is a workaround for HADOOP-9215. It can
be removed when version 2.6 of cmake is no longer supported . -->
<exec executable="make" dir="${project.build.directory}/native" failonerror="true"></exec>
</target>
</configuration>
</execution>
@ -1,3 +1,20 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CONFIG_H
#define CONFIG_H
@ -58,9 +58,9 @@ case $COMMAND in
#try to locate hdfs and if present, delegate to it.
shift
if [ -f "${HADOOP_HDFS_HOME}"/bin/hdfs ]; then
exec "${HADOOP_HDFS_HOME}"/bin/hdfs ${COMMAND/dfsgroups/groups} $*
exec "${HADOOP_HDFS_HOME}"/bin/hdfs ${COMMAND/dfsgroups/groups} "$@"
elif [ -f "${HADOOP_PREFIX}"/bin/hdfs ]; then
exec "${HADOOP_PREFIX}"/bin/hdfs ${COMMAND/dfsgroups/groups} $*
exec "${HADOOP_PREFIX}"/bin/hdfs ${COMMAND/dfsgroups/groups} "$@"
else
echo "HADOOP_HDFS_HOME not found!"
exit 1
@ -75,9 +75,9 @@ case $COMMAND in
#try to locate mapred and if present, delegate to it.
shift
if [ -f "${HADOOP_MAPRED_HOME}"/bin/mapred ]; then
exec "${HADOOP_MAPRED_HOME}"/bin/mapred ${COMMAND/mrgroups/groups} $*
exec "${HADOOP_MAPRED_HOME}"/bin/mapred ${COMMAND/mrgroups/groups} "$@"
elif [ -f "${HADOOP_PREFIX}"/bin/mapred ]; then
exec "${HADOOP_PREFIX}"/bin/mapred ${COMMAND/mrgroups/groups} $*
exec "${HADOOP_PREFIX}"/bin/mapred ${COMMAND/mrgroups/groups} "$@"
else
echo "HADOOP_MAPRED_HOME not found!"
exit 1
@ -0,0 +1,219 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Copyright 2011 The Apache Software Foundation
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>security.client.protocol.acl</name>
<value>*</value>
<description>ACL for ClientProtocol, which is used by user code
via the DistributedFileSystem.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.client.datanode.protocol.acl</name>
<value>*</value>
<description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
for block recovery.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.datanode.protocol.acl</name>
<value>*</value>
<description>ACL for DatanodeProtocol, which is used by datanodes to
communicate with the namenode.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.inter.datanode.protocol.acl</name>
<value>*</value>
<description>ACL for InterDatanodeProtocol, the inter-datanode protocol
for updating generation timestamp.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.namenode.protocol.acl</name>
<value>*</value>
<description>ACL for NamenodeProtocol, the protocol used by the secondary
namenode to communicate with the namenode.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.admin.operations.protocol.acl</name>
<value>${HADOOP_HDFS_USER}</value>
<description>ACL for AdminOperationsProtocol. Used for admin commands.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.refresh.usertogroups.mappings.protocol.acl</name>
<value>${HADOOP_HDFS_USER}</value>
<description>ACL for RefreshUserMappingsProtocol. Used to refresh
users mappings. The ACL is a comma-separated list of user and
group names. The user and group list is separated by a blank. For
e.g. "alice,bob users,wheel". A special value of "*" means all
users are allowed.</description>
</property>
<property>
<name>security.refresh.policy.protocol.acl</name>
<value>${HADOOP_HDFS_USER}</value>
<description>ACL for RefreshAuthorizationPolicyProtocol, used by the
dfsadmin and mradmin commands to refresh the security policy in-effect.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.ha.service.protocol.acl</name>
<value>*</value>
<description>ACL for HAService protocol used by HAAdmin to manage the
active and stand-by states of namenode.</description>
</property>
<property>
<name>security.zkfc.protocol.acl</name>
<value>*</value>
<description>ACL for access to the ZK Failover Controller
</description>
</property>
<property>
<name>security.qjournal.service.protocol.acl</name>
<value>${HADOOP_HDFS_USER}</value>
<description>ACL for QJournalProtocol, used by the NN to communicate with
JNs when using the QuorumJournalManager for edit logs.</description>
</property>
<property>
<name>security.mrhs.client.protocol.acl</name>
<value>*</value>
<description>ACL for HSClientProtocol, used by job clients to
communciate with the MR History Server job status etc.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<!-- YARN Protocols -->
<property>
<name>security.resourcetracker.protocol.acl</name>
<value>${HADOOP_YARN_USER}</value>
<description>ACL for ResourceTracker protocol, used by the
ResourceManager and NodeManager to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.admin.protocol.acl</name>
<value>${HADOOP_YARN_USER}</value>
<description>ACL for RMAdminProtocol, for admin commands.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.client.resourcemanager.protocol.acl</name>
<value>*</value>
<description>ACL for ClientRMProtocol, used by the ResourceManager
and applications submission clients to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.applicationmaster.resourcemanager.protocol.acl</name>
<value>*</value>
<description>ACL for AMRMProtocol, used by the ResourceManager
and ApplicationMasters to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.containermanager.protocol.acl</name>
<value>*</value>
<description>ACL for ContainerManager protocol, used by the NodeManager
and ApplicationMasters to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.resourcelocalizer.protocol.acl</name>
<value>*</value>
<description>ACL for ResourceLocalizer protocol, used by the NodeManager
and ResourceLocalizer to communicate with each other.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.job.task.protocol.acl</name>
<value>*</value>
<description>ACL for TaskUmbilicalProtocol, used by the map and reduce
tasks to communicate with the parent tasktracker.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
<property>
<name>security.job.client.protocol.acl</name>
<value>*</value>
<description>ACL for MRClientProtocol, used by job clients to
communciate with the MR ApplicationMaster to query job status etc.
The ACL is a comma-separated list of user and group names. The user and
group list is separated by a blank. For e.g. "alice,bob users,wheel".
A special value of "*" means all users are allowed.</description>
</property>
</configuration>
@ -116,22 +116,6 @@
<td>ACL for NamenodeProtocol, the protocol used by the secondary
namenode to communicate with the namenode.</td>
</tr>
<tr>
<td><code>security.inter.tracker.protocol.acl</code></td>
<td>ACL for InterTrackerProtocol, used by the tasktrackers to
communicate with the jobtracker.</td>
</tr>
<tr>
<td><code>security.job.submission.protocol.acl</code></td>
<td>ACL for JobSubmissionProtocol, used by job clients to
communciate with the jobtracker for job submission, querying job status
etc.</td>
</tr>
<tr>
<td><code>security.task.umbilical.protocol.acl</code></td>
<td>ACL for TaskUmbilicalProtocol, used by the map and reduce
tasks to communicate with the parent tasktracker.</td>
</tr>
<tr>
<td><code>security.refresh.policy.protocol.acl</code></td>
<td>ACL for RefreshAuthorizationPolicyProtocol, used by the
@ -1,74 +0,0 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop;
import java.lang.annotation.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* A package attribute that captures the version of Hadoop that was compiled.
*/
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.PACKAGE)
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Unstable
public @interface HadoopVersionAnnotation {
/**
* Get the Hadoop version
* @return the version string "0.6.3-dev"
*/
String version();
/**
* Get the username that compiled Hadoop.
*/
String user();
/**
* Get the date when Hadoop was compiled.
* @return the date in unix 'date' format
*/
String date();
/**
* Get the url for the subversion repository.
*/
String url();
/**
* Get the subversion revision.
* @return the revision number as a string (eg. "451451")
*/
String revision();
/**
* Get the branch from which this was compiled.
* @return The branch name, e.g. "trunk" or "branches/branch-0.20"
*/
String branch();
/**
* Get a checksum of the source files from which
* Hadoop was compiled.
* @return a string that uniquely identifies the source
**/
String srcChecksum();
}
@ -21,6 +21,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.http.lib.StaticUserWebFilter;
import org.apache.hadoop.security.authorize.Service;
/**
* This class contains constants for configuration keys used
@ -114,7 +115,18 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
SECURITY_HA_SERVICE_PROTOCOL_ACL = "security.ha.service.protocol.acl";
public static final String
SECURITY_ZKFC_PROTOCOL_ACL = "security.zkfc.protocol.acl";
public static final String
SECURITY_CLIENT_PROTOCOL_ACL = "security.client.protocol.acl";
public static final String SECURITY_CLIENT_DATANODE_PROTOCOL_ACL =
"security.client.datanode.protocol.acl";
public static final String
SECURITY_DATANODE_PROTOCOL_ACL = "security.datanode.protocol.acl";
public static final String
SECURITY_INTER_DATANODE_PROTOCOL_ACL = "security.inter.datanode.protocol.acl";
public static final String
SECURITY_NAMENODE_PROTOCOL_ACL = "security.namenode.protocol.acl";
public static final String SECURITY_QJOURNAL_SERVICE_PROTOCOL_ACL =
"security.qjournal.service.protocol.acl";
public static final String HADOOP_SECURITY_TOKEN_SERVICE_USE_IP =
"hadoop.security.token.service.use_ip";
public static final boolean HADOOP_SECURITY_TOKEN_SERVICE_USE_IP_DEFAULT =
@ -191,4 +203,4 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
public static final long HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_DEFAULT =
4*60*60; // 4 hours
}
}
@ -173,6 +173,11 @@ public class CommonConfigurationKeysPublic {
/** Default value for IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY */
public static final int IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT = 10000; // 10s
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IPC_CLIENT_CONNECT_TIMEOUT_KEY =
"ipc.client.connect.timeout";
/** Default value for IPC_CLIENT_CONNECT_TIMEOUT_KEY */
public static final int IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT = 20000; // 20s
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String IPC_CLIENT_CONNECT_MAX_RETRIES_KEY =
"ipc.client.connect.max.retries";
/** Default value for IPC_CLIENT_CONNECT_MAX_RETRIES_KEY */
@ -87,33 +87,98 @@ public static Path[] stat2Paths(FileStatus[] stats, Path path) {
* (4) If dir is a normal directory, then dir and all its contents recursively
* are deleted.
*/
public static boolean fullyDelete(File dir) {
if (dir.delete()) {
public static boolean fullyDelete(final File dir) {
return fullyDelete(dir, false);
}
/**
* Delete a directory and all its contents. If
* we return false, the directory may be partially-deleted.
* (1) If dir is symlink to a file, the symlink is deleted. The file pointed
* to by the symlink is not deleted.
* (2) If dir is symlink to a directory, symlink is deleted. The directory
* pointed to by symlink is not deleted.
* (3) If dir is a normal file, it is deleted.
* (4) If dir is a normal directory, then dir and all its contents recursively
* are deleted.
* @param dir the file or directory to be deleted
* @param tryGrantPermissions true if permissions should be modified to delete a file.
* @return true on success false on failure.
*/
public static boolean fullyDelete(final File dir, boolean tryGrantPermissions) {
if (tryGrantPermissions) {
// try to chmod +rwx the parent folder of the 'dir':
File parent = dir.getParentFile();
grantPermissions(parent);
}
if (deleteImpl(dir, false)) {
// dir is (a) normal file, (b) symlink to a file, (c) empty directory or
// (d) symlink to a directory
return true;
}
// handle nonempty directory deletion
if (!fullyDeleteContents(dir)) {
if (!fullyDeleteContents(dir, tryGrantPermissions)) {
return false;
}
return dir.delete();
return deleteImpl(dir, true);
}
/*
* Pure-Java implementation of "chmod +rwx f".
*/
private static void grantPermissions(final File f) {
f.setExecutable(true);
f.setReadable(true);
f.setWritable(true);
}
private static boolean deleteImpl(final File f, final boolean doLog) {
if (f == null) {
LOG.warn("null file argument.");
return false;
}
final boolean wasDeleted = f.delete();
if (wasDeleted) {
return true;
}
final boolean ex = f.exists();
if (doLog && ex) {
LOG.warn("Failed to delete file or dir ["
+ f.getAbsolutePath() + "]: it still exists.");
}
return !ex;
}
/**
* Delete the contents of a directory, not the directory itself. If
* we return false, the directory may be partially-deleted.
* If dir is a symlink to a directory, all the contents of the actual
* directory pointed to by dir will be deleted.
*/
public static boolean fullyDeleteContents(File dir) {
public static boolean fullyDeleteContents(final File dir) {
return fullyDeleteContents(dir, false);
}
/**
* Delete the contents of a directory, not the directory itself. If
* we return false, the directory may be partially-deleted.
* If dir is a symlink to a directory, all the contents of the actual
* directory pointed to by dir will be deleted.
* @param tryGrantPermissions if 'true', try grant +rwx permissions to this
* and all the underlying directories before trying to delete their contents.
*/
public static boolean fullyDeleteContents(final File dir, final boolean tryGrantPermissions) {
if (tryGrantPermissions) {
// to be able to list the dir and delete files from it
// we must grant the dir rwx permissions:
grantPermissions(dir);
}
boolean deletionSucceeded = true;
File contents[] = dir.listFiles();
final File[] contents = dir.listFiles();
if (contents != null) {
for (int i = 0; i < contents.length; i++) {
if (contents[i].isFile()) {
if (!contents[i].delete()) {// normal file or symlink to another file
if (!deleteImpl(contents[i], true)) {// normal file or symlink to another file
deletionSucceeded = false;
continue; // continue deletion of other files/dirs under dir
}
@ -121,16 +186,16 @@ public static boolean fullyDeleteContents(File dir) {
// Either directory or symlink to another directory.
// Try deleting the directory as this might be a symlink
boolean b = false;
b = contents[i].delete();
b = deleteImpl(contents[i], false);
if (b){
//this was indeed a symlink or an empty directory
continue;
}
// if not an empty directory or symlink let
// fullydelete handle it.
if (!fullyDelete(contents[i])) {
if (!fullyDelete(contents[i], tryGrantPermissions)) {
deletionSucceeded = false;
continue; // continue deletion of other files/dirs under dir
// continue deletion of other files/dirs under dir
}
}
}
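Editor's note: the snippet below is a minimal usage sketch of the two-argument fullyDelete overload added above (HADOOP-8849). It is not part of the patch; the scratch-directory path is hypothetical and chosen only for illustration.

import java.io.File;
import org.apache.hadoop.fs.FileUtil;

public class FullyDeleteSketch {
  public static void main(String[] args) {
    // Hypothetical leftover test directory whose subdirectories may be read-only.
    File scratch = new File("/tmp/hadoop-scratch");
    // Passing true asks FileUtil to grant +rwx on the way down, so revoked
    // permissions no longer block the recursive delete.
    boolean deleted = FileUtil.fullyDelete(scratch, true);
    System.out.println("deleted=" + deleted);
  }
}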
@ -305,6 +305,7 @@ public static Connector createDefaultChannelConnector() {
ret.setAcceptQueueSize(128);
ret.setResolveNames(false);
ret.setUseDirectBuffers(false);
ret.setHeaderBufferSize(1024*64);
return ret;
}
@ -122,7 +122,7 @@ public static List<Class<? extends CompressionCodec>> getCodecClasses(Configurat
if (codecsString != null) {
StringTokenizer codecSplit = new StringTokenizer(codecsString, ",");
while (codecSplit.hasMoreElements()) {
String codecSubstring = codecSplit.nextToken();
String codecSubstring = codecSplit.nextToken().trim();
if (codecSubstring.length() != 0) {
try {
Class<?> cls = conf.getClassByName(codecSubstring);
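Editor's note: a small sketch of the behavior fixed above (HADOOP-9216), not part of the patch. It assumes only the standard Configuration and CompressionCodecFactory APIs shown in this change.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;

public class CodecTrimSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // The space after the comma used to break class lookup; with the trim()
    // above both codec names now resolve.
    conf.set("io.compression.codecs",
        "org.apache.hadoop.io.compress.GzipCodec, org.apache.hadoop.io.compress.DefaultCodec");
    List<Class<? extends CompressionCodec>> codecs =
        CompressionCodecFactory.getCodecClasses(conf);
    System.out.println(codecs);
  }
}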
@ -106,6 +106,8 @@ public class Client {
private SocketFactory socketFactory; // how to create sockets
private int refCount = 1;
private final int connectionTimeout;
final static int PING_CALL_ID = -1;
@ -159,7 +161,16 @@ final public static int getTimeout(Configuration conf) {
}
return -1;
}
/**
* set the connection timeout value in configuration
*
* @param conf Configuration
* @param timeout the socket connect timeout value
*/
public static final void setConnectTimeout(Configuration conf, int timeout) {
conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_KEY, timeout);
}
/**
* Increment this client's reference count
*
@ -494,8 +505,7 @@ private synchronized void setupConnection() throws IOException {
}
}
// connection time out is 20s
NetUtils.connect(this.socket, server, 20000);
NetUtils.connect(this.socket, server, connectionTimeout);
if (rpcTimeout > 0) {
pingInterval = rpcTimeout; // rpcTimeout overwrites pingInterval
}
@ -1034,6 +1044,8 @@ public Client(Class<? extends Writable> valueClass, Configuration conf,
this.valueClass = valueClass;
this.conf = conf;
this.socketFactory = factory;
this.connectionTimeout = conf.getInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_KEY,
CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT);
}
/**
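Editor's note: a minimal sketch of the new configuration hook introduced above (HADOOP-9106), not part of the patch. The 5000 ms value is arbitrary and for illustration only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.Client;

public class ConnectTimeoutSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Equivalent to setting ipc.client.connect.timeout in core-site.xml;
    // every Client built from this conf uses it instead of the old
    // hard-coded 20 seconds.
    Client.setConnectTimeout(conf, 5000);
  }
}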
@ -25,6 +25,7 @@
import java.net.InetSocketAddress;
import java.net.NetworkInterface;
import java.net.NoRouteToHostException;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketAddress;
import java.net.SocketException;
@ -865,4 +866,23 @@ public static List<InetAddress> getIPs(String subnet,
}
return addrs;
}
/**
* Return a free port number. There is no guarantee it will remain free, so
* it should be used immediately.
*
* @returns A free port for binding a local socket
*/
public static int getFreeSocketPort() {
int port = 0;
try {
ServerSocket s = new ServerSocket(0);
port = s.getLocalPort();
s.close();
return port;
} catch (IOException e) {
// Could not get a free port. Return default port 0.
}
return port;
}
}
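Editor's note: a short usage sketch for the helper added above (used by HADOOP-9203 to avoid fixed test ports), not part of the patch; the bind address is illustrative.

import java.net.InetSocketAddress;
import org.apache.hadoop.net.NetUtils;

public class FreePortSketch {
  public static void main(String[] args) {
    // Ask the OS for a port that is free right now; it is not reserved,
    // so bind to it immediately to avoid races with other processes.
    int port = NetUtils.getFreeSocketPort();
    InetSocketAddress addr = new InetSocketAddress("localhost", port);
    System.out.println("binding test server to " + addr);
  }
}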
@ -18,10 +18,13 @@
package org.apache.hadoop.security;
import java.io.BufferedInputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
@ -148,8 +151,32 @@ public static Credentials readTokenStorageFile(Path filename, Configuration conf
in.close();
return credentials;
} catch(IOException ioe) {
IOUtils.cleanup(LOG, in);
throw new IOException("Exception reading " + filename, ioe);
} finally {
IOUtils.cleanup(LOG, in);
}
}
/**
* Convenience method for reading a token storage file, and loading the Tokens
* therein in the passed UGI
* @param filename
* @param conf
* @throws IOException
*/
public static Credentials readTokenStorageFile(File filename, Configuration conf)
throws IOException {
DataInputStream in = null;
Credentials credentials = new Credentials();
try {
in = new DataInputStream(new BufferedInputStream(
new FileInputStream(filename)));
credentials.readTokenStorageStream(in);
return credentials;
} catch(IOException ioe) {
throw new IOException("Exception reading " + filename, ioe);
} finally {
IOUtils.cleanup(LOG, in);
}
}
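Editor's note: a hedged sketch of the File-based overload added above (introduced so UserGroupInformation can avoid the FileSystem cache cycle of HADOOP-9212), not part of the patch; the token-file path is hypothetical.

import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

public class TokenFileSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Hypothetical path, e.g. whatever HADOOP_TOKEN_FILE_LOCATION points at.
    File tokenFile = new File("/tmp/container_tokens");
    Credentials creds = Credentials.readTokenStorageFile(tokenFile, conf);
    // Make the loaded tokens visible to the current user, as getLoginUser() does.
    UserGroupInformation.getCurrentUser().addCredentials(creds);
  }
}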
@ -20,6 +20,7 @@
import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN;
import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT;
import java.io.File;
import java.io.IOException;
import java.lang.reflect.UndeclaredThrowableException;
import java.security.AccessControlContext;
@ -656,10 +657,11 @@ static UserGroupInformation getLoginUser() throws IOException {
String fileLocation = System.getenv(HADOOP_TOKEN_FILE_LOCATION);
if (fileLocation != null) {
// load the token storage file and put all of the tokens into the
// user.
// Load the token storage file and put all of the tokens into the
// user. Don't use the FileSystem API for reading since it has a lock
// cycle (HADOOP-9212).
Credentials cred = Credentials.readTokenStorageFile(
new Path("file:///" + fileLocation), conf);
new File(fileLocation), conf);
loginUser.addCredentials(cred);
}
loginUser.spawnAutoRenewalThreadForUserCreds();
@ -20,41 +20,78 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopVersionAnnotation;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
/**
* This class finds the package info for Hadoop and the HadoopVersionAnnotation
* information.
* This class returns build information about Hadoop components.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class VersionInfo {
private static final Log LOG = LogFactory.getLog(VersionInfo.class);
private static Package myPackage;
private static HadoopVersionAnnotation version;
static {
myPackage = HadoopVersionAnnotation.class.getPackage();
version = myPackage.getAnnotation(HadoopVersionAnnotation.class);
private Properties info;
protected VersionInfo(String component) {
info = new Properties();
String versionInfoFile = component + "-version-info.properties";
try {
InputStream is = Thread.currentThread().getContextClassLoader()
.getResourceAsStream(versionInfoFile);
info.load(is);
} catch (IOException ex) {
LogFactory.getLog(getClass()).warn("Could not read '" +
versionInfoFile + "', " + ex.toString(), ex);
}
}
/**
* Get the meta-data for the Hadoop package.
* @return
*/
static Package getPackage() {
return myPackage;
protected String _getVersion() {
return info.getProperty("version", "Unknown");
}
protected String _getRevision() {
return info.getProperty("revision", "Unknown");
}
protected String _getBranch() {
return info.getProperty("branch", "Unknown");
}
protected String _getDate() {
return info.getProperty("date", "Unknown");
}
protected String _getUser() {
return info.getProperty("user", "Unknown");
}
protected String _getUrl() {
return info.getProperty("url", "Unknown");
}
protected String _getSrcChecksum() {
return info.getProperty("srcChecksum", "Unknown");
}
protected String _getBuildVersion(){
return getVersion() +
" from " + _getRevision() +
" by " + _getUser() +
" source checksum " + _getSrcChecksum();
}
private static VersionInfo COMMON_VERSION_INFO = new VersionInfo("common");
/**
* Get the Hadoop version.
* @return the Hadoop version string, eg. "0.6.3-dev"
*/
public static String getVersion() {
return version != null ? version.version() : "Unknown";
return COMMON_VERSION_INFO._getVersion();
}
/**
@ -62,7 +99,7 @@ public static String getVersion() {
* @return the revision number, eg. "451451"
*/
public static String getRevision() {
return version != null ? version.revision() : "Unknown";
return COMMON_VERSION_INFO._getRevision();
}
/**
@ -70,7 +107,7 @@ public static String getRevision() {
* @return The branch name, e.g. "trunk" or "branches/branch-0.20"
*/
public static String getBranch() {
return version != null ? version.branch() : "Unknown";
return COMMON_VERSION_INFO._getBranch();
}
/**
@ -78,7 +115,7 @@ public static String getBranch() {
* @return the compilation date in unix date format
*/
public static String getDate() {
return version != null ? version.date() : "Unknown";
return COMMON_VERSION_INFO._getDate();
}
/**
@ -86,14 +123,14 @@ public static String getDate() {
* @return the username of the user
*/
public static String getUser() {
return version != null ? version.user() : "Unknown";
return COMMON_VERSION_INFO._getUser();
}
/**
* Get the subversion URL for the root Hadoop directory.
*/
public static String getUrl() {
return version != null ? version.url() : "Unknown";
return COMMON_VERSION_INFO._getUrl();
}
/**
@ -101,7 +138,7 @@ public static String getUrl() {
* built.
**/
public static String getSrcChecksum() {
return version != null ? version.srcChecksum() : "Unknown";
return COMMON_VERSION_INFO._getSrcChecksum();
}
/**
@ -109,14 +146,11 @@ public static String getSrcChecksum() {
* revision, user and date.
*/
public static String getBuildVersion(){
return VersionInfo.getVersion() +
" from " + VersionInfo.getRevision() +
" by " + VersionInfo.getUser() +
" source checksum " + VersionInfo.getSrcChecksum();
return COMMON_VERSION_INFO._getBuildVersion();
}
public static void main(String[] args) {
LOG.debug("version: "+ version);
LOG.debug("version: "+ getVersion());
System.out.println("Hadoop " + getVersion());
System.out.println("Subversion " + getUrl() + " -r " + getRevision());
System.out.println("Compiled by " + getUser() + " on " + getDate());
@ -1 +1,14 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
org.apache.hadoop.security.AnnotatedSecurityInfo
@ -0,0 +1,25 @@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
version=${pom.version}
revision=${version-info.scm.commit}
branch=${version-info.scm.branch}
user=${user.name}
date=${version-info.build.time}
url=${version-info.scm.uri}
srcChecksum=${version-info.source.md5}
@ -80,9 +80,17 @@
<property>
<name>hadoop.security.group.mapping</name>
<value>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</value>
<value>org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback</value>
<description>
Class for user to group mapping (get groups for a given user) for ACL
Class for user to group mapping (get groups for a given user) for ACL.
The default implementation,
org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback,
will determine if the Java Native Interface (JNI) is available. If JNI is
available the implementation will use the API within hadoop to resolve a
list of groups for a user. If JNI is not available then the shell
implementation, ShellBasedUnixGroupsMapping, is used. This implementation
shells out to the Linux/Unix environment with the
<code>bash -c groups</code> command to resolve a list of groups for a user.
</description>
</property>
@ -565,6 +573,14 @@
</description>
</property>
<property>
<name>ipc.client.connect.timeout</name>
<value>20000</value>
<description>Indicates the number of milliseconds a client will wait for the
socket to establish a server connection.
</description>
</property>
<property>
<name>ipc.client.connect.max.retries.on.timeouts</name>
<value>45</value>
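Editor's note: a brief sketch of overriding the new default group mapping in code, not part of the patch; it assumes only the standard Configuration API and the class names listed in the description above.

import org.apache.hadoop.conf.Configuration;

public class GroupMappingSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Force the pure-shell implementation instead of the JNI-with-fallback
    // default introduced by HADOOP-8712.
    conf.set("hadoop.security.group.mapping",
        "org.apache.hadoop.security.ShellBasedUnixGroupsMapping");
  }
}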
@ -1,3 +1,20 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.FileNotFoundException;
@ -17,6 +17,7 @@
*/
package org.apache.hadoop.fs;
import org.junit.Before;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
@ -173,12 +174,26 @@ public void testListAPI() throws IOException {
//Expected an IOException
}
}
@Before
public void before() throws IOException {
cleanupImpl();
}
@After
public void tearDown() throws IOException {
FileUtil.fullyDelete(del);
FileUtil.fullyDelete(tmp);
FileUtil.fullyDelete(partitioned);
cleanupImpl();
}
private void cleanupImpl() throws IOException {
FileUtil.fullyDelete(del, true);
Assert.assertTrue(!del.exists());
FileUtil.fullyDelete(tmp, true);
Assert.assertTrue(!tmp.exists());
FileUtil.fullyDelete(partitioned, true);
Assert.assertTrue(!partitioned.exists());
}
@Test
@ -269,12 +284,14 @@ private void validateTmpDir() {
Assert.assertTrue(new File(tmp, FILE).exists());
}
private File xSubDir = new File(del, "xsubdir");
private File ySubDir = new File(del, "ysubdir");
static String file1Name = "file1";
private File file2 = new File(xSubDir, "file2");
private File file3 = new File(ySubDir, "file3");
private File zlink = new File(del, "zlink");
private final File xSubDir = new File(del, "xSubDir");
private final File xSubSubDir = new File(xSubDir, "xSubSubDir");
private final File ySubDir = new File(del, "ySubDir");
private static final String file1Name = "file1";
private final File file2 = new File(xSubDir, "file2");
private final File file22 = new File(xSubSubDir, "file22");
private final File file3 = new File(ySubDir, "file3");
private final File zlink = new File(del, "zlink");
/**
* Creates a directory which can not be deleted completely.
@ -286,10 +303,14 @@ private void validateTmpDir() {
* |
* .---------------------------------------,
* | | | |
* file1(!w) xsubdir(-w) ysubdir(+w) zlink
* | |
* file2 file3
*
* file1(!w) xSubDir(-rwx) ySubDir(+w) zlink
* | | |
* | file2(-rwx) file3
* |
* xSubSubDir(-rwx)
* |
* file22(-rwx)
*
* @throws IOException
*/
private void setupDirsAndNonWritablePermissions() throws IOException {
@ -302,7 +323,16 @@ private void setupDirsAndNonWritablePermissions() throws IOException {
xSubDir.mkdirs();
file2.createNewFile();
xSubDir.setWritable(false);
xSubSubDir.mkdirs();
file22.createNewFile();
revokePermissions(file22);
revokePermissions(xSubSubDir);
revokePermissions(file2);
revokePermissions(xSubDir);
ySubDir.mkdirs();
file3.createNewFile();
@ -314,23 +344,43 @@ private void setupDirsAndNonWritablePermissions() throws IOException {
FileUtil.symLink(tmpFile.toString(), zlink.toString());
}
private static void grantPermissions(final File f) {
f.setReadable(true);
f.setWritable(true);
f.setExecutable(true);
}
private static void revokePermissions(final File f) {
f.setWritable(false);
f.setExecutable(false);
f.setReadable(false);
}
// Validates the return value.
// Validates the existence of directory "xsubdir" and the file "file1"
// Sets writable permissions for the non-deleted dir "xsubdir" so that it can
// be deleted in tearDown().
private void validateAndSetWritablePermissions(boolean ret) {
xSubDir.setWritable(true);
Assert.assertFalse("The return value should have been false!", ret);
Assert.assertTrue("The file file1 should not have been deleted!",
// Validates the existence of the file "file1"
private void validateAndSetWritablePermissions(
final boolean expectedRevokedPermissionDirsExist, final boolean ret) {
grantPermissions(xSubDir);
grantPermissions(xSubSubDir);
Assert.assertFalse("The return value should have been false.", ret);
Assert.assertTrue("The file file1 should not have been deleted.",
new File(del, file1Name).exists());
Assert.assertTrue(
"The directory xsubdir should not have been deleted!",
xSubDir.exists());
Assert.assertTrue("The file file2 should not have been deleted!",
file2.exists());
Assert.assertFalse("The directory ysubdir should have been deleted!",
Assert.assertEquals(
"The directory xSubDir *should* not have been deleted.",
expectedRevokedPermissionDirsExist, xSubDir.exists());
Assert.assertEquals("The file file2 *should* not have been deleted.",
expectedRevokedPermissionDirsExist, file2.exists());
Assert.assertEquals(
"The directory xSubSubDir *should* not have been deleted.",
expectedRevokedPermissionDirsExist, xSubSubDir.exists());
Assert.assertEquals("The file file22 *should* not have been deleted.",
expectedRevokedPermissionDirsExist, file22.exists());
Assert.assertFalse("The directory ySubDir should have been deleted.",
ySubDir.exists());
Assert.assertFalse("The link zlink should have been deleted!",
Assert.assertFalse("The link zlink should have been deleted.",
zlink.exists());
}
@ -339,7 +389,15 @@ public void testFailFullyDelete() throws IOException {
LOG.info("Running test to verify failure of fullyDelete()");
setupDirsAndNonWritablePermissions();
boolean ret = FileUtil.fullyDelete(new MyFile(del));
validateAndSetWritablePermissions(ret);
validateAndSetWritablePermissions(true, ret);
}
@Test
public void testFailFullyDeleteGrantPermissions() throws IOException {
setupDirsAndNonWritablePermissions();
boolean ret = FileUtil.fullyDelete(new MyFile(del), true);
// this time the directories with revoked permissions *should* be deleted:
validateAndSetWritablePermissions(false, ret);
}
/**
@ -388,7 +446,10 @@ public boolean delete() {
*/
@Override
public File[] listFiles() {
File[] files = super.listFiles();
final File[] files = super.listFiles();
if (files == null) {
return null;
}
List<File> filesList = Arrays.asList(files);
Collections.sort(filesList);
File[] myFiles = new MyFile[files.length];
@ -405,9 +466,17 @@ public void testFailFullyDeleteContents() throws IOException {
LOG.info("Running test to verify failure of fullyDeleteContents()");
setupDirsAndNonWritablePermissions();
boolean ret = FileUtil.fullyDeleteContents(new MyFile(del));
validateAndSetWritablePermissions(ret);
validateAndSetWritablePermissions(true, ret);
}
@Test
public void testFailFullyDeleteContentsGrantPermissions() throws IOException {
setupDirsAndNonWritablePermissions();
boolean ret = FileUtil.fullyDeleteContents(new MyFile(del), true);
// this time the directories with revoked permissions *should* be deleted:
validateAndSetWritablePermissions(false, ret);
}
@Test
public void testCopyMergeSingleDirectory() throws IOException {
setupDirs();
@ -119,6 +119,18 @@ public void doGet(HttpServletRequest request,
}
}
|
@SuppressWarnings("serial")
public static class LongHeaderServlet extends HttpServlet {
@SuppressWarnings("unchecked")
@Override
public void doGet(HttpServletRequest request,
HttpServletResponse response
) throws ServletException, IOException {
Assert.assertEquals(63 * 1024, request.getHeader("longheader").length());
response.setStatus(HttpServletResponse.SC_OK);
}
}
|
@SuppressWarnings("serial")
public static class HtmlContentServlet extends HttpServlet {
@Override
@ -139,6 +151,7 @@ public void doGet(HttpServletRequest request,
server.addServlet("echo", "/echo", EchoServlet.class);
server.addServlet("echomap", "/echomap", EchoMapServlet.class);
server.addServlet("htmlcontent", "/htmlcontent", HtmlContentServlet.class);
server.addServlet("longheader", "/longheader", LongHeaderServlet.class);
server.addJerseyResourcePackage(
JerseyResource.class.getPackage().getName(), "/jersey/*");
server.start();
@ -197,6 +210,22 @@ public void run() {
readOutput(new URL(baseUrl, "/echomap?a=b&c<=d&a=>")));
}
|
/**
* Test that verifies headers can be up to 64K long.
* The test adds a 63K header leaving 1K for other headers.
* This is because the header buffer setting is for ALL headers,
* names and values included. */
@Test public void testLongHeader() throws Exception {
URL url = new URL(baseUrl, "/longheader");
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
StringBuilder sb = new StringBuilder();
for (int i = 0 ; i < 63 * 1024; i++) {
sb.append("a");
}
conn.setRequestProperty("longheader", sb.toString());
assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
}
|
@Test public void testContentTypes() throws Exception {
// Static CSS files should have text/css
URL cssUrl = new URL(baseUrl, "/static/test.css");
|
@ -256,5 +256,17 @@ public static void testFinding() {
checkCodec("overridden factory for .gz", NewGzipCodec.class, codec);
codec = factory.getCodecByClassName(NewGzipCodec.class.getCanonicalName());
checkCodec("overridden factory for gzip codec", NewGzipCodec.class, codec);
|
Configuration conf = new Configuration();
conf.set("io.compression.codecs",
" org.apache.hadoop.io.compress.GzipCodec , " +
" org.apache.hadoop.io.compress.DefaultCodec , " +
" org.apache.hadoop.io.compress.BZip2Codec ");
try {
CompressionCodecFactory.getCodecClasses(conf);
} catch (IllegalArgumentException e) {
fail("IllegalArgumentException is unexpected");
}
|
}
}
|
@ -67,7 +67,7 @@ private static class MyOptions {
private int serverReaderThreads = 1;
private int clientThreads = 0;
private String host = "0.0.0.0";
private int port = 12345;
private int port = 0;
public int secondsToRun = 15;
private int msgSize = 1024;
public Class<? extends RpcEngine> rpcEngine =
@ -201,11 +201,21 @@ private void processOptions(CommandLine line, Options opts)
}
}
|
public int getPort() {
if (port == 0) {
port = NetUtils.getFreeSocketPort();
if (port == 0) {
throw new RuntimeException("Could not find a free port");
}
}
return port;
}
|
@Override
public String toString() {
return "rpcEngine=" + rpcEngine + "\nserverThreads=" + serverThreads
+ "\nserverReaderThreads=" + serverReaderThreads + "\nclientThreads="
+ clientThreads + "\nhost=" + host + "\nport=" + port
+ clientThreads + "\nhost=" + host + "\nport=" + getPort()
+ "\nsecondsToRun=" + secondsToRun + "\nmsgSize=" + msgSize;
}
}
@ -228,12 +238,12 @@ private Server startServer(MyOptions opts) throws IOException {
.newReflectiveBlockingService(serverImpl);
|
server = new RPC.Builder(conf).setProtocol(TestRpcService.class)
.setInstance(service).setBindAddress(opts.host).setPort(opts.port)
.setInstance(service).setBindAddress(opts.host).setPort(opts.getPort())
.setNumHandlers(opts.serverThreads).setVerbose(false).build();
} else if (opts.rpcEngine == WritableRpcEngine.class) {
server = new RPC.Builder(conf).setProtocol(TestProtocol.class)
.setInstance(new TestRPC.TestImpl()).setBindAddress(opts.host)
.setPort(opts.port).setNumHandlers(opts.serverThreads)
.setPort(opts.getPort()).setNumHandlers(opts.serverThreads)
.setVerbose(false).build();
} else {
throw new RuntimeException("Bad engine: " + opts.rpcEngine);
@ -378,7 +388,7 @@ private interface RpcServiceWrapper {
* Create a client proxy for the specified engine.
*/
private RpcServiceWrapper createRpcClient(MyOptions opts) throws IOException {
InetSocketAddress addr = NetUtils.createSocketAddr(opts.host, opts.port);
InetSocketAddress addr = NetUtils.createSocketAddr(opts.host, opts.getPort());
|
if (opts.rpcEngine == ProtobufRpcEngine.class) {
final TestRpcService proxy = RPC.getProxy(TestRpcService.class, 0, addr, conf);
|
@ -62,7 +62,6 @@ public class TestIPC {
final private static Configuration conf = new Configuration();
final static private int PING_INTERVAL = 1000;
final static private int MIN_SLEEP_TIME = 1000;
|
/**
* Flag used to turn off the fault injection behavior
* of the various writables.
@ -499,6 +498,26 @@ public void testIpcTimeout() throws Exception {
client.call(new LongWritable(RANDOM.nextLong()),
addr, null, null, 3*PING_INTERVAL+MIN_SLEEP_TIME, conf);
}
|
@Test
public void testIpcConnectTimeout() throws Exception {
// start server
Server server = new TestServer(1, true);
InetSocketAddress addr = NetUtils.getConnectAddress(server);
//Intentionally do not start server to get a connection timeout
|
// start client
Client.setConnectTimeout(conf, 100);
Client client = new Client(LongWritable.class, conf);
// set the rpc timeout to twice the MIN_SLEEP_TIME
try {
client.call(new LongWritable(RANDOM.nextLong()),
addr, null, null, MIN_SLEEP_TIME*2, conf);
fail("Expected an exception to have been thrown");
} catch (SocketTimeoutException e) {
LOG.info("Get a SocketTimeoutException ", e);
}
}
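For context, a minimal sketch of using the connect-timeout hook exercised by the test above outside of a test (the 5-second value and the class name ConnectTimeoutSketch are hypothetical, not part of this patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.ipc.Client;

public class ConnectTimeoutSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Lower the TCP connect timeout before building the IPC client;
    // the same setting can also be supplied through the configuration.
    Client.setConnectTimeout(conf, 5000);
    Client client = new Client(LongWritable.class, conf);
    client.stop();
  }
}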
|
/**
* Check that file descriptors aren't leaked by starting
|
@ -1,2 +1,15 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
org.apache.hadoop.ipc.TestSaslRPC$TestTokenIdentifier
org.apache.hadoop.security.token.delegation.TestDelegationToken$TestDelegationTokenIdentifier
|
@ -49,9 +49,6 @@
<groupId>org.apache.rat</groupId>
<artifactId>apache-rat-plugin</artifactId>
<configuration>
<includes>
<include>pom.xml</include>
</includes>
</configuration>
</plugin>
</plugins>
|
@ -66,9 +66,6 @@
<groupId>org.apache.rat</groupId>
<artifactId>apache-rat-plugin</artifactId>
<configuration>
<includes>
<include>pom.xml</include>
</includes>
</configuration>
</plugin>
</plugins>
|
@ -359,6 +359,8 @@
<artifactId>apache-rat-plugin</artifactId>
<configuration>
<excludes>
<exclude>src/test/resources/classutils.txt</exclude>
<exclude>src/main/conf/httpfs-signature.secret</exclude>
</excludes>
</configuration>
</plugin>
|
@ -29,6 +29,9 @@
import javax.servlet.ServletResponse;
import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
|
/**
* Filter that resolves the requester hostname.
@ -36,6 +39,7 @@
@InterfaceAudience.Private
public class HostnameFilter implements Filter {
static final ThreadLocal<String> HOSTNAME_TL = new ThreadLocal<String>();
private static final Logger log = LoggerFactory.getLogger(HostnameFilter.class);
|
/**
* Initializes the filter.
@ -66,7 +70,19 @@ public void init(FilterConfig config) throws ServletException {
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
throws IOException, ServletException {
try {
String hostname = InetAddress.getByName(request.getRemoteAddr()).getCanonicalHostName();
String hostname;
try {
String address = request.getRemoteAddr();
if (address != null) {
hostname = InetAddress.getByName(address).getCanonicalHostName();
} else {
log.warn("Request remote address is NULL");
hostname = "???";
}
} catch (UnknownHostException ex) {
log.warn("Request remote address could not be resolved, {0}", ex.toString(), ex);
hostname = "???";
}
HOSTNAME_TL.set(hostname);
chain.doFilter(request, response);
} finally {
|
@ -64,4 +64,30 @@ public void doFilter(ServletRequest servletRequest, ServletResponse servletRespo
filter.destroy();
}
|
@Test
public void testMissingHostname() throws Exception {
ServletRequest request = Mockito.mock(ServletRequest.class);
Mockito.when(request.getRemoteAddr()).thenReturn(null);
|
ServletResponse response = Mockito.mock(ServletResponse.class);
|
final AtomicBoolean invoked = new AtomicBoolean();
|
FilterChain chain = new FilterChain() {
@Override
public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse)
throws IOException, ServletException {
assertTrue(HostnameFilter.get().contains("???"));
invoked.set(true);
}
};
|
Filter filter = new HostnameFilter();
filter.init(null);
assertNull(HostnameFilter.get());
filter.doFilter(request, response, chain);
assertTrue(invoked.get());
assertNull(HostnameFilter.get());
filter.destroy();
}
}
|
@ -309,6 +309,11 @@ Release 2.0.3-alpha - Unreleased
HDFS-4367. GetDataEncryptionKeyResponseProto does not handle null
response. (suresh)
|
HDFS-4364. GetLinkTargetResponseProto does not handle null path. (suresh)
|
HDFS-4369. GetBlockKeysResponseProto does not handle null response.
(suresh)
|
NEW FEATURES
|
HDFS-2656. Add libwebhdfs, a pure C client based on WebHDFS.
@ -480,8 +485,22 @@ Release 2.0.3-alpha - Unreleased
HDFS-4381. Document fsimage format details in FSImageFormat class javadoc.
(Jing Zhao via suresh)
|
HDFS-4375. Use token request messages defined in hadoop common.
(suresh)
|
HDFS-4392. Use NetUtils#getFreeSocketPort in MiniDFSCluster.
(Andrew Purtell via suresh)
|
HDFS-4393. Make empty request and responses in protocol translators can be
static final members. (Brandon Li via suresh)
|
HDFS-4403. DFSClient can infer checksum type when not provided by reading
first byte (todd)
|
OPTIMIZATIONS
|
HDFS-3429. DataNode reads checksums even if client does not need them (todd)
|
BUG FIXES
|
HDFS-3919. MiniDFSCluster:waitClusterUp can hang forever.
@ -703,6 +722,12 @@ Release 2.0.3-alpha - Unreleased
|
HDFS-1245. Pluggable block id generation. (shv)
|
HDFS-4415. HostnameFilter should handle hostname resolution failures and
continue processing. (Robert Kanter via atm)
|
HDFS-4359. Slow RPC responses from NN can prevent metrics collection on
DNs. (liang xie via atm)
|
BREAKDOWN OF HDFS-3077 SUBTASKS
|
HDFS-3077. Quorum-based protocol for reading and writing edit logs.
@ -805,9 +830,12 @@ Release 2.0.3-alpha - Unreleased
HDFS-4017. Unclosed FileInputStream in GetJournalEditServlet
(Chao Shi via todd)
|
HDFS-4351. In BlockPlacementPolicyDefault.chooseTarget(..), numOfReplicas
HDFS-4351. In BlockPlacementPolicyDefault.chooseTarget(..), numOfReplicas
needs to be updated when avoiding stale nodes. (Andrew Wang via szetszwo)
|
HDFS-4399. Fix RAT warnings by excluding images sub-dir in docs. (Thomas
Graves via acmurthy)
|
Release 2.0.2-alpha - 2012-09-07
|
INCOMPATIBLE CHANGES
@ -2185,6 +2213,18 @@ Release 2.0.0-alpha - 05-23-2012
|
HDFS-3039. Address findbugs and javadoc warnings on branch. (todd via atm)
|
Release 0.23.7 - UNRELEASED
|
INCOMPATIBLE CHANGES
|
NEW FEATURES
|
IMPROVEMENTS
|
OPTIMIZATIONS
|
BUG FIXES
|
Release 0.23.6 - UNRELEASED
|
INCOMPATIBLE CHANGES
@ -2202,7 +2242,12 @@ Release 0.23.6 - UNRELEASED
HDFS-4248. Renaming directories may incorrectly remove the paths in leases
under the tree. (daryn via szetszwo)
|
Release 0.23.5 - UNRELEASED
HDFS-4385. Maven RAT plugin is not checking all source files (tgraves)
|
HDFS-4426. Secondary namenode shuts down immediately after startup.
(Arpit Agarwal via suresh)
|
Release 0.23.5 - 2012-11-28
|
INCOMPATIBLE CHANGES
|
@ -242,3 +242,30 @@ For the org.apache.hadoop.util.bloom.* classes:
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
|
For src/main/native/util/tree.h:
|
/*-
* Copyright 2002 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
|
@ -516,9 +516,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<exclude>src/test/resources/data*</exclude>
<exclude>src/test/resources/editsStored*</exclude>
<exclude>src/test/resources/empty-file</exclude>
<exclude>src/main/native/util/tree.h</exclude>
<exclude>src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataXceiverAspects.aj</exclude>
<exclude>src/main/webapps/datanode/robots.txt</exclude>
<exclude>src/main/docs/releasenotes.html</exclude>
<exclude>src/contrib/**</exclude>
<exclude>src/site/resources/images/*</exclude>
</excludes>
</configuration>
</plugin>
@ -563,6 +566,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<exec executable="make" dir="${project.build.directory}/native" failonerror="true">
<arg line="VERBOSE=1"/>
</exec>
<!-- The second make is a workaround for HADOOP-9215. It can
be removed when version 2.6 of cmake is no longer supported . -->
<exec executable="make" dir="${project.build.directory}/native" failonerror="true"></exec>
</target>
</configuration>
</execution>
|
@ -1,3 +1,20 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CONFIG_H
#define CONFIG_H
|
@ -92,10 +92,11 @@ There is no provision within HDFS for creating user identities, establishing gro
|
<section><title>Group Mapping</title>
<p>
Once a username has been determined as described above, the list of groups is determined by a <em>group mapping
service</em>, configured by the <code>hadoop.security.group.mapping</code> property.
The default implementation, <code>org.apache.hadoop.security.ShellBasedUnixGroupsMapping</code>, will shell out
to the Unix <code>bash -c groups</code> command to resolve a list of groups for a user.
Once a username has been determined as described above, the list of groups is
determined by a <em>group mapping service</em>, configured by the
<code>hadoop.security.group.mapping</code> property. Refer to the
core-default.xml for details of the <code>hadoop.security.group.mapping</code>
implementation.
</p>
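For context, a minimal sketch of how a caller resolves a user's groups through whichever mapping service hadoop.security.group.mapping selects (the explicit shell-based class and the user name "alice" are illustrative assumptions, not part of this patch):

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.Groups;

public class GroupMappingSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Explicitly pick the shell-based mapping; a cluster normally relies on the
    // default wired up in core-default.xml.
    conf.set("hadoop.security.group.mapping",
        "org.apache.hadoop.security.ShellBasedUnixGroupsMapping");
    Groups groups = Groups.getUserToGroupsMappingService(conf);
    List<String> userGroups = groups.getGroups("alice");
    System.out.println(userGroups);
  }
}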
<p>
An alternate implementation, which connects directly to an LDAP server to resolve the list of groups, is available
|
@ -152,6 +152,7 @@
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenRenewer;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DataChecksum.Type;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.Time;
|
@ -1571,7 +1572,7 @@ public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
*/
public MD5MD5CRC32FileChecksum getFileChecksum(String src) throws IOException {
checkOpen();
return getFileChecksum(src, namenode, socketFactory,
return getFileChecksum(src, clientName, namenode, socketFactory,
dfsClientConf.socketTimeout, getDataEncryptionKey(),
dfsClientConf.connectToDnViaHostname);
}
@ -1614,9 +1615,16 @@ public DataEncryptionKey getDataEncryptionKey()
/**
* Get the checksum of a file.
* @param src The file path
* @param clientName the name of the client requesting the checksum.
* @param namenode the RPC proxy for the namenode
* @param socketFactory to create sockets to connect to DNs
* @param socketTimeout timeout to use when connecting and waiting for a response
* @param encryptionKey the key needed to communicate with DNs in this cluster
* @param connectToDnViaHostname {@see #connectToDnViaHostname()}
* @return The checksum
*/
public static MD5MD5CRC32FileChecksum getFileChecksum(String src,
static MD5MD5CRC32FileChecksum getFileChecksum(String src,
String clientName,
ClientProtocol namenode, SocketFactory socketFactory, int socketTimeout,
DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
throws IOException {
@ -1651,32 +1659,16 @@ public static MD5MD5CRC32FileChecksum getFileChecksum(String src,
final int timeout = 3000 * datanodes.length + socketTimeout;
boolean done = false;
for(int j = 0; !done && j < datanodes.length; j++) {
Socket sock = null;
DataOutputStream out = null;
DataInputStream in = null;
|
try {
//connect to a datanode
sock = socketFactory.createSocket();
String dnAddr = datanodes[j].getXferAddr(connectToDnViaHostname);
if (LOG.isDebugEnabled()) {
LOG.debug("Connecting to datanode " + dnAddr);
}
NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
sock.setSoTimeout(timeout);
|
OutputStream unbufOut = NetUtils.getOutputStream(sock);
InputStream unbufIn = NetUtils.getInputStream(sock);
if (encryptionKey != null) {
IOStreamPair encryptedStreams =
DataTransferEncryptor.getEncryptedStreams(
unbufOut, unbufIn, encryptionKey);
unbufOut = encryptedStreams.out;
unbufIn = encryptedStreams.in;
}
out = new DataOutputStream(new BufferedOutputStream(unbufOut,
IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
encryptionKey, datanodes[j], timeout);
out = new DataOutputStream(new BufferedOutputStream(pair.out,
HdfsConstants.SMALL_BUFFER_SIZE));
in = new DataInputStream(unbufIn);
in = new DataInputStream(pair.in);
|
if (LOG.isDebugEnabled()) {
LOG.debug("write to " + datanodes[j] + ": "
@ -1689,19 +1681,8 @@ public static MD5MD5CRC32FileChecksum getFileChecksum(String src,
BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
|
if (reply.getStatus() != Status.SUCCESS) {
if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN
&& i > lastRetriedIndex) {
if (LOG.isDebugEnabled()) {
LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
+ "for file " + src + " for block " + block
+ " from datanode " + datanodes[j]
+ ". Will retry the block once.");
}
lastRetriedIndex = i;
done = true; // actually it's not done; but we'll retry
i--; // repeat at i-th block
refetchBlocks = true;
break;
if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
throw new InvalidBlockTokenException();
} else {
throw new IOException("Bad response " + reply + " for block "
+ block + " from datanode " + datanodes[j]);
@ -1733,8 +1714,18 @@ else if (bpc != bytesPerCRC) {
md5.write(md5out);
|
// read crc-type
final DataChecksum.Type ct = PBHelper.convert(checksumData
.getCrcType());
final DataChecksum.Type ct;
if (checksumData.hasCrcType()) {
ct = PBHelper.convert(checksumData
.getCrcType());
} else {
LOG.debug("Retrieving checksum from an earlier-version DataNode: " +
"inferring checksum by reading first byte");
ct = inferChecksumTypeByReading(
clientName, socketFactory, socketTimeout, lb, datanodes[j],
encryptionKey, connectToDnViaHostname);
}
|
if (i == 0) { // first block
crcType = ct;
} else if (crcType != DataChecksum.Type.MIXED
@ -1752,12 +1743,25 @@ else if (bpc != bytesPerCRC) {
}
LOG.debug("got reply from " + datanodes[j] + ": md5=" + md5);
}
} catch (InvalidBlockTokenException ibte) {
if (i > lastRetriedIndex) {
if (LOG.isDebugEnabled()) {
LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
+ "for file " + src + " for block " + block
+ " from datanode " + datanodes[j]
+ ". Will retry the block once.");
}
lastRetriedIndex = i;
done = true; // actually it's not done; but we'll retry
i--; // repeat at i-th block
refetchBlocks = true;
break;
}
} catch (IOException ie) {
LOG.warn("src=" + src + ", datanodes["+j+"]=" + datanodes[j], ie);
} finally {
IOUtils.closeStream(in);
IOUtils.closeStream(out);
IOUtils.closeSocket(sock);
}
}
|
@ -1789,6 +1793,90 @@ else if (bpc != bytesPerCRC) {
}
}
|
/**
* Connect to the given datanode's datatransfer port, and return
* the resulting IOStreamPair. This includes encryption wrapping, etc.
*/
private static IOStreamPair connectToDN(
SocketFactory socketFactory, boolean connectToDnViaHostname,
DataEncryptionKey encryptionKey, DatanodeInfo dn, int timeout)
throws IOException
{
boolean success = false;
Socket sock = null;
try {
sock = socketFactory.createSocket();
String dnAddr = dn.getXferAddr(connectToDnViaHostname);
if (LOG.isDebugEnabled()) {
LOG.debug("Connecting to datanode " + dnAddr);
}
NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
sock.setSoTimeout(timeout);
|
OutputStream unbufOut = NetUtils.getOutputStream(sock);
InputStream unbufIn = NetUtils.getInputStream(sock);
IOStreamPair ret;
if (encryptionKey != null) {
ret = DataTransferEncryptor.getEncryptedStreams(
unbufOut, unbufIn, encryptionKey);
} else {
ret = new IOStreamPair(unbufIn, unbufOut);
}
success = true;
return ret;
} finally {
if (!success) {
IOUtils.closeSocket(sock);
}
}
}
|
/**
* Infer the checksum type for a replica by sending an OP_READ_BLOCK
* for the first byte of that replica. This is used for compatibility
* with older HDFS versions which did not include the checksum type in
* OpBlockChecksumResponseProto.
*
* @param in input stream from datanode
* @param out output stream to datanode
* @param lb the located block
* @param clientName the name of the DFSClient requesting the checksum
* @param dn the connected datanode
* @return the inferred checksum type
* @throws IOException if an error occurs
*/
private static Type inferChecksumTypeByReading(
String clientName, SocketFactory socketFactory, int socketTimeout,
LocatedBlock lb, DatanodeInfo dn,
DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
throws IOException {
IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
encryptionKey, dn, socketTimeout);
|
try {
DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
HdfsConstants.SMALL_BUFFER_SIZE));
DataInputStream in = new DataInputStream(pair.in);
|
new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName, 0, 1, true);
final BlockOpResponseProto reply =
BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
|
if (reply.getStatus() != Status.SUCCESS) {
if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
throw new InvalidBlockTokenException();
} else {
throw new IOException("Bad response " + reply + " trying to read "
+ lb.getBlock() + " from datanode " + dn);
}
}
|
return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
} finally {
IOUtils.cleanup(null, pair.in, pair.out);
}
}
|
/**
* Set permissions to a file or directory.
* @param src path name.
|
@ -40,14 +40,18 @@
public class HDFSPolicyProvider extends PolicyProvider {
private static final Service[] hdfsServices =
new Service[] {
new Service("security.client.protocol.acl", ClientProtocol.class),
new Service("security.client.datanode.protocol.acl",
ClientDatanodeProtocol.class),
new Service("security.datanode.protocol.acl", DatanodeProtocol.class),
new Service("security.inter.datanode.protocol.acl",
InterDatanodeProtocol.class),
new Service("security.namenode.protocol.acl", NamenodeProtocol.class),
new Service("security.qjournal.service.protocol.acl", QJournalProtocol.class),
new Service(CommonConfigurationKeys.SECURITY_CLIENT_PROTOCOL_ACL,
ClientProtocol.class),
new Service(CommonConfigurationKeys.SECURITY_CLIENT_DATANODE_PROTOCOL_ACL,
ClientDatanodeProtocol.class),
new Service(CommonConfigurationKeys.SECURITY_DATANODE_PROTOCOL_ACL,
DatanodeProtocol.class),
new Service(CommonConfigurationKeys.SECURITY_INTER_DATANODE_PROTOCOL_ACL,
InterDatanodeProtocol.class),
new Service(CommonConfigurationKeys.SECURITY_NAMENODE_PROTOCOL_ACL,
NamenodeProtocol.class),
new Service(CommonConfigurationKeys.SECURITY_QJOURNAL_SERVICE_PROTOCOL_ACL,
QJournalProtocol.class),
new Service(CommonConfigurationKeys.SECURITY_HA_SERVICE_PROTOCOL_ACL,
HAServiceProtocol.class),
new Service(CommonConfigurationKeys.SECURITY_ZKFC_PROTOCOL_ACL,
|
@ -380,7 +380,8 @@ public static RemoteBlockReader newBlockReader( Socket sock, String file,
// in and out will be closed when sock is closed (by the caller)
final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
NetUtils.getOutputStream(sock, HdfsServerConstants.WRITE_TIMEOUT)));
new Sender(out).readBlock(block, blockToken, clientName, startOffset, len);
new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
verifyChecksum);
|
//
// Get bytes in block, set streams
|
@ -392,7 +392,8 @@ public static BlockReader newBlockReader(Socket sock, String file,
// in and out will be closed when sock is closed (by the caller)
final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
ioStreams.out));
new Sender(out).readBlock(block, blockToken, clientName, startOffset, len);
new Sender(out).readBlock(block, blockToken, clientName, startOffset, len,
verifyChecksum);
|
//
// Get bytes in block
|
@ -55,12 +55,15 @@ public interface DataTransferProtocol {
* @param clientName client's name.
* @param blockOffset offset of the block.
* @param length maximum number of bytes for this read.
* @param sendChecksum if false, the DN should skip reading and sending
* checksums
*/
public void readBlock(final ExtendedBlock blk,
final Token<BlockTokenIdentifier> blockToken,
final String clientName,
final long blockOffset,
final long length) throws IOException;
final long length,
final boolean sendChecksum) throws IOException;
|
/**
* Write a block to a datanode pipeline.
|
@ -88,7 +88,8 @@ private void opReadBlock() throws IOException {
PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
proto.getHeader().getClientName(),
proto.getOffset(),
proto.getLen());
proto.getLen(),
proto.getSendChecksums());
}
|
/** Receive OP_WRITE_BLOCK */
|
@ -62,6 +62,10 @@ private static void op(final DataOutput out, final Op op
|
private static void send(final DataOutputStream out, final Op opcode,
final Message proto) throws IOException {
if (LOG.isTraceEnabled()) {
LOG.trace("Sending DataTransferOp " + proto.getClass().getSimpleName()
+ ": " + proto);
}
op(out, opcode);
proto.writeDelimitedTo(out);
out.flush();
@ -72,12 +76,14 @@ public void readBlock(final ExtendedBlock blk,
final Token<BlockTokenIdentifier> blockToken,
final String clientName,
final long blockOffset,
final long length) throws IOException {
final long length,
final boolean sendChecksum) throws IOException {
|
OpReadBlockProto proto = OpReadBlockProto.newBuilder()
.setHeader(DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken))
.setOffset(blockOffset)
.setLen(length)
.setSendChecksums(sendChecksum)
.build();
|
send(out, Op.READ_BLOCK, proto);
|
@ -77,7 +77,7 @@ public class ClientDatanodeProtocolTranslatorPB implements
/** RpcController is not used and hence is set to null */
private final static RpcController NULL_CONTROLLER = null;
private final ClientDatanodeProtocolPB rpcProxy;
private final static RefreshNamenodesRequestProto REFRESH_NAMENODES =
private final static RefreshNamenodesRequestProto VOID_REFRESH_NAMENODES =
RefreshNamenodesRequestProto.newBuilder().build();
|
public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid,
@ -170,7 +170,7 @@ public long getReplicaVisibleLength(ExtendedBlock b) throws IOException {
@Override
public void refreshNamenodes() throws IOException {
try {
rpcProxy.refreshNamenodes(NULL_CONTROLLER, REFRESH_NAMENODES);
rpcProxy.refreshNamenodes(NULL_CONTROLLER, VOID_REFRESH_NAMENODES);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
|
@ -40,8 +40,6 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
@ -73,8 +71,6 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
@ -107,8 +103,6 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto;
@ -143,6 +137,12 @@
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto;
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto;
import org.apache.hadoop.security.token.Token;
|
import com.google.protobuf.RpcController;
@ -171,6 +171,78 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
static final DisallowSnapshotResponseProto VOID_DISALLOW_SNAPSHOT_RESPONSE =
DisallowSnapshotResponseProto.newBuilder().build();
|
private static final CreateResponseProto VOID_CREATE_RESPONSE =
CreateResponseProto.newBuilder().build();
|
private static final AppendResponseProto VOID_APPEND_RESPONSE =
AppendResponseProto.newBuilder().build();
|
private static final SetPermissionResponseProto VOID_SET_PERM_RESPONSE =
SetPermissionResponseProto.newBuilder().build();
|
private static final SetOwnerResponseProto VOID_SET_OWNER_RESPONSE =
SetOwnerResponseProto.newBuilder().build();
|
private static final AbandonBlockResponseProto VOID_ADD_BLOCK_RESPONSE =
AbandonBlockResponseProto.newBuilder().build();
|
private static final ReportBadBlocksResponseProto VOID_REP_BAD_BLOCK_RESPONSE =
ReportBadBlocksResponseProto.newBuilder().build();
|
private static final ConcatResponseProto VOID_CONCAT_RESPONSE =
ConcatResponseProto.newBuilder().build();
|
private static final Rename2ResponseProto VOID_RENAME2_RESPONSE =
Rename2ResponseProto.newBuilder().build();
|
private static final GetListingResponseProto VOID_GETLISTING_RESPONSE =
GetListingResponseProto.newBuilder().build();
|
private static final RenewLeaseResponseProto VOID_RENEWLEASE_RESPONSE =
RenewLeaseResponseProto.newBuilder().build();
|
private static final SaveNamespaceResponseProto VOID_SAVENAMESPACE_RESPONSE =
SaveNamespaceResponseProto.newBuilder().build();
|
private static final RefreshNodesResponseProto VOID_REFRESHNODES_RESPONSE =
RefreshNodesResponseProto.newBuilder().build();
|
private static final FinalizeUpgradeResponseProto VOID_FINALIZEUPGRADE_RESPONSE =
FinalizeUpgradeResponseProto.newBuilder().build();
|
private static final MetaSaveResponseProto VOID_METASAVE_RESPONSE =
MetaSaveResponseProto.newBuilder().build();
|
private static final GetFileInfoResponseProto VOID_GETFILEINFO_RESPONSE =
GetFileInfoResponseProto.newBuilder().build();
|
private static final GetFileLinkInfoResponseProto VOID_GETFILELINKINFO_RESPONSE =
GetFileLinkInfoResponseProto.newBuilder().build();
|
private static final SetQuotaResponseProto VOID_SETQUOTA_RESPONSE =
SetQuotaResponseProto.newBuilder().build();
|
private static final FsyncResponseProto VOID_FSYNC_RESPONSE =
FsyncResponseProto.newBuilder().build();
|
private static final SetTimesResponseProto VOID_SETTIMES_RESPONSE =
SetTimesResponseProto.newBuilder().build();
|
private static final CreateSymlinkResponseProto VOID_CREATESYMLINK_RESPONSE =
CreateSymlinkResponseProto.newBuilder().build();
|
private static final UpdatePipelineResponseProto
VOID_UPDATEPIPELINE_RESPONSE =
UpdatePipelineResponseProto.newBuilder().build();
|
private static final CancelDelegationTokenResponseProto
VOID_CANCELDELEGATIONTOKEN_RESPONSE =
CancelDelegationTokenResponseProto.newBuilder().build();
|
private static final SetBalancerBandwidthResponseProto
VOID_SETBALANCERBANDWIDTH_RESPONSE =
SetBalancerBandwidthResponseProto.newBuilder().build();
|
/**
* Constructor
*
@ -215,9 +287,6 @@ public GetServerDefaultsResponseProto getServerDefaults(
}
|
static final CreateResponseProto VOID_CREATE_RESPONSE =
CreateResponseProto.newBuilder().build();
|
@Override
public CreateResponseProto create(RpcController controller,
CreateRequestProto req) throws ServiceException {
@ -232,9 +301,6 @@ public CreateResponseProto create(RpcController controller,
return VOID_CREATE_RESPONSE;
}
|
static final AppendResponseProto NULL_APPEND_RESPONSE =
AppendResponseProto.newBuilder().build();
|
@Override
public AppendResponseProto append(RpcController controller,
AppendRequestProto req) throws ServiceException {
@ -244,7 +310,7 @@ public AppendResponseProto append(RpcController controller,
return AppendResponseProto.newBuilder()
.setBlock(PBHelper.convert(result)).build();
}
return NULL_APPEND_RESPONSE;
return VOID_APPEND_RESPONSE;
} catch (IOException e) {
throw new ServiceException(e);
}
@ -263,9 +329,6 @@ public SetReplicationResponseProto setReplication(RpcController controller,
}
|
static final SetPermissionResponseProto VOID_SET_PERM_RESPONSE =
SetPermissionResponseProto.newBuilder().build();
|
@Override
public SetPermissionResponseProto setPermission(RpcController controller,
SetPermissionRequestProto req) throws ServiceException {
@ -277,9 +340,6 @@ public SetPermissionResponseProto setPermission(RpcController controller,
return VOID_SET_PERM_RESPONSE;
}
|
static final SetOwnerResponseProto VOID_SET_OWNER_RESPONSE =
SetOwnerResponseProto.newBuilder().build();
|
@Override
public SetOwnerResponseProto setOwner(RpcController controller,
SetOwnerRequestProto req) throws ServiceException {
@ -293,9 +353,6 @@ public SetOwnerResponseProto setOwner(RpcController controller,
return VOID_SET_OWNER_RESPONSE;
}
|
static final AbandonBlockResponseProto VOID_ADD_BLOCK_RESPONSE =
AbandonBlockResponseProto.newBuilder().build();
|
@Override
public AbandonBlockResponseProto abandonBlock(RpcController controller,
AbandonBlockRequestProto req) throws ServiceException {
@ -361,9 +418,6 @@ public CompleteResponseProto complete(RpcController controller,
}
}
|
static final ReportBadBlocksResponseProto VOID_REP_BAD_BLOCK_RESPONSE =
ReportBadBlocksResponseProto.newBuilder().build();
|
@Override
public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller,
ReportBadBlocksRequestProto req) throws ServiceException {
@ -377,9 +431,6 @@ public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller,
return VOID_REP_BAD_BLOCK_RESPONSE;
}
|
static final ConcatResponseProto VOID_CONCAT_RESPONSE =
ConcatResponseProto.newBuilder().build();
|
@Override
public ConcatResponseProto concat(RpcController controller,
ConcatRequestProto req) throws ServiceException {
@ -403,9 +454,6 @@ public RenameResponseProto rename(RpcController controller,
}
}
|
static final Rename2ResponseProto VOID_RENAME2_RESPONSE =
Rename2ResponseProto.newBuilder().build();
|
@Override
public Rename2ResponseProto rename2(RpcController controller,
Rename2RequestProto req) throws ServiceException {
@ -442,8 +490,6 @@ public MkdirsResponseProto mkdirs(RpcController controller,
}
}
|
static final GetListingResponseProto NULL_GETLISTING_RESPONSE =
GetListingResponseProto.newBuilder().build();
@Override
public GetListingResponseProto getListing(RpcController controller,
GetListingRequestProto req) throws ServiceException {
@ -455,16 +501,13 @@ public GetListingResponseProto getListing(RpcController controller,
return GetListingResponseProto.newBuilder().setDirList(
PBHelper.convert(result)).build();
} else {
return NULL_GETLISTING_RESPONSE;
return VOID_GETLISTING_RESPONSE;
}
} catch (IOException e) {
throw new ServiceException(e);
}
}
|
static final RenewLeaseResponseProto VOID_RENEWLEASE_RESPONSE =
RenewLeaseResponseProto.newBuilder().build();
|
@Override
public RenewLeaseResponseProto renewLease(RpcController controller,
RenewLeaseRequestProto req) throws ServiceException {
@ -549,9 +592,6 @@ public SetSafeModeResponseProto setSafeMode(RpcController controller,
}
}
|
static final SaveNamespaceResponseProto VOID_SAVENAMESPACE_RESPONSE =
SaveNamespaceResponseProto.newBuilder().build();
|
@Override
public SaveNamespaceResponseProto saveNamespace(RpcController controller,
SaveNamespaceRequestProto req) throws ServiceException {
@ -578,9 +618,6 @@ public RollEditsResponseProto rollEdits(RpcController controller,
}
|
static final RefreshNodesResponseProto VOID_REFRESHNODES_RESPONSE =
RefreshNodesResponseProto.newBuilder().build();
|
@Override
public RefreshNodesResponseProto refreshNodes(RpcController controller,
RefreshNodesRequestProto req) throws ServiceException {
@ -593,9 +630,6 @@ public RefreshNodesResponseProto refreshNodes(RpcController controller,
|
}
|
static final FinalizeUpgradeResponseProto VOID_FINALIZEUPGRADE_RESPONSE =
FinalizeUpgradeResponseProto.newBuilder().build();
|
@Override
public FinalizeUpgradeResponseProto finalizeUpgrade(RpcController controller,
FinalizeUpgradeRequestProto req) throws ServiceException {
@ -622,9 +656,6 @@ public ListCorruptFileBlocksResponseProto listCorruptFileBlocks(
}
}
|
static final MetaSaveResponseProto VOID_METASAVE_RESPONSE =
MetaSaveResponseProto.newBuilder().build();
|
@Override
public MetaSaveResponseProto metaSave(RpcController controller,
MetaSaveRequestProto req) throws ServiceException {
@ -637,8 +668,6 @@ public MetaSaveResponseProto metaSave(RpcController controller,
|
}
|
static final GetFileInfoResponseProto NULL_GETFILEINFO_RESPONSE =
GetFileInfoResponseProto.newBuilder().build();
@Override
public GetFileInfoResponseProto getFileInfo(RpcController controller,
GetFileInfoRequestProto req) throws ServiceException {
@ -649,14 +678,12 @@ public GetFileInfoResponseProto getFileInfo(RpcController controller,
return GetFileInfoResponseProto.newBuilder().setFs(
PBHelper.convert(result)).build();
}
return NULL_GETFILEINFO_RESPONSE;
return VOID_GETFILEINFO_RESPONSE;
} catch (IOException e) {
throw new ServiceException(e);
}
}
|
static final GetFileLinkInfoResponseProto NULL_GETFILELINKINFO_RESPONSE =
GetFileLinkInfoResponseProto.newBuilder().build();
@Override
public GetFileLinkInfoResponseProto getFileLinkInfo(RpcController controller,
GetFileLinkInfoRequestProto req) throws ServiceException {
@ -668,7 +695,7 @@ public GetFileLinkInfoResponseProto getFileLinkInfo(RpcController controller,
PBHelper.convert(result)).build();
} else {
System.out.println("got null result for getFileLinkInfo for " + req.getSrc());
return NULL_GETFILELINKINFO_RESPONSE;
return VOID_GETFILELINKINFO_RESPONSE;
}
|
} catch (IOException e) {
@ -689,9 +716,6 @@ public GetContentSummaryResponseProto getContentSummary(
}
}
|
static final SetQuotaResponseProto VOID_SETQUOTA_RESPONSE =
SetQuotaResponseProto.newBuilder().build();
|
@Override
public SetQuotaResponseProto setQuota(RpcController controller,
SetQuotaRequestProto req) throws ServiceException {
@ -704,9 +728,6 @@ public SetQuotaResponseProto setQuota(RpcController controller,
}
}
|
static final FsyncResponseProto VOID_FSYNC_RESPONSE =
FsyncResponseProto.newBuilder().build();
|
@Override
public FsyncResponseProto fsync(RpcController controller,
FsyncRequestProto req) throws ServiceException {
@ -718,9 +739,6 @@ public FsyncResponseProto fsync(RpcController controller,
}
}
|
static final SetTimesResponseProto VOID_SETTIMES_RESPONSE =
SetTimesResponseProto.newBuilder().build();
|
@Override
public SetTimesResponseProto setTimes(RpcController controller,
SetTimesRequestProto req) throws ServiceException {
@ -732,9 +750,6 @@ public SetTimesResponseProto setTimes(RpcController controller,
}
}
|
static final CreateSymlinkResponseProto VOID_CREATESYMLINK_RESPONSE =
CreateSymlinkResponseProto.newBuilder().build();
|
@Override
public CreateSymlinkResponseProto createSymlink(RpcController controller,
CreateSymlinkRequestProto req) throws ServiceException {
@ -752,8 +767,12 @@ public GetLinkTargetResponseProto getLinkTarget(RpcController controller,
GetLinkTargetRequestProto req) throws ServiceException {
try {
String result = server.getLinkTarget(req.getPath());
return GetLinkTargetResponseProto.newBuilder().setTargetPath(result)
.build();
GetLinkTargetResponseProto.Builder builder = GetLinkTargetResponseProto
.newBuilder();
if (result != null) {
builder.setTargetPath(result);
}
return builder.build();
} catch (IOException e) {
throw new ServiceException(e);
}
@ -774,9 +793,6 @@ public UpdateBlockForPipelineResponseProto updateBlockForPipeline(
}
}
|
static final UpdatePipelineResponseProto VOID_UPDATEPIPELINE_RESPONSE =
UpdatePipelineResponseProto.newBuilder().build();
|
@Override
public UpdatePipelineResponseProto updatePipeline(RpcController controller,
UpdatePipelineRequestProto req) throws ServiceException {
@ -818,16 +834,12 @@ public RenewDelegationTokenResponseProto renewDelegationToken(
long result = server.renewDelegationToken(PBHelper
.convertDelegationToken(req.getToken()));
return RenewDelegationTokenResponseProto.newBuilder()
.setNewExireTime(result).build();
.setNewExpiryTime(result).build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
|
static final CancelDelegationTokenResponseProto
VOID_CANCELDELEGATIONTOKEN_RESPONSE =
CancelDelegationTokenResponseProto.newBuilder().build();
|
@Override
public CancelDelegationTokenResponseProto cancelDelegationToken(
RpcController controller, CancelDelegationTokenRequestProto req)
@ -841,10 +853,6 @@ public CancelDelegationTokenResponseProto cancelDelegationToken(
}
}
|
static final SetBalancerBandwidthResponseProto
VOID_SETBALANCERBANDWIDTH_RESPONSE =
SetBalancerBandwidthResponseProto.newBuilder().build();
|
@Override
public SetBalancerBandwidthResponseProto setBalancerBandwidth(
RpcController controller, SetBalancerBandwidthRequestProto req)
|
@ -52,7 +52,6 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto;
@ -70,14 +69,13 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto;
@ -92,7 +90,6 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto;
@ -120,6 +117,10 @@
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto;
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
import org.apache.hadoop.security.token.Token;

import com.google.protobuf.ByteString;
@ -136,6 +137,29 @@ public class ClientNamenodeProtocolTranslatorPB implements
ProtocolMetaInterface, ClientProtocol, Closeable, ProtocolTranslator {
final private ClientNamenodeProtocolPB rpcProxy;

static final GetServerDefaultsRequestProto VOID_GET_SERVER_DEFAULT_REQUEST =
GetServerDefaultsRequestProto.newBuilder().build();

private final static GetFsStatusRequestProto VOID_GET_FSSTATUS_REQUEST =
GetFsStatusRequestProto.newBuilder().build();

private final static SaveNamespaceRequestProto VOID_SAVE_NAMESPACE_REQUEST =
SaveNamespaceRequestProto.newBuilder().build();

private final static RollEditsRequestProto VOID_ROLLEDITS_REQUEST =
RollEditsRequestProto.getDefaultInstance();

private final static RefreshNodesRequestProto VOID_REFRESH_NODES_REQUEST =
RefreshNodesRequestProto.newBuilder().build();

private final static FinalizeUpgradeRequestProto
VOID_FINALIZE_UPGRADE_REQUEST =
FinalizeUpgradeRequestProto.newBuilder().build();

private final static GetDataEncryptionKeyRequestProto
VOID_GET_DATA_ENCRYPTIONKEY_REQUEST =
GetDataEncryptionKeyRequestProto.newBuilder().build();

public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) {
rpcProxy = proxy;
}
@ -167,7 +191,7 @@ public LocatedBlocks getBlockLocations(String src, long offset, long length)

@Override
public FsServerDefaults getServerDefaults() throws IOException {
GetServerDefaultsRequestProto req = GetServerDefaultsRequestProto.newBuilder().build();
GetServerDefaultsRequestProto req = VOID_GET_SERVER_DEFAULT_REQUEST;
try {
return PBHelper
.convert(rpcProxy.getServerDefaults(null, req).getServerDefaults());
@ -480,9 +504,9 @@ public boolean recoverLease(String src, String clientName)

@Override
public long[] getStats() throws IOException {
GetFsStatusRequestProto req = GetFsStatusRequestProto.newBuilder().build();
try {
return PBHelper.convert(rpcProxy.getFsStats(null, req));
return PBHelper.convert(rpcProxy.getFsStats(null,
VOID_GET_FSSTATUS_REQUEST));
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@ -529,10 +553,8 @@ public boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOEx

@Override
public void saveNamespace() throws AccessControlException, IOException {
SaveNamespaceRequestProto req = SaveNamespaceRequestProto.newBuilder()
.build();
try {
rpcProxy.saveNamespace(null, req);
rpcProxy.saveNamespace(null, VOID_SAVE_NAMESPACE_REQUEST);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@ -540,9 +562,9 @@ public void saveNamespace() throws AccessControlException, IOException {

@Override
public long rollEdits() throws AccessControlException, IOException {
RollEditsRequestProto req = RollEditsRequestProto.getDefaultInstance();
try {
RollEditsResponseProto resp = rpcProxy.rollEdits(null, req);
RollEditsResponseProto resp = rpcProxy.rollEdits(null,
VOID_ROLLEDITS_REQUEST);
return resp.getNewSegmentTxId();
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
@ -564,9 +586,8 @@ public boolean restoreFailedStorage(String arg)

@Override
public void refreshNodes() throws IOException {
RefreshNodesRequestProto req = RefreshNodesRequestProto.newBuilder().build();
try {
rpcProxy.refreshNodes(null, req);
rpcProxy.refreshNodes(null, VOID_REFRESH_NODES_REQUEST);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@ -574,9 +595,8 @@ public void refreshNodes() throws IOException {

@Override
public void finalizeUpgrade() throws IOException {
FinalizeUpgradeRequestProto req = FinalizeUpgradeRequestProto.newBuilder().build();
try {
rpcProxy.finalizeUpgrade(null, req);
rpcProxy.finalizeUpgrade(null, VOID_FINALIZE_UPGRADE_REQUEST);
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@ -722,7 +742,8 @@ public String getLinkTarget(String path) throws AccessControlException,
GetLinkTargetRequestProto req = GetLinkTargetRequestProto.newBuilder()
.setPath(path).build();
try {
return rpcProxy.getLinkTarget(null, req).getTargetPath();
GetLinkTargetResponseProto rsp = rpcProxy.getLinkTarget(null, req);
return rsp.hasTargetPath() ? rsp.getTargetPath() : null;
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@ -783,7 +804,7 @@ public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
setToken(PBHelper.convert(token)).
build();
try {
return rpcProxy.renewDelegationToken(null, req).getNewExireTime();
return rpcProxy.renewDelegationToken(null, req).getNewExpiryTime();
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@ -824,12 +845,10 @@ public boolean isMethodSupported(String methodName) throws IOException {

@Override
public DataEncryptionKey getDataEncryptionKey() throws IOException {
GetDataEncryptionKeyRequestProto req = GetDataEncryptionKeyRequestProto
.newBuilder().build();
try {
GetDataEncryptionKeyResponseProto rsp =
rpcProxy.getDataEncryptionKey(null, req);
return rsp.hasDataEncryptionKey() ?
GetDataEncryptionKeyResponseProto rsp = rpcProxy.getDataEncryptionKey(
null, VOID_GET_DATA_ENCRYPTIONKEY_REQUEST);
return rsp.hasDataEncryptionKey() ?
PBHelper.convert(rsp.getDataEncryptionKey()) : null;
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
|
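
Editor's note: the translator hunks above all apply one pattern — a request message with no fields is built once into a static final constant and reused for every RPC instead of being rebuilt per call. Generated protobuf messages are immutable, so sharing a single instance is thread-safe and saves an allocation on each request. A minimal illustrative sketch of the pattern (the class name is hypothetical; the calls mirror the hunks above):

    // Hypothetical translator showing the shared-request pattern.
    public class ExampleTranslator {
      // Built once; protobuf messages are immutable and safe to share.
      private static final GetFsStatusRequestProto VOID_GET_FSSTATUS_REQUEST =
          GetFsStatusRequestProto.newBuilder().build();

      private final ClientNamenodeProtocolPB rpcProxy;

      ExampleTranslator(ClientNamenodeProtocolPB rpcProxy) {
        this.rpcProxy = rpcProxy;
      }

      public long[] getStats() throws IOException {
        try {
          // Reuse the shared request instead of newBuilder().build() per call.
          return PBHelper.convert(
              rpcProxy.getFsStats(null, VOID_GET_FSSTATUS_REQUEST));
        } catch (ServiceException e) {
          throw ProtobufHelper.getRemoteException(e);
        }
      }
    }
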
@ -84,7 +84,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements

/** RpcController is not used and hence is set to null */
private final DatanodeProtocolPB rpcProxy;
private static final VersionRequestProto VERSION_REQUEST =
private static final VersionRequestProto VOID_VERSION_REQUEST =
VersionRequestProto.newBuilder().build();
private final static RpcController NULL_CONTROLLER = null;

@ -243,7 +243,7 @@ public void errorReport(DatanodeRegistration registration, int errorCode,
public NamespaceInfo versionRequest() throws IOException {
try {
return PBHelper.convert(rpcProxy.versionRequest(NULL_CONTROLLER,
VERSION_REQUEST).getInfo());
VOID_VERSION_REQUEST).getInfo());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
|
@ -62,15 +62,17 @@ public class DatanodeProtocolServerSideTranslatorPB implements
DatanodeProtocolPB {

private final DatanodeProtocol impl;
private static final ErrorReportResponseProto ERROR_REPORT_RESPONSE_PROTO =
ErrorReportResponseProto.newBuilder().build();
private static final ErrorReportResponseProto
VOID_ERROR_REPORT_RESPONSE_PROTO =
ErrorReportResponseProto.newBuilder().build();
private static final BlockReceivedAndDeletedResponseProto
BLOCK_RECEIVED_AND_DELETE_RESPONSE =
VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE =
BlockReceivedAndDeletedResponseProto.newBuilder().build();
private static final ReportBadBlocksResponseProto REPORT_BAD_BLOCK_RESPONSE =
ReportBadBlocksResponseProto.newBuilder().build();
private static final ReportBadBlocksResponseProto
VOID_REPORT_BAD_BLOCK_RESPONSE =
ReportBadBlocksResponseProto.newBuilder().build();
private static final CommitBlockSynchronizationResponseProto
COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO =
VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO =
CommitBlockSynchronizationResponseProto.newBuilder().build();

public DatanodeProtocolServerSideTranslatorPB(DatanodeProtocol impl) {
@ -180,7 +182,7 @@ public BlockReceivedAndDeletedResponseProto blockReceivedAndDeleted(
} catch (IOException e) {
throw new ServiceException(e);
}
return BLOCK_RECEIVED_AND_DELETE_RESPONSE;
return VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE;
}

@Override
@ -192,7 +194,7 @@ public ErrorReportResponseProto errorReport(RpcController controller,
} catch (IOException e) {
throw new ServiceException(e);
}
return ERROR_REPORT_RESPONSE_PROTO;
return VOID_ERROR_REPORT_RESPONSE_PROTO;
}

@Override
@ -221,7 +223,7 @@ public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller,
} catch (IOException e) {
throw new ServiceException(e);
}
return REPORT_BAD_BLOCK_RESPONSE;
return VOID_REPORT_BAD_BLOCK_RESPONSE;
}

@Override
@ -242,6 +244,6 @@ public CommitBlockSynchronizationResponseProto commitBlockSynchronization(
} catch (IOException e) {
throw new ServiceException(e);
}
return COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
return VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO;
}
}
|
@ -42,6 +42,13 @@ public class JournalProtocolServerSideTranslatorPB implements JournalProtocolPB
/** Server side implementation to delegate the requests to */
private final JournalProtocol impl;

private final static JournalResponseProto VOID_JOURNAL_RESPONSE =
JournalResponseProto.newBuilder().build();

private final static StartLogSegmentResponseProto
VOID_START_LOG_SEGMENT_RESPONSE =
StartLogSegmentResponseProto.newBuilder().build();

public JournalProtocolServerSideTranslatorPB(JournalProtocol impl) {
this.impl = impl;
}
@ -56,7 +63,7 @@ public JournalResponseProto journal(RpcController unused,
} catch (IOException e) {
throw new ServiceException(e);
}
return JournalResponseProto.newBuilder().build();
return VOID_JOURNAL_RESPONSE;
}

/** @see JournalProtocol#startLogSegment */
@ -69,7 +76,7 @@ public StartLogSegmentResponseProto startLogSegment(RpcController controller,
} catch (IOException e) {
throw new ServiceException(e);
}
return StartLogSegmentResponseProto.newBuilder().build();
return VOID_START_LOG_SEGMENT_RESPONSE;
}

@Override
|
@ -63,6 +63,12 @@ public class NamenodeProtocolServerSideTranslatorPB implements
NamenodeProtocolPB {
private final NamenodeProtocol impl;

private final static ErrorReportResponseProto VOID_ERROR_REPORT_RESPONSE =
ErrorReportResponseProto.newBuilder().build();

private final static EndCheckpointResponseProto VOID_END_CHECKPOINT_RESPONSE =
EndCheckpointResponseProto.newBuilder().build();

public NamenodeProtocolServerSideTranslatorPB(NamenodeProtocol impl) {
this.impl = impl;
}
@ -91,8 +97,12 @@ public GetBlockKeysResponseProto getBlockKeys(RpcController unused,
} catch (IOException e) {
throw new ServiceException(e);
}
return GetBlockKeysResponseProto.newBuilder()
.setKeys(PBHelper.convert(keys)).build();
GetBlockKeysResponseProto.Builder builder =
GetBlockKeysResponseProto.newBuilder();
if (keys != null) {
builder.setKeys(PBHelper.convert(keys));
}
return builder.build();
}

@Override
@ -143,7 +153,7 @@ public ErrorReportResponseProto errorReport(RpcController unused,
} catch (IOException e) {
throw new ServiceException(e);
}
return ErrorReportResponseProto.newBuilder().build();
return VOID_ERROR_REPORT_RESPONSE;
}

@Override
@ -181,7 +191,7 @@ public EndCheckpointResponseProto endCheckpoint(RpcController unused,
} catch (IOException e) {
throw new ServiceException(e);
}
return EndCheckpointResponseProto.newBuilder().build();
return VOID_END_CHECKPOINT_RESPONSE;
}

@Override
|
@ -29,6 +29,7 @@
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto;
@ -67,13 +68,13 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
/*
* Protobuf requests with no parameters instantiated only once
*/
private static final GetBlockKeysRequestProto GET_BLOCKKEYS =
private static final GetBlockKeysRequestProto VOID_GET_BLOCKKEYS_REQUEST =
GetBlockKeysRequestProto.newBuilder().build();
private static final GetTransactionIdRequestProto GET_TRANSACTIONID =
private static final GetTransactionIdRequestProto VOID_GET_TRANSACTIONID_REQUEST =
GetTransactionIdRequestProto.newBuilder().build();
private static final RollEditLogRequestProto ROLL_EDITLOG =
private static final RollEditLogRequestProto VOID_ROLL_EDITLOG_REQUEST =
RollEditLogRequestProto.newBuilder().build();
private static final VersionRequestProto VERSION_REQUEST =
private static final VersionRequestProto VOID_VERSION_REQUEST =
VersionRequestProto.newBuilder().build();

final private NamenodeProtocolPB rpcProxy;
@ -104,8 +105,9 @@ public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
@Override
public ExportedBlockKeys getBlockKeys() throws IOException {
try {
return PBHelper.convert(rpcProxy.getBlockKeys(NULL_CONTROLLER,
GET_BLOCKKEYS).getKeys());
GetBlockKeysResponseProto rsp = rpcProxy.getBlockKeys(NULL_CONTROLLER,
VOID_GET_BLOCKKEYS_REQUEST);
return rsp.hasKeys() ? PBHelper.convert(rsp.getKeys()) : null;
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@ -114,8 +116,8 @@ public ExportedBlockKeys getBlockKeys() throws IOException {
@Override
public long getTransactionID() throws IOException {
try {
return rpcProxy.getTransactionId(NULL_CONTROLLER, GET_TRANSACTIONID)
.getTxId();
return rpcProxy.getTransactionId(NULL_CONTROLLER,
VOID_GET_TRANSACTIONID_REQUEST).getTxId();
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@ -135,7 +137,7 @@ public long getMostRecentCheckpointTxId() throws IOException {
public CheckpointSignature rollEditLog() throws IOException {
try {
return PBHelper.convert(rpcProxy.rollEditLog(NULL_CONTROLLER,
ROLL_EDITLOG).getSignature());
VOID_ROLL_EDITLOG_REQUEST).getSignature());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
@ -145,7 +147,7 @@ public CheckpointSignature rollEditLog() throws IOException {
public NamespaceInfo versionRequest() throws IOException {
try {
return PBHelper.convert(rpcProxy.versionRequest(NULL_CONTROLLER,
VERSION_REQUEST).getInfo());
VOID_VERSION_REQUEST).getInfo());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
|
@ -38,6 +38,10 @@ public class RefreshAuthorizationPolicyProtocolClientSideTranslatorPB implements
private final static RpcController NULL_CONTROLLER = null;
private final RefreshAuthorizationPolicyProtocolPB rpcProxy;

private final static RefreshServiceAclRequestProto
VOID_REFRESH_SERVICE_ACL_REQUEST =
RefreshServiceAclRequestProto.newBuilder().build();

public RefreshAuthorizationPolicyProtocolClientSideTranslatorPB(
RefreshAuthorizationPolicyProtocolPB rpcProxy) {
this.rpcProxy = rpcProxy;
@ -50,10 +54,9 @@ public void close() throws IOException {

@Override
public void refreshServiceAcl() throws IOException {
RefreshServiceAclRequestProto request = RefreshServiceAclRequestProto
.newBuilder().build();
try {
rpcProxy.refreshServiceAcl(NULL_CONTROLLER, request);
rpcProxy.refreshServiceAcl(NULL_CONTROLLER,
VOID_REFRESH_SERVICE_ACL_REQUEST);
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
}
|
@ -32,6 +32,10 @@ public class RefreshAuthorizationPolicyProtocolServerSideTranslatorPB implements

private final RefreshAuthorizationPolicyProtocol impl;

private final static RefreshServiceAclResponseProto
VOID_REFRESH_SERVICE_ACL_RESPONSE = RefreshServiceAclResponseProto
.newBuilder().build();

public RefreshAuthorizationPolicyProtocolServerSideTranslatorPB(
RefreshAuthorizationPolicyProtocol impl) {
this.impl = impl;
@ -46,6 +50,6 @@ public RefreshServiceAclResponseProto refreshServiceAcl(
} catch (IOException e) {
throw new ServiceException(e);
}
return RefreshServiceAclResponseProto.newBuilder().build();
return VOID_REFRESH_SERVICE_ACL_RESPONSE;
}
}
|
@ -39,6 +39,14 @@ public class RefreshUserMappingsProtocolClientSideTranslatorPB implements
private final static RpcController NULL_CONTROLLER = null;
private final RefreshUserMappingsProtocolPB rpcProxy;

private final static RefreshUserToGroupsMappingsRequestProto
VOID_REFRESH_USER_TO_GROUPS_MAPPING_REQUEST =
RefreshUserToGroupsMappingsRequestProto.newBuilder().build();

private final static RefreshSuperUserGroupsConfigurationRequestProto
VOID_REFRESH_SUPERUSER_GROUPS_CONFIGURATION_REQUEST =
RefreshSuperUserGroupsConfigurationRequestProto.newBuilder().build();

public RefreshUserMappingsProtocolClientSideTranslatorPB(
RefreshUserMappingsProtocolPB rpcProxy) {
this.rpcProxy = rpcProxy;
@ -51,10 +59,9 @@ public void close() throws IOException {

@Override
public void refreshUserToGroupsMappings() throws IOException {
RefreshUserToGroupsMappingsRequestProto request =
RefreshUserToGroupsMappingsRequestProto.newBuilder().build();
try {
rpcProxy.refreshUserToGroupsMappings(NULL_CONTROLLER, request);
rpcProxy.refreshUserToGroupsMappings(NULL_CONTROLLER,
VOID_REFRESH_USER_TO_GROUPS_MAPPING_REQUEST);
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
}
@ -62,10 +69,9 @@ public void refreshUserToGroupsMappings() throws IOException {

@Override
public void refreshSuperUserGroupsConfiguration() throws IOException {
RefreshSuperUserGroupsConfigurationRequestProto request =
RefreshSuperUserGroupsConfigurationRequestProto.newBuilder().build();
try {
rpcProxy.refreshSuperUserGroupsConfiguration(NULL_CONTROLLER, request);
rpcProxy.refreshSuperUserGroupsConfiguration(NULL_CONTROLLER,
VOID_REFRESH_SUPERUSER_GROUPS_CONFIGURATION_REQUEST);
} catch (ServiceException se) {
throw ProtobufHelper.getRemoteException(se);
}
|
@ -33,6 +33,15 @@ public class RefreshUserMappingsProtocolServerSideTranslatorPB implements Refres

private final RefreshUserMappingsProtocol impl;

private final static RefreshUserToGroupsMappingsResponseProto
VOID_REFRESH_USER_GROUPS_MAPPING_RESPONSE =
RefreshUserToGroupsMappingsResponseProto.newBuilder().build();

private final static RefreshSuperUserGroupsConfigurationResponseProto
VOID_REFRESH_SUPERUSER_GROUPS_CONFIGURATION_RESPONSE =
RefreshSuperUserGroupsConfigurationResponseProto.newBuilder()
.build();

public RefreshUserMappingsProtocolServerSideTranslatorPB(RefreshUserMappingsProtocol impl) {
this.impl = impl;
}
@ -47,7 +56,7 @@ public RefreshUserMappingsProtocolServerSideTranslatorPB(RefreshUserMappingsProt
} catch (IOException e) {
throw new ServiceException(e);
}
return RefreshUserToGroupsMappingsResponseProto.newBuilder().build();
return VOID_REFRESH_USER_GROUPS_MAPPING_RESPONSE;
}

@Override
@ -60,7 +69,6 @@ public RefreshUserMappingsProtocolServerSideTranslatorPB(RefreshUserMappingsProt
} catch (IOException e) {
throw new ServiceException(e);
}
return RefreshSuperUserGroupsConfigurationResponseProto.newBuilder()
.build();
return VOID_REFRESH_SUPERUSER_GROUPS_CONFIGURATION_RESPONSE;
}
}
|
@ -65,6 +65,13 @@ public class QJournalProtocolServerSideTranslatorPB implements QJournalProtocolP
/** Server side implementation to delegate the requests to */
private final QJournalProtocol impl;

private final static JournalResponseProto VOID_JOURNAL_RESPONSE =
JournalResponseProto.newBuilder().build();

private final static StartLogSegmentResponseProto
VOID_START_LOG_SEGMENT_RESPONSE =
StartLogSegmentResponseProto.newBuilder().build();

public QJournalProtocolServerSideTranslatorPB(QJournalProtocol impl) {
this.impl = impl;
}
@ -135,7 +142,7 @@ public JournalResponseProto journal(RpcController unused,
} catch (IOException e) {
throw new ServiceException(e);
}
return JournalResponseProto.newBuilder().build();
return VOID_JOURNAL_RESPONSE;
}

/** @see JournalProtocol#heartbeat */
@ -160,7 +167,7 @@ public StartLogSegmentResponseProto startLogSegment(RpcController controller,
} catch (IOException e) {
throw new ServiceException(e);
}
return StartLogSegmentResponseProto.newBuilder().build();
return VOID_START_LOG_SEGMENT_RESPONSE;
}

@Override
|
@ -395,7 +395,7 @@ synchronized DatanodeProtocolClientSideTranslatorPB getActiveNN() {
}

@VisibleForTesting
synchronized List<BPServiceActor> getBPServiceActors() {
List<BPServiceActor> getBPServiceActors() {
return Lists.newArrayList(bpServices);
}

|
@ -388,8 +388,8 @@ void verifyBlock(ExtendedBlock block) {
try {
adjustThrottler();

blockSender = new BlockSender(block, 0, -1, false, true, datanode,
null);
blockSender = new BlockSender(block, 0, -1, false, true, true,
datanode, null);

DataOutputStream out =
new DataOutputStream(new IOUtils.NullOutputStream());
|
@ -45,6 +45,8 @@
import org.apache.hadoop.net.SocketOutputStream;
import org.apache.hadoop.util.DataChecksum;

import com.google.common.base.Preconditions;

/**
* Reads a block from the disk and sends it to a recipient.
*
@ -158,12 +160,14 @@ class BlockSender implements java.io.Closeable {
* @param length length of data to read
* @param corruptChecksumOk
* @param verifyChecksum verify checksum while reading the data
* @param sendChecksum send checksum to client.
* @param datanode datanode from which the block is being read
* @param clientTraceFmt format string used to print client trace logs
* @throws IOException
*/
BlockSender(ExtendedBlock block, long startOffset, long length,
boolean corruptChecksumOk, boolean verifyChecksum,
boolean sendChecksum,
DataNode datanode, String clientTraceFmt)
throws IOException {
try {
@ -175,6 +179,13 @@ class BlockSender implements java.io.Closeable {
this.shouldDropCacheBehindRead = datanode.getDnConf().dropCacheBehindReads;
this.datanode = datanode;

if (verifyChecksum) {
// To simplify implementation, callers may not specify verification
// without sending.
Preconditions.checkArgument(sendChecksum,
"If verifying checksum, currently must also send it.");
}

final Replica replica;
final long replicaVisibleLength;
synchronized(datanode.data) {
@ -213,29 +224,37 @@ class BlockSender implements java.io.Closeable {
* False, True: will verify checksum
* False, False: throws IOException file not found
*/
DataChecksum csum;
final InputStream metaIn = datanode.data.getMetaDataInputStream(block);
if (!corruptChecksumOk || metaIn != null) {
if (metaIn == null) {
//need checksum but meta-data not found
throw new FileNotFoundException("Meta-data not found for " + block);
}

checksumIn = new DataInputStream(
new BufferedInputStream(metaIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
DataChecksum csum = null;
if (verifyChecksum || sendChecksum) {
final InputStream metaIn = datanode.data.getMetaDataInputStream(block);
if (!corruptChecksumOk || metaIn != null) {
if (metaIn == null) {
//need checksum but meta-data not found
throw new FileNotFoundException("Meta-data not found for " + block);
}

// read and handle the common header here. For now just a version
BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
short version = header.getVersion();
if (version != BlockMetadataHeader.VERSION) {
LOG.warn("Wrong version (" + version + ") for metadata file for "
+ block + " ignoring ...");
checksumIn = new DataInputStream(
new BufferedInputStream(metaIn, HdfsConstants.IO_FILE_BUFFER_SIZE));

// read and handle the common header here. For now just a version
BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
short version = header.getVersion();
if (version != BlockMetadataHeader.VERSION) {
LOG.warn("Wrong version (" + version + ") for metadata file for "
+ block + " ignoring ...");
}
csum = header.getChecksum();
} else {
LOG.warn("Could not find metadata file for " + block);
}
csum = header.getChecksum();
} else {
LOG.warn("Could not find metadata file for " + block);
// This only decides the buffer size. Use BUFFER_SIZE?
csum = DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 16 * 1024);
}
if (csum == null) {
// The number of bytes per checksum here determines the alignment
// of reads: we always start reading at a checksum chunk boundary,
// even if the checksum type is NULL. So, choosing too big of a value
// would risk sending too much unnecessary data. 512 (1 disk sector)
// is likely to result in minimal extra IO.
csum = DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 512);
}

/*
|
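
Editor's note: the BlockSender hunks above add a sendChecksum flag. When both sending and verification are off, the sender falls back to a NULL checksum with a 512-byte chunk so reads still start on chunk boundaries without shipping real checksum data, and the Preconditions check rejects verifyChecksum=true with sendChecksum=false. A hedged sketch of how a caller passes the new argument, modeled on the DataXceiver hunk that follows (illustrative only, not part of the patch):

    // Sketch: constructing a BlockSender with the new sendChecksum argument.
    BlockSender blockSender = new BlockSender(block, blockOffset, length,
        true,          // corruptChecksumOk
        false,         // verifyChecksum: the serving side does not re-verify here
        sendChecksum,  // new flag: the client may opt out of receiving checksums
        datanode, clientTraceFmt);
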
@ -1441,7 +1441,7 @@ public void run() {
HdfsConstants.SMALL_BUFFER_SIZE));
in = new DataInputStream(unbufIn);
blockSender = new BlockSender(b, 0, b.getNumBytes(),
false, false, DataNode.this, null);
false, false, true, DataNode.this, null);
DatanodeInfo srcNode = new DatanodeInfo(bpReg);

//
|
@ -241,7 +241,8 @@ public void readBlock(final ExtendedBlock block,
final Token<BlockTokenIdentifier> blockToken,
final String clientName,
final long blockOffset,
final long length) throws IOException {
final long length,
final boolean sendChecksum) throws IOException {
previousOpClientName = clientName;

OutputStream baseStream = getOutputStream();
@ -266,7 +267,7 @@ public void readBlock(final ExtendedBlock block,
try {
try {
blockSender = new BlockSender(block, blockOffset, length,
true, false, datanode, clientTraceFmt);
true, false, sendChecksum, datanode, clientTraceFmt);
} catch(IOException e) {
String msg = "opReadBlock " + block + " received exception " + e;
LOG.info(msg);
@ -654,7 +655,7 @@ public void copyBlock(final ExtendedBlock block,

try {
// check if the block exists or not
blockSender = new BlockSender(block, 0, -1, false, false, datanode,
blockSender = new BlockSender(block, 0, -1, false, false, true, datanode,
null);

// set up response stream
|
@ -1,3 +1,20 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;

import java.io.IOException;
|
@ -21,7 +21,6 @@
import java.io.PrintWriter;
import java.net.URL;

import javax.net.SocketFactory;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
@ -33,14 +32,11 @@
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ServletUtil;
import org.znerd.xmlenc.XMLOutputter;
@ -116,18 +112,11 @@ public void doGet(HttpServletRequest request, HttpServletResponse response
final DataNode datanode = (DataNode) context.getAttribute("datanode");
final Configuration conf =
new HdfsConfiguration(datanode.getConf());
final int socketTimeout = conf.getInt(
DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
HdfsServerConstants.READ_TIMEOUT);
final SocketFactory socketFactory = NetUtils.getSocketFactory(conf,
ClientProtocol.class);

try {
final DFSClient dfs = DatanodeJspHelper.getDFSClient(request,
datanode, conf, getUGI(request, conf));
final ClientProtocol nnproxy = dfs.getNamenode();
final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum(
path, nnproxy, socketFactory, socketTimeout, dfs.getDataEncryptionKey(), false);
final MD5MD5CRC32FileChecksum checksum = dfs.getFileChecksum(path);
MD5MD5CRC32FileChecksum.write(xml, checksum);
} catch(IOException ioe) {
writeXml(ioe, path, xml);
|
@ -281,6 +281,17 @@ private void initialize(final Configuration conf,
LOG.info("Log Size Trigger :" + checkpointConf.getTxnCount() + " txns");
}

/**
* Wait for the service to finish.
* (Normally, it runs forever.)
*/
private void join() {
try {
infoServer.join();
} catch (InterruptedException ie) {
}
}

/**
* Shut down this instance of the datanode.
* Returns only after shutdown is complete.
@ -607,6 +618,7 @@ public static void main(String[] argv) throws Exception {

if (secondary != null) {
secondary.startCheckpointThread();
secondary.join();
}
}

|
@ -168,7 +168,7 @@ message RenameRequestProto {
required string dst = 2;
}

message RenameResponseProto { // void response
message RenameResponseProto {
required bool result = 1;
}

@ -393,7 +393,7 @@ message GetLinkTargetRequestProto {
required string path = 1;
}
message GetLinkTargetResponseProto {
required string targetPath = 1;
optional string targetPath = 1;
}

message UpdateBlockForPipelineRequestProto {
@ -415,29 +415,6 @@ message UpdatePipelineRequestProto {
message UpdatePipelineResponseProto { // void response
}

message GetDelegationTokenRequestProto {
required string renewer = 1;
}

message GetDelegationTokenResponseProto {
optional hadoop.common.TokenProto token = 1;
}

message RenewDelegationTokenRequestProto {
required hadoop.common.TokenProto token = 1;
}

message RenewDelegationTokenResponseProto {
required uint64 newExireTime = 1;
}

message CancelDelegationTokenRequestProto {
required hadoop.common.TokenProto token = 1;
}

message CancelDelegationTokenResponseProto { // void response
}

message SetBalancerBandwidthRequestProto {
required int64 bandwidth = 1;
}
@ -554,12 +531,12 @@ service ClientNamenodeProtocol {
returns(UpdateBlockForPipelineResponseProto);
rpc updatePipeline(UpdatePipelineRequestProto)
returns(UpdatePipelineResponseProto);
rpc getDelegationToken(GetDelegationTokenRequestProto)
returns(GetDelegationTokenResponseProto);
rpc renewDelegationToken(RenewDelegationTokenRequestProto)
returns(RenewDelegationTokenResponseProto);
rpc cancelDelegationToken(CancelDelegationTokenRequestProto)
returns(CancelDelegationTokenResponseProto);
rpc getDelegationToken(hadoop.common.GetDelegationTokenRequestProto)
returns(hadoop.common.GetDelegationTokenResponseProto);
rpc renewDelegationToken(hadoop.common.RenewDelegationTokenRequestProto)
returns(hadoop.common.RenewDelegationTokenResponseProto);
rpc cancelDelegationToken(hadoop.common.CancelDelegationTokenRequestProto)
returns(hadoop.common.CancelDelegationTokenResponseProto);
rpc setBalancerBandwidth(SetBalancerBandwidthRequestProto)
returns(SetBalancerBandwidthResponseProto);
rpc getDataEncryptionKey(GetDataEncryptionKeyRequestProto)
|
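
Editor's note: making targetPath optional in GetLinkTargetResponseProto is what allows the translator hunks earlier in this patch to report "no target" cleanly: the server only calls setTargetPath() for a non-null result, and the client checks hasTargetPath() before reading the field. A small sketch of the client-side handling under that assumption (variable names are illustrative):

    // Sketch of consuming the now-optional proto field, mirroring getLinkTarget():
    GetLinkTargetResponseProto rsp = rpcProxy.getLinkTarget(null, req);
    String target = rsp.hasTargetPath() ? rsp.getTargetPath() : null;
    if (target == null) {
      // No link target was resolved; callers must be prepared for null.
    }
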
@ -56,7 +56,7 @@ message GetBlockKeysRequestProto {
* keys - Information about block keys at the active namenode
*/
message GetBlockKeysResponseProto {
required ExportedBlockKeysProto keys = 1;
optional ExportedBlockKeysProto keys = 1;
}

/**
|
@ -52,6 +52,7 @@ message OpReadBlockProto {
required ClientOperationHeaderProto header = 1;
required uint64 offset = 2;
required uint64 len = 3;
optional bool sendChecksums = 4 [default = true];
}

@ -182,5 +183,5 @@ message OpBlockChecksumResponseProto {
required uint32 bytesPerCrc = 1;
required uint64 crcPerBlock = 2;
required bytes md5 = 3;
optional ChecksumTypeProto crcType = 4 [default = CHECKSUM_CRC32];
optional ChecksumTypeProto crcType = 4;
}
|
@ -1,2 +1,15 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier
|
@ -1,3 +1,16 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
org.apache.hadoop.hdfs.DFSClient$Renewer
org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier$Renewer
org.apache.hadoop.hdfs.HftpFileSystem$TokenManager
|
@ -48,7 +48,6 @@
import java.io.PrintWriter;
import java.io.RandomAccessFile;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.channels.FileChannel;
@ -2290,19 +2289,6 @@ public NameNode addNameNode(Configuration conf, int namenodePort)
return nameNodes[nnIndex].nameNode;
}

private int getFreeSocketPort() {
int port = 0;
try {
ServerSocket s = new ServerSocket(0);
port = s.getLocalPort();
s.close();
return port;
} catch (IOException e) {
// Could not get a free port. Return default port 0.
}
return port;
}

protected void setupDatanodeAddress(Configuration conf, boolean setupHostsFile,
boolean checkDataNodeAddrConfig) throws IOException {
if (setupHostsFile) {
@ -2311,7 +2297,7 @@ protected void setupDatanodeAddress(Configuration conf, boolean setupHostsFile,
throw new IOException("Parameter dfs.hosts is not setup in conf");
}
// Setup datanode in the include file, if it is defined in the conf
String address = "127.0.0.1:" + getFreeSocketPort();
String address = "127.0.0.1:" + NetUtils.getFreeSocketPort();
if (checkDataNodeAddrConfig) {
conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, address);
} else {
|
@ -444,21 +444,21 @@ public void testDataTransferProtocol() throws IOException {
recvBuf.reset();
blk.setBlockId(blkid-1);
sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
0L, fileLen);
0L, fileLen, true);
sendRecvData("Wrong block ID " + newBlockId + " for read", false);

// negative block start offset -1L
sendBuf.reset();
blk.setBlockId(blkid);
sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
-1L, fileLen);
-1L, fileLen, true);
sendRecvData("Negative start-offset for read for block " +
firstBlock.getBlockId(), false);

// bad block start offset
sendBuf.reset();
sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
fileLen, fileLen);
fileLen, fileLen, true);
sendRecvData("Wrong start-offset for reading block " +
firstBlock.getBlockId(), false);

@ -475,7 +475,7 @@ public void testDataTransferProtocol() throws IOException {

sendBuf.reset();
sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
0L, -1L-random.nextInt(oneMil));
0L, -1L-random.nextInt(oneMil), true);
sendRecvData("Negative length for reading block " +
firstBlock.getBlockId(), false);

@ -488,14 +488,14 @@ public void testDataTransferProtocol() throws IOException {
recvOut);
sendBuf.reset();
sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
0L, fileLen+1);
0L, fileLen+1, true);
sendRecvData("Wrong length for reading block " +
firstBlock.getBlockId(), false);

//At the end of all this, read the file to make sure that succeeds finally.
sendBuf.reset();
sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl",
0L, fileLen);
0L, fileLen, true);
readFile(fileSys, file, fileLen);
} finally {
cluster.shutdown();
|
@ -19,6 +19,9 @@

import java.io.IOException;

import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.log4j.Level;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
@ -56,4 +59,11 @@ public void testParallelReadByteBuffer() throws IOException {
public void testParallelReadMixed() throws IOException {
runTestWorkload(new MixedWorkloadHelper());
}

@Test
public void testParallelNoChecksums() throws IOException {
verifyChecksums = false;
runTestWorkload(new MixedWorkloadHelper());
}

}
|
@ -46,6 +46,7 @@ public class TestParallelReadUtil {
static final int FILE_SIZE_K = 256;
static Random rand = null;
static final int DEFAULT_REPLICATION_FACTOR = 2;
protected boolean verifyChecksums = true;

static {
// The client-trace log ends up causing a lot of blocking threads
@ -317,7 +318,8 @@ boolean runParallelRead(int nFiles, int nWorkerEach, ReadWorkerHelper helper) th

testInfo.filepath = new Path("/TestParallelRead.dat." + i);
testInfo.authenticData = util.writeFile(testInfo.filepath, FILE_SIZE_K);
testInfo.dis = dfsClient.open(testInfo.filepath.toString());
testInfo.dis = dfsClient.open(testInfo.filepath.toString(),
dfsClient.dfsClientConf.ioBufferSize, verifyChecksums);

for (int j = 0; j < nWorkerEach; ++j) {
workers[nWorkers++] = new ReadWorker(testInfo, nWorkers, helper);
|
@ -24,11 +24,14 @@
import java.io.IOException;
import java.util.Random;

import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.log4j.Level;
import org.junit.Test;

/**
@ -194,11 +197,19 @@ private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
*/
@Test
public void testPreadDFS() throws IOException {
dfsPreadTest(false); //normal pread
dfsPreadTest(true); //trigger read code path without transferTo.
dfsPreadTest(false, true); //normal pread
dfsPreadTest(true, true); //trigger read code path without transferTo.
}

private void dfsPreadTest(boolean disableTransferTo) throws IOException {
@Test
public void testPreadDFSNoChecksum() throws IOException {
((Log4JLogger)DataTransferProtocol.LOG).getLogger().setLevel(Level.ALL);
dfsPreadTest(false, false);
dfsPreadTest(true, false);
}

private void dfsPreadTest(boolean disableTransferTo, boolean verifyChecksum)
throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096);
@ -210,6 +221,7 @@ private void dfsPreadTest(boolean disableTransferTo) throws IOException {
}
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fileSys = cluster.getFileSystem();
fileSys.setVerifyChecksum(verifyChecksum);
try {
Path file1 = new Path("preadtest.dat");
writeFile(fileSys, file1);
|
@ -1,3 +1,20 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tools;

import java.io.IOException;
@ -37,4 +54,4 @@ public static void reset() {
lastRenewed = null;
lastCanceled = null;
}
}
}
|
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
org.apache.hadoop.tools.FakeRenewer
|
||||
|
@ -48,9 +48,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||
<groupId>org.apache.rat</groupId>
|
||||
<artifactId>apache-rat-plugin</artifactId>
|
||||
<configuration>
|
||||
<includes>
|
||||
<include>pom.xml</include>
|
||||
</includes>
|
||||
</configuration>
|
||||
</plugin>
|
||||
</plugins>
|
||||
|
@ -19,6 +19,8 @@ Trunk (Unreleased)
|
||||
MAPREDUCE-4887. Add RehashPartitioner, to smooth distributions
|
||||
with poor implementations of Object#hashCode(). (Radim Kolar via cutting)
|
||||
|
||||
MAPREDUCE-4808. Refactor MapOutput and MergeManager to facilitate reuse by Shuffle implementations. (masokan via tucu)
|
||||
|
||||
IMPROVEMENTS
|
||||
|
||||
MAPREDUCE-3787. [Gridmix] Optimize job monitoring and STRESS mode for
|
||||
@ -151,9 +153,6 @@ Trunk (Unreleased)
|
||||
|
||||
MAPREDUCE-3223. Remove MR1 configs from mapred-default.xml (tlipcon via harsh)
|
||||
|
||||
MAPREDUCE-4678. Running the Pentomino example with defaults throws
|
||||
java.lang.NegativeArraySizeException (Chris McConnell via harsh)
|
||||
|
||||
MAPREDUCE-4695. Fix LocalRunner on trunk after MAPREDUCE-3223 broke it
|
||||
(harsh)
|
||||
|
||||
@ -170,6 +169,9 @@ Release 2.0.3-alpha - Unreleased
|
||||
MAPREDUCE-4123. Remove the 'mapred groups' command, which is no longer
|
||||
supported. (Devaraj K via sseth)
|
||||
|
||||
MAPREDUCE-4938. Use token request messages defined in hadoop common.
|
||||
(suresh)
|
||||
|
||||
NEW FEATURES
|
||||
|
||||
MAPREDUCE-4520. Added support for MapReduce applications to request for
|
||||
@ -207,6 +209,8 @@ Release 2.0.3-alpha - Unreleased
|
||||
MAPREDUCE-4907. TrackerDistributedCacheManager issues too many getFileStatus
|
||||
calls. (sandyr via tucu)
|
||||
|
||||
MAPREDUCE-4949. Enable multiple pi jobs to run in parallel. (sandyr via tucu)
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
BUG FIXES
|
||||
@ -253,6 +257,17 @@ Release 2.0.3-alpha - Unreleased
|
||||
MAPREDUCE-1700. User supplied dependencies may conflict with MapReduce
|
||||
system JARs. (tomwhite)
|
||||
|
||||
MAPREDUCE-4936. JobImpl uber checks for cpu are wrong (Arun C Murthy via
|
||||
jlowe)
|
||||
|
||||
MAPREDUCE-4924. flakey test: org.apache.hadoop.mapred.TestClusterMRNotification.testMR.
|
||||
(rkanter via tucu)
|
||||
|
||||
MAPREDUCE-4923. Add toString method to TaggedInputSplit. (sandyr via tucu)
|
||||
|
||||
MAPREDUCE-4948. Fix a failing unit test TestYARNRunner.testHistoryServerToken.
|
||||
(Junping Du via sseth)
|
||||
|
||||
Release 2.0.2-alpha - 2012-09-07
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
@ -629,6 +644,24 @@ Release 2.0.0-alpha - 05-23-2012
|
||||
MAPREDUCE-4444. nodemanager fails to start when one of the local-dirs is
|
||||
bad (Jason Lowe via bobby)
|
||||
|
||||
Release 0.23.7 - UNRELEASED
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
||||
NEW FEATURES
|
||||
|
||||
IMPROVEMENTS
|
||||
|
||||
OPTIMIZATIONS
|
||||
|
||||
MAPREDUCE-4946. Fix a performance problem for large jobs by reducing the
|
||||
number of map completion event type conversions. (Jason Lowe via sseth)
|
||||
|
||||
BUG FIXES
|
||||
|
||||
MAPREDUCE-4458. Warn if java.library.path is used for AM or Task
|
||||
(Robert Parker via jeagles)
|
||||
|
||||
Release 0.23.6 - UNRELEASED
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
@ -694,7 +727,15 @@ Release 0.23.6 - UNRELEASED
|
||||
MAPREDUCE-4921. JobClient should acquire HS token with RM principal
|
||||
(daryn via bobby)
|
||||
|
||||
Release 0.23.5 - UNRELEASED
|
||||
MAPREDUCE-4934. Maven RAT plugin is not checking all source files (tgraves)
|
||||
|
||||
MAPREDUCE-4678. Running the Pentomino example with defaults throws
|
||||
java.lang.NegativeArraySizeException (Chris McConnell via harsh)
|
||||
|
||||
MAPREDUCE-4925. The pentomino option parser may be buggy.
|
||||
(Karthik Kambatla via harsh)
|
||||
|
||||
Release 0.23.5 - 2012-11-28
|
||||
|
||||
INCOMPATIBLE CHANGES
|
||||
|
||||
|
@ -1,5 +1,18 @@
|
||||
<?xml version="1.0"?>
|
||||
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
|
||||
<!--
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License. See accompanying LICENSE file.
|
||||
-->
|
||||
|
||||
<!-- Put site-specific property overrides in this file. -->
|
||||
|
||||
|
@ -268,7 +268,7 @@
This class is unlikely to get subclassed, so ignore
-->
<Match>
<Class name="org.apache.hadoop.mapreduce.task.reduce.MergeManager" />
<Class name="org.apache.hadoop.mapreduce.task.reduce.MergeManagerImpl" />
<Bug pattern="SC_START_IN_CTOR" />
</Match>

|
@ -275,14 +275,13 @@ public MapTaskCompletionEventsUpdate getMapCompletionEvents(
boolean shouldReset = false;
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID =
TypeConverter.toYarn(taskAttemptID);
org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent[] events =
TaskCompletionEvent[] events =
context.getJob(attemptID.getTaskId().getJobId()).getMapAttemptCompletionEvents(
startIndex, maxEvents);

taskHeartbeatHandler.progressing(attemptID);

return new MapTaskCompletionEventsUpdate(
TypeConverter.fromYarn(events), shouldReset);
return new MapTaskCompletionEventsUpdate(events, shouldReset);
}

@Override
|
@ -125,8 +125,8 @@ public void start() {
.getenv(ApplicationConstants.APPLICATION_CLIENT_SECRET_ENV_NAME);
byte[] bytes = Base64.decodeBase64(secretKeyStr);
secretManager =
new ClientToAMTokenSecretManager(this.appContext.getApplicationID(),
bytes);
new ClientToAMTokenSecretManager(
this.appContext.getApplicationAttemptId(), bytes);
}
server =
rpc.getServer(MRClientProtocol.class, protocolHandler, address,
|
@ -24,6 +24,7 @@

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
@ -88,7 +89,7 @@ public interface Job {
TaskAttemptCompletionEvent[]
getTaskAttemptCompletionEvents(int fromEventId, int maxEvents);

TaskAttemptCompletionEvent[]
TaskCompletionEvent[]
getMapAttemptCompletionEvents(int startIndex, int maxEvents);

/**
|
@ -43,6 +43,7 @@
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobACLsManager;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.JobContext;
@ -130,6 +131,9 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
private static final TaskAttemptCompletionEvent[]
EMPTY_TASK_ATTEMPT_COMPLETION_EVENTS = new TaskAttemptCompletionEvent[0];

private static final TaskCompletionEvent[]
EMPTY_TASK_COMPLETION_EVENTS = new TaskCompletionEvent[0];

private static final Log LOG = LogFactory.getLog(JobImpl.class);

//The maximum fraction of fetch failures allowed for a map
@ -196,7 +200,8 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
private int allowedMapFailuresPercent = 0;
private int allowedReduceFailuresPercent = 0;
private List<TaskAttemptCompletionEvent> taskAttemptCompletionEvents;
private List<TaskAttemptCompletionEvent> mapAttemptCompletionEvents;
private List<TaskCompletionEvent> mapAttemptCompletionEvents;
private List<Integer> taskCompletionIdxToMapCompletionIdx;
private final List<String> diagnostics = new ArrayList<String>();

//task/attempt related datastructures
@ -684,27 +689,31 @@ public static Counters incrTaskCounters(
@Override
public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
int fromEventId, int maxEvents) {
return getAttemptCompletionEvents(taskAttemptCompletionEvents,
fromEventId, maxEvents);
}

@Override
public TaskAttemptCompletionEvent[] getMapAttemptCompletionEvents(
int startIndex, int maxEvents) {
return getAttemptCompletionEvents(mapAttemptCompletionEvents,
startIndex, maxEvents);
}

private TaskAttemptCompletionEvent[] getAttemptCompletionEvents(
List<TaskAttemptCompletionEvent> eventList,
int startIndex, int maxEvents) {
TaskAttemptCompletionEvent[] events = EMPTY_TASK_ATTEMPT_COMPLETION_EVENTS;
readLock.lock();
try {
if (eventList.size() > startIndex) {
if (taskAttemptCompletionEvents.size() > fromEventId) {
int actualMax = Math.min(maxEvents,
(eventList.size() - startIndex));
events = eventList.subList(startIndex,
(taskAttemptCompletionEvents.size() - fromEventId));
events = taskAttemptCompletionEvents.subList(fromEventId,
actualMax + fromEventId).toArray(events);
}
return events;
} finally {
readLock.unlock();
}
}

@Override
public TaskCompletionEvent[] getMapAttemptCompletionEvents(
int startIndex, int maxEvents) {
TaskCompletionEvent[] events = EMPTY_TASK_COMPLETION_EVENTS;
readLock.lock();
try {
if (mapAttemptCompletionEvents.size() > startIndex) {
int actualMax = Math.min(maxEvents,
(mapAttemptCompletionEvents.size() - startIndex));
events = mapAttemptCompletionEvents.subList(startIndex,
actualMax + startIndex).toArray(events);
}
return events;
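
Both completion-event accessors in the hunk above page a slice of an in-memory event list into an array while holding the read lock. The following is a minimal, stand-alone sketch of that pattern only; the helper class, its lock field, and the empty-array sentinel are assumptions for illustration and not a drop-in for JobImpl:

import java.util.List;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Hypothetical helper: copy at most maxEvents entries, starting at
// startIndex, out of a growing event list while excluding writers only
// for the duration of the copy.
class EventPager {
  private final ReadWriteLock lock = new ReentrantReadWriteLock();

  <T> T[] page(List<T> eventList, int startIndex, int maxEvents, T[] empty) {
    lock.readLock().lock();
    try {
      if (eventList.size() <= startIndex) {
        return empty;  // caller has already seen everything recorded so far
      }
      int actualMax = Math.min(maxEvents, eventList.size() - startIndex);
      // subList().toArray(empty) allocates a right-sized array of the same
      // component type when the sentinel array is shorter than the slice.
      return eventList.subList(startIndex, startIndex + actualMax)
          .toArray(empty);
    } finally {
      lock.readLock().unlock();
    }
  }
}
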
@ -1068,9 +1077,13 @@ private void makeUberDecision(long dataInputLength) {
boolean smallCpu =
(
Math.max(
conf.getInt(MRJobConfig.MAP_CPU_VCORES, 1),
conf.getInt(MRJobConfig.REDUCE_CPU_VCORES, 1)) <
sysCPUSizeForUberSlot
conf.getInt(
MRJobConfig.MAP_CPU_VCORES,
MRJobConfig.DEFAULT_MAP_CPU_VCORES),
conf.getInt(
MRJobConfig.REDUCE_CPU_VCORES,
MRJobConfig.DEFAULT_REDUCE_CPU_VCORES))
<= sysCPUSizeForUberSlot
);
boolean notChainJob = !isChainJob(conf);
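
Restated outside the nested expression, the revised "small CPU" test in this hunk reads roughly as below. This is a sketch only: conf and sysCPUSizeForUberSlot are locals of the surrounding makeUberDecision method that are not shown in the hunk.

// Use the configured vcore defaults instead of a hard-coded 1, and accept a
// job that asks for exactly the uber slot size (<= rather than <).
int mapVcores = conf.getInt(MRJobConfig.MAP_CPU_VCORES,
    MRJobConfig.DEFAULT_MAP_CPU_VCORES);
int reduceVcores = conf.getInt(MRJobConfig.REDUCE_CPU_VCORES,
    MRJobConfig.DEFAULT_REDUCE_CPU_VCORES);
boolean smallCpu = Math.max(mapVcores, reduceVcores) <= sysCPUSizeForUberSlot;
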
@ -1243,7 +1256,9 @@ public JobStateInternal transition(JobImpl job, JobEvent event) {
new ArrayList<TaskAttemptCompletionEvent>(
job.numMapTasks + job.numReduceTasks + 10);
job.mapAttemptCompletionEvents =
new ArrayList<TaskAttemptCompletionEvent>(job.numMapTasks + 10);
new ArrayList<TaskCompletionEvent>(job.numMapTasks + 10);
job.taskCompletionIdxToMapCompletionIdx = new ArrayList<Integer>(
job.numMapTasks + job.numReduceTasks + 10);

job.allowedMapFailuresPercent =
job.conf.getInt(MRJobConfig.MAP_FAILURES_MAX_PERCENT, 0);
@ -1558,19 +1573,37 @@ public void transition(JobImpl job, JobEvent event) {
//eventId is equal to index in the arraylist
tce.setEventId(job.taskAttemptCompletionEvents.size());
job.taskAttemptCompletionEvents.add(tce);
int mapEventIdx = -1;
if (TaskType.MAP.equals(tce.getAttemptId().getTaskId().getTaskType())) {
job.mapAttemptCompletionEvents.add(tce);
// we track map completions separately from task completions because
// - getMapAttemptCompletionEvents uses index ranges specific to maps
// - type converting the same events over and over is expensive
mapEventIdx = job.mapAttemptCompletionEvents.size();
job.mapAttemptCompletionEvents.add(TypeConverter.fromYarn(tce));
}
job.taskCompletionIdxToMapCompletionIdx.add(mapEventIdx);

TaskAttemptId attemptId = tce.getAttemptId();
TaskId taskId = attemptId.getTaskId();
//make the previous completion event as obsolete if it exists
Object successEventNo =
job.successAttemptCompletionEventNoMap.remove(taskId);
Integer successEventNo =
job.successAttemptCompletionEventNoMap.remove(taskId);
if (successEventNo != null) {
TaskAttemptCompletionEvent successEvent =
job.taskAttemptCompletionEvents.get((Integer) successEventNo);
job.taskAttemptCompletionEvents.get(successEventNo);
successEvent.setStatus(TaskAttemptCompletionEventStatus.OBSOLETE);
int mapCompletionIdx =
job.taskCompletionIdxToMapCompletionIdx.get(successEventNo);
if (mapCompletionIdx >= 0) {
// update the corresponding TaskCompletionEvent for the map
TaskCompletionEvent mapEvent =
job.mapAttemptCompletionEvents.get(mapCompletionIdx);
job.mapAttemptCompletionEvents.set(mapCompletionIdx,
new TaskCompletionEvent(mapEvent.getEventId(),
mapEvent.getTaskAttemptId(), mapEvent.idWithinJob(),
mapEvent.isMapTask(), TaskCompletionEvent.Status.OBSOLETE,
mapEvent.getTaskTrackerHttp()));
}
}

// if this attempt is not successful then why is the previous successful
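
Taken together, the JobImpl hunks above are the heart of MAPREDUCE-4946: each map completion event is converted to the old-API TaskCompletionEvent once, at the moment it is recorded, and an index mapping lets later status changes (such as marking an attempt OBSOLETE) be mirrored into the cached copy without another conversion pass. A condensed restatement of the recording step follows; field and helper names are taken from the hunks above, but this is an illustrative sketch rather than the committed code:

// Sketch: record one completion event, converting map events exactly once.
void recordCompletionEvent(TaskAttemptCompletionEvent tce) {
  // eventId doubles as the index into taskAttemptCompletionEvents.
  tce.setEventId(taskAttemptCompletionEvents.size());
  taskAttemptCompletionEvents.add(tce);

  // Remember where the converted map copy lives so getMapCompletionEvents
  // can serve it directly and obsolete updates can patch it in place.
  int mapEventIdx = -1;
  if (TaskType.MAP.equals(tce.getAttemptId().getTaskId().getTaskType())) {
    mapEventIdx = mapAttemptCompletionEvents.size();
    mapAttemptCompletionEvents.add(TypeConverter.fromYarn(tce));
  }
  taskCompletionIdxToMapCompletionIdx.add(mapEventIdx);
}
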
@ -1 +1,14 @@
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
org.apache.hadoop.mapreduce.v2.app.MRClientSecurityInfo

@ -34,6 +34,7 @@

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent;
@ -153,9 +154,12 @@ public void testGetMapCompletionEvents() throws IOException {
.thenReturn(Arrays.copyOfRange(taskEvents, 0, 2));
when(mockJob.getTaskAttemptCompletionEvents(2, 100))
.thenReturn(Arrays.copyOfRange(taskEvents, 2, 4));
when(mockJob.getMapAttemptCompletionEvents(0, 100)).thenReturn(mapEvents);
when(mockJob.getMapAttemptCompletionEvents(0, 2)).thenReturn(mapEvents);
when(mockJob.getMapAttemptCompletionEvents(2, 100)).thenReturn(empty);
when(mockJob.getMapAttemptCompletionEvents(0, 100)).thenReturn(
TypeConverter.fromYarn(mapEvents));
when(mockJob.getMapAttemptCompletionEvents(0, 2)).thenReturn(
TypeConverter.fromYarn(mapEvents));
when(mockJob.getMapAttemptCompletionEvents(2, 100)).thenReturn(
TypeConverter.fromYarn(empty));

AppContext appCtx = mock(AppContext.class);
when(appCtx.getJob(any(JobId.class))).thenReturn(mockJob);

@ -33,6 +33,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.JobACLsManager;
import org.apache.hadoop.mapred.ShuffleHandler;
import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.FileSystemCounter;
import org.apache.hadoop.mapreduce.JobACL;
@ -556,7 +557,7 @@ public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(
}

@Override
public TaskAttemptCompletionEvent[] getMapAttemptCompletionEvents(
public TaskCompletionEvent[] getMapAttemptCompletionEvents(
int startIndex, int maxEvents) {
return null;
}

@ -25,8 +25,10 @@
import java.util.Iterator;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
@ -150,14 +152,16 @@ public void testFetchFailure() throws Exception {
Assert.assertEquals("Event status not correct for reduce attempt1",
TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus());

TaskAttemptCompletionEvent mapEvents[] =
TaskCompletionEvent mapEvents[] =
job.getMapAttemptCompletionEvents(0, 2);
TaskCompletionEvent convertedEvents[] = TypeConverter.fromYarn(events);
Assert.assertEquals("Incorrect number of map events", 2, mapEvents.length);
Assert.assertArrayEquals("Unexpected map events",
Arrays.copyOfRange(events, 0, 2), mapEvents);
Arrays.copyOfRange(convertedEvents, 0, 2), mapEvents);
mapEvents = job.getMapAttemptCompletionEvents(2, 200);
Assert.assertEquals("Incorrect number of map events", 1, mapEvents.length);
Assert.assertEquals("Unexpected map event", events[2], mapEvents[0]);
Assert.assertEquals("Unexpected map event", convertedEvents[2],
mapEvents[0]);
}

/**
@ -395,14 +399,16 @@ public void testFetchFailureMultipleReduces() throws Exception {
Assert.assertEquals("Event status not correct for reduce attempt1",
TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus());

TaskAttemptCompletionEvent mapEvents[] =
TaskCompletionEvent mapEvents[] =
job.getMapAttemptCompletionEvents(0, 2);
TaskCompletionEvent convertedEvents[] = TypeConverter.fromYarn(events);
Assert.assertEquals("Incorrect number of map events", 2, mapEvents.length);
Assert.assertArrayEquals("Unexpected map events",
Arrays.copyOfRange(events, 0, 2), mapEvents);
Arrays.copyOfRange(convertedEvents, 0, 2), mapEvents);
mapEvents = job.getMapAttemptCompletionEvents(2, 200);
Assert.assertEquals("Incorrect number of map events", 1, mapEvents.length);
Assert.assertEquals("Unexpected map event", events[2], mapEvents[0]);
Assert.assertEquals("Unexpected map event", convertedEvents[2],
mapEvents[0]);
}

@ -32,6 +32,7 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
@ -441,7 +442,7 @@ public int getCompletedReduces() {
}

@Override
public TaskAttemptCompletionEvent[]
public TaskCompletionEvent[]
getMapAttemptCompletionEvents(int startIndex, int maxEvents) {
throw new UnsupportedOperationException("Not supported yet.");
}

@ -1,3 +1,20 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements.  See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership.  The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License.  You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.launcher;

import static org.mockito.Matchers.any;

@ -1,3 +1,20 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements.  See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership.  The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License.  You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.mapreduce.v2.app.local;

import static org.mockito.Matchers.isA;

@ -82,10 +82,8 @@
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.RenewDelegationTokenRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.RenewDelegationTokenResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.CancelDelegationTokenRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto;
@ -95,7 +93,9 @@
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.RenewDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl;

@ -109,8 +109,7 @@ public class MRClientProtocolPBClientImpl implements MRClientProtocol {

public MRClientProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException {
RPC.setProtocolEngine(conf, MRClientProtocolPB.class, ProtobufRpcEngine.class);
proxy = (MRClientProtocolPB)RPC.getProxy(
MRClientProtocolPB.class, clientVersion, addr, conf);
proxy = RPC.getProxy(MRClientProtocolPB.class, clientVersion, addr, conf);
}

@Override

@ -73,14 +73,10 @@
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.RenewDelegationTokenRequestPBImpl;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.RenewDelegationTokenResponsePBImpl;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.CancelDelegationTokenRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.CancelDelegationTokenResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto;
@ -99,8 +95,12 @@
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.RenewDelegationTokenRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.RenewDelegationTokenResponseProto;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto;
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;

import com.google.protobuf.RpcController;

@ -18,8 +18,8 @@
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;

import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.CancelDelegationTokenRequestProto;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.CancelDelegationTokenRequestProtoOrBuilder;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProtoOrBuilder;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.yarn.api.records.DelegationToken;
import org.apache.hadoop.yarn.api.records.ProtoBase;
@ -52,10 +52,7 @@ public DelegationToken getDelegationToken() {
if (this.token != null) {
return this.token;
}
if (!p.hasDelegationToken()) {
return null;
}
this.token = convertFromProtoFormat(p.getDelegationToken());
this.token = convertFromProtoFormat(p.getToken());
return this.token;
}

@ -63,7 +60,7 @@ public DelegationToken getDelegationToken() {
public void setDelegationToken(DelegationToken token) {
maybeInitBuilder();
if (token == null)
builder.clearDelegationToken();
builder.clearToken();
this.token = token;
}

@ -78,7 +75,7 @@ public CancelDelegationTokenRequestProto getProto() {

private void mergeLocalToBuilder() {
if (token != null) {
builder.setDelegationToken(convertToProtoFormat(this.token));
builder.setToken(convertToProtoFormat(this.token));
}
}

@ -18,7 +18,7 @@
package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;

import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenResponse;
import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.CancelDelegationTokenResponseProto;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
import org.apache.hadoop.yarn.api.records.ProtoBase;

public class CancelDelegationTokenResponsePBImpl extends
Some files were not shown because too many files have changed in this diff.