Merge remote-tracking branch 'apache/trunk' into HDFS-7285
Conflicts:
	hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
	hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
	hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

Change-Id: Ic7946c4ea35bed587fe879ce58b959b25ecc0823
commit 8fd5520246
@@ -599,6 +599,8 @@ function hadoop_usage
   echo "--run-tests Run all relevant tests below the base directory"
   echo "--skip-system-plugins Do not load plugins from ${BINDIR}/test-patch.d"
   echo "--testlist=<list> Specify which subsystem tests to use (comma delimited)"
+  echo "--test-parallel=<bool> Run multiple tests in parallel (default false in developer mode, true in Jenkins mode)"
+  echo "--test-threads=<int> Number of tests to run in parallel (default defined in ${PROJECT_NAME} build)"

   echo "Shell binary overrides:"
   echo "--awk-cmd=<cmd> The 'awk' command to use (default 'awk')"

@@ -691,6 +693,7 @@ function parse_args
     ;;
     --jenkins)
       JENKINS=true
+      TEST_PARALLEL=${TEST_PARALLEL:-true}
     ;;
     --jira-cmd=*)
       JIRACLI=${i#*=}

@@ -749,6 +752,12 @@ function parse_args
         add_test "${j}"
       done
     ;;
+    --test-parallel=*)
+      TEST_PARALLEL=${i#*=}
+    ;;
+    --test-threads=*)
+      TEST_THREADS=${i#*=}
+    ;;
     --wget-cmd=*)
       WGET=${i#*=}
     ;;

@@ -811,6 +820,13 @@ function parse_args
   PATCH_DIR=$(cd -P -- "${PATCH_DIR}" >/dev/null && pwd -P)

   GITDIFFLINES=${PATCH_DIR}/gitdifflines.txt

+  if [[ ${TEST_PARALLEL} == "true" ]] ; then
+    PARALLEL_TESTS_PROFILE=-Pparallel-tests
+    if [[ -n ${TEST_THREADS:-} ]]; then
+      TESTS_THREAD_COUNT="-DtestsThreadCount=$TEST_THREADS"
+    fi
+  fi
 }

 ## @description  Locate the pom.xml file for a given directory

@@ -2245,13 +2261,22 @@ function check_unittests

   test_logfile=${PATCH_DIR}/testrun_${module_suffix}.txt
   echo " Running tests in ${module_suffix}"
-  echo_and_redirect "${test_logfile}" "${MVN}" "${MAVEN_ARGS[@]}" clean install -fae ${NATIVE_PROFILE} ${REQUIRE_TEST_LIB_HADOOP} -D${PROJECT_NAME}PatchProcess
+  # Temporary hack to run the parallel tests profile only for hadoop-common.
+  # This code will be removed once hadoop-hdfs is ready for parallel test
+  # execution.
+  if [[ ${module} == "hadoop-common-project/hadoop-common" ]] ; then
+    OPTIONAL_PARALLEL_TESTS_PROFILE=${PARALLEL_TESTS_PROFILE}
+  else
+    unset OPTIONAL_PARALLEL_TESTS_PROFILE
+  fi
+  # shellcheck disable=2086
+  echo_and_redirect "${test_logfile}" "${MVN}" "${MAVEN_ARGS[@]}" clean install -fae ${NATIVE_PROFILE} ${REQUIRE_TEST_LIB_HADOOP} ${OPTIONAL_PARALLEL_TESTS_PROFILE} ${TESTS_THREAD_COUNT} -D${PROJECT_NAME}PatchProcess
   test_build_result=$?

   add_jira_footer "${module_suffix} test log" "@@BASE@@/testrun_${module_suffix}.txt"

   # shellcheck disable=2016
-  module_test_timeouts=$(${AWK} '/^Running / { if (last) { print last } last=$2 } /^Tests run: / { last="" }' "${test_logfile}")
+  module_test_timeouts=$(${AWK} '/^Running / { array[$NF] = 1 } /^Tests run: .* in / { delete array[$NF] } END { for (x in array) { print x } }' "${test_logfile}")
   if [[ -n "${module_test_timeouts}" ]] ; then
     test_timeouts="${test_timeouts} ${module_test_timeouts}"
     result=1
@@ -348,10 +348,6 @@ Trunk (Unreleased)
     HADOOP-8813. Add InterfaceAudience and InterfaceStability annotations
     to RPC Server and Client classes. (Brandon Li via suresh)

-    HADOOP-8436. NPE In getLocalPathForWrite ( path, conf ) when the
-    required context item is not configured
-    (Brahma Reddy Battula via harsh)
-
     HADOOP-8386. hadoop script doesn't work if 'cd' prints to stdout
     (default behavior in some bash setups (esp. Ubuntu))
     (Chiristopher Berner and Andy Isaacson via harsh)

@@ -791,6 +787,8 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12428. Fix inconsistency between log-level guards and statements.
     (Jagadesh Kiran N and Jackie Chang via ozawa)

+    HADOOP-12446. Undeprecate createNonRecursive() (Ted Yu via kihwal)
+
   OPTIMIZATIONS

     HADOOP-11785. Reduce the number of listStatus operation in distcp

@@ -826,6 +824,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11878. FileContext#fixRelativePart should check for not null for a
     more informative exception. (Brahma Reddy Battula via kasha)

+    HADOOP-11984. Enable parallel JUnit tests in pre-commit.
+    (Chris Nauroth via vinayakumarb)
+
   BUG FIXES

     HADOOP-12374. Updated expunge command description.

@@ -1084,6 +1085,19 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12386. RetryPolicies.RETRY_FOREVER should be able to specify a
     retry interval. (Sunil G via wangda)

+    HADOOP-8436. NPE In getLocalPathForWrite ( path, conf ) when the
+    required context item is not configured
+    (Brahma Reddy Battula via harsh)
+
+    HADOOP-12252. LocalDirAllocator should not throw NPE with empty string
+    configuration. (Zhihai Xu)
+
+    HADOOP-11918. Listing an empty s3a root directory throws FileNotFound.
+    (Lei (Eddy) Xu via cnauroth)
+
+    HADOOP-12440. TestRPC#testRPCServerShutdown did not produce the desired
+    thread states before shutting down. (Xiao Chen via mingma)
+
   OPTIMIZATIONS

     HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()

@@ -1144,6 +1158,15 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12417. TestWebDelegationToken failing with port in use.
     (Mingliang Liu via wheat9)

+    HADOOP-12438. Reset RawLocalFileSystem.useDeprecatedFileStatus in
+    TestLocalFileSystem. (Chris Nauroth via wheat9)
+
+    HADOOP-12437. Allow SecurityUtil to lookup alternate hostnames.
+    (Arpit Agarwal)
+
+    HADOOP-12442. Display help if the command option to 'hdfs dfs' is not valid
+    (nijel via vinayakumarb)
+
 Release 2.7.2 - UNRELEASED

   INCOMPATIBLE CHANGES

@@ -1924,7 +1947,7 @@ Release 2.6.2 - UNRELEASED

   BUG FIXES

-Release 2.6.1 - 2015-09-09
+Release 2.6.1 - 2015-09-23

   INCOMPATIBLE CHANGES
@@ -246,7 +246,7 @@

     <dependency>
       <groupId>org.apache.htrace</groupId>
-      <artifactId>htrace-core</artifactId>
+      <artifactId>htrace-core4</artifactId>
     </dependency>
     <dependency>
       <groupId>org.apache.zookeeper</groupId>

@@ -878,12 +878,53 @@
       <id>parallel-tests</id>
       <build>
         <plugins>
+          <plugin>
+            <artifactId>maven-antrun-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>create-parallel-tests-dirs</id>
+                <phase>test-compile</phase>
+                <configuration>
+                  <target>
+                    <script language="javascript"><![CDATA[
+                      var baseDirs = [
+                          "${test.build.data}",
+                          "${test.build.dir}",
+                          "${hadoop.tmp.dir}" ];
+                      for (var i in baseDirs) {
+                        for (var j = 1; j <= ${testsThreadCount}; ++j) {
+                          var mkdir = project.createTask("mkdir");
+                          mkdir.setDir(new java.io.File(baseDirs[i], j));
+                          mkdir.perform();
+                        }
+                      }
+                    ]]></script>
+                  </target>
+                </configuration>
+                <goals>
+                  <goal>run</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
           <plugin>
             <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-surefire-plugin</artifactId>
            <configuration>
              <forkCount>${testsThreadCount}</forkCount>
-              <argLine>-Xmx1024m -XX:+HeapDumpOnOutOfMemoryError -DminiClusterDedicatedDirs=true</argLine>
+              <reuseForks>false</reuseForks>
+              <argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
+              <systemPropertyVariables>
+                <test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
+                <test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
+                <hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
+
+                <!-- Due to a Maven quirk, setting this to just -->
+                <!-- surefire.forkNumber won't do the parameter substitution. -->
+                <!-- Putting a prefix in front of it like "fork-" makes it -->
+                <!-- work. -->
+                <test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
+              </systemPropertyVariables>
            </configuration>
          </plugin>
        </plugins>
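Note (added for orientation, not part of the patch): the parallel-tests profile above gives every forked surefire JVM its own test.build.data, test.build.dir and hadoop.tmp.dir, plus a "fork-${surefire.forkNumber}" id, so concurrent test forks do not collide on disk. A minimal, hypothetical JUnit 4 sketch of how a test could consume those per-fork properties (the class name and fallback values are illustrative only):

    import java.io.File;
    import org.junit.Test;
    import static org.junit.Assert.assertTrue;

    public class TestForkIsolation {
      @Test
      public void createsPerForkScratchDir() {
        // Each surefire fork sees its own values, e.g. "fork-1", "fork-2", ...
        String forkId = System.getProperty("test.unique.fork.id", "fork-1");
        String dataDir = System.getProperty("test.build.data",
            System.getProperty("java.io.tmpdir"));

        // Scratch space is therefore unique per fork, avoiding collisions
        // between tests that run in parallel.
        File scratch = new File(dataDir, forkId);
        assertTrue(scratch.mkdirs() || scratch.isDirectory());
      }
    }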
@@ -310,4 +310,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final String NFS_EXPORTS_ALLOWED_HOSTS_SEPARATOR = ";";
   public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY = "nfs.exports.allowed.hosts";
   public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT = "* rw";
+
+  // HDFS client HTrace configuration.
+  public static final String FS_CLIENT_HTRACE_PREFIX = "fs.client.htrace.";
 }
@@ -294,6 +294,12 @@ public class CommonConfigurationKeysPublic {
   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String HADOOP_SECURITY_AUTH_TO_LOCAL =
     "hadoop.security.auth_to_local";
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String HADOOP_SECURITY_DNS_INTERFACE_KEY =
+    "hadoop.security.dns.interface";
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String HADOOP_SECURITY_DNS_NAMESERVER_KEY =
+    "hadoop.security.dns.nameserver";

   /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
   public static final String HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN =
@@ -21,8 +21,8 @@ package org.apache.hadoop.fs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.util.DataChecksum;
-import org.apache.htrace.NullScope;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;

 import java.io.IOException;
 import java.io.OutputStream;

@@ -43,6 +43,8 @@ abstract public class FSOutputSummer extends OutputStream {
   private byte checksum[];
   // The number of valid bytes in the buffer.
   private int count;
+  // The HTrace tracer to use
+  private Tracer tracer;

   // We want this value to be a multiple of 3 because the native code checksums
   // 3 chunks simultaneously. The chosen value of 9 strikes a balance between

@@ -201,7 +203,7 @@ abstract public class FSOutputSummer extends OutputStream {
   }

   protected TraceScope createWriteTraceScope() {
-    return NullScope.INSTANCE;
+    return null;
   }

   /** Generate checksums for the given data chunks and output chunks & checksums

@@ -219,9 +221,11 @@ abstract public class FSOutputSummer extends OutputStream {
             getChecksumSize());
       }
     } finally {
+      if (scope != null) {
         scope.close();
+      }
     }
   }

   /**
    * Converts a checksum integer value to a byte stream
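Note (added): createWriteTraceScope() now returns null instead of htrace's old NullScope sentinel, and the finally block only closes the scope when one exists. A hedged sketch of what an overriding subclass could look like under the new contract, assuming FSOutputSummer's protected DataChecksum constructor; the DemoOutputStream class and the span name are illustrative, not part of this commit:

    import org.apache.hadoop.fs.FSOutputSummer;
    import org.apache.hadoop.util.DataChecksum;
    import org.apache.htrace.core.TraceScope;
    import org.apache.htrace.core.Tracer;

    // Hypothetical subclass: only creates a scope when a Tracer is available,
    // matching the null-based contract introduced above.
    abstract class DemoOutputStream extends FSOutputSummer {
      private final Tracer tracer;

      protected DemoOutputStream(DataChecksum sum, Tracer tracer) {
        super(sum);
        this.tracer = tracer;
      }

      @Override
      protected TraceScope createWriteTraceScope() {
        // Returning null simply means "no tracing for this write".
        return tracer == null ? null : tracer.newScope("DemoOutputStream#write");
      }
    }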
@@ -61,6 +61,7 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.ShutdownHookManager;

 import com.google.common.base.Preconditions;
+import org.apache.htrace.core.Tracer;

 /**
  * The FileContext class provides an interface for users of the Hadoop

@@ -222,12 +223,14 @@ public class FileContext {
   private final Configuration conf;
   private final UserGroupInformation ugi;
   final boolean resolveSymlinks;
+  private final Tracer tracer;

   private FileContext(final AbstractFileSystem defFs,
     final FsPermission theUmask, final Configuration aConf) {
     defaultFS = defFs;
     umask = FsPermission.getUMask(aConf);
     conf = aConf;
+    tracer = FsTracer.get(aConf);
     try {
       ugi = UserGroupInformation.getCurrentUser();
     } catch (IOException e) {

@@ -2721,4 +2724,8 @@ public class FileContext {
       throws IOException {
     return defaultFS.getAllStoragePolicies();
   }
+
+  Tracer getTracer() {
+    return tracer;
+  }
 }
@@ -67,9 +67,8 @@ import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.Tracer;
+import org.apache.htrace.core.TraceScope;

 import com.google.common.annotations.VisibleForTesting;

@@ -129,6 +128,13 @@ public abstract class FileSystem extends Configured implements Closeable {
   private Set<Path> deleteOnExit = new TreeSet<Path>();

   boolean resolveSymlinks;
+
+  private Tracer tracer;
+
+  protected final Tracer getTracer() {
+    return tracer;
+  }
+
   /**
    * This method adds a file system for testing so that we can find it later. It
    * is only for testing.

@@ -1083,9 +1089,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    * @param progress
    * @throws IOException
    * @see #setPermission(Path, FsPermission)
-   * @deprecated API only for 0.20-append
    */
-  @Deprecated
   public FSDataOutputStream createNonRecursive(Path f,
       boolean overwrite,
       int bufferSize, short replication, long blockSize,

@@ -1108,9 +1112,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    * @param progress
    * @throws IOException
    * @see #setPermission(Path, FsPermission)
-   * @deprecated API only for 0.20-append
    */
-  @Deprecated
   public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
       boolean overwrite, int bufferSize, short replication, long blockSize,
       Progressable progress) throws IOException {

@@ -1133,9 +1135,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    * @param progress
    * @throws IOException
    * @see #setPermission(Path, FsPermission)
-   * @deprecated API only for 0.20-append
    */
-  @Deprecated
   public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
       EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
       Progressable progress) throws IOException {

@@ -2706,14 +2706,13 @@ public abstract class FileSystem extends Configured implements Closeable {

   private static FileSystem createFileSystem(URI uri, Configuration conf
       ) throws IOException {
-    TraceScope scope = Trace.startSpan("FileSystem#createFileSystem");
-    Span span = scope.getSpan();
-    if (span != null) {
-      span.addKVAnnotation("scheme", uri.getScheme());
-    }
+    Tracer tracer = FsTracer.get(conf);
+    TraceScope scope = tracer.newScope("FileSystem#createFileSystem");
+    scope.addKVAnnotation("scheme", uri.getScheme());
     try {
       Class<?> clazz = getFileSystemClass(uri.getScheme(), conf);
       FileSystem fs = (FileSystem)ReflectionUtils.newInstance(clazz, conf);
+      fs.tracer = tracer;
       fs.initialize(uri, conf);
       return fs;
     } finally {
@@ -203,7 +203,6 @@ public class FilterFileSystem extends FileSystem {


   @Override
-  @Deprecated
   public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
       EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
       Progressable progress) throws IOException {
@@ -32,16 +32,13 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFactory;
 import org.apache.hadoop.fs.shell.FsCommand;
-import org.apache.hadoop.tracing.SpanReceiverHost;
 import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.tracing.TraceUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.SamplerBuilder;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;

 /** Provide command line access to a FileSystem. */
 @InterfaceAudience.Private

@@ -54,13 +51,12 @@ public class FsShell extends Configured implements Tool {
   private FileSystem fs;
   private Trash trash;
   protected CommandFactory commandFactory;
-  private Sampler traceSampler;

   private final String usagePrefix =
     "Usage: hadoop fs [generic options]";

-  private SpanReceiverHost spanReceiverHost;
-  static final String SEHLL_HTRACE_PREFIX = "dfs.shell.htrace.";
+  private Tracer tracer;
+  static final String SHELL_HTRACE_PREFIX = "fs.shell.htrace.";

   /**
    * Default ctor with no configuration. Be sure to invoke

@@ -102,8 +98,9 @@ public class FsShell extends Configured implements Tool {
       commandFactory.addObject(new Usage(), "-usage");
       registerCommands(commandFactory);
     }
-    this.spanReceiverHost =
-        SpanReceiverHost.get(getConf(), SEHLL_HTRACE_PREFIX);
+    this.tracer = new Tracer.Builder("FsShell").
+        conf(TraceUtils.wrapHadoopConf(SHELL_HTRACE_PREFIX, getConf())).
+        build();
   }

   protected void registerCommands(CommandFactory factory) {

@@ -289,8 +286,6 @@ public class FsShell extends Configured implements Tool {
   public int run(String argv[]) throws Exception {
     // initialize FsShell
     init();
-    traceSampler = new SamplerBuilder(TraceUtils.
-        wrapHadoopConf(SEHLL_HTRACE_PREFIX, getConf())).build();
     int exitCode = -1;
     if (argv.length < 1) {
       printUsage(System.err);

@@ -302,7 +297,7 @@ public class FsShell extends Configured implements Tool {
         if (instance == null) {
           throw new UnknownCommandException();
         }
-        TraceScope scope = Trace.startSpan(instance.getCommandName(), traceSampler);
+        TraceScope scope = tracer.newScope(instance.getCommandName());
         if (scope.getSpan() != null) {
           String args = StringUtils.join(" ", argv);
           if (args.length() > 2048) {

@@ -317,6 +312,7 @@ public class FsShell extends Configured implements Tool {
         }
       } catch (IllegalArgumentException e) {
         displayError(cmd, e.getLocalizedMessage());
+        printUsage(System.err);
         if (instance != null) {
           printInstanceUsage(System.err, instance);
         }

@@ -327,6 +323,7 @@ public class FsShell extends Configured implements Tool {
         e.printStackTrace(System.err);
       }
     }
+    tracer.close();
     return exitCode;
   }

@@ -353,9 +350,6 @@ public class FsShell extends Configured implements Tool {
       fs.close();
       fs = null;
     }
-    if (this.spanReceiverHost != null) {
-      this.spanReceiverHost.closeReceivers();
-    }
   }

   /**
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.tracing.TraceUtils;
+import org.apache.htrace.core.Tracer;
+
+/**
+ * Holds the HTrace Tracer used for FileSystem operations.
+ *
+ * Ideally, this would be owned by the DFSClient, rather than global. However,
+ * the FileContext API may create a new DFSClient for each operation in some
+ * cases. Because of this, we cannot store this Tracer inside DFSClient. See
+ * HADOOP-6356 for details.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class FsTracer {
+  private static Tracer instance;
+
+  public static synchronized Tracer get(Configuration conf) {
+    if (instance == null) {
+      instance = new Tracer.Builder("FSClient").
+          conf(TraceUtils.wrapHadoopConf(CommonConfigurationKeys.
+              FS_CLIENT_HTRACE_PREFIX, conf)).
+          build();
+    }
+    return instance;
+  }
+
+  @VisibleForTesting
+  public static synchronized void clear() {
+    if (instance == null) {
+      return;
+    }
+    try {
+      instance.close();
+    } finally {
+      instance = null;
+    }
+  }
+
+  private FsTracer() {
+  }
+}
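Note (added): a minimal sketch of how client code is expected to obtain the shared Tracer and wrap an operation in a trace scope, using only the FsTracer and htrace-core4 calls that appear above; the span name, annotation and path are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsTracer;
    import org.apache.htrace.core.TraceScope;
    import org.apache.htrace.core.Tracer;

    public class FsTracerExample {
      public static void main(String[] args) {
        // Sampler/receiver settings are read from keys under "fs.client.htrace.".
        Configuration conf = new Configuration();
        Tracer tracer = FsTracer.get(conf);

        TraceScope scope = tracer.newScope("example#listStatus");
        try {
          scope.addKVAnnotation("path", "/tmp");
          // ... perform the traced FileSystem call here ...
        } finally {
          scope.close();
        }
      }
    }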
@@ -28,9 +28,8 @@ import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;

-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;

 @InterfaceAudience.Private
 @InterfaceStability.Unstable

@@ -41,12 +40,14 @@ class Globber {
   private final FileContext fc;
   private final Path pathPattern;
   private final PathFilter filter;
+  private final Tracer tracer;

   public Globber(FileSystem fs, Path pathPattern, PathFilter filter) {
     this.fs = fs;
     this.fc = null;
     this.pathPattern = pathPattern;
     this.filter = filter;
+    this.tracer = fs.getTracer();
   }

   public Globber(FileContext fc, Path pathPattern, PathFilter filter) {

@@ -54,6 +55,7 @@ class Globber {
     this.fc = fc;
     this.pathPattern = pathPattern;
     this.filter = filter;
+    this.tracer = fc.getTracer();
   }

   private FileStatus getFileStatus(Path path) throws IOException {

@@ -140,11 +142,8 @@ class Globber {
   }

   public FileStatus[] glob() throws IOException {
-    TraceScope scope = Trace.startSpan("Globber#glob");
-    Span span = scope.getSpan();
-    if (span != null) {
-      span.addKVAnnotation("pattern", pathPattern.toUri().getPath());
-    }
+    TraceScope scope = tracer.newScope("Globber#glob");
+    scope.addKVAnnotation("pattern", pathPattern.toUri().getPath());
     try {
       return doGlob();
     } finally {
@@ -713,7 +713,6 @@ public class HarFileSystem extends FileSystem {
     throw new IOException("Har: create not allowed.");
   }

-  @SuppressWarnings("deprecation")
   @Override
   public FSDataOutputStream createNonRecursive(Path f, boolean overwrite,
       int bufferSize, short replication, long blockSize, Progressable progress)
@@ -250,9 +250,9 @@ public class LocalDirAllocator {
     private int dirNumLastAccessed;
     private Random dirIndexRandomizer = new Random();
     private FileSystem localFS;
-    private DF[] dirDF;
+    private DF[] dirDF = new DF[0];
     private String contextCfgItemName;
-    private String[] localDirs;
+    private String[] localDirs = new String[0];
     private String savedLocalDirs = "";

     public AllocatorPerContext(String contextCfgItemName) {
@@ -319,7 +319,6 @@ public class RawLocalFileSystem extends FileSystem {
   }

   @Override
-  @Deprecated
   public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
       EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
       Progressable progress) throws IOException {
@@ -238,7 +238,13 @@ abstract class CommandWithDestination extends FsCommand {
         e.setTargetPath(dstPath.toString());
         throw e;
       }
-      if (dstPath.startsWith(srcPath+Path.SEPARATOR)) {
+      // When a path is normalized, all trailing slashes are removed
+      // except for the root
+      if(!srcPath.endsWith(Path.SEPARATOR)) {
+        srcPath += Path.SEPARATOR;
+      }
+
+      if(dstPath.startsWith(srcPath)) {
         PathIOException e = new PathIOException(src.toString(),
             "is a subdirectory of itself");
         e.setTargetPath(target.toString());
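Note (added): the rewritten check appends the path separator only when it is missing, because normalized paths keep a trailing slash solely for the root. A small self-contained illustration of the difference, using "/" in place of Path.SEPARATOR; the class and paths are hypothetical:

    public final class SubdirCheckDemo {
      public static void main(String[] args) {
        String srcPath = "/";            // copying from the root
        String dstPath = "/user/alice";  // destination underneath the source

        // Old check: "/" + "/" == "//", which "/user/alice" never starts with,
        // so the "is a subdirectory of itself" case went undetected for "/".
        boolean oldCheck = dstPath.startsWith(srcPath + "/");

        // New check: append the separator only when it is not already there.
        String prefix = srcPath.endsWith("/") ? srcPath : srcPath + "/";
        boolean newCheck = dstPath.startsWith(prefix);

        System.out.println("old: " + oldCheck + ", new: " + newCheck); // old: false, new: true
      }
    }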
@@ -185,7 +185,6 @@ class ChRootedFileSystem extends FilterFileSystem {
   }

   @Override
-  @Deprecated
   public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
       EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
       Progressable progress) throws IOException {
@@ -92,7 +92,8 @@ import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
-import org.apache.htrace.Trace;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.Tracer;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;

@@ -722,8 +723,9 @@ public class Client {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Connecting to "+server);
       }
-      if (Trace.isTracing()) {
-        Trace.addTimelineAnnotation("IPC client connecting to " + server);
+      Span span = Tracer.getCurrentSpan();
+      if (span != null) {
+        span.addTimelineAnnotation("IPC client connecting to " + server);
       }
       short numRetries = 0;
       Random rand = null;

@@ -796,8 +798,9 @@ public class Client {
         // update last activity time
         touch();

-        if (Trace.isTracing()) {
-          Trace.addTimelineAnnotation("IPC client connected to " + server);
+        span = Tracer.getCurrentSpan();
+        if (span != null) {
+          span.addTimelineAnnotation("IPC client connected to " + server);
         }

         // start the receiver thread after the socket connection has been set
@@ -49,8 +49,8 @@ import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.Time;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.BlockingService;

@@ -206,12 +206,13 @@ public class ProtobufRpcEngine implements RpcEngine {
             + method.getName() + "]");
       }

-      TraceScope traceScope = null;
       // if Tracing is on then start a new span for this rpc.
       // guard it in the if statement to make sure there isn't
       // any extra string manipulation.
-      if (Trace.isTracing()) {
-        traceScope = Trace.startSpan(RpcClientUtil.methodToTraceString(method));
+      Tracer tracer = Tracer.curThreadTracer();
+      TraceScope traceScope = null;
+      if (tracer != null) {
+        traceScope = tracer.newScope(RpcClientUtil.methodToTraceString(method));
       }

       RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method);

@@ -236,9 +237,9 @@ public class ProtobufRpcEngine implements RpcEngine {
               remoteId + ": " + method.getName() +
                 " {" + e + "}");
         }
-        if (Trace.isTracing()) {
-          traceScope.getSpan().addTimelineAnnotation(
-              "Call got exception: " + e.toString());
+        if (traceScope != null) {
+          traceScope.addTimelineAnnotation("Call got exception: " +
+              e.toString());
         }
         throw new ServiceException(e);
       } finally {
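Note (added): with htrace-core4 the static Trace.isTracing()/Trace.startSpan() calls give way to a per-thread Tracer that may be absent, so call sites null-check both the tracer and the scope. A condensed sketch of the pattern the RPC engines above now share; the TracedCallPattern class and doCall() placeholder are illustrative:

    import org.apache.htrace.core.TraceScope;
    import org.apache.htrace.core.Tracer;

    public final class TracedCallPattern {
      // Illustrative stand-in for the actual RPC invocation.
      private static Object doCall() {
        return new Object();
      }

      public static Object invokeTraced(String traceName) {
        // There may simply be no Tracer bound to the current thread.
        Tracer tracer = Tracer.curThreadTracer();
        TraceScope traceScope = null;
        if (tracer != null) {
          traceScope = tracer.newScope(traceName);
        }
        try {
          return doCall();
        } catch (RuntimeException e) {
          if (traceScope != null) {
            traceScope.addTimelineAnnotation("Call got exception: " + e.toString());
          }
          throw e;
        } finally {
          if (traceScope != null) {
            traceScope.close();
          }
        }
      }
    }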
@@ -117,10 +117,9 @@ import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceInfo;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.SpanId;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.ByteString;

@@ -141,6 +140,7 @@ public abstract class Server {
   private List<AuthMethod> enabledAuthMethods;
   private RpcSaslProto negotiateResponse;
   private ExceptionsHandler exceptionsHandler = new ExceptionsHandler();
+  private Tracer tracer;

   public void addTerseExceptions(Class<?>... exceptionClass) {
     exceptionsHandler.addTerseExceptions(exceptionClass);

@@ -581,7 +581,7 @@ public abstract class Server {
     private ByteBuffer rpcResponse; // the response for this call
     private final RPC.RpcKind rpcKind;
     private final byte[] clientId;
-    private final Span traceSpan; // the tracing span on the server side
+    private final TraceScope traceScope; // the HTrace scope on the server side

     public Call(int id, int retryCount, Writable param,
         Connection connection) {

@@ -595,7 +595,7 @@ public abstract class Server {
     }

     public Call(int id, int retryCount, Writable param, Connection connection,
-        RPC.RpcKind kind, byte[] clientId, Span span) {
+        RPC.RpcKind kind, byte[] clientId, TraceScope traceScope) {
       this.callId = id;
       this.retryCount = retryCount;
       this.rpcRequest = param;

@@ -604,7 +604,7 @@ public abstract class Server {
       this.rpcResponse = null;
       this.rpcKind = kind;
       this.clientId = clientId;
-      this.traceSpan = span;
+      this.traceScope = traceScope;
     }

     @Override

@@ -2014,19 +2014,24 @@ public abstract class Server {
             RpcErrorCodeProto.FATAL_DESERIALIZING_REQUEST, err);
       }

-      Span traceSpan = null;
+      TraceScope traceScope = null;
       if (header.hasTraceInfo()) {
-        // If the incoming RPC included tracing info, always continue the trace
-        TraceInfo parentSpan = new TraceInfo(header.getTraceInfo().getTraceId(),
-            header.getTraceInfo().getParentId());
-        traceSpan = Trace.startSpan(
-            RpcClientUtil.toTraceName(rpcRequest.toString()),
-            parentSpan).detach();
+        if (tracer != null) {
+          // If the incoming RPC included tracing info, always continue the
+          // trace
+          SpanId parentSpanId = new SpanId(
+              header.getTraceInfo().getTraceId(),
+              header.getTraceInfo().getParentId());
+          traceScope = tracer.newScope(
+              RpcClientUtil.toTraceName(rpcRequest.toString()),
+              parentSpanId);
+          traceScope.detach();
+        }
       }

       Call call = new Call(header.getCallId(), header.getRetryCount(),
           rpcRequest, this, ProtoUtil.convert(header.getRpcKind()),
-          header.getClientId().toByteArray(), traceSpan);
+          header.getClientId().toByteArray(), traceScope);

       if (callQueue.isClientBackoffEnabled()) {
         // if RPC queue is full, we will ask the RPC client to back off by

@@ -2209,8 +2214,9 @@ public abstract class Server {
         Writable value = null;

         CurCall.set(call);
-        if (call.traceSpan != null) {
-          traceScope = Trace.continueSpan(call.traceSpan);
+        if (call.traceScope != null) {
+          call.traceScope.reattach();
+          traceScope = call.traceScope;
           traceScope.getSpan().addTimelineAnnotation("called");
         }

@@ -2287,21 +2293,18 @@ public abstract class Server {
       } catch (InterruptedException e) {
         if (running) { // unexpected -- log it
           LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e);
-          if (Trace.isTracing()) {
+          if (traceScope != null) {
             traceScope.getSpan().addTimelineAnnotation("unexpectedly interrupted: " +
                 StringUtils.stringifyException(e));
           }
         }
       } catch (Exception e) {
         LOG.info(Thread.currentThread().getName() + " caught an exception", e);
-        if (Trace.isTracing()) {
+        if (traceScope != null) {
           traceScope.getSpan().addTimelineAnnotation("Exception: " +
               StringUtils.stringifyException(e));
         }
       } finally {
-        if (traceScope != null) {
-          traceScope.close();
-        }
         IOUtils.cleanup(LOG, traceScope);
       }
     }

@@ -2615,6 +2618,10 @@ public abstract class Server {
   /** Sets the socket buffer size used for responding to RPCs */
   public void setSocketSendBufSize(int size) { this.socketSendBufferSize = size; }

+  public void setTracer(Tracer t) {
+    this.tracer = t;
+  }
+
   /** Starts the service.  Must be called before any calls will be handled. */
   public synchronized void start() {
     responder.start();
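Note (added): the ipc Server no longer consults global htrace state; it is handed a Tracer through the new setTracer(Tracer) method and only continues incoming trace spans when one has been set. A hedged sketch of how a daemon might wire this up, where the "MyRpcServer" name and "my.server.htrace." prefix are hypothetical, while Tracer.Builder, TraceUtils.wrapHadoopConf and setTracer are the calls shown in this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.Server;
    import org.apache.hadoop.tracing.TraceUtils;
    import org.apache.htrace.core.Tracer;

    public final class ServerTracingSetup {
      // Builds a Tracer from Hadoop configuration and hands it to an RPC server.
      public static void enableTracing(Server server, Configuration conf) {
        Tracer tracer = new Tracer.Builder("MyRpcServer").
            conf(TraceUtils.wrapHadoopConf("my.server.htrace.", conf)).
            build();
        server.setTracer(tracer);
      }
    }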
@@ -42,8 +42,8 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.*;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;

 /** An RpcEngine implementation for Writable data. */
 @InterfaceStability.Evolving

@@ -233,9 +233,14 @@ public class WritableRpcEngine implements RpcEngine {
       if (LOG.isDebugEnabled()) {
         startTime = Time.now();
       }
+
+      // if Tracing is on then start a new span for this rpc.
+      // guard it in the if statement to make sure there isn't
+      // any extra string manipulation.
+      Tracer tracer = Tracer.curThreadTracer();
       TraceScope traceScope = null;
-      if (Trace.isTracing()) {
-        traceScope = Trace.startSpan(RpcClientUtil.methodToTraceString(method));
+      if (tracer != null) {
+        traceScope = tracer.newScope(RpcClientUtil.methodToTraceString(method));
       }
       ObjectWritable value;
       try {
@@ -18,6 +18,8 @@

 package org.apache.hadoop.net;

+import com.google.common.net.InetAddresses;
+import com.sun.istack.Nullable;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;

@@ -27,9 +29,11 @@ import java.net.InetAddress;
 import java.net.NetworkInterface;
 import java.net.SocketException;
 import java.net.UnknownHostException;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.Enumeration;
 import java.util.LinkedHashSet;
+import java.util.List;
 import java.util.Vector;

 import javax.naming.NamingException;

@@ -68,7 +72,7 @@ public class DNS {
    * @return The host name associated with the provided IP
    * @throws NamingException If a NamingException is encountered
    */
-  public static String reverseDns(InetAddress hostIp, String ns)
+  public static String reverseDns(InetAddress hostIp, @Nullable String ns)
       throws NamingException {
     //
     // Builds the reverse IP lookup form

@@ -228,29 +232,45 @@ public class DNS {
    *            (e.g. eth0 or eth0:0)
    * @param nameserver
    *            The DNS host name
+   * @param tryfallbackResolution
+   *            if true and if reverse DNS resolution fails then attempt to
+   *            resolve the hostname with
+   *            {@link InetAddress#getCanonicalHostName()} which includes
+   *            hosts file resolution.
    * @return A string vector of all host names associated with the IPs tied to
    *         the specified interface
    * @throws UnknownHostException if the given interface is invalid
    */
-  public static String[] getHosts(String strInterface, String nameserver)
+  public static String[] getHosts(String strInterface,
+                                  @Nullable String nameserver,
+                                  boolean tryfallbackResolution)
       throws UnknownHostException {
-    String[] ips = getIPs(strInterface);
-    Vector<String> hosts = new Vector<String>();
-    for (int ctr = 0; ctr < ips.length; ctr++) {
+    final List<String> hosts = new Vector<String>();
+    final List<InetAddress> addresses =
+        getIPsAsInetAddressList(strInterface, true);
+    for (InetAddress address : addresses) {
       try {
-        hosts.add(reverseDns(InetAddress.getByName(ips[ctr]),
-                             nameserver));
-      } catch (UnknownHostException ignored) {
+        hosts.add(reverseDns(address, nameserver));
       } catch (NamingException ignored) {
       }
     }
-    if (hosts.isEmpty()) {
-      LOG.warn("Unable to determine hostname for interface " + strInterface);
-      return new String[] { cachedHostname };
-    } else {
-      return hosts.toArray(new String[hosts.size()]);
-    }
-  }
+    if (hosts.isEmpty() && tryfallbackResolution) {
+      for (InetAddress address : addresses) {
+        final String canonicalHostName = address.getCanonicalHostName();
+        // Don't use the result if it looks like an IP address.
+        if (!InetAddresses.isInetAddress(canonicalHostName)) {
+          hosts.add(canonicalHostName);
+        }
+      }
+    }
+
+    if (hosts.isEmpty()) {
+      LOG.warn("Unable to determine hostname for interface " +
+          strInterface);
+      hosts.add(cachedHostname);
+    }
+    return hosts.toArray(new String[hosts.size()]);
+  }

   /**

@@ -315,7 +335,7 @@ public class DNS {
    */
   public static String[] getHosts(String strInterface)
       throws UnknownHostException {
-    return getHosts(strInterface, null);
+    return getHosts(strInterface, null, false);
   }

   /**

@@ -331,17 +351,19 @@
    * @throws UnknownHostException
    *             If one is encountered while querying the default interface
    */
-  public static String getDefaultHost(String strInterface, String nameserver)
+  public static String getDefaultHost(@Nullable String strInterface,
+                                      @Nullable String nameserver,
+                                      boolean tryfallbackResolution)
       throws UnknownHostException {
-    if ("default".equals(strInterface)) {
+    if (strInterface == null || "default".equals(strInterface)) {
       return cachedHostname;
     }

-    if ("default".equals(nameserver)) {
-      return getDefaultHost(strInterface);
+    if (nameserver != null && "default".equals(nameserver)) {
+      nameserver = null;
     }

-    String[] hosts = getHosts(strInterface, nameserver);
+    String[] hosts = getHosts(strInterface, nameserver, tryfallbackResolution);
     return hosts[0];
   }

@ -357,9 +379,74 @@ public class DNS {
|
||||||
* @throws UnknownHostException
|
* @throws UnknownHostException
|
||||||
* If one is encountered while querying the default interface
|
* If one is encountered while querying the default interface
|
||||||
*/
|
*/
|
||||||
public static String getDefaultHost(String strInterface)
|
public static String getDefaultHost(@Nullable String strInterface)
|
||||||
throws UnknownHostException {
|
throws UnknownHostException {
|
||||||
return getDefaultHost(strInterface, null);
|
return getDefaultHost(strInterface, null, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns the default (first) host name associated by the provided
|
||||||
|
* nameserver with the address bound to the specified network interface.
|
||||||
|
*
|
||||||
|
* @param strInterface
|
||||||
|
* The name of the network interface to query (e.g. eth0)
|
||||||
|
* @param nameserver
|
||||||
|
* The DNS host name
|
||||||
|
* @throws UnknownHostException
|
||||||
|
* If one is encountered while querying the default interface
|
||||||
|
*/
|
||||||
|
public static String getDefaultHost(@Nullable String strInterface,
|
||||||
|
@Nullable String nameserver)
|
||||||
|
throws UnknownHostException {
|
||||||
|
return getDefaultHost(strInterface, nameserver, false);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Returns all the IPs associated with the provided interface, if any, as
|
||||||
|
* a list of InetAddress objects.
|
||||||
|
*
|
||||||
|
* @param strInterface
|
||||||
|
* The name of the network interface or sub-interface to query
|
||||||
|
* (eg eth0 or eth0:0) or the string "default"
|
||||||
|
* @param returnSubinterfaces
|
||||||
|
* Whether to return IPs associated with subinterfaces of
|
||||||
|
* the given interface
|
||||||
|
* @return A list of all the IPs associated with the provided
|
||||||
|
* interface. The local host IP is returned if the interface
|
||||||
|
* name "default" is specified or there is an I/O error looking
|
||||||
|
* for the given interface.
|
||||||
|
* @throws UnknownHostException
|
||||||
|
* If the given interface is invalid
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
public static List<InetAddress> getIPsAsInetAddressList(String strInterface,
|
||||||
|
boolean returnSubinterfaces) throws UnknownHostException {
|
||||||
|
if ("default".equals(strInterface)) {
|
||||||
|
return Arrays.asList(InetAddress.getByName(cachedHostAddress));
|
||||||
|
}
|
||||||
|
NetworkInterface netIf;
|
||||||
|
try {
|
||||||
|
netIf = NetworkInterface.getByName(strInterface);
|
||||||
|
if (netIf == null) {
|
||||||
|
netIf = getSubinterface(strInterface);
|
||||||
|
}
|
||||||
|
} catch (SocketException e) {
|
||||||
|
LOG.warn("I/O error finding interface " + strInterface +
|
||||||
|
": " + e.getMessage());
|
||||||
|
return Arrays.asList(InetAddress.getByName(cachedHostAddress));
|
||||||
|
}
|
||||||
|
if (netIf == null) {
|
||||||
|
throw new UnknownHostException("No such interface " + strInterface);
|
||||||
|
}
|
||||||
|
|
||||||
|
// NB: Using a LinkedHashSet to preserve the order for callers
|
||||||
|
// that depend on a particular element being 1st in the array.
|
||||||
|
// For example, getDefaultIP always returns the first element.
|
||||||
|
LinkedHashSet<InetAddress> allAddrs = new LinkedHashSet<InetAddress>();
|
||||||
|
allAddrs.addAll(Collections.list(netIf.getInetAddresses()));
|
||||||
|
if (!returnSubinterfaces) {
|
||||||
|
allAddrs.removeAll(getSubinterfaceInetAddrs(netIf));
|
||||||
|
}
|
||||||
|
return new Vector<InetAddress>(allAddrs);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@@ -17,6 +17,8 @@
 package org.apache.hadoop.security;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_DNS_INTERFACE_KEY;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_DNS_NAMESERVER_KEY;
 
 import java.io.IOException;
 import java.net.InetAddress;
@@ -29,6 +31,7 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.ServiceLoader;
 
+import javax.annotation.Nullable;
 import javax.security.auth.kerberos.KerberosPrincipal;
 import javax.security.auth.kerberos.KerberosTicket;
 
@@ -39,6 +42,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.Token;
@@ -180,13 +184,38 @@ public class SecurityUtil {
       throws IOException {
     String fqdn = hostname;
     if (fqdn == null || fqdn.isEmpty() || fqdn.equals("0.0.0.0")) {
-      fqdn = getLocalHostName();
+      fqdn = getLocalHostName(null);
     }
     return components[0] + "/" +
         StringUtils.toLowerCase(fqdn) + "@" + components[2];
   }
 
-  static String getLocalHostName() throws UnknownHostException {
+  /**
+   * Retrieve the name of the current host. Multihomed hosts may restrict the
+   * hostname lookup to a specific interface and nameserver with {@link
+   * org.apache.hadoop.fs.CommonConfigurationKeysPublic#HADOOP_SECURITY_DNS_INTERFACE_KEY}
+   * and {@link org.apache.hadoop.fs.CommonConfigurationKeysPublic#HADOOP_SECURITY_DNS_NAMESERVER_KEY}
+   *
+   * @param conf Configuration object. May be null.
+   * @return
+   * @throws UnknownHostException
+   */
+  static String getLocalHostName(@Nullable Configuration conf)
+      throws UnknownHostException {
+    if (conf != null) {
+      String dnsInterface = conf.get(HADOOP_SECURITY_DNS_INTERFACE_KEY);
+      String nameServer = conf.get(HADOOP_SECURITY_DNS_NAMESERVER_KEY);
+
+      if (dnsInterface != null) {
+        return DNS.getDefaultHost(dnsInterface, nameServer, true);
+      } else if (nameServer != null) {
+        throw new IllegalArgumentException(HADOOP_SECURITY_DNS_NAMESERVER_KEY +
+            " requires " + HADOOP_SECURITY_DNS_INTERFACE_KEY + ". Check your" +
+            "configuration.");
+      }
+    }
+
+    // Fallback to querying the default hostname as we did before.
     return InetAddress.getLocalHost().getCanonicalHostName();
   }
 
@@ -207,7 +236,7 @@ public class SecurityUtil {
   @InterfaceStability.Evolving
   public static void login(final Configuration conf,
       final String keytabFileKey, final String userNameKey) throws IOException {
-    login(conf, keytabFileKey, userNameKey, getLocalHostName());
+    login(conf, keytabFileKey, userNameKey, getLocalHostName(conf));
   }
 
   /**
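The hunk above routes Kerberos `_HOST` substitution through the configurable hostname lookup. As an illustrative aside (not part of the patch), the effect is visible through the public `SecurityUtil.getServerPrincipal` API; the hostname value below is a made-up placeholder for whatever `getLocalHostName(conf)` would resolve:

```java
import java.io.IOException;
import org.apache.hadoop.security.SecurityUtil;

public class PrincipalSubstitutionSketch {
  public static void main(String[] args) throws IOException {
    // With this patch, the host used for _HOST substitution can come from
    // hadoop.security.dns.interface / hadoop.security.dns.nameserver rather
    // than InetAddress.getLocalHost(). "nn1.cluster.example.com" is a
    // hypothetical value standing in for the resolved local host name.
    String principal = SecurityUtil.getServerPrincipal(
        "nn/_HOST@EXAMPLE.COM", "nn1.cluster.example.com");
    System.out.println(principal); // nn/nn1.cluster.example.com@EXAMPLE.COM
  }
}
```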
|
|
|
@ -1,208 +0,0 @@
|
||||||
/**
|
|
||||||
* Licensed to the Apache Software Foundation (ASF) under one
|
|
||||||
* or more contributor license agreements. See the NOTICE file
|
|
||||||
* distributed with this work for additional information
|
|
||||||
* regarding copyright ownership. The ASF licenses this file
|
|
||||||
* to you under the Apache License, Version 2.0 (the
|
|
||||||
* "License"); you may not use this file except in compliance
|
|
||||||
* with the License. You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*/
|
|
||||||
package org.apache.hadoop.tracing;
|
|
||||||
|
|
||||||
import java.io.IOException;
|
|
||||||
import java.util.Collections;
|
|
||||||
import java.util.HashMap;
|
|
||||||
import java.util.List;
|
|
||||||
import java.util.Map;
|
|
||||||
import java.util.TreeMap;
|
|
||||||
|
|
||||||
import org.apache.commons.logging.Log;
|
|
||||||
import org.apache.commons.logging.LogFactory;
|
|
||||||
import org.apache.hadoop.classification.InterfaceAudience;
|
|
||||||
import org.apache.hadoop.conf.Configuration;
|
|
||||||
import org.apache.hadoop.tracing.SpanReceiverInfo.ConfigurationPair;
|
|
||||||
import org.apache.hadoop.util.ShutdownHookManager;
|
|
||||||
import org.apache.htrace.SpanReceiver;
|
|
||||||
import org.apache.htrace.SpanReceiverBuilder;
|
|
||||||
import org.apache.htrace.Trace;
|
|
||||||
import org.apache.htrace.impl.LocalFileSpanReceiver;
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This class provides functions for reading the names of SpanReceivers from
|
|
||||||
* the Hadoop configuration, adding those SpanReceivers to the Tracer,
|
|
||||||
* and closing those SpanReceivers when appropriate.
|
|
||||||
* This class does nothing If no SpanReceiver is configured.
|
|
||||||
*/
|
|
||||||
@InterfaceAudience.Private
|
|
||||||
public class SpanReceiverHost implements TraceAdminProtocol {
|
|
||||||
public static final String SPAN_RECEIVERS_CONF_SUFFIX =
|
|
||||||
"spanreceiver.classes";
|
|
||||||
private static final Log LOG = LogFactory.getLog(SpanReceiverHost.class);
|
|
||||||
private static final HashMap<String, SpanReceiverHost> hosts =
|
|
||||||
new HashMap<String, SpanReceiverHost>(1);
|
|
||||||
private final TreeMap<Long, SpanReceiver> receivers =
|
|
||||||
new TreeMap<Long, SpanReceiver>();
|
|
||||||
private final String confPrefix;
|
|
||||||
private Configuration config;
|
|
||||||
private boolean closed = false;
|
|
||||||
private long highestId = 1;
|
|
||||||
|
|
||||||
private final static String LOCAL_FILE_SPAN_RECEIVER_PATH_SUFFIX =
|
|
||||||
"local-file-span-receiver.path";
|
|
||||||
|
|
||||||
public static SpanReceiverHost get(Configuration conf, String confPrefix) {
|
|
||||||
synchronized (SpanReceiverHost.class) {
|
|
||||||
SpanReceiverHost host = hosts.get(confPrefix);
|
|
||||||
if (host != null) {
|
|
||||||
return host;
|
|
||||||
}
|
|
||||||
final SpanReceiverHost newHost = new SpanReceiverHost(confPrefix);
|
|
||||||
newHost.loadSpanReceivers(conf);
|
|
||||||
ShutdownHookManager.get().addShutdownHook(new Runnable() {
|
|
||||||
public void run() {
|
|
||||||
newHost.closeReceivers();
|
|
||||||
}
|
|
||||||
}, 0);
|
|
||||||
hosts.put(confPrefix, newHost);
|
|
||||||
return newHost;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private static List<ConfigurationPair> EMPTY = Collections.emptyList();
|
|
||||||
|
|
||||||
private SpanReceiverHost(String confPrefix) {
|
|
||||||
this.confPrefix = confPrefix;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Reads the names of classes specified in the
|
|
||||||
* "hadoop.htrace.spanreceiver.classes" property and instantiates and registers
|
|
||||||
* them with the Tracer as SpanReceiver's.
|
|
||||||
*
|
|
||||||
* The nullary constructor is called during construction, but if the classes
|
|
||||||
* specified implement the Configurable interface, setConfiguration() will be
|
|
||||||
* called on them. This allows SpanReceivers to use values from the Hadoop
|
|
||||||
* configuration.
|
|
||||||
*/
|
|
||||||
public synchronized void loadSpanReceivers(Configuration conf) {
|
|
||||||
config = new Configuration(conf);
|
|
||||||
String receiverKey = confPrefix + SPAN_RECEIVERS_CONF_SUFFIX;
|
|
||||||
String[] receiverNames = config.getTrimmedStrings(receiverKey);
|
|
||||||
if (receiverNames == null || receiverNames.length == 0) {
|
|
||||||
if (LOG.isTraceEnabled()) {
|
|
||||||
LOG.trace("No span receiver names found in " + receiverKey + ".");
|
|
||||||
}
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
// It's convenient to have each daemon log to a random trace file when
|
|
||||||
// testing.
|
|
||||||
String pathKey = confPrefix + LOCAL_FILE_SPAN_RECEIVER_PATH_SUFFIX;
|
|
||||||
if (config.get(pathKey) == null) {
|
|
||||||
String uniqueFile = LocalFileSpanReceiver.getUniqueLocalTraceFileName();
|
|
||||||
config.set(pathKey, uniqueFile);
|
|
||||||
if (LOG.isTraceEnabled()) {
|
|
||||||
LOG.trace("Set " + pathKey + " to " + uniqueFile);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for (String className : receiverNames) {
|
|
||||||
try {
|
|
||||||
SpanReceiver rcvr = loadInstance(className, EMPTY);
|
|
||||||
Trace.addReceiver(rcvr);
|
|
||||||
receivers.put(highestId++, rcvr);
|
|
||||||
LOG.info("Loaded SpanReceiver " + className + " successfully.");
|
|
||||||
} catch (IOException e) {
|
|
||||||
LOG.error("Failed to load SpanReceiver", e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
private synchronized SpanReceiver loadInstance(String className,
|
|
||||||
List<ConfigurationPair> extraConfig) throws IOException {
|
|
||||||
SpanReceiverBuilder builder =
|
|
||||||
new SpanReceiverBuilder(TraceUtils.
|
|
||||||
wrapHadoopConf(confPrefix, config, extraConfig));
|
|
||||||
SpanReceiver rcvr = builder.spanReceiverClass(className.trim()).build();
|
|
||||||
if (rcvr == null) {
|
|
||||||
throw new IOException("Failed to load SpanReceiver " + className);
|
|
||||||
}
|
|
||||||
return rcvr;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Calls close() on all SpanReceivers created by this SpanReceiverHost.
|
|
||||||
*/
|
|
||||||
public synchronized void closeReceivers() {
|
|
||||||
if (closed) return;
|
|
||||||
closed = true;
|
|
||||||
for (SpanReceiver rcvr : receivers.values()) {
|
|
||||||
try {
|
|
||||||
rcvr.close();
|
|
||||||
} catch (IOException e) {
|
|
||||||
LOG.warn("Unable to close SpanReceiver correctly: " + e.getMessage(), e);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
receivers.clear();
|
|
||||||
}
|
|
||||||
|
|
||||||
public synchronized SpanReceiverInfo[] listSpanReceivers()
|
|
||||||
throws IOException {
|
|
||||||
SpanReceiverInfo[] info = new SpanReceiverInfo[receivers.size()];
|
|
||||||
int i = 0;
|
|
||||||
|
|
||||||
for(Map.Entry<Long, SpanReceiver> entry : receivers.entrySet()) {
|
|
||||||
info[i] = new SpanReceiverInfo(entry.getKey(),
|
|
||||||
entry.getValue().getClass().getName());
|
|
||||||
i++;
|
|
||||||
}
|
|
||||||
return info;
|
|
||||||
}
|
|
||||||
|
|
||||||
public synchronized long addSpanReceiver(SpanReceiverInfo info)
|
|
||||||
throws IOException {
|
|
||||||
StringBuilder configStringBuilder = new StringBuilder();
|
|
||||||
String prefix = "";
|
|
||||||
for (ConfigurationPair pair : info.configPairs) {
|
|
||||||
configStringBuilder.append(prefix).append(pair.getKey()).
|
|
||||||
append(" = ").append(pair.getValue());
|
|
||||||
prefix = ", ";
|
|
||||||
}
|
|
||||||
SpanReceiver rcvr = null;
|
|
||||||
try {
|
|
||||||
rcvr = loadInstance(info.getClassName(), info.configPairs);
|
|
||||||
} catch (IOException e) {
|
|
||||||
LOG.info("Failed to add SpanReceiver " + info.getClassName() +
|
|
||||||
" with configuration " + configStringBuilder.toString(), e);
|
|
||||||
throw e;
|
|
||||||
} catch (RuntimeException e) {
|
|
||||||
LOG.info("Failed to add SpanReceiver " + info.getClassName() +
|
|
||||||
" with configuration " + configStringBuilder.toString(), e);
|
|
||||||
throw e;
|
|
||||||
}
|
|
||||||
Trace.addReceiver(rcvr);
|
|
||||||
long newId = highestId++;
|
|
||||||
receivers.put(newId, rcvr);
|
|
||||||
LOG.info("Successfully added SpanReceiver " + info.getClassName() +
|
|
||||||
" with configuration " + configStringBuilder.toString());
|
|
||||||
return newId;
|
|
||||||
}
|
|
||||||
|
|
||||||
public synchronized void removeSpanReceiver(long spanReceiverId)
|
|
||||||
throws IOException {
|
|
||||||
SpanReceiver rcvr = receivers.remove(spanReceiverId);
|
|
||||||
if (rcvr == null) {
|
|
||||||
throw new IOException("There is no span receiver with id " + spanReceiverId);
|
|
||||||
}
|
|
||||||
Trace.removeReceiver(rcvr);
|
|
||||||
rcvr.close();
|
|
||||||
LOG.info("Successfully removed SpanReceiver " + spanReceiverId +
|
|
||||||
" with class " + rcvr.getClass().getName());
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -24,7 +24,7 @@ import java.util.List;
|
||||||
import org.apache.hadoop.classification.InterfaceAudience;
|
import org.apache.hadoop.classification.InterfaceAudience;
|
||||||
import org.apache.hadoop.conf.Configuration;
|
import org.apache.hadoop.conf.Configuration;
|
||||||
import org.apache.hadoop.tracing.SpanReceiverInfo.ConfigurationPair;
|
import org.apache.hadoop.tracing.SpanReceiverInfo.ConfigurationPair;
|
||||||
import org.apache.htrace.HTraceConfiguration;
|
import org.apache.htrace.core.HTraceConfiguration;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* This class provides utility functions for tracing.
|
* This class provides utility functions for tracing.
|
||||||
|
@ -32,6 +32,7 @@ import org.apache.htrace.HTraceConfiguration;
|
||||||
@InterfaceAudience.Private
|
@InterfaceAudience.Private
|
||||||
public class TraceUtils {
|
public class TraceUtils {
|
||||||
private static List<ConfigurationPair> EMPTY = Collections.emptyList();
|
private static List<ConfigurationPair> EMPTY = Collections.emptyList();
|
||||||
|
static final String DEFAULT_HADOOP_PREFIX = "hadoop.htrace.";
|
||||||
|
|
||||||
public static HTraceConfiguration wrapHadoopConf(final String prefix,
|
public static HTraceConfiguration wrapHadoopConf(final String prefix,
|
||||||
final Configuration conf) {
|
final Configuration conf) {
|
||||||
|
@ -47,16 +48,27 @@ public class TraceUtils {
|
||||||
return new HTraceConfiguration() {
|
return new HTraceConfiguration() {
|
||||||
@Override
|
@Override
|
||||||
public String get(String key) {
|
public String get(String key) {
|
||||||
return get(key, "");
|
String ret = getInternal(prefix + key);
|
||||||
|
if (ret != null) {
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
return getInternal(DEFAULT_HADOOP_PREFIX + key);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override
|
@Override
|
||||||
public String get(String key, String defaultValue) {
|
public String get(String key, String defaultValue) {
|
||||||
String prefixedKey = prefix + key;
|
String ret = get(key);
|
||||||
if (extraMap.containsKey(prefixedKey)) {
|
if (ret != null) {
|
||||||
return extraMap.get(prefixedKey);
|
return ret;
|
||||||
}
|
}
|
||||||
return conf.get(prefixedKey, defaultValue);
|
return defaultValue;
|
||||||
|
}
|
||||||
|
|
||||||
|
private String getInternal(String key) {
|
||||||
|
if (extraMap.containsKey(key)) {
|
||||||
|
return extraMap.get(key);
|
||||||
|
}
|
||||||
|
return conf.get(key);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
|
@ -0,0 +1,100 @@
|
||||||
|
/**
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.tracing;
|
||||||
|
|
||||||
|
import java.io.IOException;
|
||||||
|
|
||||||
|
import org.apache.commons.logging.Log;
|
||||||
|
import org.apache.commons.logging.LogFactory;
|
||||||
|
import org.apache.hadoop.classification.InterfaceAudience;
|
||||||
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.tracing.SpanReceiverInfo.ConfigurationPair;
|
||||||
|
import org.apache.htrace.core.SpanReceiver;
|
||||||
|
import org.apache.htrace.core.TracerPool;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* This class provides functions for managing the tracer configuration at
|
||||||
|
* runtime via an RPC protocol.
|
||||||
|
*/
|
||||||
|
@InterfaceAudience.Private
|
||||||
|
public class TracerConfigurationManager implements TraceAdminProtocol {
|
||||||
|
private static final Log LOG =
|
||||||
|
LogFactory.getLog(TracerConfigurationManager.class);
|
||||||
|
|
||||||
|
private final String confPrefix;
|
||||||
|
private final Configuration conf;
|
||||||
|
|
||||||
|
public TracerConfigurationManager(String confPrefix, Configuration conf) {
|
||||||
|
this.confPrefix = confPrefix;
|
||||||
|
this.conf = conf;
|
||||||
|
}
|
||||||
|
|
||||||
|
public synchronized SpanReceiverInfo[] listSpanReceivers()
|
||||||
|
throws IOException {
|
||||||
|
TracerPool pool = TracerPool.getGlobalTracerPool();
|
||||||
|
SpanReceiver[] receivers = pool.getReceivers();
|
||||||
|
SpanReceiverInfo[] info = new SpanReceiverInfo[receivers.length];
|
||||||
|
for (int i = 0; i < receivers.length; i++) {
|
||||||
|
SpanReceiver receiver = receivers[i];
|
||||||
|
info[i] = new SpanReceiverInfo(receiver.getId(),
|
||||||
|
receiver.getClass().getName());
|
||||||
|
}
|
||||||
|
return info;
|
||||||
|
}
|
||||||
|
|
||||||
|
public synchronized long addSpanReceiver(SpanReceiverInfo info)
|
||||||
|
throws IOException {
|
||||||
|
StringBuilder configStringBuilder = new StringBuilder();
|
||||||
|
String prefix = "";
|
||||||
|
for (ConfigurationPair pair : info.configPairs) {
|
||||||
|
configStringBuilder.append(prefix).append(pair.getKey()).
|
||||||
|
append(" = ").append(pair.getValue());
|
||||||
|
prefix = ", ";
|
||||||
|
}
|
||||||
|
SpanReceiver rcvr = null;
|
||||||
|
try {
|
||||||
|
rcvr = new SpanReceiver.Builder(TraceUtils.wrapHadoopConf(
|
||||||
|
confPrefix, conf, info.configPairs)).
|
||||||
|
className(info.getClassName().trim()).
|
||||||
|
build();
|
||||||
|
} catch (RuntimeException e) {
|
||||||
|
LOG.info("Failed to add SpanReceiver " + info.getClassName() +
|
||||||
|
" with configuration " + configStringBuilder.toString(), e);
|
||||||
|
throw e;
|
||||||
|
}
|
||||||
|
TracerPool.getGlobalTracerPool().addReceiver(rcvr);
|
||||||
|
LOG.info("Successfully added SpanReceiver " + info.getClassName() +
|
||||||
|
" with configuration " + configStringBuilder.toString());
|
||||||
|
return rcvr.getId();
|
||||||
|
}
|
||||||
|
|
||||||
|
public synchronized void removeSpanReceiver(long spanReceiverId)
|
||||||
|
throws IOException {
|
||||||
|
SpanReceiver[] receivers =
|
||||||
|
TracerPool.getGlobalTracerPool().getReceivers();
|
||||||
|
for (SpanReceiver receiver : receivers) {
|
||||||
|
if (receiver.getId() == spanReceiverId) {
|
||||||
|
TracerPool.getGlobalTracerPool().removeAndCloseReceiver(receiver);
|
||||||
|
LOG.info("Successfully removed SpanReceiver " + spanReceiverId +
|
||||||
|
" with class " + receiver.getClass().getName());
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
throw new IOException("There is no span receiver with id " + spanReceiverId);
|
||||||
|
}
|
||||||
|
}
|
|
@@ -27,8 +27,8 @@ import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.UserInformation
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.*;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.Tracer;
 
 import com.google.protobuf.ByteString;
 
@@ -169,11 +169,12 @@ public abstract class ProtoUtil {
         .setRetryCount(retryCount).setClientId(ByteString.copyFrom(uuid));
 
     // Add tracing info if we are currently tracing.
-    if (Trace.isTracing()) {
-      Span s = Trace.currentSpan();
+    Span span = Tracer.getCurrentSpan();
+    if (span != null) {
       result.setTraceInfo(RPCTraceInfoProto.newBuilder()
-          .setParentId(s.getSpanId())
-          .setTraceId(s.getTraceId()).build());
+          .setTraceId(span.getSpanId().getHigh())
+          .setParentId(span.getSpanId().getLow())
+          .build());
     }
 
     return result.build();
@@ -61,8 +61,9 @@ enum RpcKindProto {
  * what span caused the new span we will create when this message is received.
  */
 message RPCTraceInfoProto {
-  optional int64 traceId = 1;
-  optional int64 parentId = 2;
+  optional int64 traceId = 1; // parentIdHigh
+  optional int64 parentId = 2; // parentIdLow
+
 }
 
 message RpcRequestHeaderProto { // the header for the RpcRequest
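The comments added above record how HTrace 4's 128-bit span ID is packed into the two int64 fields. A minimal sketch of that mapping, assuming the htrace-core4 `SpanId(high, low)` constructor and the generated `RPCTraceInfoProto` builder (this helper class is not part of the patch):

```java
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RPCTraceInfoProto;
import org.apache.htrace.core.SpanId;

public final class TraceInfoCodecSketch {
  // Sender side: split the 128-bit SpanId into the two int64 proto fields.
  static RPCTraceInfoProto toProto(SpanId spanId) {
    return RPCTraceInfoProto.newBuilder()
        .setTraceId(spanId.getHigh())   // upper 64 bits
        .setParentId(spanId.getLow())   // lower 64 bits
        .build();
  }

  // Receiver side: rebuild the parent SpanId from the RPC header.
  static SpanId fromProto(RPCTraceInfoProto proto) {
    return new SpanId(proto.getTraceId(), proto.getParentId());
  }
}
```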
@@ -88,6 +88,31 @@
   </description>
 </property>
 
+<property>
+  <name>hadoop.security.dns.interface</name>
+  <description>
+    The name of the Network Interface from which the service should determine
+    its host name for Kerberos login. e.g. eth2. In a multi-homed environment,
+    the setting can be used to affect the _HOST substitution in the service
+    Kerberos principal. If this configuration value is not set, the service
+    will use its default hostname as returned by
+    InetAddress.getLocalHost().getCanonicalHostName().
+
+    Most clusters will not require this setting.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.security.dns.nameserver</name>
+  <description>
+    The host name or IP address of the name server (DNS) which a service node
+    should use to determine its own host name for Kerberos login. Requires
+    hadoop.security.dns.interface.
+
+    Most clusters will not require this setting.
+  </description>
+</property>
+
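For illustration only (not part of core-default.xml), a service on a multihomed node would combine these two keys roughly as below; the interface and nameserver values are hypothetical, and the `DNS.getDefaultHost(interface, nameserver, true)` call mirrors what `SecurityUtil.getLocalHostName(conf)` does in this patch:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.DNS;

public class MultihomedHostnameSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Restrict the Kerberos hostname lookup to the cluster-facing interface
    // and resolve it against a specific nameserver (made-up values).
    conf.set("hadoop.security.dns.interface", "eth2");
    conf.set("hadoop.security.dns.nameserver", "10.0.0.53");

    // Interface + nameserver reverse lookup, with hosts-file fallback enabled.
    String host = DNS.getDefaultHost(
        conf.get("hadoop.security.dns.interface"),
        conf.get("hadoop.security.dns.nameserver"),
        true);
    System.out.println("Host used for _HOST substitution: " + host);
  }
}
```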
 <!--
 === Multiple group mapping providers configuration sample ===
   This sample illustrates a typical use case for CompositeGroupsMapping where
@@ -1973,4 +1998,19 @@ for ldap providers in the same way as above does.
     the limit is 0 or the -safely is not specified in -rm command.
   </description>
 </property>
+
+<property>
+  <name>fs.client.htrace.sampler.classes</name>
+  <value></value>
+  <description>The class names of the HTrace Samplers to use for Hadoop
+    filesystem clients.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.htrace.span.receiver.classes</name>
+  <value></value>
+  <description>The class names of the Span Receivers to use for Hadoop.
+  </description>
+</property>
 </configuration>
@@ -192,7 +192,6 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a
 | `PutImageNumOps` | Total number of fsimage uploads to SecondaryNameNode |
 | `PutImageAvgTime` | Average fsimage upload time in milliseconds |
 | `TotalFileOps`| Total number of file operations performed |
-| `NNStarted`| NameNode start time |
 | `NNStartedTimeInMillis`| NameNode start time in milliseconds |
 
 FSNamesystem
@@ -49,37 +49,14 @@ interface bundled with HTrace or implementing it by yourself.
 * HTracedRESTReceiver
 * ZipkinSpanReceiver
 
-In order to set up SpanReceivers for HDFS servers,
-configure what SpanReceivers you'd like to use
-by putting a comma separated list of the fully-qualified class name of classes implementing SpanReceiver
-in `hdfs-site.xml` property: `dfs.htrace.spanreceiver.classes`.
-
-```xml
-<property>
-  <name>dfs.htrace.spanreceiver.classes</name>
-  <value>org.apache.htrace.impl.LocalFileSpanReceiver</value>
-</property>
-<property>
-  <name>dfs.htrace.local-file-span-receiver.path</name>
-  <value>/var/log/hadoop/htrace.out</value>
-</property>
-```
-
-You can omit package name prefix if you use span receiver bundled with HTrace.
-
-```xml
-<property>
-  <name>dfs.htrace.spanreceiver.classes</name>
-  <value>LocalFileSpanReceiver</value>
-</property>
-```
-
-You also need to add the jar bundling SpanReceiver to the classpath of Hadoop
-on each node. (LocalFileSpanReceiver in the example above is included in the
-jar of htrace-core which is bundled with Hadoop.)
+See core-default.xml for a description of HTrace configuration keys. In some
+cases, you will also need to add the jar containing the SpanReceiver that you
+are using to the classpath of Hadoop on each node. (In the example above,
+LocalFileSpanReceiver is included in the htrace-core4 jar which is bundled
+with Hadoop.)
 
 ```
-$ cp htrace-htraced/target/htrace-htraced-3.2.0-incubating.jar $HADOOP_HOME/share/hadoop/common/lib/
+$ cp htrace-htraced/target/htrace-htraced-4.0.1-incubating.jar $HADOOP_HOME/share/hadoop/common/lib/
 ```
 
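Since the old `dfs.htrace.spanreceiver.classes` XML examples are removed in favor of the core-default.xml keys, a minimal programmatic sketch of the replacement configuration may help; the key and class names come from elsewhere in this patch, while the wrapper class itself is hypothetical:

```java
import org.apache.hadoop.conf.Configuration;

public class HTraceConfSketch {
  public static Configuration tracingConf() {
    Configuration conf = new Configuration();
    // Span receivers for Hadoop daemons; the key is defined in
    // core-default.xml by this patch.
    conf.set("hadoop.htrace.span.receiver.classes",
        "org.apache.htrace.core.LocalFileSpanReceiver");
    // Sampler for filesystem clients.
    conf.set("fs.client.htrace.sampler.classes",
        "org.apache.htrace.core.AlwaysSampler");
    return conf;
  }
}
```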
### Dynamic update of tracing configuration

@@ -92,11 +69,11 @@ You need to run the command against all servers if you want to update the config
 
       $ hadoop trace -list -host 192.168.56.2:9000
       ID  CLASS
-      1   org.apache.htrace.impl.LocalFileSpanReceiver
+      1   org.apache.htrace.core.LocalFileSpanReceiver
 
       $ hadoop trace -list -host 192.168.56.2:50020
       ID  CLASS
-      1   org.apache.htrace.impl.LocalFileSpanReceiver
+      1   org.apache.htrace.core.LocalFileSpanReceiver
 
 `hadoop trace -remove` removes a span receiver from a server.
 The `-remove` option takes the id of a span receiver as its argument.
 
@@ -113,7 +90,7 @@ You can specify the configuration associated with span receiver by `-Ckey=value`
 
       $ hadoop trace -list -host 192.168.56.2:9000
       ID  CLASS
-      2   org.apache.htrace.impl.LocalFileSpanReceiver
+      2   org.apache.htrace.core.LocalFileSpanReceiver
 
### Starting tracing spans by HTrace API
 
@@ -121,26 +98,21 @@ In order to trace, you will need to wrap the traced logic with **tracing span**
 When there are running tracing spans,
 the tracing information is propagated to servers along with RPC requests.
 
-In addition, you need to initialize `SpanReceiverHost` once per process.
-
 ```java
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.tracing.SpanReceiverHost;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.Tracer;
+import org.apache.htrace.core.TraceScope;
 
 ...
 
-    SpanReceiverHost.getInstance(new HdfsConfiguration());
-
 ...
 
-    TraceScope ts = Trace.startSpan("Gets", Sampler.ALWAYS);
+    TraceScope ts = tracer.newScope("Gets");
     try {
       ... // traced logic
     } finally {
-      if (ts != null) ts.close();
+      ts.close();
     }
 ```
 
@@ -154,11 +126,10 @@ which start tracing span before invoking HDFS shell command.
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.tracing.SpanReceiverHost;
+import org.apache.hadoop.tracing.TraceUtils;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.Tracer;
+import org.apache.htrace.core.TraceScope;
 
 public class TracingFsShell {
   public static void main(String argv[]) throws Exception {
@@ -166,13 +137,19 @@ which start tracing span before invoking HDFS shell command.
     FsShell shell = new FsShell();
     conf.setQuietMode(false);
     shell.setConf(conf);
-    SpanReceiverHost.get(conf, DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX);
+    Tracer tracer = new Tracer.Builder().
+        name("TracingFsShell").
+        conf(TraceUtils.wrapHadoopConf("tracing.fs.shell.htrace.", conf)).
+        build();
     int res = 0;
-    try (TraceScope ts = Trace.startSpan("FsShell", Sampler.ALWAYS)) {
+    TraceScope scope = tracer.newScope("FsShell");
+    try {
       res = ToolRunner.run(shell, argv);
     } finally {
+      scope.close();
       shell.close();
     }
+    tracer.close();
     System.exit(res);
   }
 }
@@ -189,16 +166,15 @@ The DFSClient can enable tracing internally. This allows you to use HTrace with
 your client without modifying the client source code.
 
 Configure the span receivers and samplers in `hdfs-site.xml`
-by properties `dfs.client.htrace.sampler` and `dfs.client.htrace.sampler`.
-The value of `dfs.client.htrace.sampler` can be NeverSampler, AlwaysSampler or ProbabilitySampler.
+by properties `fs.client.htrace.sampler.classes` and
+`fs.client.htrace.spanreceiver.classes`. The value of
+`fs.client.htrace.sampler.classes` can be NeverSampler, AlwaysSampler or
+ProbabilitySampler.
 
 * NeverSampler: HTrace is OFF for all requests to namenodes and datanodes;
 * AlwaysSampler: HTrace is ON for all requests to namenodes and datanodes;
 * ProbabilitySampler: HTrace is ON for some percentage of requests to namenodes and datanodes
 
-You do not need to enable this if your client program has been modified
-to use HTrace.
-
 ```xml
 <property>
   <name>dfs.client.htrace.spanreceiver.classes</name>
@@ -1493,7 +1493,7 @@ public class TestConfiguration extends TestCase {
 
     @Override
     public void run() {
-      for (int i = 0; i < 100000; i++) {
+      for (int i = 0; i < 10000; i++) {
         config.set("some.config.value-" + prefix + i, "value");
       }
     }
@ -17,13 +17,16 @@
|
||||||
*/
|
*/
|
||||||
package org.apache.hadoop.fs;
|
package org.apache.hadoop.fs;
|
||||||
|
|
||||||
|
import java.io.ByteArrayOutputStream;
|
||||||
|
import java.io.PrintStream;
|
||||||
|
|
||||||
import junit.framework.AssertionFailedError;
|
import junit.framework.AssertionFailedError;
|
||||||
import org.apache.hadoop.conf.Configuration;
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.io.IOUtils;
|
||||||
import org.apache.hadoop.tracing.SetSpanReceiver;
|
import org.apache.hadoop.tracing.SetSpanReceiver;
|
||||||
import org.apache.hadoop.tracing.SpanReceiverHost;
|
|
||||||
import org.apache.hadoop.util.ToolRunner;
|
import org.apache.hadoop.util.ToolRunner;
|
||||||
import org.apache.htrace.SamplerBuilder;
|
import org.apache.htrace.core.AlwaysSampler;
|
||||||
import org.apache.htrace.impl.AlwaysSampler;
|
import org.apache.htrace.core.Tracer;
|
||||||
import org.junit.Assert;
|
import org.junit.Assert;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
|
|
||||||
|
@ -49,10 +52,10 @@ public class TestFsShell {
|
||||||
@Test
|
@Test
|
||||||
public void testTracing() throws Throwable {
|
public void testTracing() throws Throwable {
|
||||||
Configuration conf = new Configuration();
|
Configuration conf = new Configuration();
|
||||||
String prefix = FsShell.SEHLL_HTRACE_PREFIX;
|
String prefix = "fs.shell.htrace.";
|
||||||
conf.set(prefix + SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX,
|
conf.set(prefix + Tracer.SPAN_RECEIVER_CLASSES_KEY,
|
||||||
SetSpanReceiver.class.getName());
|
SetSpanReceiver.class.getName());
|
||||||
conf.set(prefix + SamplerBuilder.SAMPLER_CONF_KEY,
|
conf.set(prefix + Tracer.SAMPLER_CLASSES_KEY,
|
||||||
AlwaysSampler.class.getName());
|
AlwaysSampler.class.getName());
|
||||||
conf.setQuietMode(false);
|
conf.setQuietMode(false);
|
||||||
FsShell shell = new FsShell(conf);
|
FsShell shell = new FsShell(conf);
|
||||||
|
@ -67,4 +70,33 @@ public class TestFsShell {
|
||||||
SetSpanReceiver.getMap()
|
SetSpanReceiver.getMap()
|
||||||
.get("help").get(0).getKVAnnotations().get("args"));
|
.get("help").get(0).getKVAnnotations().get("args"));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testDFSWithInvalidCommmand() throws Throwable {
|
||||||
|
Configuration conf = new Configuration();
|
||||||
|
FsShell shell = new FsShell(conf);
|
||||||
|
String[] args = new String[1];
|
||||||
|
args[0] = "dfs -mkdirs";
|
||||||
|
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
|
||||||
|
final PrintStream out = new PrintStream(bytes);
|
||||||
|
final PrintStream oldErr = System.err;
|
||||||
|
try {
|
||||||
|
System.setErr(out);
|
||||||
|
ToolRunner.run(shell, args);
|
||||||
|
String errorValue=new String(bytes.toString());
|
||||||
|
Assert
|
||||||
|
.assertTrue(
|
||||||
|
"FSShell dfs command did not print the error " +
|
||||||
|
"message when invalid command is passed",
|
||||||
|
errorValue.contains("-mkdirs: Unknown command"));
|
||||||
|
Assert
|
||||||
|
.assertTrue(
|
||||||
|
"FSShell dfs command did not print help " +
|
||||||
|
"message when invalid command is passed",
|
||||||
|
errorValue.contains("Usage: hadoop fs [generic options]"));
|
||||||
|
} finally {
|
||||||
|
IOUtils.closeStream(out);
|
||||||
|
System.setErr(oldErr);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -55,7 +55,6 @@ public class TestHarFileSystem {
|
||||||
* {@link HarFileSystem}. Either because there is a default implementation
|
* {@link HarFileSystem}. Either because there is a default implementation
|
||||||
* already available or because it is not relevant.
|
* already available or because it is not relevant.
|
||||||
*/
|
*/
|
||||||
@SuppressWarnings("deprecation")
|
|
||||||
private interface MustNotImplement {
|
private interface MustNotImplement {
|
||||||
public BlockLocation[] getFileBlockLocations(Path p, long start, long len);
|
public BlockLocation[] getFileBlockLocations(Path p, long start, long len);
|
||||||
public long getLength(Path f);
|
public long getLength(Path f);
|
||||||
|
|
|
@ -26,6 +26,7 @@ import java.util.Iterator;
|
||||||
import java.util.NoSuchElementException;
|
import java.util.NoSuchElementException;
|
||||||
|
|
||||||
import org.apache.hadoop.conf.Configuration;
|
import org.apache.hadoop.conf.Configuration;
|
||||||
|
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
|
||||||
import org.apache.hadoop.util.Shell;
|
import org.apache.hadoop.util.Shell;
|
||||||
|
|
||||||
import org.junit.runner.RunWith;
|
import org.junit.runner.RunWith;
|
||||||
|
@ -312,7 +313,30 @@ public class TestLocalDirAllocator {
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
assertEquals(CONTEXT + " not configured", e.getMessage());
|
assertEquals(CONTEXT + " not configured", e.getMessage());
|
||||||
} catch (NullPointerException e) {
|
} catch (NullPointerException e) {
|
||||||
fail("Lack of configuration should not have thrown an NPE.");
|
fail("Lack of configuration should not have thrown a NPE.");
|
||||||
|
}
|
||||||
|
|
||||||
|
String NEW_CONTEXT = CONTEXT + ".new";
|
||||||
|
conf1.set(NEW_CONTEXT, "");
|
||||||
|
LocalDirAllocator newDirAllocator = new LocalDirAllocator(NEW_CONTEXT);
|
||||||
|
try {
|
||||||
|
newDirAllocator.getLocalPathForWrite("/test", conf1);
|
||||||
|
fail("Exception not thrown when " + NEW_CONTEXT +
|
||||||
|
" is set to empty string");
|
||||||
|
} catch (IOException e) {
|
||||||
|
assertTrue(e instanceof DiskErrorException);
|
||||||
|
} catch (NullPointerException e) {
|
||||||
|
fail("Wrong configuration should not have thrown a NPE.");
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
newDirAllocator.getLocalPathToRead("/test", conf1);
|
||||||
|
fail("Exception not thrown when " + NEW_CONTEXT +
|
||||||
|
" is set to empty string");
|
||||||
|
} catch (IOException e) {
|
||||||
|
assertTrue(e instanceof DiskErrorException);
|
||||||
|
} catch (NullPointerException e) {
|
||||||
|
fail("Wrong configuration should not have thrown a NPE.");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -72,6 +72,7 @@ public class TestLocalFileSystem {
|
||||||
FileUtil.setWritable(base, true);
|
FileUtil.setWritable(base, true);
|
||||||
FileUtil.fullyDelete(base);
|
FileUtil.fullyDelete(base);
|
||||||
assertTrue(!base.exists());
|
assertTrue(!base.exists());
|
||||||
|
RawLocalFileSystem.useStatIfAvailable();
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
@ -25,6 +25,7 @@ import org.slf4j.Logger;
|
||||||
import org.slf4j.LoggerFactory;
|
import org.slf4j.LoggerFactory;
|
||||||
|
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
|
import org.apache.hadoop.fs.FileStatus;
|
||||||
|
|
||||||
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
|
||||||
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
|
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
|
||||||
|
@ -120,4 +121,17 @@ public abstract class AbstractContractRootDirectoryTest extends AbstractFSContra
|
||||||
assertIsDirectory(root);
|
assertIsDirectory(root);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Test
|
||||||
|
public void testListEmptyRootDirectory() throws IOException {
|
||||||
|
//extra sanity checks here to avoid support calls about complete loss of data
|
||||||
|
skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
|
||||||
|
FileSystem fs = getFileSystem();
|
||||||
|
Path root = new Path("/");
|
||||||
|
FileStatus[] statuses = fs.listStatus(root);
|
||||||
|
for (FileStatus status : statuses) {
|
||||||
|
ContractTestUtils.assertDeleted(fs, status.getPath(), true);
|
||||||
|
}
|
||||||
|
assertEquals("listStatus on empty root-directory returned a non-empty list",
|
||||||
|
0, fs.listStatus(root).length);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -23,10 +23,7 @@ import java.io.File;
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.io.InputStreamReader;
|
import java.io.InputStreamReader;
|
||||||
import java.io.OutputStream;
|
import java.io.OutputStream;
|
||||||
import java.io.RandomAccessFile;
|
|
||||||
import java.net.Socket;
|
import java.net.Socket;
|
||||||
import java.nio.channels.FileLock;
|
|
||||||
import java.nio.channels.OverlappingFileLockException;
|
|
||||||
import java.util.ArrayList;
|
import java.util.ArrayList;
|
||||||
import java.util.LinkedList;
|
import java.util.LinkedList;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
@ -34,8 +31,8 @@ import java.util.concurrent.CountDownLatch;
|
||||||
import java.util.concurrent.TimeUnit;
|
import java.util.concurrent.TimeUnit;
|
||||||
import java.util.concurrent.TimeoutException;
|
import java.util.concurrent.TimeoutException;
|
||||||
|
|
||||||
|
import org.apache.hadoop.net.ServerSocketUtil;
|
||||||
import org.apache.hadoop.util.Time;
|
import org.apache.hadoop.util.Time;
|
||||||
import org.apache.zookeeper.PortAssignment;
|
|
||||||
import org.apache.zookeeper.TestableZooKeeper;
|
import org.apache.zookeeper.TestableZooKeeper;
|
||||||
import org.apache.zookeeper.WatchedEvent;
|
import org.apache.zookeeper.WatchedEvent;
|
||||||
import org.apache.zookeeper.Watcher;
|
import org.apache.zookeeper.Watcher;
|
||||||
|
@ -167,10 +164,6 @@ public abstract class ClientBaseWithFixes extends ZKTestCase {
|
||||||
private LinkedList<ZooKeeper> allClients;
|
private LinkedList<ZooKeeper> allClients;
|
||||||
private boolean allClientsSetup = false;
|
private boolean allClientsSetup = false;
|
||||||
|
|
||||||
private RandomAccessFile portNumLockFile;
|
|
||||||
|
|
||||||
private File portNumFile;
|
|
||||||
|
|
||||||
protected TestableZooKeeper createClient(CountdownWatcher watcher, String hp)
|
protected TestableZooKeeper createClient(CountdownWatcher watcher, String hp)
|
||||||
throws IOException, InterruptedException
|
throws IOException, InterruptedException
|
||||||
{
|
{
|
||||||
|
@ -413,30 +406,12 @@ public abstract class ClientBaseWithFixes extends ZKTestCase {
|
||||||
|
|
||||||
private String initHostPort() {
|
private String initHostPort() {
|
||||||
BASETEST.mkdirs();
|
BASETEST.mkdirs();
|
||||||
int port;
|
int port = 0;
|
||||||
for (;;) {
|
|
||||||
port = PortAssignment.unique();
|
|
||||||
FileLock lock = null;
|
|
||||||
portNumLockFile = null;
|
|
||||||
try {
|
try {
|
||||||
try {
|
port = ServerSocketUtil.getPort(port, 100);
|
||||||
portNumFile = new File(BASETEST, port + ".lock");
|
|
||||||
portNumLockFile = new RandomAccessFile(portNumFile, "rw");
|
|
||||||
try {
|
|
||||||
lock = portNumLockFile.getChannel().tryLock();
|
|
||||||
} catch (OverlappingFileLockException e) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
} finally {
|
|
||||||
if (lock != null)
|
|
||||||
break;
|
|
||||||
if (portNumLockFile != null)
|
|
||||||
portNumLockFile.close();
|
|
||||||
}
|
|
||||||
} catch (IOException e) {
|
} catch (IOException e) {
|
||||||
throw new RuntimeException(e);
|
throw new RuntimeException(e);
|
||||||
}
|
}
|
||||||
}
|
|
||||||
return "127.0.0.1:" + port;
|
return "127.0.0.1:" + port;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -480,9 +455,6 @@ public abstract class ClientBaseWithFixes extends ZKTestCase {
|
||||||
|
|
||||||
stopServer();
|
stopServer();
|
||||||
|
|
||||||
portNumLockFile.close();
|
|
||||||
portNumFile.delete();
|
|
||||||
|
|
||||||
if (tmpDir != null) {
|
if (tmpDir != null) {
|
||||||
Assert.assertTrue("delete " + tmpDir.toString(), recursiveDelete(tmpDir));
|
Assert.assertTrue("delete " + tmpDir.toString(), recursiveDelete(tmpDir));
|
||||||
}
|
}
|
||||||
|
|
|
@ -113,10 +113,7 @@ public class TestAuthenticationSessionCookie {
|
||||||
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
|
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
|
||||||
|
|
||||||
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
|
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
|
||||||
Configuration sslConf = new Configuration(false);
|
Configuration sslConf = KeyStoreTestUtil.getSslConfig();
|
||||||
sslConf.addResource("ssl-server.xml");
|
|
||||||
sslConf.addResource("ssl-client.xml");
|
|
||||||
|
|
||||||
|
|
||||||
server = new HttpServer2.Builder()
|
server = new HttpServer2.Builder()
|
||||||
.setName("test")
|
.setName("test")
|
||||||
|
|
|
@ -17,7 +17,6 @@ import org.junit.Assert;
|
||||||
import org.apache.hadoop.conf.Configuration;
|
import org.apache.hadoop.conf.Configuration;
|
||||||
import org.apache.hadoop.fs.FileUtil;
|
import org.apache.hadoop.fs.FileUtil;
|
||||||
import org.apache.hadoop.net.NetUtils;
|
import org.apache.hadoop.net.NetUtils;
|
||||||
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
|
|
||||||
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
|
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
|
||||||
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
|
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
|
||||||
import org.apache.hadoop.security.ssl.SSLFactory;
|
import org.apache.hadoop.security.ssl.SSLFactory;
|
||||||
|
@ -27,12 +26,10 @@ import org.junit.Test;
|
||||||
|
|
||||||
import javax.net.ssl.HttpsURLConnection;
|
import javax.net.ssl.HttpsURLConnection;
|
||||||
import javax.servlet.*;
|
import javax.servlet.*;
|
||||||
import javax.servlet.http.Cookie;
|
|
||||||
import javax.servlet.http.HttpServletResponse;
|
import javax.servlet.http.HttpServletResponse;
|
||||||
import java.io.File;
|
import java.io.File;
|
||||||
import java.io.IOException;
|
import java.io.IOException;
|
||||||
import java.net.HttpURLConnection;
|
import java.net.HttpURLConnection;
|
||||||
import java.net.MalformedURLException;
|
|
||||||
import java.net.URI;
|
import java.net.URI;
|
||||||
import java.net.URL;
|
import java.net.URL;
|
||||||
import java.security.GeneralSecurityException;
|
import java.security.GeneralSecurityException;
|
||||||
|
@ -89,9 +86,7 @@ public class TestHttpCookieFlag {
|
||||||
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
|
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
|
||||||
|
|
||||||
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
|
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
|
||||||
Configuration sslConf = new Configuration(false);
|
Configuration sslConf = KeyStoreTestUtil.getSslConfig();
|
||||||
sslConf.addResource("ssl-server.xml");
|
|
||||||
sslConf.addResource("ssl-client.xml");
|
|
||||||
|
|
||||||
clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
|
clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
|
||||||
clientSslFactory.init();
|
clientSslFactory.init();
|
||||||
|
|
|
@ -65,9 +65,7 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
|
||||||
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
|
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
|
||||||
|
|
||||||
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
|
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
|
||||||
Configuration sslConf = new Configuration(false);
|
Configuration sslConf = KeyStoreTestUtil.getSslConfig();
|
||||||
sslConf.addResource("ssl-server.xml");
|
|
||||||
sslConf.addResource("ssl-client.xml");
|
|
||||||
|
|
||||||
clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
|
clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
|
||||||
clientSslFactory.init();
|
clientSslFactory.init();
|
||||||
|
|
|
@@ -1060,8 +1060,8 @@ public class TestRPC {
       }));
     }
     while (server.getCallQueueLen() != 1
-        && countThreads(CallQueueManager.class.getName()) != 1
-        && countThreads(TestProtocol.class.getName()) != 1) {
+        || countThreads(CallQueueManager.class.getName()) != 1
+        || countThreads(TestImpl.class.getName()) != 1) {
       Thread.sleep(100);
     }
   } finally {
@@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFactory;
 public class ServerSocketUtil {

 private static final Log LOG = LogFactory.getLog(ServerSocketUtil.class);
+private static Random rand = new Random();

 /**
 * Port scan & allocate is how most other apps find ports
@@ -38,13 +39,15 @@ public class ServerSocketUtil {
 * @throws IOException
 */
 public static int getPort(int port, int retries) throws IOException {
-Random rand = new Random();
 int tryPort = port;
 int tries = 0;
 while (true) {
-if (tries > 0) {
+if (tries > 0 || tryPort == 0) {
 tryPort = port + rand.nextInt(65535 - port);
 }
+if (tryPort == 0) {
+continue;
+}
 LOG.info("Using port " + tryPort);
 try (ServerSocket s = new ServerSocket(tryPort)) {
 return tryPort;
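With the shared Random and the extra tryPort == 0 check above, callers can pass 0 to ask for an arbitrary free port instead of accidentally binding port 0 directly. A minimal usage sketch (the configuration key shown is only an example, not part of this patch):

    // Probe for a free port, retrying up to 10 times on collisions.
    int port = ServerSocketUtil.getPort(0, 10);
    conf.set("dfs.datanode.address", "127.0.0.1:" + port);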
@@ -18,6 +18,10 @@

 package org.apache.hadoop.net;

+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
+import java.net.NetworkInterface;
+import java.net.SocketException;
 import java.net.UnknownHostException;
 import java.net.InetAddress;

@@ -28,6 +32,9 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.util.Time;

 import org.junit.Test;
+
+import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.*;

 /**
@@ -38,6 +45,11 @@ public class TestDNS {
 private static final Log LOG = LogFactory.getLog(TestDNS.class);
 private static final String DEFAULT = "default";

+// This is not a legal hostname (starts with a hyphen). It will never
+// be returned on any test machine.
+private static final String DUMMY_HOSTNAME = "-DUMMY_HOSTNAME";
+private static final String INVALID_DNS_SERVER = "0.0.0.0";
+
 /**
 * Test that asking for the default hostname works
 * @throws Exception if hostname lookups fail
@@ -89,12 +101,8 @@ public class TestDNS {
 */
 @Test
 public void testNullInterface() throws Exception {
-try {
-String host = DNS.getDefaultHost(null);
-fail("Expected a NullPointerException, got " + host);
-} catch (NullPointerException npe) {
-// Expected
-}
+String host = DNS.getDefaultHost(null); // should work.
+assertThat(host, is(DNS.getDefaultHost(DEFAULT)));
 try {
 String ip = DNS.getDefaultIP(null);
 fail("Expected a NullPointerException, got " + ip);
@@ -103,6 +111,26 @@ public class TestDNS {
 }
 }

+/**
+* Test that 'null' DNS server gives the same result as if no DNS
+* server was passed.
+*/
+@Test
+public void testNullDnsServer() throws Exception {
+String host = DNS.getDefaultHost(getLoopbackInterface(), null);
+assertThat(host, is(DNS.getDefaultHost(getLoopbackInterface())));
+}
+
+/**
+* Test that "default" DNS server gives the same result as if no DNS
+* server was passed.
+*/
+@Test
+public void testDefaultDnsServer() throws Exception {
+String host = DNS.getDefaultHost(getLoopbackInterface(), DEFAULT);
+assertThat(host, is(DNS.getDefaultHost(getLoopbackInterface())));
+}
+
 /**
 * Get the IP addresses of an unknown interface
 */
@@ -147,10 +175,80 @@ public class TestDNS {
 + " Loopback=" + localhost.isLoopbackAddress()
 + " Linklocal=" + localhost.isLinkLocalAddress());
 }

 }
 }

+/**
+* Test that when using an invalid DNS server with hosts file fallback,
+* we are able to get the hostname from the hosts file.
+*
+* This test may fail on some misconfigured test machines that don't have
+* an entry for "localhost" in their hosts file. This entry is correctly
+* configured out of the box on common Linux distributions, OS X and
+* Windows.
+*
+* @throws Exception
+*/
+@Test (timeout=60000)
+public void testLookupWithHostsFallback() throws Exception {
+final String oldHostname = changeDnsCachedHostname(DUMMY_HOSTNAME);
+
+try {
+String hostname = DNS.getDefaultHost(
+getLoopbackInterface(), INVALID_DNS_SERVER, true);
+
+// Expect to get back something other than the cached host name.
+assertThat(hostname, not(DUMMY_HOSTNAME));
+} finally {
+// Restore DNS#cachedHostname for subsequent tests.
+changeDnsCachedHostname(oldHostname);
+}
+}
+
+/**
+* Test that when using an invalid DNS server without hosts file
+* fallback, we get back the cached host name.
+*
+* @throws Exception
+*/
+@Test(timeout=60000)
+public void testLookupWithoutHostsFallback() throws Exception {
+final String oldHostname = changeDnsCachedHostname(DUMMY_HOSTNAME);
+
+try {
+String hostname = DNS.getDefaultHost(
+getLoopbackInterface(), INVALID_DNS_SERVER, false);
+
+// Expect to get back the cached host name since there was no hosts
+// file lookup.
+assertThat(hostname, is(DUMMY_HOSTNAME));
+} finally {
+// Restore DNS#cachedHostname for subsequent tests.
+changeDnsCachedHostname(oldHostname);
+}
+}
+
+private String getLoopbackInterface() throws SocketException {
+return NetworkInterface.getByInetAddress(
+InetAddress.getLoopbackAddress()).getDisplayName();
+}
+
+/**
+* Change DNS#cachedHostName to something which cannot be a real
+* host name. Uses reflection since it is a 'private final' field.
+*/
+private String changeDnsCachedHostname(final String newHostname)
+throws Exception {
+final String oldCachedHostname = DNS.getDefaultHost(DEFAULT);
+Field field = DNS.class.getDeclaredField("cachedHostname");
+field.setAccessible(true);
+Field modifiersField = Field.class.getDeclaredField("modifiers");
+modifiersField.setAccessible(true);
+modifiersField.set(field, field.getModifiers() & ~Modifier.FINAL);
+field.set(null, newHostname);
+return oldCachedHostname;
+}
+
 /**
 * Test that the name "localhost" resolves to something.
 *
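The new tests exercise the DNS.getDefaultHost(interface, nameserver, tolerateFailures) overload used above. A condensed sketch of the same lookup outside the test harness, mirroring the helper methods in the test (nothing beyond the patch is assumed):

    // Resolve the default host via the loopback interface, falling back to
    // the hosts file when the given DNS server (0.0.0.0 here) is unusable.
    String iface = NetworkInterface.getByInetAddress(
        InetAddress.getLoopbackAddress()).getDisplayName();
    String host = DNS.getDefaultHost(iface, "0.0.0.0", true);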
@@ -111,7 +111,7 @@ public class TestSecurityUtil {

 @Test
 public void testLocalHostNameForNullOrWild() throws Exception {
-String local = StringUtils.toLowerCase(SecurityUtil.getLocalHostName());
+String local = StringUtils.toLowerCase(SecurityUtil.getLocalHostName(null));
 assertEquals("hdfs/" + local + "@REALM",
 SecurityUtil.getServerPrincipal("hdfs/_HOST@REALM", (String)null));
 assertEquals("hdfs/" + local + "@REALM",
@@ -37,7 +37,6 @@ import java.security.KeyPair;
 import java.security.KeyPairGenerator;
 import java.security.KeyStore;
 import java.security.NoSuchAlgorithmException;
-import java.security.PrivateKey;
 import java.security.SecureRandom;
 import java.security.cert.Certificate;
 import java.security.cert.X509Certificate;
@@ -49,8 +48,6 @@ import java.security.InvalidKeyException;
 import java.security.NoSuchProviderException;
 import java.security.SignatureException;
 import java.security.cert.CertificateEncodingException;
-import java.security.cert.CertificateException;
-import java.security.cert.CertificateFactory;
 import javax.security.auth.x500.X500Principal;
 import org.bouncycastle.x509.X509V1CertificateGenerator;

@@ -233,8 +230,8 @@ public class KeyStoreTestUtil {
 String trustKS = null;
 String trustPassword = "trustP";

-File sslClientConfFile = new File(sslConfDir + "/ssl-client.xml");
-File sslServerConfFile = new File(sslConfDir + "/ssl-server.xml");
+File sslClientConfFile = new File(sslConfDir, getClientSSLConfigFileName());
+File sslServerConfFile = new File(sslConfDir, getServerSSLConfigFileName());

 Map<String, X509Certificate> certs = new HashMap<String, X509Certificate>();

@@ -311,6 +308,42 @@ public class KeyStoreTestUtil {
 return serverSSLConf;
 }

+/**
+* Returns the client SSL configuration file name. Under parallel test
+* execution, this file name is parameterized by a unique ID to ensure that
+* concurrent tests don't collide on an SSL configuration file.
+*
+* @return client SSL configuration file name
+*/
+public static String getClientSSLConfigFileName() {
+return getSSLConfigFileName("ssl-client");
+}
+
+/**
+* Returns the server SSL configuration file name. Under parallel test
+* execution, this file name is parameterized by a unique ID to ensure that
+* concurrent tests don't collide on an SSL configuration file.
+*
+* @return client SSL configuration file name
+*/
+public static String getServerSSLConfigFileName() {
+return getSSLConfigFileName("ssl-server");
+}
+
+/**
+* Returns an SSL configuration file name. Under parallel test
+* execution, this file name is parameterized by a unique ID to ensure that
+* concurrent tests don't collide on an SSL configuration file.
+*
+* @param base the base of the file name
+* @return SSL configuration file name for base
+*/
+private static String getSSLConfigFileName(String base) {
+String testUniqueForkId = System.getProperty("test.unique.fork.id");
+String fileSuffix = testUniqueForkId != null ? "-" + testUniqueForkId : "";
+return base + fileSuffix + ".xml";
+}
+
 /**
 * Creates SSL configuration.
 *
@@ -410,4 +443,19 @@ public class KeyStoreTestUtil {
 throw e;
 }
 }
+
+/**
+* Get the SSL configuration
+* @return {@link Configuration} instance with ssl configs loaded
+*/
+public static Configuration getSslConfig(){
+Configuration sslConf = new Configuration(false);
+String sslServerConfFile = KeyStoreTestUtil.getServerSSLConfigFileName();
+String sslClientConfFile = KeyStoreTestUtil.getClientSSLConfigFileName();
+sslConf.addResource(sslServerConfFile);
+sslConf.addResource(sslClientConfFile);
+sslConf.set(SSLFactory.SSL_SERVER_CONF_KEY, sslServerConfFile);
+sslConf.set(SSLFactory.SSL_CLIENT_CONF_KEY, sslClientConfFile);
+return sslConf;
+}
 }
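Taken together with the test changes earlier in this patch, SSL-based tests now build their client configuration through the new helper instead of hard-coding ssl-client.xml/ssl-server.xml, so parallel forks each read their own per-fork files. A typical setUp fragment then looks roughly like this (sketch based on the TestSSLHttpServer hunk above):

    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
    Configuration sslConf = KeyStoreTestUtil.getSslConfig();
    SSLFactory clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
    clientSslFactory.init();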
@@ -19,9 +19,10 @@ package org.apache.hadoop.tracing;

 import com.google.common.base.Supplier;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.htrace.Span;
-import org.apache.htrace.SpanReceiver;
-import org.apache.htrace.HTraceConfiguration;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.SpanId;
+import org.apache.htrace.core.SpanReceiver;
+import org.apache.htrace.core.HTraceConfiguration;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.LinkedList;
@@ -39,7 +40,7 @@ import org.junit.Assert;
 * push all the metrics to a static place, and would make testing
 * SpanReceiverHost harder.
 */
-public class SetSpanReceiver implements SpanReceiver {
+public class SetSpanReceiver extends SpanReceiver {

 public SetSpanReceiver(HTraceConfiguration conf) {
 }
@@ -68,8 +69,8 @@ public class SetSpanReceiver implements SpanReceiver {
 }

 public static class SetHolder {
-public static ConcurrentHashMap<Long, Span> spans =
-new ConcurrentHashMap<Long, Span>();
+public static ConcurrentHashMap<SpanId, Span> spans =
+new ConcurrentHashMap<SpanId, Span>();

 public static Map<String, List<Span>> getMap() {
 Map<String, List<Span>> map = new HashMap<String, List<Span>>();

@@ -21,7 +21,7 @@ import static org.junit.Assert.assertEquals;
 import java.util.LinkedList;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.tracing.SpanReceiverInfo.ConfigurationPair;
-import org.apache.htrace.HTraceConfiguration;
+import org.apache.htrace.core.HTraceConfiguration;
 import org.junit.Test;

 public class TestTraceUtils {
@@ -32,4 +32,28 @@
 <Method name="allocSlot" />
 <Bug pattern="UL_UNRELEASED_LOCK" />
 </Match>
+<Match>
+<Class name="org.apache.hadoop.hdfs.DFSInputStream"/>
+<Field name="tcpReadsDisabledForTesting"/>
+<Bug pattern="MS_SHOULD_BE_FINAL"/>
+</Match>
+
+<!--
+ResponseProccessor is thread that is designed to catch RuntimeException.
+-->
+<Match>
+<Class name="org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor" />
+<Method name="run" />
+<Bug pattern="REC_CATCH_EXCEPTION" />
+</Match>
+
+<!--
+We use a separate lock to guard cachingStrategy in order to separate
+locks for p-reads from seek + read invocations.
+-->
+<Match>
+<Class name="org.apache.hadoop.hdfs.DFSInputStream" />
+<Field name="cachingStrategy" />
+<Bug pattern="IS2_INCONSISTENT_SYNC" />
+</Match>
 </FindBugsFilter>
@@ -31,8 +31,6 @@ import java.util.List;
 import com.google.common.io.ByteArrayDataOutput;
 import com.google.common.io.ByteStreams;
 import org.apache.commons.lang.mutable.MutableBoolean;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
@@ -56,7 +54,7 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplicaInfo;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
-import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.hdfs.util.IOUtilsClient;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.security.AccessControlException;
@@ -68,6 +66,10 @@ import org.apache.hadoop.util.Time;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.htrace.core.Tracer;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;


 /**
@@ -75,7 +77,7 @@ import com.google.common.base.Preconditions;
 */
 @InterfaceAudience.Private
 public class BlockReaderFactory implements ShortCircuitReplicaCreator {
-static final Log LOG = LogFactory.getLog(BlockReaderFactory.class);
+static final Logger LOG = LoggerFactory.getLogger(BlockReaderFactory.class);

 public static class FailureInjector {
 public void injectRequestFileDescriptorsFailure() throws IOException {
@@ -177,6 +179,11 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
 */
 private Configuration configuration;

+/**
+* The HTrace tracer to use.
+*/
+private Tracer tracer;
+
 /**
 * Information about the domain socket path we should use to connect to the
 * local peer-- or null if we haven't examined the local domain socket.
@@ -281,6 +288,11 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
 return this;
 }

+public BlockReaderFactory setTracer(Tracer tracer) {
+this.tracer = tracer;
+return this;
+}
+
 @VisibleForTesting
 public static void setFailureInjectorForTesting(FailureInjector injector) {
 failureInjector = injector;
@@ -380,15 +392,17 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
 Constructor<? extends ReplicaAccessorBuilder> ctor =
 cls.getConstructor();
 ReplicaAccessorBuilder builder = ctor.newInstance();
+long visibleLength = startOffset + length;
 ReplicaAccessor accessor = builder.
 setAllowShortCircuitReads(allowShortCircuitLocalReads).
 setBlock(block.getBlockId(), block.getBlockPoolId()).
+setGenerationStamp(block.getGenerationStamp()).
 setBlockAccessToken(tokenBytes).
 setClientName(clientName).
 setConfiguration(configuration).
 setFileName(fileName).
 setVerifyChecksum(verifyChecksum).
-setVisibleLength(length).
+setVisibleLength(visibleLength).
 build();
 if (accessor == null) {
 if (LOG.isTraceEnabled()) {
@@ -396,7 +410,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
 cls.getName());
 }
 } else {
-return new ExternalBlockReader(accessor, length, startOffset);
+return new ExternalBlockReader(accessor, visibleLength, startOffset);
 }
 } catch (Throwable t) {
 LOG.warn("Failed to construct new object of type " +
@@ -433,7 +447,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
 try {
 return BlockReaderLocalLegacy.newBlockReader(conf,
 userGroupInformation, configuration, fileName, block, token,
-datanode, startOffset, length, storageType);
+datanode, startOffset, length, storageType, tracer);
 } catch (RemoteException remoteException) {
 ioe = remoteException.unwrapRemoteException(
 InvalidToken.class, AccessControlException.class);
@@ -494,6 +508,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
 setVerifyChecksum(verifyChecksum).
 setCachingStrategy(cachingStrategy).
 setStorageType(storageType).
+setTracer(tracer).
 build();
 }

@@ -550,14 +565,14 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
 if (LOG.isDebugEnabled()) {
 LOG.debug(this + ": closing stale domain peer " + peer, e);
 }
-IOUtils.cleanup(LOG, peer);
+IOUtilsClient.cleanup(LOG, peer);
 } else {
 // Handle an I/O error we got when using a newly created socket.
 // We temporarily disable the domain socket path for a few minutes in
 // this case, to prevent wasting more time on it.
 LOG.warn(this + ": I/O error requesting file descriptors. " +
 "Disabling domain socket " + peer.getDomainSocket(), e);
-IOUtils.cleanup(LOG, peer);
+IOUtilsClient.cleanup(LOG, peer);
 clientContext.getDomainSocketFactory()
 .disableDomainSocketPath(pathInfo.getPath());
 return null;
@@ -616,7 +631,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
 return null;
 } finally {
 if (replica == null) {
-IOUtils.cleanup(DFSClient.LOG, fis[0], fis[1]);
+IOUtilsClient.cleanup(DFSClient.LOG, fis[0], fis[1]);
 }
 }
 case ERROR_UNSUPPORTED:
@@ -684,7 +699,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
 blockReader = getRemoteBlockReader(peer);
 return blockReader;
 } catch (IOException ioe) {
-IOUtils.cleanup(LOG, peer);
+IOUtilsClient.cleanup(LOG, peer);
 if (isSecurityException(ioe)) {
 if (LOG.isTraceEnabled()) {
 LOG.trace(this + ": got security exception while constructing " +
@@ -711,7 +726,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
 }
 } finally {
 if (blockReader == null) {
-IOUtils.cleanup(LOG, peer);
+IOUtilsClient.cleanup(LOG, peer);
 }
 }
 }
@@ -768,7 +783,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
 }
 } finally {
 if (blockReader == null) {
-IOUtils.cleanup(LOG, peer);
+IOUtilsClient.cleanup(LOG, peer);
 }
 }
 }
@@ -863,12 +878,12 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
 return RemoteBlockReader.newBlockReader(fileName,
 block, token, startOffset, length, conf.getIoBufferSize(),
 verifyChecksum, clientName, peer, datanode,
-clientContext.getPeerCache(), cachingStrategy);
+clientContext.getPeerCache(), cachingStrategy, tracer);
 } else {
 return RemoteBlockReader2.newBlockReader(
 fileName, block, token, startOffset, length,
 verifyChecksum, clientName, peer, datanode,
-clientContext.getPeerCache(), cachingStrategy);
+clientContext.getPeerCache(), cachingStrategy, tracer);
 }
 }
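For callers, the only visible change in the factory's builder chain is the extra setTracer step; everything else stays as it was. A hedged caller-side fragment (variable names are placeholders):

    blockReader = blockReaderFactory
        .setVerifyChecksum(verifyChecksum)
        .setCachingStrategy(cachingStrategy)
        .setStorageType(storageType)
        .setTracer(tracer)   // new: htrace-4 Tracer supplied by the client
        .build();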
@@ -34,9 +34,8 @@ import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DirectBufferPool;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -75,6 +74,7 @@ class BlockReaderLocal implements BlockReader {
 private long dataPos;
 private ExtendedBlock block;
 private StorageType storageType;
+private Tracer tracer;

 public Builder(ShortCircuitConf conf) {
 this.maxReadahead = Integer.MAX_VALUE;
@@ -120,6 +120,11 @@ class BlockReaderLocal implements BlockReader {
 return this;
 }

+public Builder setTracer(Tracer tracer) {
+this.tracer = tracer;
+return this;
+}
+
 public BlockReaderLocal build() {
 Preconditions.checkNotNull(replica);
 return new BlockReaderLocal(this);
@@ -228,6 +233,11 @@ class BlockReaderLocal implements BlockReader {
 */
 private StorageType storageType;

+/**
+* The Tracer to use.
+*/
+private final Tracer tracer;
+
 private BlockReaderLocal(Builder builder) {
 this.replica = builder.replica;
 this.dataIn = replica.getDataStream().getChannel();
@@ -257,6 +267,7 @@ class BlockReaderLocal implements BlockReader {
 }
 this.maxReadaheadLength = maxReadaheadChunks * bytesPerChecksum;
 this.storageType = builder.storageType;
+this.tracer = builder.tracer;
 }

 private synchronized void createDataBufIfNeeded() {
@@ -324,8 +335,8 @@ class BlockReaderLocal implements BlockReader {
 */
 private synchronized int fillBuffer(ByteBuffer buf, boolean canSkipChecksum)
 throws IOException {
-TraceScope scope = Trace.startSpan("BlockReaderLocal#fillBuffer(" +
-block.getBlockId() + ")", Sampler.NEVER);
+TraceScope scope = tracer.newScope(
+"BlockReaderLocal#fillBuffer(" + block.getBlockId() + ")");
 try {
 int total = 0;
 long startDataPos = dataPos;
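The fillBuffer change above is the general pattern this patch applies throughout: a per-client org.apache.htrace.core.Tracer creates scopes directly, replacing the static Trace.startSpan(..., Sampler) calls. The surrounding try/finally, which the hunk does not show, stays responsible for closing the scope, roughly:

    TraceScope scope = tracer.newScope(
        "BlockReaderLocal#fillBuffer(" + block.getBlockId() + ")");
    try {
      // ... fill the buffer ...
    } finally {
      scope.close();   // assumption: the existing finally block closes the scope
    }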
@@ -50,10 +50,8 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DirectBufferPool;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;

 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -182,6 +180,7 @@ class BlockReaderLocalLegacy implements BlockReader {
 private long startOffset;
 private final String filename;
 private long blockId;
+private final Tracer tracer;

 /**
 * The only way this object can be instantiated.
@@ -190,8 +189,8 @@ class BlockReaderLocalLegacy implements BlockReader {
 UserGroupInformation userGroupInformation,
 Configuration configuration, String file, ExtendedBlock blk,
 Token<BlockTokenIdentifier> token, DatanodeInfo node,
-long startOffset, long length, StorageType storageType)
-throws IOException {
+long startOffset, long length, StorageType storageType,
+Tracer tracer) throws IOException {
 final ShortCircuitConf scConf = conf.getShortCircuitConf();
 LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node
 .getIpcPort());
@@ -239,10 +238,10 @@ class BlockReaderLocalLegacy implements BlockReader {
 - (startOffset % checksum.getBytesPerChecksum());
 localBlockReader = new BlockReaderLocalLegacy(scConf, file, blk, token,
 startOffset, length, pathinfo, checksum, true, dataIn,
-firstChunkOffset, checksumIn);
+firstChunkOffset, checksumIn, tracer);
 } else {
 localBlockReader = new BlockReaderLocalLegacy(scConf, file, blk, token,
-startOffset, length, pathinfo, dataIn);
+startOffset, length, pathinfo, dataIn, tracer);
 }
 } catch (IOException e) {
 // remove from cache
@@ -321,18 +320,18 @@ class BlockReaderLocalLegacy implements BlockReader {

 private BlockReaderLocalLegacy(ShortCircuitConf conf, String hdfsfile,
 ExtendedBlock block, Token<BlockTokenIdentifier> token, long startOffset,
-long length, BlockLocalPathInfo pathinfo, FileInputStream dataIn)
-throws IOException {
+long length, BlockLocalPathInfo pathinfo, FileInputStream dataIn,
+Tracer tracer) throws IOException {
 this(conf, hdfsfile, block, token, startOffset, length, pathinfo,
 DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 4), false,
-dataIn, startOffset, null);
+dataIn, startOffset, null, tracer);
 }

 private BlockReaderLocalLegacy(ShortCircuitConf conf, String hdfsfile,
 ExtendedBlock block, Token<BlockTokenIdentifier> token, long startOffset,
 long length, BlockLocalPathInfo pathinfo, DataChecksum checksum,
 boolean verifyChecksum, FileInputStream dataIn, long firstChunkOffset,
-FileInputStream checksumIn) throws IOException {
+FileInputStream checksumIn, Tracer tracer) throws IOException {
 this.filename = hdfsfile;
 this.checksum = checksum;
 this.verifyChecksum = verifyChecksum;
@@ -368,6 +367,7 @@ class BlockReaderLocalLegacy implements BlockReader {
 bufferPool.returnBuffer(checksumBuff);
 }
 }
+this.tracer = tracer;
 }

 /**
@@ -375,8 +375,8 @@ class BlockReaderLocalLegacy implements BlockReader {
 */
 private int fillBuffer(FileInputStream stream, ByteBuffer buf)
 throws IOException {
-TraceScope scope = Trace.startSpan("BlockReaderLocalLegacy#fillBuffer(" +
-blockId + ")", Sampler.NEVER);
+TraceScope scope = tracer.
+newScope("BlockReaderLocalLegacy#fillBuffer(" + blockId + ")");
 try {
 int bytesRead = stream.getChannel().read(buf);
 if (bytesRead < 0) {
@@ -53,8 +53,6 @@ import java.util.concurrent.atomic.AtomicInteger;

 import javax.net.SocketFactory;

-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -76,6 +74,7 @@ import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.FsStatus;
+import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.fs.HdfsBlockLocation;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
@@ -94,6 +93,8 @@ import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
@@ -147,10 +148,10 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
+import org.apache.hadoop.hdfs.util.IOUtilsClient;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
@@ -167,24 +168,22 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenRenewer;
-import org.apache.hadoop.tracing.SpanReceiverHost;
-import org.apache.hadoop.tracing.TraceUtils;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DataChecksum.Type;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Time;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.SamplerBuilder;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.net.InetAddresses;
+import org.apache.htrace.core.Tracer;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 /********************************************************
 * DFSClient can connect to a Hadoop Filesystem and
@@ -200,10 +199,11 @@ import com.google.common.net.InetAddresses;
 @InterfaceAudience.Private
 public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 DataEncryptionKeyFactory {
-public static final Log LOG = LogFactory.getLog(DFSClient.class);
+public static final Logger LOG = LoggerFactory.getLogger(DFSClient.class);
 public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour

 private final Configuration conf;
+private final Tracer tracer;
 private final DfsClientConf dfsClientConf;
 final ClientProtocol namenode;
 /* The service used for delegation tokens */
@@ -231,7 +231,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 new DFSHedgedReadMetrics();
 private static ThreadPoolExecutor HEDGED_READ_THREAD_POOL;
 private static volatile ThreadPoolExecutor STRIPED_READ_THREAD_POOL;
-private final Sampler<?> traceSampler;
 private final int smallBufferSize;

 public DfsClientConf getConf() {
@@ -286,25 +285,23 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 /**
 * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
 * If HA is enabled and a positive value is set for
-* {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
-* configuration, the DFSClient will use {@link LossyRetryInvocationHandler}
-* as its RetryInvocationHandler. Otherwise one of nameNodeUri or rpcNamenode
-* must be null.
+* {@link HdfsClientConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY}
+* in the configuration, the DFSClient will use
+* {@link LossyRetryInvocationHandler} as its RetryInvocationHandler.
+* Otherwise one of nameNodeUri or rpcNamenode must be null.
 */
 @VisibleForTesting
 public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
 Configuration conf, FileSystem.Statistics stats)
 throws IOException {
-SpanReceiverHost.get(conf, DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
-traceSampler = new SamplerBuilder(TraceUtils.
-wrapHadoopConf(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf)).build();
 // Copy only the required DFSClient configuration
+this.tracer = FsTracer.get(conf);
 this.dfsClientConf = new DfsClientConf(conf);
 this.conf = conf;
 this.stats = stats;
 this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
 this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
-this.smallBufferSize = DFSUtil.getSmallBufferSize(conf);
+this.smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);

 this.ugi = UserGroupInformation.getCurrentUser();

@@ -313,16 +310,17 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 ThreadLocalRandom.current().nextInt() + "_" +
 Thread.currentThread().getId();
 int numResponseToDrop = conf.getInt(
-DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
-DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
-NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo = null;
+HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
+HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
+ProxyAndInfo<ClientProtocol> proxyInfo = null;
 AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);

 if (numResponseToDrop > 0) {
 // This case is used for testing.
-LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
+LOG.warn(HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
 + " is set to " + numResponseToDrop
 + ", this hacked client will proactively drop responses");
-proxyInfo = NameNodeProxies.createProxyWithLossyRetryHandler(conf,
+proxyInfo = NameNodeProxiesClient.createProxyWithLossyRetryHandler(conf,
 nameNodeUri, ClientProtocol.class, numResponseToDrop,
 nnFallbackToSimpleAuth);
 }
@@ -338,14 +336,14 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 } else {
 Preconditions.checkArgument(nameNodeUri != null,
 "null URI");
-proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri,
-ClientProtocol.class, nnFallbackToSimpleAuth);
+proxyInfo = NameNodeProxiesClient.createProxyWithClientProtocol(conf,
+nameNodeUri, nnFallbackToSimpleAuth);
 this.dtService = proxyInfo.getDelegationTokenService();
 this.namenode = proxyInfo.getProxy();
 }

 String localInterfaces[] =
-conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
+conf.getTrimmedStrings(HdfsClientConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
 localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
 if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
 LOG.debug("Using local interfaces [" +
@@ -544,10 +542,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 } catch (IOException e) {
 // Abort if the lease has already expired.
 final long elapsed = Time.monotonicNow() - getLastLeaseRenewal();
-if (elapsed > HdfsServerConstants.LEASE_HARDLIMIT_PERIOD) {
+if (elapsed > HdfsConstants.LEASE_HARDLIMIT_PERIOD) {
 LOG.warn("Failed to renew lease for " + clientName + " for "
 + (elapsed/1000) + " seconds (>= hard-limit ="
-+ (HdfsServerConstants.LEASE_HARDLIMIT_PERIOD/1000) + " seconds.) "
++ (HdfsConstants.LEASE_HARDLIMIT_PERIOD / 1000) + " seconds.) "
 + "Closing all files being written ...", e);
 closeAllFilesBeingWritten(true);
 } else {
@@ -586,8 +584,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 out.close();
 }
 } catch(IOException ie) {
-LOG.error("Failed to " + (abort? "abort": "close") +
-" inode " + inodeId, ie);
+LOG.error("Failed to " + (abort ? "abort" : "close") + " file: "
++ out.getSrc() + " with inode: " + inodeId, ie);
 }
 }
 }
@@ -624,7 +622,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 */
 public long getBlockSize(String f) throws IOException {
 checkOpen();
-TraceScope scope = getPathTraceScope("getBlockSize", f);
+TraceScope scope = newPathTraceScope("getBlockSize", f);
 try {
 return namenode.getPreferredBlockSize(f);
 } catch (IOException ie) {
@@ -667,7 +665,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
 throws IOException {
 assert dtService != null;
-TraceScope scope = Trace.startSpan("getDelegationToken", traceSampler);
+TraceScope scope = tracer.newScope("getDelegationToken");
 try {
 Token<DelegationTokenIdentifier> token =
 namenode.getDelegationToken(renewer);
@@ -732,7 +730,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 static {
 //Ensure that HDFS Configuration files are loaded before trying to use
 // the renewer.
-HdfsConfiguration.init();
+HdfsConfigurationLoader.init();
 }

 @Override
@@ -786,8 +784,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 "a failover proxy provider configured.");
 }

-NameNodeProxies.ProxyAndInfo<ClientProtocol> info =
-NameNodeProxies.createProxy(conf, uri, ClientProtocol.class);
+ProxyAndInfo<ClientProtocol> info =
+NameNodeProxiesClient.createProxyWithClientProtocol(conf, uri, null);
 assert info.getDelegationTokenService().equals(token.getService()) :
 "Returned service '" + info.getDelegationTokenService().toString() +
 "' doesn't match expected service '" +
@@ -824,7 +822,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 @VisibleForTesting
 public LocatedBlocks getLocatedBlocks(String src, long start, long length)
 throws IOException {
-TraceScope scope = getPathTraceScope("getBlockLocations", src);
+TraceScope scope = newPathTraceScope("getBlockLocations", src);
 try {
 return callGetBlockLocations(namenode, src, start, length);
 } finally {
@@ -856,7 +854,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 boolean recoverLease(String src) throws IOException {
 checkOpen();

-TraceScope scope = getPathTraceScope("recoverLease", src);
+TraceScope scope = newPathTraceScope("recoverLease", src);
 try {
 return namenode.recoverLease(src, clientName);
 } catch (RemoteException re) {
@@ -883,7 +881,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 public BlockLocation[] getBlockLocations(String src, long start,
 long length) throws IOException, UnresolvedLinkException {
 checkOpen();
-TraceScope scope = getPathTraceScope("getBlockLocations", src);
+TraceScope scope = newPathTraceScope("getBlockLocations", src);
 try {
 LocatedBlocks blocks = getLocatedBlocks(src, start, length);
 BlockLocation[] locations = DFSUtilClient.locatedBlocks2Locations(blocks);
@@ -902,7 +900,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 */
 private KeyVersion decryptEncryptedDataEncryptionKey(FileEncryptionInfo
 feInfo) throws IOException {
-TraceScope scope = Trace.startSpan("decryptEDEK", traceSampler);
+TraceScope scope = tracer.newScope("decryptEDEK");
 try {
 KeyProvider provider = getKeyProvider();
 if (provider == null) {
@@ -1058,7 +1056,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 throws IOException, UnresolvedLinkException {
 checkOpen();
 // Get block info from namenode
-TraceScope scope = getPathTraceScope("newDFSInputStream", src);
+TraceScope scope = newPathTraceScope("newDFSInputStream", src);
 try {
 LocatedBlocks locatedBlocks = getLocatedBlocks(src, 0);
 if (locatedBlocks != null) {
@@ -1314,7 +1312,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 public void createSymlink(String target, String link, boolean createParent)
 throws IOException {
 checkOpen();
-TraceScope scope = getPathTraceScope("createSymlink", target);
+TraceScope scope = newPathTraceScope("createSymlink", target);
 try {
||||||
final FsPermission dirPerm = applyUMask(null);
|
final FsPermission dirPerm = applyUMask(null);
|
||||||
namenode.createSymlink(target, link, dirPerm, createParent);
|
namenode.createSymlink(target, link, dirPerm, createParent);
|
||||||
|
@ -1340,7 +1338,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
*/
|
*/
|
||||||
public String getLinkTarget(String path) throws IOException {
|
public String getLinkTarget(String path) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("getLinkTarget", path);
|
TraceScope scope = newPathTraceScope("getLinkTarget", path);
|
||||||
try {
|
try {
|
||||||
return namenode.getLinkTarget(path);
|
return namenode.getLinkTarget(path);
|
||||||
} catch (RemoteException re) {
|
} catch (RemoteException re) {
|
||||||
|
@ -1437,7 +1435,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public boolean setReplication(String src, short replication)
|
public boolean setReplication(String src, short replication)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("setReplication", src);
|
TraceScope scope = newPathTraceScope("setReplication", src);
|
||||||
try {
|
try {
|
||||||
return namenode.setReplication(src, replication);
|
return namenode.setReplication(src, replication);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -1461,7 +1459,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public void setStoragePolicy(String src, String policyName)
|
public void setStoragePolicy(String src, String policyName)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("setStoragePolicy", src);
|
TraceScope scope = newPathTraceScope("setStoragePolicy", src);
|
||||||
try {
|
try {
|
||||||
namenode.setStoragePolicy(src, policyName);
|
namenode.setStoragePolicy(src, policyName);
|
||||||
} catch (RemoteException e) {
|
} catch (RemoteException e) {
|
||||||
|
@ -1482,7 +1480,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
*/
|
*/
|
||||||
public BlockStoragePolicy getStoragePolicy(String path) throws IOException {
|
public BlockStoragePolicy getStoragePolicy(String path) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("getStoragePolicy", path);
|
TraceScope scope = newPathTraceScope("getStoragePolicy", path);
|
||||||
try {
|
try {
|
||||||
return namenode.getStoragePolicy(path);
|
return namenode.getStoragePolicy(path);
|
||||||
} catch (RemoteException e) {
|
} catch (RemoteException e) {
|
||||||
|
@ -1500,7 +1498,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
*/
|
*/
|
||||||
public BlockStoragePolicy[] getStoragePolicies() throws IOException {
|
public BlockStoragePolicy[] getStoragePolicies() throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("getStoragePolicies", traceSampler);
|
TraceScope scope = tracer.newScope("getStoragePolicies");
|
||||||
try {
|
try {
|
||||||
return namenode.getStoragePolicies();
|
return namenode.getStoragePolicies();
|
||||||
} finally {
|
} finally {
|
||||||
|
@ -1516,7 +1514,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
@Deprecated
|
@Deprecated
|
||||||
public boolean rename(String src, String dst) throws IOException {
|
public boolean rename(String src, String dst) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getSrcDstTraceScope("rename", src, dst);
|
TraceScope scope = newSrcDstTraceScope("rename", src, dst);
|
||||||
try {
|
try {
|
||||||
return namenode.rename(src, dst);
|
return namenode.rename(src, dst);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -1537,7 +1535,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
*/
|
*/
|
||||||
public void concat(String trg, String [] srcs) throws IOException {
|
public void concat(String trg, String [] srcs) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("concat", traceSampler);
|
TraceScope scope = tracer.newScope("concat");
|
||||||
try {
|
try {
|
||||||
namenode.concat(trg, srcs);
|
namenode.concat(trg, srcs);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -1555,7 +1553,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public void rename(String src, String dst, Options.Rename... options)
|
public void rename(String src, String dst, Options.Rename... options)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getSrcDstTraceScope("rename2", src, dst);
|
TraceScope scope = newSrcDstTraceScope("rename2", src, dst);
|
||||||
try {
|
try {
|
||||||
namenode.rename2(src, dst, options);
|
namenode.rename2(src, dst, options);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -1584,7 +1582,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
throw new HadoopIllegalArgumentException(
|
throw new HadoopIllegalArgumentException(
|
||||||
"Cannot truncate to a negative file size: " + newLength + ".");
|
"Cannot truncate to a negative file size: " + newLength + ".");
|
||||||
}
|
}
|
||||||
TraceScope scope = getPathTraceScope("truncate", src);
|
TraceScope scope = newPathTraceScope("truncate", src);
|
||||||
try {
|
try {
|
||||||
return namenode.truncate(src, newLength, clientName);
|
return namenode.truncate(src, newLength, clientName);
|
||||||
} catch (RemoteException re) {
|
} catch (RemoteException re) {
|
||||||
|
@ -1614,7 +1612,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
*/
|
*/
|
||||||
public boolean delete(String src, boolean recursive) throws IOException {
|
public boolean delete(String src, boolean recursive) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("delete", src);
|
TraceScope scope = newPathTraceScope("delete", src);
|
||||||
try {
|
try {
|
||||||
return namenode.delete(src, recursive);
|
return namenode.delete(src, recursive);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -1656,7 +1654,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public DirectoryListing listPaths(String src, byte[] startAfter,
|
public DirectoryListing listPaths(String src, byte[] startAfter,
|
||||||
boolean needLocation) throws IOException {
|
boolean needLocation) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("listPaths", src);
|
TraceScope scope = newPathTraceScope("listPaths", src);
|
||||||
try {
|
try {
|
||||||
return namenode.getListing(src, startAfter, needLocation);
|
return namenode.getListing(src, startAfter, needLocation);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -1678,7 +1676,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
*/
|
*/
|
||||||
public HdfsFileStatus getFileInfo(String src) throws IOException {
|
public HdfsFileStatus getFileInfo(String src) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("getFileInfo", src);
|
TraceScope scope = newPathTraceScope("getFileInfo", src);
|
||||||
try {
|
try {
|
||||||
return namenode.getFileInfo(src);
|
return namenode.getFileInfo(src);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -1696,7 +1694,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
*/
|
*/
|
||||||
public boolean isFileClosed(String src) throws IOException{
|
public boolean isFileClosed(String src) throws IOException{
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("isFileClosed", src);
|
TraceScope scope = newPathTraceScope("isFileClosed", src);
|
||||||
try {
|
try {
|
||||||
return namenode.isFileClosed(src);
|
return namenode.isFileClosed(src);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -1718,7 +1716,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
*/
|
*/
|
||||||
public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
|
public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("getFileLinkInfo", src);
|
TraceScope scope = newPathTraceScope("getFileLinkInfo", src);
|
||||||
try {
|
try {
|
||||||
return namenode.getFileLinkInfo(src);
|
return namenode.getFileLinkInfo(src);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -2007,7 +2005,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
|
|
||||||
return PBHelperClient.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
|
return PBHelperClient.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
|
||||||
} finally {
|
} finally {
|
||||||
IOUtils.cleanup(null, pair.in, pair.out);
|
IOUtilsClient.cleanup(null, pair.in, pair.out);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2021,7 +2019,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public void setPermission(String src, FsPermission permission)
|
public void setPermission(String src, FsPermission permission)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("setPermission", src);
|
TraceScope scope = newPathTraceScope("setPermission", src);
|
||||||
try {
|
try {
|
||||||
namenode.setPermission(src, permission);
|
namenode.setPermission(src, permission);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -2046,7 +2044,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public void setOwner(String src, String username, String groupname)
|
public void setOwner(String src, String username, String groupname)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("setOwner", src);
|
TraceScope scope = newPathTraceScope("setOwner", src);
|
||||||
try {
|
try {
|
||||||
namenode.setOwner(src, username, groupname);
|
namenode.setOwner(src, username, groupname);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -2062,7 +2060,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
|
|
||||||
private long[] callGetStats() throws IOException {
|
private long[] callGetStats() throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("getStats", traceSampler);
|
TraceScope scope = tracer.newScope("getStats");
|
||||||
try {
|
try {
|
||||||
return namenode.getStats();
|
return namenode.getStats();
|
||||||
} finally {
|
} finally {
|
||||||
|
@ -2121,7 +2119,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
String cookie)
|
String cookie)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("listCorruptFileBlocks", path);
|
TraceScope scope = newPathTraceScope("listCorruptFileBlocks", path);
|
||||||
try {
|
try {
|
||||||
return namenode.listCorruptFileBlocks(path, cookie);
|
return namenode.listCorruptFileBlocks(path, cookie);
|
||||||
} finally {
|
} finally {
|
||||||
|
@ -2132,7 +2130,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public DatanodeInfo[] datanodeReport(DatanodeReportType type)
|
public DatanodeInfo[] datanodeReport(DatanodeReportType type)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("datanodeReport", traceSampler);
|
TraceScope scope = tracer.newScope("datanodeReport");
|
||||||
try {
|
try {
|
||||||
return namenode.getDatanodeReport(type);
|
return namenode.getDatanodeReport(type);
|
||||||
} finally {
|
} finally {
|
||||||
|
@ -2144,7 +2142,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
DatanodeReportType type) throws IOException {
|
DatanodeReportType type) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope =
|
TraceScope scope =
|
||||||
Trace.startSpan("datanodeStorageReport", traceSampler);
|
tracer.newScope("datanodeStorageReport");
|
||||||
try {
|
try {
|
||||||
return namenode.getDatanodeStorageReport(type);
|
return namenode.getDatanodeStorageReport(type);
|
||||||
} finally {
|
} finally {
|
||||||
|
@ -2175,7 +2173,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
*/
|
*/
|
||||||
public boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException{
|
public boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException{
|
||||||
TraceScope scope =
|
TraceScope scope =
|
||||||
Trace.startSpan("setSafeMode", traceSampler);
|
tracer.newScope("setSafeMode");
|
||||||
try {
|
try {
|
||||||
return namenode.setSafeMode(action, isChecked);
|
return namenode.setSafeMode(action, isChecked);
|
||||||
} finally {
|
} finally {
|
||||||
|
@ -2194,7 +2192,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public String createSnapshot(String snapshotRoot, String snapshotName)
|
public String createSnapshot(String snapshotRoot, String snapshotName)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("createSnapshot", traceSampler);
|
TraceScope scope = tracer.newScope("createSnapshot");
|
||||||
try {
|
try {
|
||||||
return namenode.createSnapshot(snapshotRoot, snapshotName);
|
return namenode.createSnapshot(snapshotRoot, snapshotName);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -2216,7 +2214,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public void deleteSnapshot(String snapshotRoot, String snapshotName)
|
public void deleteSnapshot(String snapshotRoot, String snapshotName)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("deleteSnapshot", traceSampler);
|
TraceScope scope = tracer.newScope("deleteSnapshot");
|
||||||
try {
|
try {
|
||||||
namenode.deleteSnapshot(snapshotRoot, snapshotName);
|
namenode.deleteSnapshot(snapshotRoot, snapshotName);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -2237,7 +2235,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public void renameSnapshot(String snapshotDir, String snapshotOldName,
|
public void renameSnapshot(String snapshotDir, String snapshotOldName,
|
||||||
String snapshotNewName) throws IOException {
|
String snapshotNewName) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("renameSnapshot", traceSampler);
|
TraceScope scope = tracer.newScope("renameSnapshot");
|
||||||
try {
|
try {
|
||||||
namenode.renameSnapshot(snapshotDir, snapshotOldName, snapshotNewName);
|
namenode.renameSnapshot(snapshotDir, snapshotOldName, snapshotNewName);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -2256,8 +2254,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
|
public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
|
||||||
throws IOException {
|
throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("getSnapshottableDirListing",
|
TraceScope scope = tracer.newScope("getSnapshottableDirListing");
|
||||||
traceSampler);
|
|
||||||
try {
|
try {
|
||||||
return namenode.getSnapshottableDirListing();
|
return namenode.getSnapshottableDirListing();
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -2274,7 +2271,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
*/
|
*/
|
||||||
public void allowSnapshot(String snapshotRoot) throws IOException {
|
public void allowSnapshot(String snapshotRoot) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("allowSnapshot", traceSampler);
|
TraceScope scope = tracer.newScope("allowSnapshot");
|
||||||
try {
|
try {
|
||||||
namenode.allowSnapshot(snapshotRoot);
|
namenode.allowSnapshot(snapshotRoot);
|
||||||
} catch (RemoteException re) {
|
} catch (RemoteException re) {
|
||||||
|
@ -2291,7 +2288,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
*/
|
*/
|
||||||
public void disallowSnapshot(String snapshotRoot) throws IOException {
|
public void disallowSnapshot(String snapshotRoot) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("disallowSnapshot", traceSampler);
|
TraceScope scope = tracer.newScope("disallowSnapshot");
|
||||||
try {
|
try {
|
||||||
namenode.disallowSnapshot(snapshotRoot);
|
namenode.disallowSnapshot(snapshotRoot);
|
||||||
} catch (RemoteException re) {
|
} catch (RemoteException re) {
|
||||||
|
@ -2309,7 +2306,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public SnapshotDiffReport getSnapshotDiffReport(String snapshotDir,
|
public SnapshotDiffReport getSnapshotDiffReport(String snapshotDir,
|
||||||
String fromSnapshot, String toSnapshot) throws IOException {
|
String fromSnapshot, String toSnapshot) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("getSnapshotDiffReport", traceSampler);
|
TraceScope scope = tracer.newScope("getSnapshotDiffReport");
|
||||||
try {
|
try {
|
||||||
return namenode.getSnapshotDiffReport(snapshotDir,
|
return namenode.getSnapshotDiffReport(snapshotDir,
|
||||||
fromSnapshot, toSnapshot);
|
fromSnapshot, toSnapshot);
|
||||||
|
@ -2323,7 +2320,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public long addCacheDirective(
|
public long addCacheDirective(
|
||||||
CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
|
CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("addCacheDirective", traceSampler);
|
TraceScope scope = tracer.newScope("addCacheDirective");
|
||||||
try {
|
try {
|
||||||
return namenode.addCacheDirective(info, flags);
|
return namenode.addCacheDirective(info, flags);
|
||||||
} catch (RemoteException re) {
|
} catch (RemoteException re) {
|
||||||
|
@ -2336,7 +2333,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public void modifyCacheDirective(
|
public void modifyCacheDirective(
|
||||||
CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
|
CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("modifyCacheDirective", traceSampler);
|
TraceScope scope = tracer.newScope("modifyCacheDirective");
|
||||||
try {
|
try {
|
||||||
namenode.modifyCacheDirective(info, flags);
|
namenode.modifyCacheDirective(info, flags);
|
||||||
} catch (RemoteException re) {
|
} catch (RemoteException re) {
|
||||||
|
@ -2349,7 +2346,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public void removeCacheDirective(long id)
|
public void removeCacheDirective(long id)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("removeCacheDirective", traceSampler);
|
TraceScope scope = tracer.newScope("removeCacheDirective");
|
||||||
try {
|
try {
|
||||||
namenode.removeCacheDirective(id);
|
namenode.removeCacheDirective(id);
|
||||||
} catch (RemoteException re) {
|
} catch (RemoteException re) {
|
||||||
|
@ -2362,12 +2359,12 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
|
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
|
||||||
CacheDirectiveInfo filter) throws IOException {
|
CacheDirectiveInfo filter) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
return new CacheDirectiveIterator(namenode, filter, traceSampler);
|
return new CacheDirectiveIterator(namenode, filter, tracer);
|
||||||
}
|
}
|
||||||
|
|
||||||
public void addCachePool(CachePoolInfo info) throws IOException {
|
public void addCachePool(CachePoolInfo info) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("addCachePool", traceSampler);
|
TraceScope scope = tracer.newScope("addCachePool");
|
||||||
try {
|
try {
|
||||||
namenode.addCachePool(info);
|
namenode.addCachePool(info);
|
||||||
} catch (RemoteException re) {
|
} catch (RemoteException re) {
|
||||||
|
@ -2379,7 +2376,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
|
|
||||||
public void modifyCachePool(CachePoolInfo info) throws IOException {
|
public void modifyCachePool(CachePoolInfo info) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("modifyCachePool", traceSampler);
|
TraceScope scope = tracer.newScope("modifyCachePool");
|
||||||
try {
|
try {
|
||||||
namenode.modifyCachePool(info);
|
namenode.modifyCachePool(info);
|
||||||
} catch (RemoteException re) {
|
} catch (RemoteException re) {
|
||||||
|
@ -2391,7 +2388,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
|
|
||||||
public void removeCachePool(String poolName) throws IOException {
|
public void removeCachePool(String poolName) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("removeCachePool", traceSampler);
|
TraceScope scope = tracer.newScope("removeCachePool");
|
||||||
try {
|
try {
|
||||||
namenode.removeCachePool(poolName);
|
namenode.removeCachePool(poolName);
|
||||||
} catch (RemoteException re) {
|
} catch (RemoteException re) {
|
||||||
|
@ -2403,7 +2400,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
|
|
||||||
public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
|
public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
return new CachePoolIterator(namenode, traceSampler);
|
return new CachePoolIterator(namenode, tracer);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -2413,7 +2410,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
*/
|
*/
|
||||||
boolean saveNamespace(long timeWindow, long txGap) throws IOException {
|
boolean saveNamespace(long timeWindow, long txGap) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("saveNamespace", traceSampler);
|
TraceScope scope = tracer.newScope("saveNamespace");
|
||||||
try {
|
try {
|
||||||
return namenode.saveNamespace(timeWindow, txGap);
|
return namenode.saveNamespace(timeWindow, txGap);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -2431,7 +2428,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
*/
|
*/
|
||||||
long rollEdits() throws AccessControlException, IOException {
|
long rollEdits() throws AccessControlException, IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("rollEdits", traceSampler);
|
TraceScope scope = tracer.newScope("rollEdits");
|
||||||
try {
|
try {
|
||||||
return namenode.rollEdits();
|
return namenode.rollEdits();
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -2454,7 +2451,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
boolean restoreFailedStorage(String arg)
|
boolean restoreFailedStorage(String arg)
|
||||||
throws AccessControlException, IOException{
|
throws AccessControlException, IOException{
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("restoreFailedStorage", traceSampler);
|
TraceScope scope = tracer.newScope("restoreFailedStorage");
|
||||||
try {
|
try {
|
||||||
return namenode.restoreFailedStorage(arg);
|
return namenode.restoreFailedStorage(arg);
|
||||||
} finally {
|
} finally {
|
||||||
|
@ -2471,7 +2468,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
*/
|
*/
|
||||||
public void refreshNodes() throws IOException {
|
public void refreshNodes() throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("refreshNodes", traceSampler);
|
TraceScope scope = tracer.newScope("refreshNodes");
|
||||||
try {
|
try {
|
||||||
namenode.refreshNodes();
|
namenode.refreshNodes();
|
||||||
} finally {
|
} finally {
|
||||||
|
@ -2486,7 +2483,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
*/
|
*/
|
||||||
public void metaSave(String pathname) throws IOException {
|
public void metaSave(String pathname) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("metaSave", traceSampler);
|
TraceScope scope = tracer.newScope("metaSave");
|
||||||
try {
|
try {
|
||||||
namenode.metaSave(pathname);
|
namenode.metaSave(pathname);
|
||||||
} finally {
|
} finally {
|
||||||
|
@ -2504,7 +2501,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
*/
|
*/
|
||||||
public void setBalancerBandwidth(long bandwidth) throws IOException {
|
public void setBalancerBandwidth(long bandwidth) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("setBalancerBandwidth", traceSampler);
|
TraceScope scope = tracer.newScope("setBalancerBandwidth");
|
||||||
try {
|
try {
|
||||||
namenode.setBalancerBandwidth(bandwidth);
|
namenode.setBalancerBandwidth(bandwidth);
|
||||||
} finally {
|
} finally {
|
||||||
|
@ -2517,7 +2514,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
*/
|
*/
|
||||||
public void finalizeUpgrade() throws IOException {
|
public void finalizeUpgrade() throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("finalizeUpgrade", traceSampler);
|
TraceScope scope = tracer.newScope("finalizeUpgrade");
|
||||||
try {
|
try {
|
||||||
namenode.finalizeUpgrade();
|
namenode.finalizeUpgrade();
|
||||||
} finally {
|
} finally {
|
||||||
|
@ -2527,7 +2524,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
|
|
||||||
RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action) throws IOException {
|
RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("rollingUpgrade", traceSampler);
|
TraceScope scope = tracer.newScope("rollingUpgrade");
|
||||||
try {
|
try {
|
||||||
return namenode.rollingUpgrade(action);
|
return namenode.rollingUpgrade(action);
|
||||||
} finally {
|
} finally {
|
||||||
|
@ -2585,7 +2582,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
if(LOG.isDebugEnabled()) {
|
if(LOG.isDebugEnabled()) {
|
||||||
LOG.debug(src + ": masked=" + absPermission);
|
LOG.debug(src + ": masked=" + absPermission);
|
||||||
}
|
}
|
||||||
TraceScope scope = Trace.startSpan("mkdir", traceSampler);
|
TraceScope scope = tracer.newScope("mkdir");
|
||||||
try {
|
try {
|
||||||
return namenode.mkdirs(src, absPermission, createParent);
|
return namenode.mkdirs(src, absPermission, createParent);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -2613,7 +2610,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
*/
|
*/
|
||||||
ContentSummary getContentSummary(String src) throws IOException {
|
ContentSummary getContentSummary(String src) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("getContentSummary", src);
|
TraceScope scope = newPathTraceScope("getContentSummary", src);
|
||||||
try {
|
try {
|
||||||
return namenode.getContentSummary(src);
|
return namenode.getContentSummary(src);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -2642,7 +2639,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
storagespaceQuota);
|
storagespaceQuota);
|
||||||
|
|
||||||
}
|
}
|
||||||
TraceScope scope = getPathTraceScope("setQuota", src);
|
TraceScope scope = newPathTraceScope("setQuota", src);
|
||||||
try {
|
try {
|
||||||
// Pass null as storage type for traditional namespace/storagespace quota.
|
// Pass null as storage type for traditional namespace/storagespace quota.
|
||||||
namenode.setQuota(src, namespaceQuota, storagespaceQuota, null);
|
namenode.setQuota(src, namespaceQuota, storagespaceQuota, null);
|
||||||
|
@ -2678,7 +2675,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
throw new IllegalArgumentException("Don't support Quota for storage type : "
|
throw new IllegalArgumentException("Don't support Quota for storage type : "
|
||||||
+ type.toString());
|
+ type.toString());
|
||||||
}
|
}
|
||||||
TraceScope scope = getPathTraceScope("setQuotaByStorageType", src);
|
TraceScope scope = newPathTraceScope("setQuotaByStorageType", src);
|
||||||
try {
|
try {
|
||||||
namenode.setQuota(src, HdfsConstants.QUOTA_DONT_SET, quota, type);
|
namenode.setQuota(src, HdfsConstants.QUOTA_DONT_SET, quota, type);
|
||||||
} catch (RemoteException re) {
|
} catch (RemoteException re) {
|
||||||
|
@ -2698,7 +2695,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
*/
|
*/
|
||||||
public void setTimes(String src, long mtime, long atime) throws IOException {
|
public void setTimes(String src, long mtime, long atime) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("setTimes", src);
|
TraceScope scope = newPathTraceScope("setTimes", src);
|
||||||
try {
|
try {
|
||||||
namenode.setTimes(src, mtime, atime);
|
namenode.setTimes(src, mtime, atime);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -2759,7 +2756,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public void modifyAclEntries(String src, List<AclEntry> aclSpec)
|
public void modifyAclEntries(String src, List<AclEntry> aclSpec)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("modifyAclEntries", src);
|
TraceScope scope = newPathTraceScope("modifyAclEntries", src);
|
||||||
try {
|
try {
|
||||||
namenode.modifyAclEntries(src, aclSpec);
|
namenode.modifyAclEntries(src, aclSpec);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -2778,7 +2775,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public void removeAclEntries(String src, List<AclEntry> aclSpec)
|
public void removeAclEntries(String src, List<AclEntry> aclSpec)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("removeAclEntries", traceSampler);
|
TraceScope scope = tracer.newScope("removeAclEntries");
|
||||||
try {
|
try {
|
||||||
namenode.removeAclEntries(src, aclSpec);
|
namenode.removeAclEntries(src, aclSpec);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -2796,7 +2793,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
|
|
||||||
public void removeDefaultAcl(String src) throws IOException {
|
public void removeDefaultAcl(String src) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("removeDefaultAcl", traceSampler);
|
TraceScope scope = tracer.newScope("removeDefaultAcl");
|
||||||
try {
|
try {
|
||||||
namenode.removeDefaultAcl(src);
|
namenode.removeDefaultAcl(src);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -2814,7 +2811,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
|
|
||||||
public void removeAcl(String src) throws IOException {
|
public void removeAcl(String src) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("removeAcl", traceSampler);
|
TraceScope scope = tracer.newScope("removeAcl");
|
||||||
try {
|
try {
|
||||||
namenode.removeAcl(src);
|
namenode.removeAcl(src);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -2832,7 +2829,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
|
|
||||||
public void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
|
public void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("setAcl", traceSampler);
|
TraceScope scope = tracer.newScope("setAcl");
|
||||||
try {
|
try {
|
||||||
namenode.setAcl(src, aclSpec);
|
namenode.setAcl(src, aclSpec);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -2850,7 +2847,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
|
|
||||||
public AclStatus getAclStatus(String src) throws IOException {
|
public AclStatus getAclStatus(String src) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("getAclStatus", src);
|
TraceScope scope = newPathTraceScope("getAclStatus", src);
|
||||||
try {
|
try {
|
||||||
return namenode.getAclStatus(src);
|
return namenode.getAclStatus(src);
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -2866,7 +2863,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public void createEncryptionZone(String src, String keyName)
|
public void createEncryptionZone(String src, String keyName)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("createEncryptionZone", src);
|
TraceScope scope = newPathTraceScope("createEncryptionZone", src);
|
||||||
try {
|
try {
|
||||||
namenode.createEncryptionZone(src, keyName);
|
namenode.createEncryptionZone(src, keyName);
|
||||||
} catch (RemoteException re) {
|
} catch (RemoteException re) {
|
||||||
|
@ -2881,7 +2878,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public EncryptionZone getEZForPath(String src)
|
public EncryptionZone getEZForPath(String src)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("getEZForPath", src);
|
TraceScope scope = newPathTraceScope("getEZForPath", src);
|
||||||
try {
|
try {
|
||||||
return namenode.getEZForPath(src);
|
return namenode.getEZForPath(src);
|
||||||
} catch (RemoteException re) {
|
} catch (RemoteException re) {
|
||||||
|
@ -2895,14 +2892,14 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public RemoteIterator<EncryptionZone> listEncryptionZones()
|
public RemoteIterator<EncryptionZone> listEncryptionZones()
|
||||||
throws IOException {
|
throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
return new EncryptionZoneIterator(namenode, traceSampler);
|
return new EncryptionZoneIterator(namenode, tracer);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
public void setErasureCodingPolicy(String src, ErasureCodingPolicy ecPolicy)
|
public void setErasureCodingPolicy(String src, ErasureCodingPolicy ecPolicy)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("setErasureCodingPolicy", src);
|
TraceScope scope = newPathTraceScope("setErasureCodingPolicy", src);
|
||||||
try {
|
try {
|
||||||
namenode.setErasureCodingPolicy(src, ecPolicy);
|
namenode.setErasureCodingPolicy(src, ecPolicy);
|
||||||
} catch (RemoteException re) {
|
} catch (RemoteException re) {
|
||||||
|
@ -2917,7 +2914,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public void setXAttr(String src, String name, byte[] value,
|
public void setXAttr(String src, String name, byte[] value,
|
||||||
EnumSet<XAttrSetFlag> flag) throws IOException {
|
EnumSet<XAttrSetFlag> flag) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("setXAttr", src);
|
TraceScope scope = newPathTraceScope("setXAttr", src);
|
||||||
try {
|
try {
|
||||||
namenode.setXAttr(src, XAttrHelper.buildXAttr(name, value), flag);
|
namenode.setXAttr(src, XAttrHelper.buildXAttr(name, value), flag);
|
||||||
} catch (RemoteException re) {
|
} catch (RemoteException re) {
|
||||||
|
@ -2934,7 +2931,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
|
|
||||||
public byte[] getXAttr(String src, String name) throws IOException {
|
public byte[] getXAttr(String src, String name) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("getXAttr", src);
|
TraceScope scope = newPathTraceScope("getXAttr", src);
|
||||||
try {
|
try {
|
||||||
final List<XAttr> xAttrs = XAttrHelper.buildXAttrAsList(name);
|
final List<XAttr> xAttrs = XAttrHelper.buildXAttrAsList(name);
|
||||||
final List<XAttr> result = namenode.getXAttrs(src, xAttrs);
|
final List<XAttr> result = namenode.getXAttrs(src, xAttrs);
|
||||||
|
@ -2950,7 +2947,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
|
|
||||||
public Map<String, byte[]> getXAttrs(String src) throws IOException {
|
public Map<String, byte[]> getXAttrs(String src) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("getXAttrs", src);
|
TraceScope scope = newPathTraceScope("getXAttrs", src);
|
||||||
try {
|
try {
|
||||||
return XAttrHelper.buildXAttrMap(namenode.getXAttrs(src, null));
|
return XAttrHelper.buildXAttrMap(namenode.getXAttrs(src, null));
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -2965,7 +2962,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public Map<String, byte[]> getXAttrs(String src, List<String> names)
|
public Map<String, byte[]> getXAttrs(String src, List<String> names)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("getXAttrs", src);
|
TraceScope scope = newPathTraceScope("getXAttrs", src);
|
||||||
try {
|
try {
|
||||||
return XAttrHelper.buildXAttrMap(namenode.getXAttrs(
|
return XAttrHelper.buildXAttrMap(namenode.getXAttrs(
|
||||||
src, XAttrHelper.buildXAttrs(names)));
|
src, XAttrHelper.buildXAttrs(names)));
|
||||||
|
@ -2981,7 +2978,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
public List<String> listXAttrs(String src)
|
public List<String> listXAttrs(String src)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("listXAttrs", src);
|
TraceScope scope = newPathTraceScope("listXAttrs", src);
|
||||||
try {
|
try {
|
||||||
final Map<String, byte[]> xattrs =
|
final Map<String, byte[]> xattrs =
|
||||||
XAttrHelper.buildXAttrMap(namenode.listXAttrs(src));
|
XAttrHelper.buildXAttrMap(namenode.listXAttrs(src));
|
||||||
|
@ -2997,7 +2994,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
|
|
||||||
public void removeXAttr(String src, String name) throws IOException {
|
public void removeXAttr(String src, String name) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("removeXAttr", src);
|
TraceScope scope = newPathTraceScope("removeXAttr", src);
|
||||||
try {
|
try {
|
||||||
namenode.removeXAttr(src, XAttrHelper.buildXAttr(name));
|
namenode.removeXAttr(src, XAttrHelper.buildXAttr(name));
|
||||||
} catch(RemoteException re) {
|
} catch(RemoteException re) {
|
||||||
|
@ -3014,7 +3011,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
|
|
||||||
public void checkAccess(String src, FsAction mode) throws IOException {
|
public void checkAccess(String src, FsAction mode) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("checkAccess", src);
|
TraceScope scope = newPathTraceScope("checkAccess", src);
|
||||||
try {
|
try {
|
||||||
namenode.checkAccess(src, mode);
|
namenode.checkAccess(src, mode);
|
||||||
} catch (RemoteException re) {
|
} catch (RemoteException re) {
|
||||||
|
@ -3028,7 +3025,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
|
|
||||||
public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
|
public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = Trace.startSpan("getErasureCodingPolicies", traceSampler);
|
TraceScope scope = tracer.newScope("getErasureCodingPolicies");
|
||||||
try {
|
try {
|
||||||
return namenode.getErasureCodingPolicies();
|
return namenode.getErasureCodingPolicies();
|
||||||
} finally {
|
} finally {
|
||||||
|
@ -3038,13 +3035,14 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
|
|
||||||
public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
|
public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
return new DFSInotifyEventInputStream(traceSampler, namenode);
|
return new DFSInotifyEventInputStream(namenode, tracer);
|
||||||
}
|
}
|
||||||
|
|
||||||
public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid)
|
public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
return new DFSInotifyEventInputStream(traceSampler, namenode, lastReadTxid);
|
return new DFSInotifyEventInputStream(namenode, tracer,
|
||||||
|
lastReadTxid);
|
||||||
}
|
}
|
||||||
|
|
||||||
@Override // RemotePeerFactory
|
@Override // RemotePeerFactory
|
||||||
|
@ -3066,7 +3064,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
return peer;
|
return peer;
|
||||||
} finally {
|
} finally {
|
||||||
if (!success) {
|
if (!success) {
|
||||||
IOUtils.cleanup(LOG, peer);
|
IOUtilsClient.cleanup(LOG, peer);
|
||||||
IOUtils.closeSocket(sock);
|
IOUtils.closeSocket(sock);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -3179,11 +3177,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Probe for encryption enabled on this filesystem.
|
* Probe for encryption enabled on this filesystem.
|
||||||
* See {@link DFSUtil#isHDFSEncryptionEnabled(Configuration)}
|
* See {@link DFSUtilClient#isHDFSEncryptionEnabled(Configuration)}
|
||||||
* @return true if encryption is enabled
|
* @return true if encryption is enabled
|
||||||
*/
|
*/
|
||||||
public boolean isHDFSEncryptionEnabled() {
|
public boolean isHDFSEncryptionEnabled() {
|
||||||
return DFSUtil.isHDFSEncryptionEnabled(this.conf);
|
return DFSUtilClient.isHDFSEncryptionEnabled(this.conf);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -3195,27 +3193,21 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
return saslClient;
|
return saslClient;
|
||||||
}
|
}
|
||||||
|
|
||||||
TraceScope getPathTraceScope(String description, String path) {
|
TraceScope newPathTraceScope(String description, String path) {
|
||||||
TraceScope scope = Trace.startSpan(description, traceSampler);
|
TraceScope scope = tracer.newScope(description);
|
||||||
Span span = scope.getSpan();
|
|
||||||
if (span != null) {
|
|
||||||
if (path != null) {
|
if (path != null) {
|
||||||
span.addKVAnnotation("path", path);
|
scope.addKVAnnotation("path", path);
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return scope;
|
return scope;
|
||||||
}
|
}
|
||||||
|
|
||||||
TraceScope getSrcDstTraceScope(String description, String src, String dst) {
|
TraceScope newSrcDstTraceScope(String description, String src, String dst) {
|
||||||
TraceScope scope = Trace.startSpan(description, traceSampler);
|
TraceScope scope = tracer.newScope(description);
|
||||||
Span span = scope.getSpan();
|
|
||||||
if (span != null) {
|
|
||||||
if (src != null) {
|
if (src != null) {
|
||||||
span.addKVAnnotation("src", src);
|
scope.addKVAnnotation("src", src);
|
||||||
}
|
}
|
||||||
if (dst != null) {
|
if (dst != null) {
|
||||||
span.addKVAnnotation("dst", dst);
|
scope.addKVAnnotation("dst", dst);
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return scope;
|
return scope;
|
||||||
}
|
}
|
||||||
|
@ -3231,7 +3223,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
|
|
||||||
public ErasureCodingPolicy getErasureCodingPolicy(String src) throws IOException {
|
public ErasureCodingPolicy getErasureCodingPolicy(String src) throws IOException {
|
||||||
checkOpen();
|
checkOpen();
|
||||||
TraceScope scope = getPathTraceScope("getErasureCodingPolicy", src);
|
TraceScope scope = newPathTraceScope("getErasureCodingPolicy", src);
|
||||||
try {
|
try {
|
||||||
return namenode.getErasureCodingPolicy(src);
|
return namenode.getErasureCodingPolicy(src);
|
||||||
} catch (RemoteException re) {
|
} catch (RemoteException re) {
|
||||||
|
@ -3241,4 +3233,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
scope.close();
|
scope.close();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Tracer getTracer() {
|
||||||
|
return tracer;
|
||||||
|
}
|
||||||
}
|
}
|
|
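The DFSClient hunks above all converge on one HTrace 4 idiom: open a TraceScope from the client's Tracer for each NameNode RPC, optionally tag it with a path or src/dst key/value annotation, and close it in a finally block. Below is a minimal, self-contained sketch of that idiom, assuming only that an org.apache.htrace.core.Tracer instance is available; the helper mirrors the newPathTraceScope method introduced in the diff, while the class name and tracedCall are illustrative and not part of the patch.

    import org.apache.htrace.core.TraceScope;
    import org.apache.htrace.core.Tracer;

    // Illustrative sketch of the scope-per-RPC pattern used in the hunks above.
    class TracingSketch {
      private final Tracer tracer;

      TracingSketch(Tracer tracer) {
        this.tracer = tracer;
      }

      // Same shape as DFSClient#newPathTraceScope in the patch: create a scope
      // for the operation and attach the path as a key/value annotation.
      TraceScope newPathTraceScope(String description, String path) {
        TraceScope scope = tracer.newScope(description);
        if (path != null) {
          scope.addKVAnnotation("path", path);
        }
        return scope;
      }

      // Typical call site, as in getFileInfo()/delete()/setTimes() above: the
      // scope is closed in finally so the span is reported even when the RPC throws.
      void tracedCall(String src, Runnable rpc) {
        TraceScope scope = newPathTraceScope("exampleRpc", src);
        try {
          rpc.run();
        } finally {
          scope.close();
        }
      }
    }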
@@ -30,12 +30,15 @@ import org.apache.hadoop.classification.InterfaceAudience;
 @VisibleForTesting
 @InterfaceAudience.Private
 public class DFSClientFaultInjector {
-public static DFSClientFaultInjector instance = new DFSClientFaultInjector();
+private static DFSClientFaultInjector instance = new DFSClientFaultInjector();
 public static AtomicLong exceptionNum = new AtomicLong(0);
 
 public static DFSClientFaultInjector get() {
 return instance;
 }
+public static void set(DFSClientFaultInjector instance) {
+DFSClientFaultInjector.instance = instance;
+}
 
 public boolean corruptPacket() {
 return false;
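The DFSClientFaultInjector hunk above makes the singleton private and adds a set() method, so tests can install a fault-injecting subclass instead of writing the formerly public field directly. A hedged sketch of how a test might use it follows; only get(), set() and corruptPacket() come from the class itself, while the subclass, the test scaffolding, and the assumption that the class stays in org.apache.hadoop.hdfs are illustrative.

    import org.apache.hadoop.hdfs.DFSClientFaultInjector;

    // Illustrative subclass: forces the client write path to corrupt a packet.
    class CorruptingFaultInjector extends DFSClientFaultInjector {
      @Override
      public boolean corruptPacket() {
        return true;
      }
    }

    // Inside a test, install the injector and restore the original instance
    // afterwards so other tests are unaffected:
    //
    //   DFSClientFaultInjector original = DFSClientFaultInjector.get();
    //   DFSClientFaultInjector.set(new CorruptingFaultInjector());
    //   try {
    //     // ... exercise DFSOutputStream writes here ...
    //   } finally {
    //     DFSClientFaultInjector.set(original);
    //   }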
@@ -26,9 +26,8 @@ import org.apache.hadoop.hdfs.inotify.EventBatchList;
 import org.apache.hadoop.hdfs.inotify.MissingEventsException;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.util.Time;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -44,13 +43,8 @@ import java.util.concurrent.TimeUnit;
 @InterfaceAudience.Public
 @InterfaceStability.Unstable
 public class DFSInotifyEventInputStream {
-public static Logger LOG = LoggerFactory.getLogger(DFSInotifyEventInputStream
-.class);
+public static final Logger LOG = LoggerFactory.getLogger(
+DFSInotifyEventInputStream.class);
 
-/**
-* The trace sampler to use when making RPCs to the NameNode.
-*/
-private final Sampler<?> traceSampler;
-
 private final ClientProtocol namenode;
 private Iterator<EventBatch> it;
@@ -65,20 +59,22 @@ public class DFSInotifyEventInputStream {
 */
 private Random rng = new Random();
 
+private final Tracer tracer;
+
 private static final int INITIAL_WAIT_MS = 10;
 
-DFSInotifyEventInputStream(Sampler<?> traceSampler, ClientProtocol namenode)
+DFSInotifyEventInputStream(ClientProtocol namenode, Tracer tracer)
 throws IOException {
 // Only consider new transaction IDs.
-this(traceSampler, namenode, namenode.getCurrentEditLogTxid());
+this(namenode, tracer, namenode.getCurrentEditLogTxid());
 }
 
-DFSInotifyEventInputStream(Sampler traceSampler, ClientProtocol namenode,
-long lastReadTxid) throws IOException {
-this.traceSampler = traceSampler;
+DFSInotifyEventInputStream(ClientProtocol namenode,
+Tracer tracer, long lastReadTxid) throws IOException {
 this.namenode = namenode;
 this.it = Iterators.emptyIterator();
 this.lastReadTxid = lastReadTxid;
+this.tracer = tracer;
 }
 
 /**
@@ -98,8 +94,7 @@ public class DFSInotifyEventInputStream {
 * The next available batch of events will be returned.
 */
 public EventBatch poll() throws IOException, MissingEventsException {
-TraceScope scope =
-Trace.startSpan("inotifyPoll", traceSampler);
+TraceScope scope = tracer.newScope("inotifyPoll");
 try {
 // need to keep retrying until the NN sends us the latest committed txid
 if (lastReadTxid == -1) {
@@ -180,7 +175,7 @@ public class DFSInotifyEventInputStream {
 */
 public EventBatch poll(long time, TimeUnit tu) throws IOException,
 InterruptedException, MissingEventsException {
-TraceScope scope = Trace.startSpan("inotifyPollWithTimeout", traceSampler);
+TraceScope scope = tracer.newScope("inotifyPollWithTimeout");
 EventBatch next = null;
 try {
 long initialTime = Time.monotonicNow();
@@ -217,7 +212,7 @@ public class DFSInotifyEventInputStream {
 */
 public EventBatch take() throws IOException, InterruptedException,
 MissingEventsException {
-TraceScope scope = Trace.startSpan("inotifyTake", traceSampler);
+TraceScope scope = tracer.newScope("inotifyTake");
 EventBatch next = null;
 try {
 int nextWaitMin = INITIAL_WAIT_MS;
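The DFSInotifyEventInputStream hunks above only change how the stream is constructed and traced; the consuming side is unchanged. For context, a minimal polling loop over the stream might look like the sketch below. poll(long, TimeUnit), take() and EventBatch are the APIs visible in the diff; the loop itself and the EventBatch#getTxid()/getEvents() accessors are assumptions drawn from the inotify package, not from this patch.

    import java.io.IOException;
    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
    import org.apache.hadoop.hdfs.inotify.EventBatch;
    import org.apache.hadoop.hdfs.inotify.MissingEventsException;

    class InotifyPollSketch {
      // Drain whatever batches are currently available, waiting up to one
      // second for each; poll(time, unit) returns null on timeout.
      static void drainOnce(DFSInotifyEventInputStream stream)
          throws IOException, InterruptedException, MissingEventsException {
        EventBatch batch;
        while ((batch = stream.poll(1, TimeUnit.SECONDS)) != null) {
          System.out.println("txid=" + batch.getTxid()
              + ", events=" + batch.getEvents().length);
        }
      }
    }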
@@ -54,6 +54,7 @@ import org.apache.hadoop.fs.CanUnbuffer;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.HasEnhancedByteBufferAccess;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.fs.StorageType;
@@ -77,9 +78,9 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.IdentityHashStore;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.SpanId;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -677,6 +678,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
 setClientCacheContext(dfsClient.getClientContext()).
 setUserGroupInformation(dfsClient.ugi).
 setConfiguration(dfsClient.getConfiguration()).
+setTracer(dfsClient.getTracer()).
 build();
 }
 
@@ -940,7 +942,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
 public synchronized int read(final byte buf[], int off, int len) throws IOException {
 ReaderStrategy byteArrayReader = new ByteArrayStrategy(buf);
 TraceScope scope =
-dfsClient.getPathTraceScope("DFSInputStream#byteArrayRead", src);
+dfsClient.newPathTraceScope("DFSInputStream#byteArrayRead", src);
 try {
 return readWithStrategy(byteArrayReader, off, len);
 } finally {
@@ -952,7 +954,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
 public synchronized int read(final ByteBuffer buf) throws IOException {
 ReaderStrategy byteBufferReader = new ByteBufferStrategy(buf);
 TraceScope scope =
-dfsClient.getPathTraceScope("DFSInputStream#byteBufferRead", src);
+dfsClient.newPathTraceScope("DFSInputStream#byteBufferRead", src);
 try {
 return readWithStrategy(byteBufferReader, 0, buf.remaining());
 } finally {
@@ -1128,14 +1130,14 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
|
||||||
final ByteBuffer bb,
|
final ByteBuffer bb,
|
||||||
final Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap,
|
final Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap,
|
||||||
final int hedgedReadId) {
|
final int hedgedReadId) {
|
||||||
final Span parentSpan = Trace.currentSpan();
|
final SpanId parentSpanId = Tracer.getCurrentSpanId();
|
||||||
return new Callable<ByteBuffer>() {
|
return new Callable<ByteBuffer>() {
|
||||||
@Override
|
@Override
|
||||||
public ByteBuffer call() throws Exception {
|
public ByteBuffer call() throws Exception {
|
||||||
byte[] buf = bb.array();
|
byte[] buf = bb.array();
|
||||||
int offset = bb.position();
|
int offset = bb.position();
|
||||||
TraceScope scope =
|
TraceScope scope = dfsClient.getTracer().
|
||||||
Trace.startSpan("hedgedRead" + hedgedReadId, parentSpan);
|
newScope("hedgedRead" + hedgedReadId, parentSpanId);
|
||||||
try {
|
try {
|
||||||
actualGetFromOneDataNode(datanode, block, start, end, buf,
|
actualGetFromOneDataNode(datanode, block, start, end, buf,
|
||||||
offset, corruptedBlockMap);
|
offset, corruptedBlockMap);
|
||||||
|
@ -1421,8 +1423,8 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
|
||||||
@Override
|
@Override
|
||||||
public int read(long position, byte[] buffer, int offset, int length)
|
public int read(long position, byte[] buffer, int offset, int length)
|
||||||
throws IOException {
|
throws IOException {
|
||||||
TraceScope scope =
|
TraceScope scope = dfsClient.
|
||||||
dfsClient.getPathTraceScope("DFSInputStream#byteArrayPread", src);
|
newPathTraceScope("DFSInputStream#byteArrayPread", src);
|
||||||
try {
|
try {
|
||||||
return pread(position, buffer, offset, length);
|
return pread(position, buffer, offset, length);
|
||||||
} finally {
|
} finally {
|

@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSOutputSummer;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Syncable;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -63,9 +64,7 @@ import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DataChecksum.Type;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Time;
-import org.apache.htrace.Sampler;
+import org.apache.htrace.core.TraceScope;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -231,7 +230,7 @@ public class DFSOutputStream extends FSOutputSummer
 short replication, long blockSize, Progressable progress, int buffersize,
 DataChecksum checksum, String[] favoredNodes) throws IOException {
 TraceScope scope =
-dfsClient.getPathTraceScope("newStreamForCreate", src);
+dfsClient.newPathTraceScope("newStreamForCreate", src);
 try {
 HdfsFileStatus stat = null;

@@ -360,7 +359,7 @@ public class DFSOutputStream extends FSOutputSummer
 LocatedBlock lastBlock, HdfsFileStatus stat, DataChecksum checksum,
 String[] favoredNodes) throws IOException {
 TraceScope scope =
-dfsClient.getPathTraceScope("newStreamForAppend", src);
+dfsClient.newPathTraceScope("newStreamForAppend", src);
 if(stat.getErasureCodingPolicy() != null) {
 throw new IOException("Not support appending to a striping layout file yet.");
 }
@@ -388,7 +387,7 @@ public class DFSOutputStream extends FSOutputSummer
 }

 protected TraceScope createWriteTraceScope() {
-return dfsClient.getPathTraceScope("DFSOutputStream#write", src);
+return dfsClient.newPathTraceScope("DFSOutputStream#write", src);
 }

 // @see FSOutputSummer#writeChunk()
@@ -502,7 +501,7 @@ public class DFSOutputStream extends FSOutputSummer
 @Override
 public void hflush() throws IOException {
 TraceScope scope =
-dfsClient.getPathTraceScope("hflush", src);
+dfsClient.newPathTraceScope("hflush", src);
 try {
 flushOrSync(false, EnumSet.noneOf(SyncFlag.class));
 } finally {
@@ -513,7 +512,7 @@ public class DFSOutputStream extends FSOutputSummer
 @Override
 public void hsync() throws IOException {
 TraceScope scope =
-dfsClient.getPathTraceScope("hsync", src);
+dfsClient.newPathTraceScope("hsync", src);
 try {
 flushOrSync(true, EnumSet.noneOf(SyncFlag.class));
 } finally {
@@ -536,7 +535,7 @@ public class DFSOutputStream extends FSOutputSummer
 */
 public void hsync(EnumSet<SyncFlag> syncFlags) throws IOException {
 TraceScope scope =
-dfsClient.getPathTraceScope("hsync", src);
+dfsClient.newPathTraceScope("hsync", src);
 try {
 flushOrSync(true, syncFlags);
 } finally {
@@ -777,7 +776,7 @@ public class DFSOutputStream extends FSOutputSummer
 @Override
 public synchronized void close() throws IOException {
 TraceScope scope =
-dfsClient.getPathTraceScope("DFSOutputStream#close", src);
+dfsClient.newPathTraceScope("DFSOutputStream#close", src);
 try {
 closeImpl();
 } finally {
@@ -806,7 +805,7 @@ public class DFSOutputStream extends FSOutputSummer
 // get last block before destroying the streamer
 ExtendedBlock lastBlock = getStreamer().getBlock();
 closeThreads(false);
-TraceScope scope = Trace.startSpan("completeFile", Sampler.NEVER);
+TraceScope scope = dfsClient.getTracer().newScope("completeFile");
 try {
 completeFile(lastBlock);
 } finally {
@@ -914,6 +913,13 @@ public class DFSOutputStream extends FSOutputSummer
 return fileId;
 }

+/**
+* Return the source of stream.
+*/
+String getSrc() {
+return src;
+}
+
 /**
 * Returns the data streamer object.
 */

@@ -28,7 +28,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
 import org.apache.hadoop.hdfs.util.ByteArrayManager;
-import org.apache.htrace.Span;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.SpanId;
+import org.apache.htrace.core.TraceScope;

 /****************************************************************
 * DFSPacket is used by DataStreamer and DFSOutputStream.
@@ -39,7 +41,7 @@ import org.apache.htrace.Span;
 @InterfaceAudience.Private
 public class DFSPacket {
 public static final long HEART_BEAT_SEQNO = -1L;
-private static long[] EMPTY = new long[0];
+private static SpanId[] EMPTY = new SpanId[0];
 private final long seqno; // sequence number of buffer in block
 private final long offsetInBlock; // offset in block
 private boolean syncBlock; // this packet forces the current block to disk
@@ -66,9 +68,9 @@ public class DFSPacket {
 private int checksumPos;
 private final int dataStart;
 private int dataPos;
-private long[] traceParents = EMPTY;
+private SpanId[] traceParents = EMPTY;
 private int traceParentsUsed;
-private Span span;
+private TraceScope scope;

 /**
 * Create a new packet.
@@ -307,7 +309,10 @@ public class DFSPacket {
 addTraceParent(span.getSpanId());
 }

-public void addTraceParent(long id) {
+public void addTraceParent(SpanId id) {
+if (!id.isValid()) {
+return;
+}
 if (traceParentsUsed == traceParents.length) {
 int newLength = (traceParents.length == 0) ? 8 :
 traceParents.length * 2;
@@ -324,18 +329,18 @@ public class DFSPacket {
 *
 * Protected by the DFSOutputStream dataQueue lock.
 */
-public long[] getTraceParents() {
+public SpanId[] getTraceParents() {
 // Remove duplicates from the array.
 int len = traceParentsUsed;
 Arrays.sort(traceParents, 0, len);
 int i = 0, j = 0;
-long prevVal = 0; // 0 is not a valid span id
+SpanId prevVal = SpanId.INVALID;
 while (true) {
 if (i == len) {
 break;
 }
-long val = traceParents[i];
+SpanId val = traceParents[i];
-if (val != prevVal) {
+if (!val.equals(prevVal)) {
 traceParents[j] = val;
 j++;
 prevVal = val;
@@ -349,11 +354,11 @@ public class DFSPacket {
 return traceParents;
 }

-public void setTraceSpan(Span span) {
+public void setTraceScope(TraceScope scope) {
-this.span = span;
+this.scope = scope;
 }

-public Span getTraceSpan() {
+public TraceScope getTraceScope() {
-return span;
+return scope;
 }
 }
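
Note: the DFSPacket change above switches trace parents from raw long IDs to htrace SpanId objects, so the duplicate removal in getTraceParents() now compares with equals() rather than ==. A minimal, self-contained sketch of that sort-and-dedup step; String stand-ins are used here instead of SpanId (an assumption, only so the sketch compiles without htrace on the classpath):

    import java.util.Arrays;

    public class DedupSketch {
      // Mirrors the patched getTraceParents(): sort the used prefix, then keep
      // only values that differ from the previous one, using equals().
      static String[] dedup(String[] parents, int used) {
        Arrays.sort(parents, 0, used);
        int j = 0;
        String prev = null;
        for (int i = 0; i < used; i++) {
          String val = parents[i];
          if (!val.equals(prev)) {
            parents[j++] = val;
            prev = val;
          }
        }
        return Arrays.copyOf(parents, j);
      }

      public static void main(String[] args) {
        // prints [a, b]
        System.out.println(Arrays.toString(dedup(new String[]{"b", "a", "b", "a"}, 4)));
      }
    }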

@@ -26,7 +26,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.io.ByteBufferPool;

@@ -260,7 +259,7 @@ public class DFSStripedInputStream extends DFSInputStream {

 private void closeReader(BlockReaderInfo readerInfo) {
 if (readerInfo != null) {
-IOUtils.cleanup(DFSClient.LOG, readerInfo.reader);
+// IOUtils.cleanup(null, readerInfo.reader);
 readerInfo.skip();
 }
 }
@@ -483,7 +482,7 @@ public class DFSStripedInputStream extends DFSInputStream {
 @Override
 protected LocatedBlock refreshLocatedBlock(LocatedBlock block)
 throws IOException {
-int idx = BlockIdManager.getBlockIndex(block.getBlock().getLocalBlock());
+int idx = StripedBlockUtil.getBlockIndex(block.getBlock().getLocalBlock());
 LocatedBlock lb = getBlockGroupAt(block.getStartOffset());
 // If indexing information is returned, iterate through the index array
 // to find the entry for position idx in the group

@@ -37,6 +37,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -53,11 +54,9 @@ import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Time;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;

 import com.google.common.base.Preconditions;
+import org.apache.htrace.core.TraceScope;


 /**
@@ -87,7 +86,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
 try {
 return queues.get(i).take();
 } catch(InterruptedException ie) {
-throw DFSUtil.toInterruptedIOException("take interrupted, i=" + i, ie);
+throw DFSUtilClient.toInterruptedIOException("take interrupted, i=" + i, ie);
 }
 }

@@ -95,7 +94,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
 try {
 return queues.get(i).poll(100, TimeUnit.MILLISECONDS);
 } catch (InterruptedException e) {
-throw DFSUtil.toInterruptedIOException("take interrupted, i=" + i, e);
+throw DFSUtilClient.toInterruptedIOException("take interrupted, i=" + i, e);
 }
 }

@@ -187,7 +186,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
 CellBuffers(int numParityBlocks) throws InterruptedException{
 if (cellSize % bytesPerChecksum != 0) {
 throw new HadoopIllegalArgumentException("Invalid values: "
-+ DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY + " (="
++ HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY + " (="
 + bytesPerChecksum + ") must divide cell size (=" + cellSize + ").");
 }

@@ -280,7 +279,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
 try {
 cellBuffers = new CellBuffers(numParityBlocks);
 } catch (InterruptedException ie) {
-throw DFSUtil.toInterruptedIOException(
+throw DFSUtilClient.toInterruptedIOException(
 "Failed to create cell buffers", ie);
 }

@@ -621,7 +620,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
 coordinator.wait(waitInterval);
 remaingTime -= Time.monotonicNow() - start;
 } catch (InterruptedException e) {
-throw DFSUtil.toInterruptedIOException("Interrupted when waiting" +
+throw DFSUtilClient.toInterruptedIOException("Interrupted when waiting" +
 " for results of updating striped streamers", e);
 }
 }
@@ -893,7 +892,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
 }

 closeThreads(false);
-TraceScope scope = Trace.startSpan("completeFile", Sampler.NEVER);
+TraceScope scope = dfsClient.getTracer().newScope("completeFile");
 try {
 completeFile(currentBlockGroup);
 } finally {
@@ -942,7 +941,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
 try {
 Thread.sleep(ms);
 } catch(InterruptedException ie) {
-throw DFSUtil.toInterruptedIOException(
+throw DFSUtilClient.toInterruptedIOException(
 "Sleep interrupted during " + op, ie);
 }
 }

@@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@@ -53,6 +54,7 @@ import org.slf4j.LoggerFactory;

 import javax.net.SocketFactory;
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.io.UnsupportedEncodingException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
@@ -590,6 +592,29 @@ public class DFSUtilClient {
 }
 }

+public static int getIoFileBufferSize(Configuration conf) {
+return conf.getInt(
+CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
+CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
+}
+
+public static int getSmallBufferSize(Configuration conf) {
+return Math.min(getIoFileBufferSize(conf) / 2, 512);
+}
+
+/**
+* Probe for HDFS Encryption being enabled; this uses the value of
+* the option {@link HdfsClientConfigKeys#DFS_ENCRYPTION_KEY_PROVIDER_URI},
+* returning true if that property contains a non-empty, non-whitespace
+* string.
+* @param conf configuration to probe
+* @return true if encryption is considered enabled.
+*/
+public static boolean isHDFSEncryptionEnabled(Configuration conf) {
+return !conf.getTrimmed(
+HdfsClientConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "").isEmpty();
+}
+
 public static InetSocketAddress getNNAddress(String address) {
 return NetUtils.createSocketAddr(address,
 HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
@@ -628,4 +653,11 @@ public class DFSUtilClient {
 return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
 + namenode.getHostName() + portString);
 }
+
+public static InterruptedIOException toInterruptedIOException(String message,
+InterruptedException e) {
+final InterruptedIOException iioe = new InterruptedIOException(message);
+iioe.initCause(e);
+return iioe;
+}
 }
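
Note: the DFSUtilClient hunk above adds small client-side helpers (I/O buffer sizing, an encryption-enabled probe, and InterruptedException wrapping). A hedged sketch of how the toInterruptedIOException pattern is typically consumed by a caller; the waitForSomething method below is hypothetical, not part of the patch:

    import java.io.InterruptedIOException;

    public class InterruptWrapSketch {
      // Wrap an InterruptedException into an InterruptedIOException, keeping the
      // original exception as the cause, as the new DFSUtilClient helper does.
      static InterruptedIOException toInterruptedIOException(String message,
          InterruptedException e) {
        final InterruptedIOException iioe = new InterruptedIOException(message);
        iioe.initCause(e);
        return iioe;
      }

      // Hypothetical caller: convert the checked InterruptedException into an
      // IOException subtype so IO-oriented method signatures can propagate it.
      static void waitForSomething(Object lock, long ms) throws InterruptedIOException {
        try {
          synchronized (lock) {
            lock.wait(ms);
          }
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
          throw toInterruptedIOException("wait interrupted", ie);
        }
      }
    }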

@@ -39,9 +39,8 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;

-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
@@ -73,12 +72,11 @@ import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Time;
-import org.apache.htrace.NullScope;
+import org.apache.htrace.core.Sampler;
-import org.apache.htrace.Sampler;
+import org.apache.htrace.core.Span;
-import org.apache.htrace.Span;
+import org.apache.htrace.core.SpanId;
-import org.apache.htrace.Trace;
+import org.apache.htrace.core.TraceScope;
-import org.apache.htrace.TraceInfo;
+import org.apache.htrace.core.Tracer;
-import org.apache.htrace.TraceScope;

 import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.CacheLoader;
@@ -86,6 +84,9 @@ import com.google.common.cache.LoadingCache;
 import com.google.common.cache.RemovalListener;
 import com.google.common.cache.RemovalNotification;

+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /*********************************************************************
 *
 * The DataStreamer class is responsible for sending data packets to the
@@ -109,7 +110,7 @@ import com.google.common.cache.RemovalNotification;

 @InterfaceAudience.Private
 class DataStreamer extends Daemon {
-static final Log LOG = LogFactory.getLog(DataStreamer.class);
+static final Logger LOG = LoggerFactory.getLogger(DataStreamer.class);

 /**
 * Create a socket for a write pipeline
@@ -528,7 +529,7 @@ class DataStreamer extends Daemon {
 @Override
 public void run() {
 long lastPacket = Time.monotonicNow();
-TraceScope scope = NullScope.INSTANCE;
+TraceScope scope = null;
 while (!streamerClosed && dfsClient.clientRunning) {
 // if the Responder encountered an error, shutdown Responder
 if (errorState.hasError() && response != null) {
@@ -579,12 +580,11 @@ class DataStreamer extends Daemon {
 LOG.warn("Caught exception", e);
 }
 one = dataQueue.getFirst(); // regular data packet
-long parents[] = one.getTraceParents();
+SpanId[] parents = one.getTraceParents();
 if (parents.length > 0) {
-scope = Trace.startSpan("dataStreamer", new TraceInfo(0, parents[0]));
+scope = dfsClient.getTracer().
-// TODO: use setParents API once it's available from HTrace 3.2
+newScope("dataStreamer", parents[0]);
-// scope = Trace.startSpan("dataStreamer", Sampler.ALWAYS);
+scope.getSpan().setParents(parents);
-// scope.getSpan().setParents(parents);
 }
 }
 }
@@ -629,12 +629,16 @@ class DataStreamer extends Daemon {
 }

 // send the packet
-Span span = null;
+SpanId spanId = SpanId.INVALID;
 synchronized (dataQueue) {
 // move packet from dataQueue to ackQueue
 if (!one.isHeartbeatPacket()) {
-span = scope.detach();
+if (scope != null) {
-one.setTraceSpan(span);
+spanId = scope.getSpanId();
+scope.detach();
+one.setTraceScope(scope);
+}
+scope = null;
 dataQueue.removeFirst();
 ackQueue.addLast(one);
 dataQueue.notifyAll();
@@ -646,7 +650,8 @@ class DataStreamer extends Daemon {
 }

 // write out data to remote datanode
-TraceScope writeScope = Trace.startSpan("writeTo", span);
+TraceScope writeScope = dfsClient.getTracer().
+newScope("DataStreamer#writeTo", spanId);
 try {
 one.writeTo(blockStream);
 blockStream.flush();
@@ -713,7 +718,10 @@ class DataStreamer extends Daemon {
 streamerClosed = true;
 }
 } finally {
+if (scope != null) {
 scope.close();
+scope = null;
+}
 }
 }
 closeInternal();
@@ -747,7 +755,8 @@ class DataStreamer extends Daemon {
 * @throws IOException
 */
 void waitForAckedSeqno(long seqno) throws IOException {
-TraceScope scope = Trace.startSpan("waitForAckedSeqno", Sampler.NEVER);
+TraceScope scope = dfsClient.getTracer().
+newScope("waitForAckedSeqno");
 try {
 if (LOG.isDebugEnabled()) {
 LOG.debug("Waiting for ack for: " + seqno);
@@ -797,7 +806,7 @@ class DataStreamer extends Daemon {
 while (!streamerClosed && dataQueue.size() + ackQueue.size() >
 dfsClient.getConf().getWriteMaxPackets()) {
 if (firstWait) {
-Span span = Trace.currentSpan();
+Span span = Tracer.getCurrentSpan();
 if (span != null) {
 span.addTimelineAnnotation("dataQueue.wait");
 }
@@ -818,7 +827,7 @@ class DataStreamer extends Daemon {
 }
 }
 } finally {
-Span span = Trace.currentSpan();
+Span span = Tracer.getCurrentSpan();
 if ((span != null) && (!firstWait)) {
 span.addTimelineAnnotation("end.wait");
 }
@@ -953,7 +962,7 @@ class DataStreamer extends Daemon {
 setName("ResponseProcessor for block " + block);
 PipelineAck ack = new PipelineAck();

-TraceScope scope = NullScope.INSTANCE;
+TraceScope scope = null;
 while (!responderClosed && dfsClient.clientRunning && !isLastPacketInBlock) {
 // process responses from datanodes.
 try {
@@ -1040,8 +1049,11 @@ class DataStreamer extends Daemon {
 block.setNumBytes(one.getLastByteOffsetBlock());

 synchronized (dataQueue) {
-scope = Trace.continueSpan(one.getTraceSpan());
+scope = one.getTraceScope();
-one.setTraceSpan(null);
+if (scope != null) {
+scope.reattach();
+one.setTraceScope(null);
+}
 lastAckedSeqno = seqno;
 ackQueue.removeFirst();
 dataQueue.notifyAll();
@@ -1062,8 +1074,11 @@ class DataStreamer extends Daemon {
 responderClosed = true;
 }
 } finally {
+if (scope != null) {
 scope.close();
 }
+scope = null;
+}
 }
 }

@@ -1133,11 +1148,12 @@ class DataStreamer extends Daemon {
 // a client waiting on close() will be aware that the flush finished.
 synchronized (dataQueue) {
 DFSPacket endOfBlockPacket = dataQueue.remove(); // remove the end of block packet
-Span span = endOfBlockPacket.getTraceSpan();
-if (span != null) {
 // Close any trace span associated with this Packet
-TraceScope scope = Trace.continueSpan(span);
+TraceScope scope = endOfBlockPacket.getTraceScope();
+if (scope != null) {
+scope.reattach();
 scope.close();
+endOfBlockPacket.setTraceScope(null);
 }
 assert endOfBlockPacket.isLastPacketInBlock();
 assert lastAckedSeqno == endOfBlockPacket.getSeqno() - 1;
@@ -1217,22 +1233,46 @@ class DataStreamer extends Daemon {
 return;
 }

-//get a new datanode
+int tried = 0;
 final DatanodeInfo[] original = nodes;
-final LocatedBlock lb = dfsClient.namenode.getAdditionalDatanode(
+final StorageType[] originalTypes = storageTypes;
+final String[] originalIDs = storageIDs;
+IOException caughtException = null;
+ArrayList<DatanodeInfo> exclude = new ArrayList<DatanodeInfo>(failed);
+while (tried < 3) {
+LocatedBlock lb;
+//get a new datanode
+lb = dfsClient.namenode.getAdditionalDatanode(
 src, stat.getFileId(), block, nodes, storageIDs,
-failed.toArray(new DatanodeInfo[failed.size()]),
+exclude.toArray(new DatanodeInfo[exclude.size()]),
 1, dfsClient.clientName);
+// a new node was allocated by the namenode. Update nodes.
 setPipeline(lb);

 //find the new datanode
 final int d = findNewDatanode(original);
+//transfer replica. pick a source from the original nodes
-//transfer replica
+final DatanodeInfo src = original[tried % original.length];
-final DatanodeInfo src = d == 0? nodes[1]: nodes[d - 1];
 final DatanodeInfo[] targets = {nodes[d]};
 final StorageType[] targetStorageTypes = {storageTypes[d]};

+try {
 transfer(src, targets, targetStorageTypes, lb.getBlockToken());
+} catch (IOException ioe) {
+DFSClient.LOG.warn("Error transferring data from " + src + " to " +
+nodes[d] + ": " + ioe.getMessage());
+caughtException = ioe;
+// add the allocated node to the exclude list.
+exclude.add(nodes[d]);
+setPipeline(original, originalTypes, originalIDs);
+tried++;
+continue;
+}
+return; // finished successfully
+}
+// All retries failed
+throw (caughtException != null) ? caughtException :
+new IOException("Failed to add a node");
 }

 private void transfer(final DatanodeInfo src, final DatanodeInfo[] targets,
@@ -1245,7 +1285,11 @@ class DataStreamer extends Daemon {
 try {
 sock = createSocketForPipeline(src, 2, dfsClient);
 final long writeTimeout = dfsClient.getDatanodeWriteTimeout(2);
-final long readTimeout = dfsClient.getDatanodeReadTimeout(2);
+// transfer timeout multiplier based on the transfer size
+// One per 200 packets = 12.8MB. Minimum is 2.
+int multi = 2 + (int)(bytesSent/dfsClient.getConf().getWritePacketSize())/200;
+final long readTimeout = dfsClient.getDatanodeReadTimeout(multi);

 OutputStream unbufOut = NetUtils.getOutputStream(sock, writeTimeout);
 InputStream unbufIn = NetUtils.getInputStream(sock, readTimeout);
@@ -1254,7 +1298,7 @@ class DataStreamer extends Daemon {
 unbufOut = saslStreams.out;
 unbufIn = saslStreams.in;
 out = new DataOutputStream(new BufferedOutputStream(unbufOut,
-DFSUtil.getSmallBufferSize(dfsClient.getConfiguration())));
+DFSUtilClient.getSmallBufferSize(dfsClient.getConfiguration())));
 in = new DataInputStream(unbufIn);

 //send the TRANSFER_BLOCK request
@@ -1528,7 +1572,7 @@ class DataStreamer extends Daemon {
 unbufOut = saslStreams.out;
 unbufIn = saslStreams.in;
 out = new DataOutputStream(new BufferedOutputStream(unbufOut,
-DFSUtil.getSmallBufferSize(dfsClient.getConfiguration())));
+DFSUtilClient.getSmallBufferSize(dfsClient.getConfiguration())));
 blockReplyStream = new DataInputStream(unbufIn);

 //
@@ -1738,7 +1782,7 @@ class DataStreamer extends Daemon {
 void queuePacket(DFSPacket packet) {
 synchronized (dataQueue) {
 if (packet == null) return;
-packet.addTraceParent(Trace.currentSpan());
+packet.addTraceParent(Tracer.getCurrentSpanId());
 dataQueue.addLast(packet);
 lastQueuedSeqno = packet.getSeqno();
 if (LOG.isDebugEnabled()) {
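
Note: the DataStreamer change above wraps the replacement-datanode transfer in a bounded retry loop: on an IOException the freshly allocated node is added to an exclude list, the original pipeline is restored, and another datanode is requested, for up to three attempts. A simplified, self-contained sketch of that retry-with-exclusion pattern; the interface and method names here are illustrative, not the HDFS API:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    public class RetryWithExcludeSketch {
      interface Allocator { String allocate(List<String> exclude) throws IOException; }
      interface Transfer { void copyTo(String node) throws IOException; }

      // Try up to 3 candidate nodes; each failed candidate is excluded from the
      // next allocation. Rethrow the last failure if every attempt fails.
      static String addNode(Allocator allocator, Transfer transfer,
          List<String> alreadyFailed) throws IOException {
        List<String> exclude = new ArrayList<>(alreadyFailed);
        IOException caught = null;
        for (int tried = 0; tried < 3; tried++) {
          String candidate = allocator.allocate(exclude);
          try {
            transfer.copyTo(candidate);
            return candidate;            // finished successfully
          } catch (IOException ioe) {
            caught = ioe;
            exclude.add(candidate);      // do not ask for this node again
          }
        }
        throw (caught != null) ? caught : new IOException("Failed to add a node");
      }
    }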

@@ -62,7 +62,6 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
@@ -110,13 +109,13 @@ public class DistributedFileSystem extends FileSystem {
 private Path workingDir;
 private URI uri;
 private String homeDirPrefix =
-DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT;
+HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT;

 DFSClient dfs;
 private boolean verifyChecksum = true;

 static{
-HdfsConfiguration.init();
+HdfsConfigurationLoader.init();
 }

 public DistributedFileSystem() {
@@ -146,8 +145,8 @@ public class DistributedFileSystem extends FileSystem {
 throw new IOException("Incomplete HDFS URI, no host: "+ uri);
 }
 homeDirPrefix = conf.get(
-DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY,
+HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY,
-DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT);
+HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT);

 this.dfs = new DFSClient(uri, conf, statistics);
 this.uri = URI.create(uri.getScheme()+"://"+uri.getAuthority());
@@ -172,7 +171,7 @@ public class DistributedFileSystem extends FileSystem {
 @Override
 public void setWorkingDirectory(Path dir) {
 String result = fixRelativePart(dir).toUri().getPath();
-if (!DFSUtil.isValidName(result)) {
+if (!DFSUtilClient.isValidName(result)) {
 throw new IllegalArgumentException("Invalid DFS directory name " +
 result);
 }
@@ -196,7 +195,7 @@ public class DistributedFileSystem extends FileSystem {
 private String getPathName(Path file) {
 checkPath(file);
 String result = file.toUri().getPath();
-if (!DFSUtil.isValidName(result)) {
+if (!DFSUtilClient.isValidName(result)) {
 throw new IllegalArgumentException("Pathname " + result + " from " +
 file+" is not a valid DFS filename.");
 }
@@ -219,8 +218,7 @@ public class DistributedFileSystem extends FileSystem {
 final Path absF = fixRelativePart(p);
 return new FileSystemLinkResolver<BlockLocation[]>() {
 @Override
-public BlockLocation[] doCall(final Path p)
+public BlockLocation[] doCall(final Path p) throws IOException {
-throws IOException, UnresolvedLinkException {
 return dfs.getBlockLocations(getPathName(p), start, len);
 }
 @Override
@@ -449,7 +447,6 @@ public class DistributedFileSystem extends FileSystem {
 * Same as create(), except fails if parent directory doesn't already exist.
 */
 @Override
-@SuppressWarnings("deprecation")
 public FSDataOutputStream createNonRecursive(final Path f,
 final FsPermission permission, final EnumSet<CreateFlag> flag,
 final int bufferSize, final short replication, final long blockSize,

@@ -46,6 +46,9 @@ public final class ExternalBlockReader implements BlockReader {
 @Override
 public int read(byte[] buf, int off, int len) throws IOException {
 int nread = accessor.read(pos, buf, off, len);
+if (nread < 0) {
+return nread;
+}
 pos += nread;
 return nread;
 }
@@ -53,6 +56,9 @@ public final class ExternalBlockReader implements BlockReader {
 @Override
 public int read(ByteBuffer buf) throws IOException {
 int nread = accessor.read(pos, buf);
+if (nread < 0) {
+return nread;
+}
 pos += nread;
 return nread;
 }
@@ -63,7 +69,8 @@ public final class ExternalBlockReader implements BlockReader {
 if (n <= 0) {
 return 0;
 }
-// You can't skip past the end of the replica.
+// You can't skip past the last offset that we want to read with this
+// block reader.
 long oldPos = pos;
 pos += n;
 if (pos > visibleLength) {
@@ -74,12 +81,11 @@ public final class ExternalBlockReader implements BlockReader {

 @Override
 public int available() throws IOException {
-// We return the amount of bytes that we haven't read yet from the
+// We return the amount of bytes between the current offset and the visible
-// replica, based on our current position. Some of the other block
+// length. Some of the other block readers return a shorter length than
-// readers return a shorter length than that. The only advantage to
+// that. The only advantage to returning a shorter length is that the
-// returning a shorter length is that the DFSInputStream will
+// DFSInputStream will trash your block reader and create a new one if
-// trash your block reader and create a new one if someone tries to
+// someone tries to seek() beyond the available() region.
-// seek() beyond the available() region.
 long diff = visibleLength - pos;
 if (diff > Integer.MAX_VALUE) {
 return Integer.MAX_VALUE;

@@ -0,0 +1,44 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+
+/**
+* Load default HDFS configuration resources.
+*/
+@InterfaceAudience.Private
+class HdfsConfigurationLoader {
+
+static {
+// adds the default resources
+Configuration.addDefaultResource("hdfs-default.xml");
+Configuration.addDefaultResource("hdfs-site.xml");
+}
+
+/**
+* This method is here so that when invoked, default resources are added if
+* they haven't already been previously loaded. Upon loading this class, the
+* static initializer block above will be executed to add the default
+* resources. It is safe for this method to be called multiple times
+* as the static initializer block will only get invoked once.
+*/
+public static void init() {
+}
+}

@@ -0,0 +1,366 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.Proxy;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
+import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
+import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
+import org.apache.hadoop.hdfs.server.namenode.ha.WrappedFailoverProxyProvider;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.retry.DefaultFailoverProxyProvider;
+import org.apache.hadoop.io.retry.FailoverProxyProvider;
+import org.apache.hadoop.io.retry.LossyRetryInvocationHandler;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryProxy;
+import org.apache.hadoop.io.retry.RetryUtils;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+
+/**
+* Create proxy objects with {@link ClientProtocol} to communicate with a remote
+* NN. Generally use {@link NameNodeProxiesClient#createProxyWithClientProtocol(
+* Configuration, URI, AtomicBoolean)}, which will create either an HA- or
+* non-HA-enabled client proxy as appropriate.
+*
+* For creating proxy objects with other protocols, please see
+* {@link NameNodeProxies#createProxy(Configuration, URI, Class)}.
+*/
+@InterfaceAudience.Private
+public class NameNodeProxiesClient {
+
+private static final Logger LOG = LoggerFactory.getLogger(
+NameNodeProxiesClient.class);
+
+/**
+* Wrapper for a client proxy as well as its associated service ID.
+* This is simply used as a tuple-like return type for created NN proxy.
+*/
+public static class ProxyAndInfo<PROXYTYPE> {
+private final PROXYTYPE proxy;
+private final Text dtService;
+private final InetSocketAddress address;
+
+public ProxyAndInfo(PROXYTYPE proxy, Text dtService,
+InetSocketAddress address) {
+this.proxy = proxy;
+this.dtService = dtService;
+this.address = address;
+}
+
+public PROXYTYPE getProxy() {
+return proxy;
+}
+
+public Text getDelegationTokenService() {
+return dtService;
+}
+
+public InetSocketAddress getAddress() {
+return address;
+}
+}
+
+/**
+* Creates the namenode proxy with the ClientProtocol. This will handle
+* creation of either HA- or non-HA-enabled proxy objects, depending upon
+* if the provided URI is a configured logical URI.
+*
+* @param conf the configuration containing the required IPC
+* properties, client failover configurations, etc.
+* @param nameNodeUri the URI pointing either to a specific NameNode
+* or to a logical nameservice.
+* @param fallbackToSimpleAuth set to true or false during calls to indicate
+* if a secure client falls back to simple auth
+* @return an object containing both the proxy and the associated
+* delegation token service it corresponds to
+* @throws IOException if there is an error creating the proxy
+* @see {@link NameNodeProxies#createProxy(Configuration, URI, Class)}.
+*/
+public static ProxyAndInfo<ClientProtocol> createProxyWithClientProtocol(
+Configuration conf, URI nameNodeUri, AtomicBoolean fallbackToSimpleAuth)
+throws IOException {
+AbstractNNFailoverProxyProvider<ClientProtocol> failoverProxyProvider =
+createFailoverProxyProvider(conf, nameNodeUri, ClientProtocol.class,
+true, fallbackToSimpleAuth);
+
+if (failoverProxyProvider == null) {
+InetSocketAddress nnAddr = DFSUtilClient.getNNAddress(nameNodeUri);
+Text dtService = SecurityUtil.buildTokenService(nnAddr);
+ClientProtocol proxy = createNonHAProxyWithClientProtocol(nnAddr, conf,
+UserGroupInformation.getCurrentUser(), true, fallbackToSimpleAuth);
+return new ProxyAndInfo<>(proxy, dtService, nnAddr);
+} else {
+return createHAProxy(conf, nameNodeUri, ClientProtocol.class,
+failoverProxyProvider);
+}
+}
+
+/**
+* Generate a dummy namenode proxy instance that utilizes our hacked
+* {@link LossyRetryInvocationHandler}. Proxy instance generated using this
+* method will proactively drop RPC responses. Currently this method only
|
||||||
|
* support HA setup. null will be returned if the given configuration is not
|
||||||
|
* for HA.
|
||||||
|
*
|
||||||
|
* @param config the configuration containing the required IPC
|
||||||
|
* properties, client failover configurations, etc.
|
||||||
|
* @param nameNodeUri the URI pointing either to a specific NameNode
|
||||||
|
* or to a logical nameservice.
|
||||||
|
* @param xface the IPC interface which should be created
|
||||||
|
* @param numResponseToDrop The number of responses to drop for each RPC call
|
||||||
|
* @param fallbackToSimpleAuth set to true or false during calls to indicate
|
||||||
|
* if a secure client falls back to simple auth
|
||||||
|
* @return an object containing both the proxy and the associated
|
||||||
|
* delegation token service it corresponds to. Will return null of the
|
||||||
|
* given configuration does not support HA.
|
||||||
|
* @throws IOException if there is an error creating the proxy
|
||||||
|
*/
|
||||||
|
public static <T> ProxyAndInfo<T> createProxyWithLossyRetryHandler(
|
||||||
|
Configuration config, URI nameNodeUri, Class<T> xface,
|
||||||
|
int numResponseToDrop, AtomicBoolean fallbackToSimpleAuth)
|
||||||
|
throws IOException {
|
||||||
|
Preconditions.checkArgument(numResponseToDrop > 0);
|
||||||
|
AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
|
||||||
|
createFailoverProxyProvider(config, nameNodeUri, xface, true,
|
||||||
|
fallbackToSimpleAuth);
|
||||||
|
|
||||||
|
if (failoverProxyProvider != null) { // HA case
|
||||||
|
int delay = config.getInt(
|
||||||
|
HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_KEY,
|
||||||
|
HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_DEFAULT);
|
||||||
|
int maxCap = config.getInt(
|
||||||
|
HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_KEY,
|
||||||
|
HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_DEFAULT);
|
||||||
|
int maxFailoverAttempts = config.getInt(
|
||||||
|
HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_KEY,
|
||||||
|
HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_DEFAULT);
|
||||||
|
int maxRetryAttempts = config.getInt(
|
||||||
|
HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY,
|
||||||
|
HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_DEFAULT);
|
||||||
|
InvocationHandler dummyHandler = new LossyRetryInvocationHandler<>(
|
||||||
|
numResponseToDrop, failoverProxyProvider,
|
||||||
|
RetryPolicies.failoverOnNetworkException(
|
||||||
|
RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailoverAttempts,
|
||||||
|
Math.max(numResponseToDrop + 1, maxRetryAttempts), delay,
|
||||||
|
maxCap));
|
||||||
|
|
||||||
|
@SuppressWarnings("unchecked")
|
||||||
|
T proxy = (T) Proxy.newProxyInstance(
|
||||||
|
failoverProxyProvider.getInterface().getClassLoader(),
|
||||||
|
new Class[]{xface}, dummyHandler);
|
||||||
|
Text dtService;
|
||||||
|
if (failoverProxyProvider.useLogicalURI()) {
|
||||||
|
dtService = HAUtilClient.buildTokenServiceForLogicalUri(nameNodeUri,
|
||||||
|
HdfsConstants.HDFS_URI_SCHEME);
|
||||||
|
} else {
|
||||||
|
dtService = SecurityUtil.buildTokenService(
|
||||||
|
DFSUtilClient.getNNAddress(nameNodeUri));
|
||||||
|
}
|
||||||
|
return new ProxyAndInfo<>(proxy, dtService,
|
||||||
|
DFSUtilClient.getNNAddress(nameNodeUri));
|
||||||
|
} else {
|
||||||
|
LOG.warn("Currently creating proxy using " +
|
||||||
|
"LossyRetryInvocationHandler requires NN HA setup");
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Creates the Failover proxy provider instance*/
|
||||||
|
@VisibleForTesting
|
||||||
|
public static <T> AbstractNNFailoverProxyProvider<T> createFailoverProxyProvider(
|
||||||
|
Configuration conf, URI nameNodeUri, Class<T> xface, boolean checkPort,
|
||||||
|
AtomicBoolean fallbackToSimpleAuth) throws IOException {
|
||||||
|
Class<FailoverProxyProvider<T>> failoverProxyProviderClass = null;
|
||||||
|
AbstractNNFailoverProxyProvider<T> providerNN;
|
||||||
|
try {
|
||||||
|
// Obtain the class of the proxy provider
|
||||||
|
failoverProxyProviderClass = getFailoverProxyProviderClass(conf,
|
||||||
|
nameNodeUri);
|
||||||
|
if (failoverProxyProviderClass == null) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
// Create a proxy provider instance.
|
||||||
|
Constructor<FailoverProxyProvider<T>> ctor = failoverProxyProviderClass
|
||||||
|
.getConstructor(Configuration.class, URI.class, Class.class);
|
||||||
|
FailoverProxyProvider<T> provider = ctor.newInstance(conf, nameNodeUri,
|
||||||
|
xface);
|
||||||
|
|
||||||
|
// If the proxy provider is of an old implementation, wrap it.
|
||||||
|
if (!(provider instanceof AbstractNNFailoverProxyProvider)) {
|
||||||
|
providerNN = new WrappedFailoverProxyProvider<>(provider);
|
||||||
|
} else {
|
||||||
|
providerNN = (AbstractNNFailoverProxyProvider<T>)provider;
|
||||||
|
}
|
||||||
|
} catch (Exception e) {
|
||||||
|
final String message = "Couldn't create proxy provider " +
|
||||||
|
failoverProxyProviderClass;
|
||||||
|
LOG.debug(message, e);
|
||||||
|
if (e.getCause() instanceof IOException) {
|
||||||
|
throw (IOException) e.getCause();
|
||||||
|
} else {
|
||||||
|
throw new IOException(message, e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check the port in the URI, if it is logical.
|
||||||
|
if (checkPort && providerNN.useLogicalURI()) {
|
||||||
|
int port = nameNodeUri.getPort();
|
||||||
|
if (port > 0 &&
|
||||||
|
port != HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT) {
|
||||||
|
// Throwing here without any cleanup is fine since we have not
|
||||||
|
// actually created the underlying proxies yet.
|
||||||
|
throw new IOException("Port " + port + " specified in URI "
|
||||||
|
+ nameNodeUri + " but host '" + nameNodeUri.getHost()
|
||||||
|
+ "' is a logical (HA) namenode"
|
||||||
|
+ " and does not use port information.");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
providerNN.setFallbackToSimpleAuth(fallbackToSimpleAuth);
|
||||||
|
return providerNN;
|
||||||
|
}
|
||||||
|
|
||||||
|
/** Gets the configured Failover proxy provider's class */
|
||||||
|
@VisibleForTesting
|
||||||
|
public static <T> Class<FailoverProxyProvider<T>> getFailoverProxyProviderClass(
|
||||||
|
Configuration conf, URI nameNodeUri) throws IOException {
|
||||||
|
if (nameNodeUri == null) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
String host = nameNodeUri.getHost();
|
||||||
|
String configKey = HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX
|
||||||
|
+ "." + host;
|
||||||
|
try {
|
||||||
|
@SuppressWarnings("unchecked")
|
||||||
|
Class<FailoverProxyProvider<T>> ret = (Class<FailoverProxyProvider<T>>)
|
||||||
|
conf.getClass(configKey, null, FailoverProxyProvider.class);
|
||||||
|
return ret;
|
||||||
|
} catch (RuntimeException e) {
|
||||||
|
if (e.getCause() instanceof ClassNotFoundException) {
|
||||||
|
throw new IOException("Could not load failover proxy provider class "
|
||||||
|
+ conf.get(configKey) + " which is configured for authority "
|
||||||
|
+ nameNodeUri, e);
|
||||||
|
} else {
|
||||||
|
throw e;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates an explicitly HA-enabled proxy object.
|
||||||
|
*
|
||||||
|
* @param conf the configuration object
|
||||||
|
* @param nameNodeUri the URI pointing either to a specific NameNode or to a
|
||||||
|
* logical nameservice.
|
||||||
|
* @param xface the IPC interface which should be created
|
||||||
|
* @param failoverProxyProvider Failover proxy provider
|
||||||
|
* @return an object containing both the proxy and the associated
|
||||||
|
* delegation token service it corresponds to
|
||||||
|
* @throws IOException
|
||||||
|
*/
|
||||||
|
@SuppressWarnings("unchecked")
|
||||||
|
public static <T> ProxyAndInfo<T> createHAProxy(
|
||||||
|
Configuration conf, URI nameNodeUri, Class<T> xface,
|
||||||
|
AbstractNNFailoverProxyProvider<T> failoverProxyProvider)
|
||||||
|
throws IOException {
|
||||||
|
Preconditions.checkNotNull(failoverProxyProvider);
|
||||||
|
// HA case
|
||||||
|
DfsClientConf config = new DfsClientConf(conf);
|
||||||
|
T proxy = (T) RetryProxy.create(xface, failoverProxyProvider,
|
||||||
|
RetryPolicies.failoverOnNetworkException(
|
||||||
|
RetryPolicies.TRY_ONCE_THEN_FAIL, config.getMaxFailoverAttempts(),
|
||||||
|
config.getMaxRetryAttempts(), config.getFailoverSleepBaseMillis(),
|
||||||
|
config.getFailoverSleepMaxMillis()));
|
||||||
|
|
||||||
|
Text dtService;
|
||||||
|
if (failoverProxyProvider.useLogicalURI()) {
|
||||||
|
dtService = HAUtilClient.buildTokenServiceForLogicalUri(nameNodeUri,
|
||||||
|
HdfsConstants.HDFS_URI_SCHEME);
|
||||||
|
} else {
|
||||||
|
dtService = SecurityUtil.buildTokenService(
|
||||||
|
DFSUtilClient.getNNAddress(nameNodeUri));
|
||||||
|
}
|
||||||
|
return new ProxyAndInfo<>(proxy, dtService,
|
||||||
|
DFSUtilClient.getNNAddress(nameNodeUri));
|
||||||
|
}
|
||||||
|
|
||||||
|
public static ClientProtocol createNonHAProxyWithClientProtocol(
|
||||||
|
InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
|
||||||
|
boolean withRetries, AtomicBoolean fallbackToSimpleAuth)
|
||||||
|
throws IOException {
|
||||||
|
RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
|
||||||
|
ProtobufRpcEngine.class);
|
||||||
|
|
||||||
|
final RetryPolicy defaultPolicy =
|
||||||
|
RetryUtils.getDefaultRetryPolicy(
|
||||||
|
conf,
|
||||||
|
HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY,
|
||||||
|
HdfsClientConfigKeys.Retry.POLICY_ENABLED_DEFAULT,
|
||||||
|
HdfsClientConfigKeys.Retry.POLICY_SPEC_KEY,
|
||||||
|
HdfsClientConfigKeys.Retry.POLICY_SPEC_DEFAULT,
|
||||||
|
SafeModeException.class.getName());
|
||||||
|
|
||||||
|
final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
|
||||||
|
ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(
|
||||||
|
ClientNamenodeProtocolPB.class, version, address, ugi, conf,
|
||||||
|
NetUtils.getDefaultSocketFactory(conf),
|
||||||
|
org.apache.hadoop.ipc.Client.getTimeout(conf), defaultPolicy,
|
||||||
|
fallbackToSimpleAuth).getProxy();
|
||||||
|
|
||||||
|
if (withRetries) { // create the proxy with retries
|
||||||
|
Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<>();
|
||||||
|
ClientProtocol translatorProxy =
|
||||||
|
new ClientNamenodeProtocolTranslatorPB(proxy);
|
||||||
|
return (ClientProtocol) RetryProxy.create(
|
||||||
|
ClientProtocol.class,
|
||||||
|
new DefaultFailoverProxyProvider<>(ClientProtocol.class,
|
||||||
|
translatorProxy),
|
||||||
|
methodNameToPolicyMap,
|
||||||
|
defaultPolicy);
|
||||||
|
} else {
|
||||||
|
return new ClientNamenodeProtocolTranslatorPB(proxy);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
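A hedged usage sketch of the new entry point above: how a caller could obtain a ClientProtocol proxy from a Configuration and a NameNode (or logical nameservice) URI. The URI value "hdfs://mycluster" and the wrapper class name are illustrative only; everything else is the API introduced in this file.

import java.net.URI;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.NameNodeProxiesClient;
import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class ClientProtocolProxyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Example logical nameservice URI; substitute the cluster's fs.defaultFS.
    URI nnUri = URI.create("hdfs://mycluster");
    AtomicBoolean fallbackToSimpleAuth = new AtomicBoolean(false);

    ProxyAndInfo<ClientProtocol> proxyInfo =
        NameNodeProxiesClient.createProxyWithClientProtocol(conf, nnUri,
            fallbackToSimpleAuth);
    ClientProtocol namenode = proxyInfo.getProxy();
    // namenode can now be used to issue ClientProtocol RPCs.
    System.out.println("token service=" + proxyInfo.getDelegationTokenService()
        + ", address=" + proxyInfo.getAddress());
  }
}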
@@ -47,9 +47,8 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -107,6 +106,8 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
 
   private final PeerCache peerCache;
 
+  private final Tracer tracer;
+
   /* FSInputChecker interface */
 
   /* same interface as inputStream java.io.InputStream#read()
@@ -210,9 +211,8 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
   protected synchronized int readChunk(long pos, byte[] buf, int offset,
                                        int len, byte[] checksumBuf)
     throws IOException {
-    TraceScope scope =
-        Trace.startSpan("RemoteBlockReader#readChunk(" + blockId + ")",
-            Sampler.NEVER);
+    TraceScope scope = tracer.
+        newScope("RemoteBlockReader#readChunk(" + blockId + ")");
     try {
       return readChunkImpl(pos, buf, offset, len, checksumBuf);
     } finally {
@@ -346,7 +346,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
   private RemoteBlockReader(String file, String bpid, long blockId,
       DataInputStream in, DataChecksum checksum, boolean verifyChecksum,
       long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
-      DatanodeID datanodeID, PeerCache peerCache) {
+      DatanodeID datanodeID, PeerCache peerCache, Tracer tracer) {
     // Path is used only for printing block and file information in debug
     super(new Path("/" + Block.BLOCK_FILE_PREFIX + blockId +
                     ":" + bpid + ":of:"+ file)/*too non path-like?*/,
@@ -378,6 +378,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
     bytesPerChecksum = this.checksum.getBytesPerChecksum();
     checksumSize = this.checksum.getChecksumSize();
     this.peerCache = peerCache;
+    this.tracer = tracer;
   }
 
   /**
@@ -402,7 +403,8 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
                                      String clientName, Peer peer,
                                      DatanodeID datanodeID,
                                      PeerCache peerCache,
-                                     CachingStrategy cachingStrategy)
+                                     CachingStrategy cachingStrategy,
+                                     Tracer tracer)
                                        throws IOException {
     // in and out will be closed when sock is closed (by the caller)
     final DataOutputStream out =
@@ -438,7 +440,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
 
     return new RemoteBlockReader(file, block.getBlockPoolId(), block.getBlockId(),
         in, checksum, verifyChecksum, startOffset, firstChunkOffset, len,
-        peer, datanodeID, peerCache);
+        peer, datanodeID, peerCache, tracer);
   }
 
   @Override
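The hunks above migrate RemoteBlockReader from the static htrace-3 Trace/Sampler API to a Tracer instance passed in by the caller. A minimal sketch of the resulting scope pattern, with the finally body (elided in the diff) assumed to close the scope, consistent with the htrace-core4 API:

TraceScope scope = tracer.newScope("RemoteBlockReader#readChunk(" + blockId + ")");
try {
  return readChunkImpl(pos, buf, offset, len, checksumBuf);
} finally {
  scope.close();  // assumed: the elided finally block ends the span here
}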
@@ -48,12 +48,11 @@ import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
 
 import com.google.common.annotations.VisibleForTesting;
 
+import org.apache.htrace.core.Tracer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -126,6 +125,8 @@ public class RemoteBlockReader2 implements BlockReader {
 
   private boolean sentStatusCode = false;
 
+  private final Tracer tracer;
+
   @VisibleForTesting
   public Peer getPeer() {
     return peer;
@@ -144,8 +145,8 @@ public class RemoteBlockReader2 implements BlockReader {
    }
 
    if (curDataSlice == null || curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) {
-      TraceScope scope = Trace.startSpan(
-          "RemoteBlockReader2#readNextPacket(" + blockId + ")", Sampler.NEVER);
+      TraceScope scope = tracer.newScope(
+          "RemoteBlockReader2#readNextPacket(" + blockId + ")");
      try {
        readNextPacket();
      } finally {
@@ -172,8 +173,8 @@ public class RemoteBlockReader2 implements BlockReader {
   @Override
   public synchronized int read(ByteBuffer buf) throws IOException {
     if (curDataSlice == null || curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) {
-      TraceScope scope = Trace.startSpan(
-          "RemoteBlockReader2#readNextPacket(" + blockId + ")", Sampler.NEVER);
+      TraceScope scope = tracer.newScope(
+          "RemoteBlockReader2#readNextPacket(" + blockId + ")");
       try {
         readNextPacket();
       } finally {
@@ -292,7 +293,7 @@ public class RemoteBlockReader2 implements BlockReader {
   protected RemoteBlockReader2(String file, String bpid, long blockId,
       DataChecksum checksum, boolean verifyChecksum,
       long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
-      DatanodeID datanodeID, PeerCache peerCache) {
+      DatanodeID datanodeID, PeerCache peerCache, Tracer tracer) {
     this.isLocal = DFSUtilClient.isLocalAddress(NetUtils.
         createSocketAddr(datanodeID.getXferAddr()));
     // Path is used only for printing block and file information in debug
@@ -313,6 +314,7 @@ public class RemoteBlockReader2 implements BlockReader {
     this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
     bytesPerChecksum = this.checksum.getBytesPerChecksum();
     checksumSize = this.checksum.getChecksumSize();
+    this.tracer = tracer;
   }
 
 
@@ -407,7 +409,8 @@ public class RemoteBlockReader2 implements BlockReader {
       String clientName,
       Peer peer, DatanodeID datanodeID,
       PeerCache peerCache,
-      CachingStrategy cachingStrategy) throws IOException {
+      CachingStrategy cachingStrategy,
+      Tracer tracer) throws IOException {
     // in and out will be closed when sock is closed (by the caller)
     final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
       peer.getOutputStream()));
@@ -440,7 +443,7 @@ public class RemoteBlockReader2 implements BlockReader {
 
     return new RemoteBlockReader2(file, block.getBlockPoolId(), block.getBlockId(),
         checksum, verifyChecksum, startOffset, firstChunkOffset, len, peer,
-        datanodeID, peerCache);
+        datanodeID, peerCache, tracer);
   }
 
   static void checkSuccess(
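RemoteBlockReader2 gets the same treatment. Since org.apache.htrace.core.TraceScope is Closeable in htrace 4 (an assumption, not shown in this diff), the read paths above could equivalently be written with try-with-resources:

try (TraceScope scope = tracer.newScope(
    "RemoteBlockReader2#readNextPacket(" + blockId + ")")) {
  readNextPacket();
}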
@@ -40,8 +40,9 @@ public abstract class ReplicaAccessor {
    *
    * @return The number of bytes read. If the read extends past the end
    *            of the replica, a short read count will be returned. We
-   *            will never return a negative number. We will never
-   *            return a short read count unless EOF is reached.
+   *            will should return -1 if EOF is reached and no bytes
+   *            can be returned. We will never return a short read
+   *            count unless EOF is reached.
    */
   public abstract int read(long pos, byte[] buf, int off, int len)
       throws IOException;
@@ -58,8 +59,9 @@ public abstract class ReplicaAccessor {
    *
    * @return The number of bytes read. If the read extends past the end
    *            of the replica, a short read count will be returned. We
-   *            will never return a negative number. We will never return
-   *            a short read count unless EOF is reached.
+   *            should return -1 if EOF is reached and no bytes can be
+   *            returned. We will never return a short read count unless
+   *            EOF is reached.
    */
   public abstract int read(long pos, ByteBuffer buf) throws IOException;
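A hedged sketch of a plugin read(long, byte[], int, int) implementation honoring the clarified contract above: return -1 when EOF is reached and no bytes can be returned, and never return a short count before EOF. The replicaLength field and readFully helper are stand-ins for whatever backing store a ReplicaAccessor implementation uses; they are not part of this patch.

@Override
public int read(long pos, byte[] buf, int off, int len) throws IOException {
  if (pos >= replicaLength) {
    return -1;                        // EOF and nothing left to return
  }
  int toRead = (int) Math.min(len, replicaLength - pos);
  readFully(pos, buf, off, toRead);   // fill exactly toRead bytes
  return toRead;                      // short only because EOF truncated the read
}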
@@ -36,6 +36,9 @@ public abstract class ReplicaAccessorBuilder {
   public abstract ReplicaAccessorBuilder
       setBlock(long blockId, String blockPoolId);
 
+  /** Set the genstamp of the block which is being opened. */
+  public abstract ReplicaAccessorBuilder setGenerationStamp(long genstamp);
+
   /**
    * Set whether checksums must be verified. Checksums should be skipped if
    * the user has disabled checksum verification in the configuration. Users
@@ -143,6 +143,17 @@ public interface HdfsClientConfigKeys {
   String REPLICA_ACCESSOR_BUILDER_CLASSES_KEY =
       PREFIX + "replica.accessor.builder.classes";
 
+  // The number of NN response dropped by client proactively in each RPC call.
+  // For testing NN retry cache, we can set this property with positive value.
+  String DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY =
+      "dfs.client.test.drop.namenode.response.number";
+  int DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0;
+  String DFS_CLIENT_LOCAL_INTERFACES = "dfs.client.local.interfaces";
+  // HDFS client HTrace configuration.
+  String DFS_CLIENT_HTRACE_PREFIX = "dfs.client.htrace.";
+  String DFS_USER_HOME_DIR_PREFIX_KEY = "dfs.user.home.dir.prefix";
+  String DFS_USER_HOME_DIR_PREFIX_DEFAULT = "/user";
+
   /** dfs.client.retry configuration properties */
   interface Retry {
     String PREFIX = HdfsClientConfigKeys.PREFIX + "retry.";
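A hedged sketch tying the new test-only key to the lossy-retry proxy factory introduced earlier in this diff: a test can ask the client to drop a number of NameNode responses per call to exercise the NN retry cache. The URI value is illustrative, and the way DFSClient itself consumes the key is assumed here rather than shown by the patch.

Configuration conf = new Configuration();
conf.setInt(
    HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);

int numResponseToDrop = conf.getInt(
    HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
    HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
NameNodeProxiesClient.ProxyAndInfo<ClientProtocol> info =
    NameNodeProxiesClient.createProxyWithLossyRetryHandler(conf,
        URI.create("hdfs://mycluster"), ClientProtocol.class,
        numResponseToDrop, new AtomicBoolean(false));  // null if the URI is not HA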
@@ -27,18 +27,18 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSOutputStream;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import com.google.common.annotations.VisibleForTesting;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * <p>
@@ -73,7 +73,7 @@ import com.google.common.annotations.VisibleForTesting;
  */
 @InterfaceAudience.Private
 public class LeaseRenewer {
-  static final Log LOG = LogFactory.getLog(LeaseRenewer.class);
+  static final Logger LOG = LoggerFactory.getLogger(LeaseRenewer.class);
 
   static final long LEASE_RENEWER_GRACE_DEFAULT = 60*1000L;
   static final long LEASE_RENEWER_SLEEP_DEFAULT = 1000L;
@@ -165,7 +165,7 @@ public class LeaseRenewer {
   /** The time in milliseconds that the map became empty. */
   private long emptyTime = Long.MAX_VALUE;
   /** A fixed lease renewal time period in milliseconds */
-  private long renewal = HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD/2;
+  private long renewal = HdfsConstants.LEASE_SOFTLIMIT_PERIOD / 2;
 
   /** A daemon for renewing lease */
   private Daemon daemon = null;
@@ -378,7 +378,7 @@ public class LeaseRenewer {
 
     //update renewal time
     if (renewal == dfsc.getConf().getHdfsTimeout()/2) {
-      long min = HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD;
+      long min = HdfsConstants.LEASE_SOFTLIMIT_PERIOD;
       for(DFSClient c : dfsclients) {
         final int timeout = c.getConf().getHdfsTimeout();
         if (timeout > 0 && timeout < min) {
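A small illustration of what the commons-logging to SLF4J switch above buys: parameterized messages avoid string concatenation when the level is disabled. The class name and values below are illustrative only.

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Slf4jLoggingExample {
  static final Logger LOG = LoggerFactory.getLogger(Slf4jLoggingExample.class);

  public static void main(String[] args) {
    long renewalMillis = 30_000L;  // e.g. LEASE_SOFTLIMIT_PERIOD / 2
    // The message is only formatted if debug logging is enabled.
    LOG.debug("lease renewal period set to {} ms", renewalMillis);
  }
}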
@@ -25,11 +25,10 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.BatchedRemoteIterator;
 import org.apache.hadoop.fs.InvalidRequestException;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
 
 import com.google.common.base.Preconditions;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
 
 /**
  * CacheDirectiveIterator is a remote iterator that iterates cache directives.
@@ -42,14 +41,14 @@ public class CacheDirectiveIterator
 
   private CacheDirectiveInfo filter;
   private final ClientProtocol namenode;
-  private final Sampler<?> traceSampler;
+  private final Tracer tracer;
 
   public CacheDirectiveIterator(ClientProtocol namenode,
-      CacheDirectiveInfo filter, Sampler<?> traceSampler) {
+      CacheDirectiveInfo filter, Tracer tracer) {
     super(0L);
     this.namenode = namenode;
     this.filter = filter;
-    this.traceSampler = traceSampler;
+    this.tracer = tracer;
   }
 
   private static CacheDirectiveInfo removeIdFromFilter(CacheDirectiveInfo filter) {
@@ -94,7 +93,7 @@ public class CacheDirectiveIterator
   public BatchedEntries<CacheDirectiveEntry> makeRequest(Long prevKey)
       throws IOException {
     BatchedEntries<CacheDirectiveEntry> entries = null;
-    TraceScope scope = Trace.startSpan("listCacheDirectives", traceSampler);
+    TraceScope scope = tracer.newScope("listCacheDirectives");
     try {
       entries = namenode.listCacheDirectives(prevKey, filter);
     } catch (IOException e) {
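A hedged usage sketch of the iterator with its new Tracer parameter; the namenode, filter and tracer variables stand for values the surrounding client (for example DFSClient) already holds and are not defined by this patch.

RemoteIterator<CacheDirectiveEntry> it =
    new CacheDirectiveIterator(namenode, filter, tracer);
while (it.hasNext()) {
  CacheDirectiveEntry entry = it.next();
  // each batch behind the iterator is fetched inside a "listCacheDirectives" span
}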
@@ -23,9 +23,8 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.BatchedRemoteIterator;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
 
 /**
  * CachePoolIterator is a remote iterator that iterates cache pools.
@@ -37,18 +36,18 @@ public class CachePoolIterator
     extends BatchedRemoteIterator<String, CachePoolEntry> {
 
   private final ClientProtocol namenode;
-  private final Sampler traceSampler;
+  private final Tracer tracer;
 
-  public CachePoolIterator(ClientProtocol namenode, Sampler traceSampler) {
+  public CachePoolIterator(ClientProtocol namenode, Tracer tracer) {
     super("");
     this.namenode = namenode;
-    this.traceSampler = traceSampler;
+    this.tracer = tracer;
   }
 
   @Override
   public BatchedEntries<CachePoolEntry> makeRequest(String prevKey)
       throws IOException {
-    TraceScope scope = Trace.startSpan("listCachePools", traceSampler);
+    TraceScope scope = tracer.newScope("listCachePools");
     try {
       return namenode.listCachePools(prevKey);
     } finally {
@@ -23,9 +23,8 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.BatchedRemoteIterator;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
 
 /**
  * EncryptionZoneIterator is a remote iterator that iterates over encryption
@@ -37,19 +36,18 @@ public class EncryptionZoneIterator
     extends BatchedRemoteIterator<Long, EncryptionZone> {
 
   private final ClientProtocol namenode;
-  private final Sampler<?> traceSampler;
+  private final Tracer tracer;
 
-  public EncryptionZoneIterator(ClientProtocol namenode,
-      Sampler<?> traceSampler) {
+  public EncryptionZoneIterator(ClientProtocol namenode, Tracer tracer) {
     super(Long.valueOf(0));
     this.namenode = namenode;
-    this.traceSampler = traceSampler;
+    this.tracer = tracer;
   }
 
   @Override
   public BatchedEntries<EncryptionZone> makeRequest(Long prevId)
       throws IOException {
-    TraceScope scope = Trace.startSpan("listEncryptionZones", traceSampler);
+    TraceScope scope = tracer.newScope("listEncryptionZones");
     try {
       return namenode.listEncryptionZones(prevId);
     } finally {
@@ -93,6 +93,29 @@ public final class HdfsConstants {
   //for write pipeline
   public static final int WRITE_TIMEOUT_EXTENSION = 5 * 1000;
 
+  /**
+   * For a HDFS client to write to a file, a lease is granted; During the lease
+   * period, no other client can write to the file. The writing client can
+   * periodically renew the lease. When the file is closed, the lease is
+   * revoked. The lease duration is bound by this soft limit and a
+   * {@link HdfsConstants#LEASE_HARDLIMIT_PERIOD hard limit}. Until the
+   * soft limit expires, the writer has sole write access to the file. If the
+   * soft limit expires and the client fails to close the file or renew the
+   * lease, another client can preempt the lease.
+   */
+  public static final long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
+  /**
+   * For a HDFS client to write to a file, a lease is granted; During the lease
+   * period, no other client can write to the file. The writing client can
+   * periodically renew the lease. When the file is closed, the lease is
+   * revoked. The lease duration is bound by a
+   * {@link HdfsConstants#LEASE_SOFTLIMIT_PERIOD soft limit} and this hard
+   * limit. If after the hard limit expires and the client has failed to renew
+   * the lease, HDFS assumes that the client has quit and will automatically
+   * close the file on behalf of the writer, and recover the lease.
+   */
+  public static final long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD;
+
   // SafeMode actions
   public enum SafeModeAction {
     SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET
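A quick arithmetic check of the two constants added above: the soft limit is 60 * 1000 ms = 1 minute, and the hard limit is 60 times that, 3,600,000 ms = 1 hour.

public class LeaseLimitCheck {
  public static void main(String[] args) {
    long soft = 60 * 1000;   // LEASE_SOFTLIMIT_PERIOD: 60,000 ms = 1 minute
    long hard = 60 * soft;   // LEASE_HARDLIMIT_PERIOD: 3,600,000 ms = 1 hour
    System.out.println(soft + " ms soft limit, " + hard + " ms hard limit");
  }
}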
@@ -35,10 +35,8 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceInfo;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.SpanId;
+import org.apache.htrace.core.Tracer;
 
 /**
  * Static utilities for dealing with the protocol buffers used by the
@@ -89,39 +87,21 @@ public abstract class DataTransferProtoUtil {
     BaseHeaderProto.Builder builder = BaseHeaderProto.newBuilder()
       .setBlock(PBHelperClient.convert(blk))
       .setToken(PBHelperClient.convert(blockToken));
-    if (Trace.isTracing()) {
-      Span s = Trace.currentSpan();
+    SpanId spanId = Tracer.getCurrentSpanId();
+    if (spanId.isValid()) {
       builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
-          .setTraceId(s.getTraceId())
-          .setParentId(s.getSpanId()));
+          .setTraceId(spanId.getHigh())
+          .setParentId(spanId.getLow()));
     }
     return builder.build();
   }
 
-  public static TraceInfo fromProto(DataTransferTraceInfoProto proto) {
-    if (proto == null) return null;
-    if (!proto.hasTraceId()) return null;
-    return new TraceInfo(proto.getTraceId(), proto.getParentId());
-  }
-
-  public static TraceScope continueTraceSpan(ClientOperationHeaderProto header,
-      String description) {
-    return continueTraceSpan(header.getBaseHeader(), description);
-  }
-
-  public static TraceScope continueTraceSpan(BaseHeaderProto header,
-      String description) {
-    return continueTraceSpan(header.getTraceInfo(), description);
-  }
-
-  public static TraceScope continueTraceSpan(DataTransferTraceInfoProto proto,
-      String description) {
-    TraceScope scope = null;
-    TraceInfo info = fromProto(proto);
-    if (info != null) {
-      scope = Trace.startSpan(description, info);
-    }
-    return scope;
+  public static SpanId fromProto(DataTransferTraceInfoProto proto) {
+    if ((proto != null) && proto.hasTraceId() &&
+        proto.hasParentId()) {
+      return new SpanId(proto.getTraceId(), proto.getParentId());
+    }
+    return null;
   }
 
   public static void checkBlockOpStatus(
@@ -137,6 +117,7 @@ public abstract class DataTransferProtoUtil {
     } else {
       throw new IOException(
         "Got error"
+        + ", status=" + response.getStatus().name()
         + ", status message " + response.getMessage()
        + ", " + logInfo
       );
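A hedged sketch of the SpanId round trip implied by the hunks above: the 128-bit htrace-4 SpanId is split into its high and low 64-bit halves, carried in the proto's traceId and parentId fields, and rebuilt by fromProto(). Only the constructor and accessors already visible in the diff are used; the protobuf build() call is the standard generated API.

SpanId original = new SpanId(1L, 2L);
DataTransferTraceInfoProto proto = DataTransferTraceInfoProto.newBuilder()
    .setTraceId(original.getHigh())
    .setParentId(original.getLow())
    .build();
SpanId rebuilt = DataTransferProtoUtil.fromProto(proto);
// rebuilt should equal original under these assumptions.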
@@ -48,8 +48,8 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
 
-import org.apache.htrace.Trace;
-import org.apache.htrace.Span;
+import org.apache.htrace.core.SpanId;
+import org.apache.htrace.core.Tracer;
 
 import com.google.protobuf.Message;
 
@@ -200,10 +200,11 @@ public class Sender implements DataTransferProtocol {
     ReleaseShortCircuitAccessRequestProto.Builder builder =
         ReleaseShortCircuitAccessRequestProto.newBuilder().
         setSlotId(PBHelperClient.convert(slotId));
-    if (Trace.isTracing()) {
-      Span s = Trace.currentSpan();
-      builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
-          .setTraceId(s.getTraceId()).setParentId(s.getSpanId()));
+    SpanId spanId = Tracer.getCurrentSpanId();
+    if (spanId.isValid()) {
+      builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder().
+          setTraceId(spanId.getHigh()).
+          setParentId(spanId.getLow()));
     }
     ReleaseShortCircuitAccessRequestProto proto = builder.build();
     send(out, Op.RELEASE_SHORT_CIRCUIT_FDS, proto);
@@ -214,10 +215,11 @@ public class Sender implements DataTransferProtocol {
     ShortCircuitShmRequestProto.Builder builder =
         ShortCircuitShmRequestProto.newBuilder().
         setClientName(clientName);
-    if (Trace.isTracing()) {
-      Span s = Trace.currentSpan();
-      builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
-          .setTraceId(s.getTraceId()).setParentId(s.getSpanId()));
+    SpanId spanId = Tracer.getCurrentSpanId();
+    if (spanId.isValid()) {
+      builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder().
+          setTraceId(spanId.getHigh()).
+          setParentId(spanId.getLow()));
     }
     ShortCircuitShmRequestProto proto = builder.build();
     send(out, Op.REQUEST_SHORT_CIRCUIT_SHM, proto);
@@ -26,7 +26,7 @@ import org.apache.hadoop.io.retry.FailoverProxyProvider;
 public abstract class AbstractNNFailoverProxyProvider<T> implements
     FailoverProxyProvider <T> {
 
-  protected AtomicBoolean fallbackToSimpleAuth;
+  private AtomicBoolean fallbackToSimpleAuth;
 
   /**
    * Inquire whether logical HA URI is used for the implementation. If it is
@@ -48,4 +48,8 @@ public abstract class AbstractNNFailoverProxyProvider<T> implements
       AtomicBoolean fallbackToSimpleAuth) {
     this.fallbackToSimpleAuth = fallbackToSimpleAuth;
   }
+
+  public synchronized AtomicBoolean getFallbackToSimpleAuth() {
+    return fallbackToSimpleAuth;
+  }
 }
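With the field made private above, a concrete provider is expected to read the flag through the new accessor when it builds its proxies. A hedged fragment (the surrounding method and the nnAddr, conf and ugi variables are illustrative, not part of the patch):

ClientProtocol proxy = NameNodeProxiesClient.createNonHAProxyWithClientProtocol(
    nnAddr, conf, ugi, false, getFallbackToSimpleAuth());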
@@ -17,18 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import java.io.Closeable;
 import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.URI;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.retry.FailoverProxyProvider;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import com.google.common.base.Preconditions;
 
 /**
  * A NNFailoverProxyProvider implementation which wrapps old implementations
Some files were not shown because too many files have changed in this diff.