Merge remote-tracking branch 'apache/trunk' into HDFS-7285

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
	hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
	hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

Change-Id: Ic7946c4ea35bed587fe879ce58b959b25ecc0823
Zhe Zhang 2015-09-29 01:39:16 -07:00
commit 8fd5520246
445 changed files with 12424 additions and 4422 deletions


@ -599,6 +599,8 @@ function hadoop_usage
echo "--run-tests Run all relevant tests below the base directory"
echo "--skip-system-plugins Do not load plugins from ${BINDIR}/test-patch.d"
echo "--testlist=<list> Specify which subsystem tests to use (comma delimited)"
echo "--test-parallel=<bool> Run multiple tests in parallel (default false in developer mode, true in Jenkins mode)"
echo "--test-threads=<int> Number of tests to run in parallel (default defined in ${PROJECT_NAME} build)"
echo "Shell binary overrides:"
echo "--awk-cmd=<cmd> The 'awk' command to use (default 'awk')"
@ -691,6 +693,7 @@ function parse_args
;;
--jenkins)
JENKINS=true
TEST_PARALLEL=${TEST_PARALLEL:-true}
;;
--jira-cmd=*)
JIRACLI=${i#*=}
@ -749,6 +752,12 @@ function parse_args
add_test "${j}"
done
;;
--test-parallel=*)
TEST_PARALLEL=${i#*=}
;;
--test-threads=*)
TEST_THREADS=${i#*=}
;;
--wget-cmd=*)
WGET=${i#*=}
;;
@ -811,6 +820,13 @@ function parse_args
PATCH_DIR=$(cd -P -- "${PATCH_DIR}" >/dev/null && pwd -P)
GITDIFFLINES=${PATCH_DIR}/gitdifflines.txt
if [[ ${TEST_PARALLEL} == "true" ]] ; then
PARALLEL_TESTS_PROFILE=-Pparallel-tests
if [[ -n ${TEST_THREADS:-} ]]; then
TESTS_THREAD_COUNT="-DtestsThreadCount=$TEST_THREADS"
fi
fi
}
## @description Locate the pom.xml file for a given directory
@ -2245,13 +2261,22 @@ function check_unittests
test_logfile=${PATCH_DIR}/testrun_${module_suffix}.txt
echo " Running tests in ${module_suffix}"
echo_and_redirect "${test_logfile}" "${MVN}" "${MAVEN_ARGS[@]}" clean install -fae ${NATIVE_PROFILE} ${REQUIRE_TEST_LIB_HADOOP} -D${PROJECT_NAME}PatchProcess
# Temporary hack to run the parallel tests profile only for hadoop-common.
# This code will be removed once hadoop-hdfs is ready for parallel test
# execution.
if [[ ${module} == "hadoop-common-project/hadoop-common" ]] ; then
OPTIONAL_PARALLEL_TESTS_PROFILE=${PARALLEL_TESTS_PROFILE}
else
unset OPTIONAL_PARALLEL_TESTS_PROFILE
fi
# shellcheck disable=2086
echo_and_redirect "${test_logfile}" "${MVN}" "${MAVEN_ARGS[@]}" clean install -fae ${NATIVE_PROFILE} ${REQUIRE_TEST_LIB_HADOOP} ${OPTIONAL_PARALLEL_TESTS_PROFILE} ${TESTS_THREAD_COUNT} -D${PROJECT_NAME}PatchProcess
test_build_result=$?
add_jira_footer "${module_suffix} test log" "@@BASE@@/testrun_${module_suffix}.txt"
# shellcheck disable=2016
module_test_timeouts=$(${AWK} '/^Running / { if (last) { print last } last=$2 } /^Tests run: / { last="" }' "${test_logfile}")
module_test_timeouts=$(${AWK} '/^Running / { array[$NF] = 1 } /^Tests run: .* in / { delete array[$NF] } END { for (x in array) { print x } }' "${test_logfile}")
if [[ -n "${module_test_timeouts}" ]] ; then
test_timeouts="${test_timeouts} ${module_test_timeouts}"
result=1


@ -348,10 +348,6 @@ Trunk (Unreleased)
HADOOP-8813. Add InterfaceAudience and InterfaceStability annotations
to RPC Server and Client classes. (Brandon Li via suresh)
HADOOP-8436. NPE In getLocalPathForWrite ( path, conf ) when the
required context item is not configured
(Brahma Reddy Battula via harsh)
HADOOP-8386. hadoop script doesn't work if 'cd' prints to stdout
(default behavior in some bash setups (esp. Ubuntu))
(Chiristopher Berner and Andy Isaacson via harsh)
@ -791,6 +787,8 @@ Release 2.8.0 - UNRELEASED
HADOOP-12428. Fix inconsistency between log-level guards and statements.
(Jagadesh Kiran N and Jackie Chang via ozawa)
HADOOP-12446. Undeprecate createNonRecursive() (Ted Yu via kihwal)
OPTIMIZATIONS
HADOOP-11785. Reduce the number of listStatus operation in distcp
@ -826,6 +824,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-11878. FileContext#fixRelativePart should check for not null for a
more informative exception. (Brahma Reddy Battula via kasha)
HADOOP-11984. Enable parallel JUnit tests in pre-commit.
(Chris Nauroth via vinayakumarb)
BUG FIXES
HADOOP-12374. Updated expunge command description.
@ -1084,6 +1085,19 @@ Release 2.8.0 - UNRELEASED
HADOOP-12386. RetryPolicies.RETRY_FOREVER should be able to specify a
retry interval. (Sunil G via wangda)
HADOOP-8436. NPE In getLocalPathForWrite ( path, conf ) when the
required context item is not configured
(Brahma Reddy Battula via harsh)
HADOOP-12252. LocalDirAllocator should not throw NPE with empty string
configuration. (Zhihai Xu)
HADOOP-11918. Listing an empty s3a root directory throws FileNotFound.
(Lei (Eddy) Xu via cnauroth)
HADOOP-12440. TestRPC#testRPCServerShutdown did not produce the desired
thread states before shutting down. (Xiao Chen via mingma)
OPTIMIZATIONS
HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()
@ -1144,6 +1158,15 @@ Release 2.8.0 - UNRELEASED
HADOOP-12417. TestWebDelegationToken failing with port in use.
(Mingliang Liu via wheat9)
HADOOP-12438. Reset RawLocalFileSystem.useDeprecatedFileStatus in
TestLocalFileSystem. (Chris Nauroth via wheat9)
HADOOP-12437. Allow SecurityUtil to lookup alternate hostnames.
(Arpit Agarwal)
HADOOP-12442. Display help if the command option to 'hdfs dfs' is not valid
(nijel via vinayakumarb)
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
@ -1924,7 +1947,7 @@ Release 2.6.2 - UNRELEASED
BUG FIXES
Release 2.6.1 - 2015-09-09
Release 2.6.1 - 2015-09-23
INCOMPATIBLE CHANGES


@ -246,7 +246,7 @@
<dependency>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
<artifactId>htrace-core4</artifactId>
</dependency>
<dependency>
<groupId>org.apache.zookeeper</groupId>
@ -878,12 +878,53 @@
<id>parallel-tests</id>
<build>
<plugins>
<plugin>
<artifactId>maven-antrun-plugin</artifactId>
<executions>
<execution>
<id>create-parallel-tests-dirs</id>
<phase>test-compile</phase>
<configuration>
<target>
<script language="javascript"><![CDATA[
var baseDirs = [
"${test.build.data}",
"${test.build.dir}",
"${hadoop.tmp.dir}" ];
for (var i in baseDirs) {
for (var j = 1; j <= ${testsThreadCount}; ++j) {
var mkdir = project.createTask("mkdir");
mkdir.setDir(new java.io.File(baseDirs[i], j));
mkdir.perform();
}
}
]]></script>
</target>
</configuration>
<goals>
<goal>run</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
<forkCount>${testsThreadCount}</forkCount>
<argLine>-Xmx1024m -XX:+HeapDumpOnOutOfMemoryError -DminiClusterDedicatedDirs=true</argLine>
<reuseForks>false</reuseForks>
<argLine>${maven-surefire-plugin.argLine} -DminiClusterDedicatedDirs=true</argLine>
<systemPropertyVariables>
<test.build.data>${test.build.data}/${surefire.forkNumber}</test.build.data>
<test.build.dir>${test.build.dir}/${surefire.forkNumber}</test.build.dir>
<hadoop.tmp.dir>${hadoop.tmp.dir}/${surefire.forkNumber}</hadoop.tmp.dir>
<!-- Due to a Maven quirk, setting this to just -->
<!-- surefire.forkNumber won't do the parameter substitution. -->
<!-- Putting a prefix in front of it like "fork-" makes it -->
<!-- work. -->
<test.unique.fork.id>fork-${surefire.forkNumber}</test.unique.fork.id>
</systemPropertyVariables>
</configuration>
</plugin>
</plugins>
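For context on the profile above: surefire substitutes `${surefire.forkNumber}` into the listed system properties, so each forked JVM sees its own scratch directories. A minimal, hypothetical helper showing how a test could consume those properties; the class name and fallback defaults are illustrative, only the property names come from the profile.

```java
import java.io.File;

public class ForkScratchDirs {
  /** Per-fork data directory, e.g. "<test.build.data>/3" when running in fork 3. */
  public static File testDataDir() {
    return new File(System.getProperty("test.build.data", "target/test/data"));
  }

  /** Per-fork id, e.g. "fork-3"; the "fork-" prefix works around the Maven quirk noted above. */
  public static String forkId() {
    return System.getProperty("test.unique.fork.id", "fork-1");
  }

  public static void main(String[] args) {
    System.out.println(testDataDir() + " / " + forkId());
  }
}
```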


@ -310,4 +310,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
public static final String NFS_EXPORTS_ALLOWED_HOSTS_SEPARATOR = ";";
public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY = "nfs.exports.allowed.hosts";
public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT = "* rw";
// HDFS client HTrace configuration.
public static final String FS_CLIENT_HTRACE_PREFIX = "fs.client.htrace.";
}


@ -294,6 +294,12 @@ public class CommonConfigurationKeysPublic {
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_SECURITY_AUTH_TO_LOCAL =
"hadoop.security.auth_to_local";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_SECURITY_DNS_INTERFACE_KEY =
"hadoop.security.dns.interface";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_SECURITY_DNS_NAMESERVER_KEY =
"hadoop.security.dns.nameserver";
/** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
public static final String HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN =


@ -21,8 +21,8 @@ package org.apache.hadoop.fs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.DataChecksum;
import org.apache.htrace.NullScope;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import java.io.IOException;
import java.io.OutputStream;
@ -43,6 +43,8 @@ abstract public class FSOutputSummer extends OutputStream {
private byte checksum[];
// The number of valid bytes in the buffer.
private int count;
// The HTrace tracer to use
private Tracer tracer;
// We want this value to be a multiple of 3 because the native code checksums
// 3 chunks simultaneously. The chosen value of 9 strikes a balance between
@ -201,7 +203,7 @@ abstract public class FSOutputSummer extends OutputStream {
}
protected TraceScope createWriteTraceScope() {
return NullScope.INSTANCE;
return null;
}
/** Generate checksums for the given data chunks and output chunks & checksums
@ -219,7 +221,9 @@ abstract public class FSOutputSummer extends OutputStream {
getChecksumSize());
}
} finally {
scope.close();
if (scope != null) {
scope.close();
}
}
}
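Since createWriteTraceScope() now returns null instead of NullScope.INSTANCE when tracing is disabled, callers have to guard the close, as the finally block above does. A standalone sketch of that null-safe pattern; the class and method names are illustrative, not part of the patch.

```java
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

final class NullSafeScope {
  /** Returns a real scope when a tracer is available, otherwise null. */
  static TraceScope maybeScope(Tracer tracer, String name) {
    return (tracer == null) ? null : tracer.newScope(name);
  }

  /** Mirrors the finally block in FSOutputSummer: close only if a scope was created. */
  static void runTraced(Tracer tracer, String name, Runnable work) {
    TraceScope scope = maybeScope(tracer, name);
    try {
      work.run();
    } finally {
      if (scope != null) {
        scope.close();
      }
    }
  }
}
```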


@ -61,6 +61,7 @@ import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ShutdownHookManager;
import com.google.common.base.Preconditions;
import org.apache.htrace.core.Tracer;
/**
* The FileContext class provides an interface for users of the Hadoop
@ -222,12 +223,14 @@ public class FileContext {
private final Configuration conf;
private final UserGroupInformation ugi;
final boolean resolveSymlinks;
private final Tracer tracer;
private FileContext(final AbstractFileSystem defFs,
final FsPermission theUmask, final Configuration aConf) {
defaultFS = defFs;
umask = FsPermission.getUMask(aConf);
conf = aConf;
tracer = FsTracer.get(aConf);
try {
ugi = UserGroupInformation.getCurrentUser();
} catch (IOException e) {
@ -2721,4 +2724,8 @@ public class FileContext {
throws IOException {
return defaultFS.getAllStoragePolicies();
}
Tracer getTracer() {
return tracer;
}
}


@ -67,9 +67,8 @@ import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.StringUtils;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.Tracer;
import org.apache.htrace.core.TraceScope;
import com.google.common.annotations.VisibleForTesting;
@ -129,6 +128,13 @@ public abstract class FileSystem extends Configured implements Closeable {
private Set<Path> deleteOnExit = new TreeSet<Path>();
boolean resolveSymlinks;
private Tracer tracer;
protected final Tracer getTracer() {
return tracer;
}
/**
* This method adds a file system for testing so that we can find it later. It
* is only for testing.
@ -1083,9 +1089,7 @@ public abstract class FileSystem extends Configured implements Closeable {
* @param progress
* @throws IOException
* @see #setPermission(Path, FsPermission)
* @deprecated API only for 0.20-append
*/
@Deprecated
public FSDataOutputStream createNonRecursive(Path f,
boolean overwrite,
int bufferSize, short replication, long blockSize,
@ -1108,9 +1112,7 @@ public abstract class FileSystem extends Configured implements Closeable {
* @param progress
* @throws IOException
* @see #setPermission(Path, FsPermission)
* @deprecated API only for 0.20-append
*/
@Deprecated
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
boolean overwrite, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
@ -1133,9 +1135,7 @@ public abstract class FileSystem extends Configured implements Closeable {
* @param progress
* @throws IOException
* @see #setPermission(Path, FsPermission)
* @deprecated API only for 0.20-append
*/
@Deprecated
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {
@ -2706,14 +2706,13 @@ public abstract class FileSystem extends Configured implements Closeable {
private static FileSystem createFileSystem(URI uri, Configuration conf
) throws IOException {
TraceScope scope = Trace.startSpan("FileSystem#createFileSystem");
Span span = scope.getSpan();
if (span != null) {
span.addKVAnnotation("scheme", uri.getScheme());
}
Tracer tracer = FsTracer.get(conf);
TraceScope scope = tracer.newScope("FileSystem#createFileSystem");
scope.addKVAnnotation("scheme", uri.getScheme());
try {
Class<?> clazz = getFileSystemClass(uri.getScheme(), conf);
FileSystem fs = (FileSystem)ReflectionUtils.newInstance(clazz, conf);
fs.tracer = tracer;
fs.initialize(uri, conf);
return fs;
} finally {


@ -203,7 +203,6 @@ public class FilterFileSystem extends FileSystem {
@Override
@Deprecated
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {


@ -32,16 +32,13 @@ import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.shell.Command;
import org.apache.hadoop.fs.shell.CommandFactory;
import org.apache.hadoop.fs.shell.FsCommand;
import org.apache.hadoop.tracing.SpanReceiverHost;
import org.apache.hadoop.tools.TableListing;
import org.apache.hadoop.tracing.TraceUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.htrace.Sampler;
import org.apache.htrace.SamplerBuilder;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
/** Provide command line access to a FileSystem. */
@InterfaceAudience.Private
@ -54,13 +51,12 @@ public class FsShell extends Configured implements Tool {
private FileSystem fs;
private Trash trash;
protected CommandFactory commandFactory;
private Sampler traceSampler;
private final String usagePrefix =
"Usage: hadoop fs [generic options]";
private SpanReceiverHost spanReceiverHost;
static final String SEHLL_HTRACE_PREFIX = "dfs.shell.htrace.";
private Tracer tracer;
static final String SHELL_HTRACE_PREFIX = "fs.shell.htrace.";
/**
* Default ctor with no configuration. Be sure to invoke
@ -102,8 +98,9 @@ public class FsShell extends Configured implements Tool {
commandFactory.addObject(new Usage(), "-usage");
registerCommands(commandFactory);
}
this.spanReceiverHost =
SpanReceiverHost.get(getConf(), SEHLL_HTRACE_PREFIX);
this.tracer = new Tracer.Builder("FsShell").
conf(TraceUtils.wrapHadoopConf(SHELL_HTRACE_PREFIX, getConf())).
build();
}
protected void registerCommands(CommandFactory factory) {
@ -289,8 +286,6 @@ public class FsShell extends Configured implements Tool {
public int run(String argv[]) throws Exception {
// initialize FsShell
init();
traceSampler = new SamplerBuilder(TraceUtils.
wrapHadoopConf(SEHLL_HTRACE_PREFIX, getConf())).build();
int exitCode = -1;
if (argv.length < 1) {
printUsage(System.err);
@ -302,7 +297,7 @@ public class FsShell extends Configured implements Tool {
if (instance == null) {
throw new UnknownCommandException();
}
TraceScope scope = Trace.startSpan(instance.getCommandName(), traceSampler);
TraceScope scope = tracer.newScope(instance.getCommandName());
if (scope.getSpan() != null) {
String args = StringUtils.join(" ", argv);
if (args.length() > 2048) {
@ -317,6 +312,7 @@ public class FsShell extends Configured implements Tool {
}
} catch (IllegalArgumentException e) {
displayError(cmd, e.getLocalizedMessage());
printUsage(System.err);
if (instance != null) {
printInstanceUsage(System.err, instance);
}
@ -327,6 +323,7 @@ public class FsShell extends Configured implements Tool {
e.printStackTrace(System.err);
}
}
tracer.close();
return exitCode;
}
@ -353,9 +350,6 @@ public class FsShell extends Configured implements Tool {
fs.close();
fs = null;
}
if (this.spanReceiverHost != null) {
this.spanReceiverHost.closeReceivers();
}
}
/**


@ -0,0 +1,64 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tracing.TraceUtils;
import org.apache.htrace.core.Tracer;
/**
* Holds the HTrace Tracer used for FileSystem operations.
*
* Ideally, this would be owned by the DFSClient, rather than global. However,
* the FileContext API may create a new DFSClient for each operation in some
* cases. Because of this, we cannot store this Tracer inside DFSClient. See
* HADOOP-6356 for details.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class FsTracer {
private static Tracer instance;
public static synchronized Tracer get(Configuration conf) {
if (instance == null) {
instance = new Tracer.Builder("FSClient").
conf(TraceUtils.wrapHadoopConf(CommonConfigurationKeys.
FS_CLIENT_HTRACE_PREFIX, conf)).
build();
}
return instance;
}
@VisibleForTesting
public static synchronized void clear() {
if (instance == null) {
return;
}
try {
instance.close();
} finally {
instance = null;
}
}
private FsTracer() {
}
}
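A minimal usage sketch for the FsTracer singleton above, assuming a Configuration carrying fs.client.htrace.* settings; the span name is arbitrary, and FsTracer.clear() is the test-only reset marked @VisibleForTesting.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsTracer;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class FsTracerExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    Tracer tracer = FsTracer.get(conf);        // one process-wide tracer
    TraceScope scope = tracer.newScope("example#operation");
    try {
      // ... filesystem client work to be traced ...
    } finally {
      scope.close();
    }
    FsTracer.clear();                          // tests only: close and reset the singleton
  }
}
```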


@ -28,9 +28,8 @@ import org.apache.commons.logging.Log;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
@InterfaceAudience.Private
@InterfaceStability.Unstable
@ -41,12 +40,14 @@ class Globber {
private final FileContext fc;
private final Path pathPattern;
private final PathFilter filter;
private final Tracer tracer;
public Globber(FileSystem fs, Path pathPattern, PathFilter filter) {
this.fs = fs;
this.fc = null;
this.pathPattern = pathPattern;
this.filter = filter;
this.tracer = fs.getTracer();
}
public Globber(FileContext fc, Path pathPattern, PathFilter filter) {
@ -54,6 +55,7 @@ class Globber {
this.fc = fc;
this.pathPattern = pathPattern;
this.filter = filter;
this.tracer = fc.getTracer();
}
private FileStatus getFileStatus(Path path) throws IOException {
@ -140,11 +142,8 @@ class Globber {
}
public FileStatus[] glob() throws IOException {
TraceScope scope = Trace.startSpan("Globber#glob");
Span span = scope.getSpan();
if (span != null) {
span.addKVAnnotation("pattern", pathPattern.toUri().getPath());
}
TraceScope scope = tracer.newScope("Globber#glob");
scope.addKVAnnotation("pattern", pathPattern.toUri().getPath());
try {
return doGlob();
} finally {


@ -713,7 +713,6 @@ public class HarFileSystem extends FileSystem {
throw new IOException("Har: create not allowed.");
}
@SuppressWarnings("deprecation")
@Override
public FSDataOutputStream createNonRecursive(Path f, boolean overwrite,
int bufferSize, short replication, long blockSize, Progressable progress)


@ -250,9 +250,9 @@ public class LocalDirAllocator {
private int dirNumLastAccessed;
private Random dirIndexRandomizer = new Random();
private FileSystem localFS;
private DF[] dirDF;
private DF[] dirDF = new DF[0];
private String contextCfgItemName;
private String[] localDirs;
private String[] localDirs = new String[0];
private String savedLocalDirs = "";
public AllocatorPerContext(String contextCfgItemName) {


@ -319,7 +319,6 @@ public class RawLocalFileSystem extends FileSystem {
}
@Override
@Deprecated
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {


@ -238,7 +238,13 @@ abstract class CommandWithDestination extends FsCommand {
e.setTargetPath(dstPath.toString());
throw e;
}
if (dstPath.startsWith(srcPath+Path.SEPARATOR)) {
// When a path is normalized, all trailing slashes are removed
// except for the root
if(!srcPath.endsWith(Path.SEPARATOR)) {
srcPath += Path.SEPARATOR;
}
if(dstPath.startsWith(srcPath)) {
PathIOException e = new PathIOException(src.toString(),
"is a subdirectory of itself");
e.setTargetPath(target.toString());
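The appended separator above matters because a plain string prefix test misfires when one path is a textual prefix of a sibling. A self-contained illustration of the corrected check, with Path.SEPARATOR written as "/":

```java
public class SubdirCheck {
  /** Same idea as the fixed check above: compare against "src + separator". */
  static boolean isSubdirectory(String srcPath, String dstPath) {
    String prefix = srcPath.endsWith("/") ? srcPath : srcPath + "/";
    return dstPath.startsWith(prefix);
  }

  public static void main(String[] args) {
    System.out.println(isSubdirectory("/user/a", "/user/a/b")); // true
    System.out.println(isSubdirectory("/user/a", "/user/ab"));  // false: the old false positive
    System.out.println(isSubdirectory("/", "/user"));           // true: root keeps its trailing slash
  }
}
```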


@ -185,7 +185,6 @@ class ChRootedFileSystem extends FilterFileSystem {
}
@Override
@Deprecated
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
Progressable progress) throws IOException {


@ -92,7 +92,8 @@ import org.apache.hadoop.util.ProtoUtil;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.htrace.Trace;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.Tracer;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@ -722,8 +723,9 @@ public class Client {
if (LOG.isDebugEnabled()) {
LOG.debug("Connecting to "+server);
}
if (Trace.isTracing()) {
Trace.addTimelineAnnotation("IPC client connecting to " + server);
Span span = Tracer.getCurrentSpan();
if (span != null) {
span.addTimelineAnnotation("IPC client connecting to " + server);
}
short numRetries = 0;
Random rand = null;
@ -796,8 +798,9 @@ public class Client {
// update last activity time
touch();
if (Trace.isTracing()) {
Trace.addTimelineAnnotation("IPC client connected to " + server);
span = Tracer.getCurrentSpan();
if (span != null) {
span.addTimelineAnnotation("IPC client connected to " + server);
}
// start the receiver thread after the socket connection has been set


@ -49,8 +49,8 @@ import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.ProtoUtil;
import org.apache.hadoop.util.Time;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.BlockingService;
@ -206,12 +206,13 @@ public class ProtobufRpcEngine implements RpcEngine {
+ method.getName() + "]");
}
TraceScope traceScope = null;
// if Tracing is on then start a new span for this rpc.
// guard it in the if statement to make sure there isn't
// any extra string manipulation.
if (Trace.isTracing()) {
traceScope = Trace.startSpan(RpcClientUtil.methodToTraceString(method));
Tracer tracer = Tracer.curThreadTracer();
TraceScope traceScope = null;
if (tracer != null) {
traceScope = tracer.newScope(RpcClientUtil.methodToTraceString(method));
}
RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method);
@ -236,9 +237,9 @@ public class ProtobufRpcEngine implements RpcEngine {
remoteId + ": " + method.getName() +
" {" + e + "}");
}
if (Trace.isTracing()) {
traceScope.getSpan().addTimelineAnnotation(
"Call got exception: " + e.toString());
if (traceScope != null) {
traceScope.addTimelineAnnotation("Call got exception: " +
e.toString());
}
throw new ServiceException(e);
} finally {


@ -117,10 +117,9 @@ import org.apache.hadoop.util.ProtoUtil;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceInfo;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.SpanId;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.ByteString;
@ -141,6 +140,7 @@ public abstract class Server {
private List<AuthMethod> enabledAuthMethods;
private RpcSaslProto negotiateResponse;
private ExceptionsHandler exceptionsHandler = new ExceptionsHandler();
private Tracer tracer;
public void addTerseExceptions(Class<?>... exceptionClass) {
exceptionsHandler.addTerseExceptions(exceptionClass);
@ -581,7 +581,7 @@ public abstract class Server {
private ByteBuffer rpcResponse; // the response for this call
private final RPC.RpcKind rpcKind;
private final byte[] clientId;
private final Span traceSpan; // the tracing span on the server side
private final TraceScope traceScope; // the HTrace scope on the server side
public Call(int id, int retryCount, Writable param,
Connection connection) {
@ -595,7 +595,7 @@ public abstract class Server {
}
public Call(int id, int retryCount, Writable param, Connection connection,
RPC.RpcKind kind, byte[] clientId, Span span) {
RPC.RpcKind kind, byte[] clientId, TraceScope traceScope) {
this.callId = id;
this.retryCount = retryCount;
this.rpcRequest = param;
@ -604,7 +604,7 @@ public abstract class Server {
this.rpcResponse = null;
this.rpcKind = kind;
this.clientId = clientId;
this.traceSpan = span;
this.traceScope = traceScope;
}
@Override
@ -2014,19 +2014,24 @@ public abstract class Server {
RpcErrorCodeProto.FATAL_DESERIALIZING_REQUEST, err);
}
Span traceSpan = null;
TraceScope traceScope = null;
if (header.hasTraceInfo()) {
// If the incoming RPC included tracing info, always continue the trace
TraceInfo parentSpan = new TraceInfo(header.getTraceInfo().getTraceId(),
header.getTraceInfo().getParentId());
traceSpan = Trace.startSpan(
RpcClientUtil.toTraceName(rpcRequest.toString()),
parentSpan).detach();
if (tracer != null) {
// If the incoming RPC included tracing info, always continue the
// trace
SpanId parentSpanId = new SpanId(
header.getTraceInfo().getTraceId(),
header.getTraceInfo().getParentId());
traceScope = tracer.newScope(
RpcClientUtil.toTraceName(rpcRequest.toString()),
parentSpanId);
traceScope.detach();
}
}
Call call = new Call(header.getCallId(), header.getRetryCount(),
rpcRequest, this, ProtoUtil.convert(header.getRpcKind()),
header.getClientId().toByteArray(), traceSpan);
header.getClientId().toByteArray(), traceScope);
if (callQueue.isClientBackoffEnabled()) {
// if RPC queue is full, we will ask the RPC client to back off by
@ -2209,8 +2214,9 @@ public abstract class Server {
Writable value = null;
CurCall.set(call);
if (call.traceSpan != null) {
traceScope = Trace.continueSpan(call.traceSpan);
if (call.traceScope != null) {
call.traceScope.reattach();
traceScope = call.traceScope;
traceScope.getSpan().addTimelineAnnotation("called");
}
@ -2287,21 +2293,18 @@ public abstract class Server {
} catch (InterruptedException e) {
if (running) { // unexpected -- log it
LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e);
if (Trace.isTracing()) {
if (traceScope != null) {
traceScope.getSpan().addTimelineAnnotation("unexpectedly interrupted: " +
StringUtils.stringifyException(e));
}
}
} catch (Exception e) {
LOG.info(Thread.currentThread().getName() + " caught an exception", e);
if (Trace.isTracing()) {
if (traceScope != null) {
traceScope.getSpan().addTimelineAnnotation("Exception: " +
StringUtils.stringifyException(e));
}
} finally {
if (traceScope != null) {
traceScope.close();
}
IOUtils.cleanup(LOG, traceScope);
}
}
@ -2615,6 +2618,10 @@ public abstract class Server {
/** Sets the socket buffer size used for responding to RPCs */
public void setSocketSendBufSize(int size) { this.socketSendBufferSize = size; }
public void setTracer(Tracer t) {
this.tracer = t;
}
/** Starts the service. Must be called before any calls will be handled. */
public synchronized void start() {
responder.start();
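A condensed, hypothetical sketch of the server-side pattern above, assuming htrace-core4 scope semantics: a scope opened from the remote parent SpanId is detached so the call can travel from the reader to a handler thread, then reattached and closed with the call. This is not the actual Server.java code.

```java
import org.apache.htrace.core.SpanId;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

final class ServerTraceSketch {
  /** Reader thread: open a scope under the caller's span and detach it. */
  static TraceScope openForCall(Tracer tracer, long traceIdHigh, long traceIdLow) {
    if (tracer == null) {
      return null;                         // tracing not configured on this server
    }
    TraceScope scope =
        tracer.newScope("rpcCall", new SpanId(traceIdHigh, traceIdLow));
    scope.detach();                        // the call is handed to another thread
    return scope;
  }

  /** Handler thread: resume the span, do the work, then close it. */
  static void handleCall(TraceScope scope, Runnable work) {
    if (scope != null) {
      scope.reattach();
    }
    try {
      work.run();
    } finally {
      if (scope != null) {
        scope.close();
      }
    }
  }
}
```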


@ -42,8 +42,8 @@ import org.apache.hadoop.util.Time;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.*;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
/** An RpcEngine implementation for Writable data. */
@InterfaceStability.Evolving
@ -233,9 +233,14 @@ public class WritableRpcEngine implements RpcEngine {
if (LOG.isDebugEnabled()) {
startTime = Time.now();
}
// if Tracing is on then start a new span for this rpc.
// guard it in the if statement to make sure there isn't
// any extra string manipulation.
Tracer tracer = Tracer.curThreadTracer();
TraceScope traceScope = null;
if (Trace.isTracing()) {
traceScope = Trace.startSpan(RpcClientUtil.methodToTraceString(method));
if (tracer != null) {
traceScope = tracer.newScope(RpcClientUtil.methodToTraceString(method));
}
ObjectWritable value;
try {


@ -18,6 +18,8 @@
package org.apache.hadoop.net;
import com.google.common.net.InetAddresses;
import com.sun.istack.Nullable;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@ -27,9 +29,11 @@ import java.net.InetAddress;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.Arrays;
import java.util.Collections;
import java.util.Enumeration;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Vector;
import javax.naming.NamingException;
@ -68,7 +72,7 @@ public class DNS {
* @return The host name associated with the provided IP
* @throws NamingException If a NamingException is encountered
*/
public static String reverseDns(InetAddress hostIp, String ns)
public static String reverseDns(InetAddress hostIp, @Nullable String ns)
throws NamingException {
//
// Builds the reverse IP lookup form
@ -228,28 +232,44 @@ public class DNS {
* (e.g. eth0 or eth0:0)
* @param nameserver
* The DNS host name
* @param tryfallbackResolution
* if true and if reverse DNS resolution fails then attempt to
* resolve the hostname with
* {@link InetAddress#getCanonicalHostName()} which includes
* hosts file resolution.
* @return A string vector of all host names associated with the IPs tied to
* the specified interface
* @throws UnknownHostException if the given interface is invalid
*/
public static String[] getHosts(String strInterface, String nameserver)
throws UnknownHostException {
String[] ips = getIPs(strInterface);
Vector<String> hosts = new Vector<String>();
for (int ctr = 0; ctr < ips.length; ctr++) {
public static String[] getHosts(String strInterface,
@Nullable String nameserver,
boolean tryfallbackResolution)
throws UnknownHostException {
final List<String> hosts = new Vector<String>();
final List<InetAddress> addresses =
getIPsAsInetAddressList(strInterface, true);
for (InetAddress address : addresses) {
try {
hosts.add(reverseDns(InetAddress.getByName(ips[ctr]),
nameserver));
} catch (UnknownHostException ignored) {
hosts.add(reverseDns(address, nameserver));
} catch (NamingException ignored) {
}
}
if (hosts.isEmpty()) {
LOG.warn("Unable to determine hostname for interface " + strInterface);
return new String[] { cachedHostname };
} else {
return hosts.toArray(new String[hosts.size()]);
if (hosts.isEmpty() && tryfallbackResolution) {
for (InetAddress address : addresses) {
final String canonicalHostName = address.getCanonicalHostName();
// Don't use the result if it looks like an IP address.
if (!InetAddresses.isInetAddress(canonicalHostName)) {
hosts.add(canonicalHostName);
}
}
}
if (hosts.isEmpty()) {
LOG.warn("Unable to determine hostname for interface " +
strInterface);
hosts.add(cachedHostname);
}
return hosts.toArray(new String[hosts.size()]);
}
@ -315,7 +335,7 @@ public class DNS {
*/
public static String[] getHosts(String strInterface)
throws UnknownHostException {
return getHosts(strInterface, null);
return getHosts(strInterface, null, false);
}
/**
@ -331,17 +351,19 @@ public class DNS {
* @throws UnknownHostException
* If one is encountered while querying the default interface
*/
public static String getDefaultHost(String strInterface, String nameserver)
public static String getDefaultHost(@Nullable String strInterface,
@Nullable String nameserver,
boolean tryfallbackResolution)
throws UnknownHostException {
if ("default".equals(strInterface)) {
if (strInterface == null || "default".equals(strInterface)) {
return cachedHostname;
}
if ("default".equals(nameserver)) {
return getDefaultHost(strInterface);
if (nameserver != null && "default".equals(nameserver)) {
nameserver = null;
}
String[] hosts = getHosts(strInterface, nameserver);
String[] hosts = getHosts(strInterface, nameserver, tryfallbackResolution);
return hosts[0];
}
@ -357,9 +379,74 @@ public class DNS {
* @throws UnknownHostException
* If one is encountered while querying the default interface
*/
public static String getDefaultHost(String strInterface)
public static String getDefaultHost(@Nullable String strInterface)
throws UnknownHostException {
return getDefaultHost(strInterface, null);
return getDefaultHost(strInterface, null, false);
}
/**
* Returns the default (first) host name associated by the provided
* nameserver with the address bound to the specified network interface.
*
* @param strInterface
* The name of the network interface to query (e.g. eth0)
* @param nameserver
* The DNS host name
* @throws UnknownHostException
* If one is encountered while querying the default interface
*/
public static String getDefaultHost(@Nullable String strInterface,
@Nullable String nameserver)
throws UnknownHostException {
return getDefaultHost(strInterface, nameserver, false);
}
/**
* Returns all the IPs associated with the provided interface, if any, as
* a list of InetAddress objects.
*
* @param strInterface
* The name of the network interface or sub-interface to query
* (eg eth0 or eth0:0) or the string "default"
* @param returnSubinterfaces
* Whether to return IPs associated with subinterfaces of
* the given interface
* @return A list of all the IPs associated with the provided
* interface. The local host IP is returned if the interface
* name "default" is specified or there is an I/O error looking
* for the given interface.
* @throws UnknownHostException
* If the given interface is invalid
*
*/
public static List<InetAddress> getIPsAsInetAddressList(String strInterface,
boolean returnSubinterfaces) throws UnknownHostException {
if ("default".equals(strInterface)) {
return Arrays.asList(InetAddress.getByName(cachedHostAddress));
}
NetworkInterface netIf;
try {
netIf = NetworkInterface.getByName(strInterface);
if (netIf == null) {
netIf = getSubinterface(strInterface);
}
} catch (SocketException e) {
LOG.warn("I/O error finding interface " + strInterface +
": " + e.getMessage());
return Arrays.asList(InetAddress.getByName(cachedHostAddress));
}
if (netIf == null) {
throw new UnknownHostException("No such interface " + strInterface);
}
// NB: Using a LinkedHashSet to preserve the order for callers
// that depend on a particular element being 1st in the array.
// For example, getDefaultIP always returns the first element.
LinkedHashSet<InetAddress> allAddrs = new LinkedHashSet<InetAddress>();
allAddrs.addAll(Collections.list(netIf.getInetAddresses()));
if (!returnSubinterfaces) {
allAddrs.removeAll(getSubinterfaceInetAddrs(netIf));
}
return new Vector<InetAddress>(allAddrs);
}
}
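A short usage sketch for the new lookup path above; the interface name and nameserver address are placeholders. With tryfallbackResolution set, a failed reverse-DNS lookup falls back to InetAddress#getCanonicalHostName(), which includes hosts-file resolution.

```java
import org.apache.hadoop.net.DNS;

public class DnsLookupExample {
  public static void main(String[] args) throws Exception {
    // Reverse-DNS for the address bound to eth2, asking nameserver 10.0.0.1;
    // fall back to canonical-hostname resolution if that yields nothing.
    String host = DNS.getDefaultHost("eth2", "10.0.0.1", true);
    System.out.println(host);
  }
}
```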


@ -17,6 +17,8 @@
package org.apache.hadoop.security;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_DNS_INTERFACE_KEY;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_DNS_NAMESERVER_KEY;
import java.io.IOException;
import java.net.InetAddress;
@ -29,6 +31,7 @@ import java.util.Arrays;
import java.util.List;
import java.util.ServiceLoader;
import javax.annotation.Nullable;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.kerberos.KerberosTicket;
@ -39,6 +42,7 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.DNS;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.token.Token;
@ -180,13 +184,38 @@ public class SecurityUtil {
throws IOException {
String fqdn = hostname;
if (fqdn == null || fqdn.isEmpty() || fqdn.equals("0.0.0.0")) {
fqdn = getLocalHostName();
fqdn = getLocalHostName(null);
}
return components[0] + "/" +
StringUtils.toLowerCase(fqdn) + "@" + components[2];
}
static String getLocalHostName() throws UnknownHostException {
/**
* Retrieve the name of the current host. Multihomed hosts may restrict the
* hostname lookup to a specific interface and nameserver with {@link
* org.apache.hadoop.fs.CommonConfigurationKeysPublic#HADOOP_SECURITY_DNS_INTERFACE_KEY}
* and {@link org.apache.hadoop.fs.CommonConfigurationKeysPublic#HADOOP_SECURITY_DNS_NAMESERVER_KEY}
*
* @param conf Configuration object. May be null.
* @return the local host name
* @throws UnknownHostException
*/
static String getLocalHostName(@Nullable Configuration conf)
throws UnknownHostException {
if (conf != null) {
String dnsInterface = conf.get(HADOOP_SECURITY_DNS_INTERFACE_KEY);
String nameServer = conf.get(HADOOP_SECURITY_DNS_NAMESERVER_KEY);
if (dnsInterface != null) {
return DNS.getDefaultHost(dnsInterface, nameServer, true);
} else if (nameServer != null) {
throw new IllegalArgumentException(HADOOP_SECURITY_DNS_NAMESERVER_KEY +
" requires " + HADOOP_SECURITY_DNS_INTERFACE_KEY + ". Check your" +
"configuration.");
}
}
// Fallback to querying the default hostname as we did before.
return InetAddress.getLocalHost().getCanonicalHostName();
}
@ -207,7 +236,7 @@ public class SecurityUtil {
@InterfaceStability.Evolving
public static void login(final Configuration conf,
final String keytabFileKey, final String userNameKey) throws IOException {
login(conf, keytabFileKey, userNameKey, getLocalHostName());
login(conf, keytabFileKey, userNameKey, getLocalHostName(conf));
}
/**
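A sketch of how a multi-homed service might exercise the new keys, based on the getLocalHostName() logic above. The keytab and principal configuration key names are placeholders, not real Hadoop keys.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.security.SecurityUtil;

public class MultihomedLoginExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_DNS_INTERFACE_KEY, "eth2");
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_DNS_NAMESERVER_KEY, "10.0.0.1");
    // login() now resolves the local hostname via DNS.getDefaultHost("eth2", "10.0.0.1", true)
    // instead of InetAddress.getLocalHost().getCanonicalHostName().
    SecurityUtil.login(conf, "my.service.keytab.file", "my.service.kerberos.principal");
  }
}
```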


@ -1,208 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tracing;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tracing.SpanReceiverInfo.ConfigurationPair;
import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.htrace.SpanReceiver;
import org.apache.htrace.SpanReceiverBuilder;
import org.apache.htrace.Trace;
import org.apache.htrace.impl.LocalFileSpanReceiver;
/**
* This class provides functions for reading the names of SpanReceivers from
* the Hadoop configuration, adding those SpanReceivers to the Tracer,
* and closing those SpanReceivers when appropriate.
* This class does nothing If no SpanReceiver is configured.
*/
@InterfaceAudience.Private
public class SpanReceiverHost implements TraceAdminProtocol {
public static final String SPAN_RECEIVERS_CONF_SUFFIX =
"spanreceiver.classes";
private static final Log LOG = LogFactory.getLog(SpanReceiverHost.class);
private static final HashMap<String, SpanReceiverHost> hosts =
new HashMap<String, SpanReceiverHost>(1);
private final TreeMap<Long, SpanReceiver> receivers =
new TreeMap<Long, SpanReceiver>();
private final String confPrefix;
private Configuration config;
private boolean closed = false;
private long highestId = 1;
private final static String LOCAL_FILE_SPAN_RECEIVER_PATH_SUFFIX =
"local-file-span-receiver.path";
public static SpanReceiverHost get(Configuration conf, String confPrefix) {
synchronized (SpanReceiverHost.class) {
SpanReceiverHost host = hosts.get(confPrefix);
if (host != null) {
return host;
}
final SpanReceiverHost newHost = new SpanReceiverHost(confPrefix);
newHost.loadSpanReceivers(conf);
ShutdownHookManager.get().addShutdownHook(new Runnable() {
public void run() {
newHost.closeReceivers();
}
}, 0);
hosts.put(confPrefix, newHost);
return newHost;
}
}
private static List<ConfigurationPair> EMPTY = Collections.emptyList();
private SpanReceiverHost(String confPrefix) {
this.confPrefix = confPrefix;
}
/**
* Reads the names of classes specified in the
* "hadoop.htrace.spanreceiver.classes" property and instantiates and registers
* them with the Tracer as SpanReceiver's.
*
* The nullary constructor is called during construction, but if the classes
* specified implement the Configurable interface, setConfiguration() will be
* called on them. This allows SpanReceivers to use values from the Hadoop
* configuration.
*/
public synchronized void loadSpanReceivers(Configuration conf) {
config = new Configuration(conf);
String receiverKey = confPrefix + SPAN_RECEIVERS_CONF_SUFFIX;
String[] receiverNames = config.getTrimmedStrings(receiverKey);
if (receiverNames == null || receiverNames.length == 0) {
if (LOG.isTraceEnabled()) {
LOG.trace("No span receiver names found in " + receiverKey + ".");
}
return;
}
// It's convenient to have each daemon log to a random trace file when
// testing.
String pathKey = confPrefix + LOCAL_FILE_SPAN_RECEIVER_PATH_SUFFIX;
if (config.get(pathKey) == null) {
String uniqueFile = LocalFileSpanReceiver.getUniqueLocalTraceFileName();
config.set(pathKey, uniqueFile);
if (LOG.isTraceEnabled()) {
LOG.trace("Set " + pathKey + " to " + uniqueFile);
}
}
for (String className : receiverNames) {
try {
SpanReceiver rcvr = loadInstance(className, EMPTY);
Trace.addReceiver(rcvr);
receivers.put(highestId++, rcvr);
LOG.info("Loaded SpanReceiver " + className + " successfully.");
} catch (IOException e) {
LOG.error("Failed to load SpanReceiver", e);
}
}
}
private synchronized SpanReceiver loadInstance(String className,
List<ConfigurationPair> extraConfig) throws IOException {
SpanReceiverBuilder builder =
new SpanReceiverBuilder(TraceUtils.
wrapHadoopConf(confPrefix, config, extraConfig));
SpanReceiver rcvr = builder.spanReceiverClass(className.trim()).build();
if (rcvr == null) {
throw new IOException("Failed to load SpanReceiver " + className);
}
return rcvr;
}
/**
* Calls close() on all SpanReceivers created by this SpanReceiverHost.
*/
public synchronized void closeReceivers() {
if (closed) return;
closed = true;
for (SpanReceiver rcvr : receivers.values()) {
try {
rcvr.close();
} catch (IOException e) {
LOG.warn("Unable to close SpanReceiver correctly: " + e.getMessage(), e);
}
}
receivers.clear();
}
public synchronized SpanReceiverInfo[] listSpanReceivers()
throws IOException {
SpanReceiverInfo[] info = new SpanReceiverInfo[receivers.size()];
int i = 0;
for(Map.Entry<Long, SpanReceiver> entry : receivers.entrySet()) {
info[i] = new SpanReceiverInfo(entry.getKey(),
entry.getValue().getClass().getName());
i++;
}
return info;
}
public synchronized long addSpanReceiver(SpanReceiverInfo info)
throws IOException {
StringBuilder configStringBuilder = new StringBuilder();
String prefix = "";
for (ConfigurationPair pair : info.configPairs) {
configStringBuilder.append(prefix).append(pair.getKey()).
append(" = ").append(pair.getValue());
prefix = ", ";
}
SpanReceiver rcvr = null;
try {
rcvr = loadInstance(info.getClassName(), info.configPairs);
} catch (IOException e) {
LOG.info("Failed to add SpanReceiver " + info.getClassName() +
" with configuration " + configStringBuilder.toString(), e);
throw e;
} catch (RuntimeException e) {
LOG.info("Failed to add SpanReceiver " + info.getClassName() +
" with configuration " + configStringBuilder.toString(), e);
throw e;
}
Trace.addReceiver(rcvr);
long newId = highestId++;
receivers.put(newId, rcvr);
LOG.info("Successfully added SpanReceiver " + info.getClassName() +
" with configuration " + configStringBuilder.toString());
return newId;
}
public synchronized void removeSpanReceiver(long spanReceiverId)
throws IOException {
SpanReceiver rcvr = receivers.remove(spanReceiverId);
if (rcvr == null) {
throw new IOException("There is no span receiver with id " + spanReceiverId);
}
Trace.removeReceiver(rcvr);
rcvr.close();
LOG.info("Successfully removed SpanReceiver " + spanReceiverId +
" with class " + rcvr.getClass().getName());
}
}


@ -24,7 +24,7 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tracing.SpanReceiverInfo.ConfigurationPair;
import org.apache.htrace.HTraceConfiguration;
import org.apache.htrace.core.HTraceConfiguration;
/**
* This class provides utility functions for tracing.
@ -32,6 +32,7 @@ import org.apache.htrace.HTraceConfiguration;
@InterfaceAudience.Private
public class TraceUtils {
private static List<ConfigurationPair> EMPTY = Collections.emptyList();
static final String DEFAULT_HADOOP_PREFIX = "hadoop.htrace.";
public static HTraceConfiguration wrapHadoopConf(final String prefix,
final Configuration conf) {
@ -47,16 +48,27 @@ public class TraceUtils {
return new HTraceConfiguration() {
@Override
public String get(String key) {
return get(key, "");
String ret = getInternal(prefix + key);
if (ret != null) {
return ret;
}
return getInternal(DEFAULT_HADOOP_PREFIX + key);
}
@Override
public String get(String key, String defaultValue) {
String prefixedKey = prefix + key;
if (extraMap.containsKey(prefixedKey)) {
return extraMap.get(prefixedKey);
String ret = get(key);
if (ret != null) {
return ret;
}
return conf.get(prefixedKey, defaultValue);
return defaultValue;
}
private String getInternal(String key) {
if (extraMap.containsKey(key)) {
return extraMap.get(key);
}
return conf.get(key);
}
};
}
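A small sketch of the key-resolution order implemented above: a key carrying the supplied prefix wins, otherwise the hadoop.htrace. default prefix is consulted. The property values here are arbitrary examples.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tracing.TraceUtils;
import org.apache.htrace.core.HTraceConfiguration;

public class TraceConfResolution {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("fs.client.htrace.sampler.classes", "AlwaysSampler");
    conf.set("hadoop.htrace.span.receiver.classes", "LocalFileSpanReceiver");

    HTraceConfiguration htrace = TraceUtils.wrapHadoopConf("fs.client.htrace.", conf);
    System.out.println(htrace.get("sampler.classes"));        // "AlwaysSampler" (prefixed key wins)
    System.out.println(htrace.get("span.receiver.classes"));  // falls back to hadoop.htrace.*
  }
}
```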


@ -0,0 +1,100 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.tracing;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tracing.SpanReceiverInfo.ConfigurationPair;
import org.apache.htrace.core.SpanReceiver;
import org.apache.htrace.core.TracerPool;
/**
* This class provides functions for managing the tracer configuration at
* runtime via an RPC protocol.
*/
@InterfaceAudience.Private
public class TracerConfigurationManager implements TraceAdminProtocol {
private static final Log LOG =
LogFactory.getLog(TracerConfigurationManager.class);
private final String confPrefix;
private final Configuration conf;
public TracerConfigurationManager(String confPrefix, Configuration conf) {
this.confPrefix = confPrefix;
this.conf = conf;
}
public synchronized SpanReceiverInfo[] listSpanReceivers()
throws IOException {
TracerPool pool = TracerPool.getGlobalTracerPool();
SpanReceiver[] receivers = pool.getReceivers();
SpanReceiverInfo[] info = new SpanReceiverInfo[receivers.length];
for (int i = 0; i < receivers.length; i++) {
SpanReceiver receiver = receivers[i];
info[i] = new SpanReceiverInfo(receiver.getId(),
receiver.getClass().getName());
}
return info;
}
public synchronized long addSpanReceiver(SpanReceiverInfo info)
throws IOException {
StringBuilder configStringBuilder = new StringBuilder();
String prefix = "";
for (ConfigurationPair pair : info.configPairs) {
configStringBuilder.append(prefix).append(pair.getKey()).
append(" = ").append(pair.getValue());
prefix = ", ";
}
SpanReceiver rcvr = null;
try {
rcvr = new SpanReceiver.Builder(TraceUtils.wrapHadoopConf(
confPrefix, conf, info.configPairs)).
className(info.getClassName().trim()).
build();
} catch (RuntimeException e) {
LOG.info("Failed to add SpanReceiver " + info.getClassName() +
" with configuration " + configStringBuilder.toString(), e);
throw e;
}
TracerPool.getGlobalTracerPool().addReceiver(rcvr);
LOG.info("Successfully added SpanReceiver " + info.getClassName() +
" with configuration " + configStringBuilder.toString());
return rcvr.getId();
}
public synchronized void removeSpanReceiver(long spanReceiverId)
throws IOException {
SpanReceiver[] receivers =
TracerPool.getGlobalTracerPool().getReceivers();
for (SpanReceiver receiver : receivers) {
if (receiver.getId() == spanReceiverId) {
TracerPool.getGlobalTracerPool().removeAndCloseReceiver(receiver);
LOG.info("Successfully removed SpanReceiver " + spanReceiverId +
" with class " + receiver.getClass().getName());
return;
}
}
throw new IOException("There is no span receiver with id " + spanReceiverId);
}
}


@ -27,8 +27,8 @@ import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.UserInformation
import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.*;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.Tracer;
import com.google.protobuf.ByteString;
@ -169,11 +169,12 @@ public abstract class ProtoUtil {
.setRetryCount(retryCount).setClientId(ByteString.copyFrom(uuid));
// Add tracing info if we are currently tracing.
if (Trace.isTracing()) {
Span s = Trace.currentSpan();
Span span = Tracer.getCurrentSpan();
if (span != null) {
result.setTraceInfo(RPCTraceInfoProto.newBuilder()
.setParentId(s.getSpanId())
.setTraceId(s.getTraceId()).build());
.setTraceId(span.getSpanId().getHigh())
.setParentId(span.getSpanId().getLow())
.build());
}
return result.build();
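For reference, a round-trip sketch of how the htrace-core4 SpanId maps onto the two int64 header fields used above and in the RPCTraceInfoProto change that follows: traceId carries the high word and parentId the low word. The values are arbitrary.

```java
import org.apache.htrace.core.SpanId;

public class SpanIdPacking {
  public static void main(String[] args) {
    SpanId original = new SpanId(0x1234L, 0x5678L);

    // Client side (ProtoUtil above): pack the current span id into the header.
    long traceIdField = original.getHigh();
    long parentIdField = original.getLow();

    // Server side: rebuild the parent SpanId from the two header fields.
    SpanId parent = new SpanId(traceIdField, parentIdField);
    System.out.println(original.equals(parent)); // true
  }
}
```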


@ -61,8 +61,9 @@ enum RpcKindProto {
* what span caused the new span we will create when this message is received.
*/
message RPCTraceInfoProto {
optional int64 traceId = 1;
optional int64 parentId = 2;
optional int64 traceId = 1; // parentIdHigh
optional int64 parentId = 2; // parentIdLow
}
message RpcRequestHeaderProto { // the header for the RpcRequest


@ -88,6 +88,31 @@
</description>
</property>
<property>
<name>hadoop.security.dns.interface</name>
<description>
The name of the Network Interface from which the service should determine
its host name for Kerberos login. e.g. eth2. In a multi-homed environment,
the setting can be used to affect the _HOST substitution in the service
Kerberos principal. If this configuration value is not set, the service
will use its default hostname as returned by
InetAddress.getLocalHost().getCanonicalHostName().
Most clusters will not require this setting.
</description>
</property>
<property>
<name>hadoop.security.dns.nameserver</name>
<description>
The host name or IP address of the name server (DNS) which a service Node
should use to determine its own host name for Kerberos Login. Requires
hadoop.security.dns.interface.
Most clusters will not require this setting.
</description>
</property>
<!--
=== Multiple group mapping providers configuration sample ===
This sample illustrates a typical use case for CompositeGroupsMapping where
@ -1973,4 +1998,19 @@ for ldap providers in the same way as above does.
the limit is 0 or the -safely is not specified in -rm command.
</description>
</property>
<property>
<name>fs.client.htrace.sampler.classes</name>
<value></value>
<description>The class names of the HTrace Samplers to use for Hadoop
filesystem clients.
</description>
</property>
<property>
<name>hadoop.htrace.span.receiver.classes</name>
<value></value>
<description>The class names of the Span Receivers to use for Hadoop.
</description>
</property>
</configuration>
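A hedged sketch of wiring these two keys up programmatically for a filesystem client; the sampler and receiver class names are examples drawn from the documentation changes below, not defaults.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsTracer;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class ClientTracingConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Sample every filesystem-client span and send spans to a local-file receiver.
    conf.set("fs.client.htrace.sampler.classes", "AlwaysSampler");
    conf.set("hadoop.htrace.span.receiver.classes",
        "org.apache.htrace.core.LocalFileSpanReceiver");

    Tracer tracer = FsTracer.get(conf);
    TraceScope scope = tracer.newScope("client-operation");
    try {
      // ... traced client work ...
    } finally {
      scope.close();
    }
  }
}
```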


@ -192,7 +192,6 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a
| `PutImageNumOps` | Total number of fsimage uploads to SecondaryNameNode |
| `PutImageAvgTime` | Average fsimage upload time in milliseconds |
| `TotalFileOps`| Total number of file operations performed |
| `NNStarted`| NameNode start time |
| `NNStartedTimeInMillis`| NameNode start time in milliseconds |
FSNamesystem


@ -49,37 +49,14 @@ interface bundled with HTrace or implementing it by yourself.
* HTracedRESTReceiver
* ZipkinSpanReceiver
In order to set up SpanReceivers for HDFS servers,
configure what SpanReceivers you'd like to use
by putting a comma separated list of the fully-qualified class name of classes implementing SpanReceiver
in `hdfs-site.xml` property: `dfs.htrace.spanreceiver.classes`.
```xml
<property>
<name>dfs.htrace.spanreceiver.classes</name>
<value>org.apache.htrace.impl.LocalFileSpanReceiver</value>
</property>
<property>
<name>dfs.htrace.local-file-span-receiver.path</name>
<value>/var/log/hadoop/htrace.out</value>
</property>
```
You can omit package name prefix if you use span receiver bundled with HTrace.
```xml
<property>
<name>dfs.htrace.spanreceiver.classes</name>
<value>LocalFileSpanReceiver</value>
</property>
```
You also need to add the jar bundling SpanReceiver to the classpath of Hadoop
on each node. (LocalFileSpanReceiver in the example above is included in the
jar of htrace-core which is bundled with Hadoop.)
See core-default.xml for a description of HTrace configuration keys. In some
cases, you will also need to add the jar containing the SpanReceiver that you
are using to the classpath of Hadoop on each node. (In the example above,
LocalFileSpanReceiver is included in the htrace-core4 jar which is bundled
with Hadoop.)
```
$ cp htrace-htraced/target/htrace-htraced-3.2.0-incubating.jar $HADOOP_HOME/share/hadoop/common/lib/
$ cp htrace-htraced/target/htrace-htraced-4.0.1-incubating.jar $HADOOP_HOME/share/hadoop/common/lib/
```
### Dynamic update of tracing configuration
@ -92,11 +69,11 @@ You need to run the command against all servers if you want to update the config
$ hadoop trace -list -host 192.168.56.2:9000
ID CLASS
1 org.apache.htrace.impl.LocalFileSpanReceiver
1 org.apache.htrace.core.LocalFileSpanReceiver
$ hadoop trace -list -host 192.168.56.2:50020
ID CLASS
1 org.apache.htrace.impl.LocalFileSpanReceiver
1 org.apache.htrace.core.LocalFileSpanReceiver
`hadoop trace -remove` removes span receiver from server.
`-remove` options takes id of span receiver as argument.
@ -113,7 +90,7 @@ You can specify the configuration associated with span receiver by `-Ckey=value`
$ hadoop trace -list -host 192.168.56.2:9000
ID CLASS
2 org.apache.htrace.impl.LocalFileSpanReceiver
2 org.apache.htrace.core.LocalFileSpanReceiver
### Starting tracing spans by HTrace API
@ -121,26 +98,21 @@ In order to trace, you will need to wrap the traced logic with **tracing span**
When there is running tracing spans,
the tracing information is propagated to servers along with RPC requests.
In addition, you need to initialize `SpanReceiverHost` once per process.
```java
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.tracing.SpanReceiverHost;
import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.Tracer;
import org.apache.htrace.core.TraceScope;
...
SpanReceiverHost.getInstance(new HdfsConfiguration());
...
TraceScope ts = Trace.startSpan("Gets", Sampler.ALWAYS);
TraceScope ts = tracer.newScope("Gets");
try {
... // traced logic
} finally {
if (ts != null) ts.close();
ts.close();
}
```
@ -154,11 +126,10 @@ which start tracing span before invoking HDFS shell command.
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.tracing.SpanReceiverHost;
import org.apache.hadoop.tracing.TraceUtils;
import org.apache.hadoop.util.ToolRunner;
import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.Tracer;
import org.apache.htrace.core.TraceScope;
public class TracingFsShell {
public static void main(String argv[]) throws Exception {
@ -166,13 +137,19 @@ which start tracing span before invoking HDFS shell command.
FsShell shell = new FsShell();
conf.setQuietMode(false);
shell.setConf(conf);
SpanReceiverHost.get(conf, DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX);
Tracer tracer = new Tracer.Builder().
name("TracingFsShell").
conf(TraceUtils.wrapHadoopConf("tracing.fs.shell.htrace.", conf)).
build();
int res = 0;
try (TraceScope ts = Trace.startSpan("FsShell", Sampler.ALWAYS)) {
TraceScope scope = tracer.newScope("FsShell");
try {
res = ToolRunner.run(shell, argv);
} finally {
scope.close();
shell.close();
}
tracer.close();
System.exit(res);
}
}
@ -189,16 +166,15 @@ The DFSClient can enable tracing internally. This allows you to use HTrace with
your client without modifying the client source code.
Configure the span receivers and samplers in `hdfs-site.xml`
by properties `dfs.client.htrace.sampler` and `dfs.client.htrace.sampler`.
The value of `dfs.client.htrace.sampler` can be NeverSampler, AlwaysSampler or ProbabilitySampler.
by properties `fs.client.htrace.sampler.classes` and
`fs.client.htrace.spanreceiver.classes`. The value of
`fs.client.htrace.sampler.classes` can be NeverSampler, AlwaysSampler or
ProbabilitySampler.
* NeverSampler: HTrace is OFF for all requests to namenodes and datanodes;
* AlwaysSampler: HTrace is ON for all requests to namenodes and datanodes;
* ProbabilitySampler: HTrace is ON for some percentage% of requests to namenodes and datanodes
You do not need to enable this if your client program has been modified
to use HTrace.
```xml
<property>
<name>dfs.client.htrace.spanreceiver.classes</name>
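
The rewritten documentation above moves client-side tracing configuration to the `fs.client.htrace.*` keys. A sketch, under the key names given in the doc text above, of enabling tracing for an unmodified filesystem client; the sampler and receiver class names are illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TracedClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Key names as described in the documentation change above.
    conf.set("fs.client.htrace.sampler.classes", "AlwaysSampler");
    conf.set("fs.client.htrace.spanreceiver.classes",
        "org.apache.htrace.core.LocalFileSpanReceiver");

    try (FileSystem fs = FileSystem.get(conf)) {
      // Any request issued here is eligible for client-side tracing.
      fs.exists(new Path("/"));
    }
  }
}
```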

View File

@ -1493,7 +1493,7 @@ public class TestConfiguration extends TestCase {
@Override
public void run() {
for (int i = 0; i < 100000; i++) {
for (int i = 0; i < 10000; i++) {
config.set("some.config.value-" + prefix + i, "value");
}
}

View File

@ -17,13 +17,16 @@
*/
package org.apache.hadoop.fs;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import junit.framework.AssertionFailedError;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.tracing.SetSpanReceiver;
import org.apache.hadoop.tracing.SpanReceiverHost;
import org.apache.hadoop.util.ToolRunner;
import org.apache.htrace.SamplerBuilder;
import org.apache.htrace.impl.AlwaysSampler;
import org.apache.htrace.core.AlwaysSampler;
import org.apache.htrace.core.Tracer;
import org.junit.Assert;
import org.junit.Test;
@ -49,10 +52,10 @@ public class TestFsShell {
@Test
public void testTracing() throws Throwable {
Configuration conf = new Configuration();
String prefix = FsShell.SEHLL_HTRACE_PREFIX;
conf.set(prefix + SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX,
String prefix = "fs.shell.htrace.";
conf.set(prefix + Tracer.SPAN_RECEIVER_CLASSES_KEY,
SetSpanReceiver.class.getName());
conf.set(prefix + SamplerBuilder.SAMPLER_CONF_KEY,
conf.set(prefix + Tracer.SAMPLER_CLASSES_KEY,
AlwaysSampler.class.getName());
conf.setQuietMode(false);
FsShell shell = new FsShell(conf);
@ -67,4 +70,33 @@ public class TestFsShell {
SetSpanReceiver.getMap()
.get("help").get(0).getKVAnnotations().get("args"));
}
@Test
public void testDFSWithInvalidCommmand() throws Throwable {
Configuration conf = new Configuration();
FsShell shell = new FsShell(conf);
String[] args = new String[1];
args[0] = "dfs -mkdirs";
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
final PrintStream out = new PrintStream(bytes);
final PrintStream oldErr = System.err;
try {
System.setErr(out);
ToolRunner.run(shell, args);
String errorValue = bytes.toString();
Assert
.assertTrue(
"FSShell dfs command did not print the error " +
"message when invalid command is passed",
errorValue.contains("-mkdirs: Unknown command"));
Assert
.assertTrue(
"FSShell dfs command did not print help " +
"message when invalid command is passed",
errorValue.contains("Usage: hadoop fs [generic options]"));
} finally {
IOUtils.closeStream(out);
System.setErr(oldErr);
}
}
}
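
The test above wires a receiver and a sampler under the `fs.shell.htrace.` prefix using the `Tracer` key constants, and `FsShell` builds its tracer from that prefix. A condensed sketch of the same prefix-scoped wiring, reusing the `SetSpanReceiver` test receiver from the hunk above; class and scope names here are illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tracing.SetSpanReceiver;
import org.apache.hadoop.tracing.TraceUtils;
import org.apache.htrace.core.AlwaysSampler;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class PrefixedTracerSketch {
  public static void main(String[] args) throws Exception {
    String prefix = "fs.shell.htrace.";
    Configuration conf = new Configuration();
    conf.set(prefix + Tracer.SPAN_RECEIVER_CLASSES_KEY,
        SetSpanReceiver.class.getName());
    conf.set(prefix + Tracer.SAMPLER_CLASSES_KEY,
        AlwaysSampler.class.getName());

    // wrapHadoopConf maps HTrace's own key names onto the prefixed Hadoop keys.
    Tracer tracer = new Tracer.Builder().
        name("PrefixedTracerSketch").
        conf(TraceUtils.wrapHadoopConf(prefix, conf)).
        build();
    TraceScope scope = tracer.newScope("demo");
    try {
      // traced work goes here; SetSpanReceiver collects the finished span
    } finally {
      scope.close();
    }
    tracer.close();
  }
}
```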

View File

@ -55,7 +55,6 @@ public class TestHarFileSystem {
* {@link HarFileSystem}. Either because there is a default implementation
* already available or because it is not relevant.
*/
@SuppressWarnings("deprecation")
private interface MustNotImplement {
public BlockLocation[] getFileBlockLocations(Path p, long start, long len);
public long getLength(Path f);

View File

@ -26,6 +26,7 @@ import java.util.Iterator;
import java.util.NoSuchElementException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.Shell;
import org.junit.runner.RunWith;
@ -312,7 +313,30 @@ public class TestLocalDirAllocator {
} catch (IOException e) {
assertEquals(CONTEXT + " not configured", e.getMessage());
} catch (NullPointerException e) {
fail("Lack of configuration should not have thrown an NPE.");
fail("Lack of configuration should not have thrown a NPE.");
}
String NEW_CONTEXT = CONTEXT + ".new";
conf1.set(NEW_CONTEXT, "");
LocalDirAllocator newDirAllocator = new LocalDirAllocator(NEW_CONTEXT);
try {
newDirAllocator.getLocalPathForWrite("/test", conf1);
fail("Exception not thrown when " + NEW_CONTEXT +
" is set to empty string");
} catch (IOException e) {
assertTrue(e instanceof DiskErrorException);
} catch (NullPointerException e) {
fail("Wrong configuration should not have thrown a NPE.");
}
try {
newDirAllocator.getLocalPathToRead("/test", conf1);
fail("Exception not thrown when " + NEW_CONTEXT +
" is set to empty string");
} catch (IOException e) {
assertTrue(e instanceof DiskErrorException);
} catch (NullPointerException e) {
fail("Wrong configuration should not have thrown a NPE.");
}
}

View File

@ -72,6 +72,7 @@ public class TestLocalFileSystem {
FileUtil.setWritable(base, true);
FileUtil.fullyDelete(base);
assertTrue(!base.exists());
RawLocalFileSystem.useStatIfAvailable();
}
/**

View File

@ -25,6 +25,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import org.apache.hadoop.fs.FileStatus;
import static org.apache.hadoop.fs.contract.ContractTestUtils.createFile;
import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset;
@ -120,4 +121,17 @@ public abstract class AbstractContractRootDirectoryTest extends AbstractFSContra
assertIsDirectory(root);
}
@Test
public void testListEmptyRootDirectory() throws IOException {
//extra sanity checks here to avoid support calls about complete loss of data
skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
FileSystem fs = getFileSystem();
Path root = new Path("/");
FileStatus[] statuses = fs.listStatus(root);
for (FileStatus status : statuses) {
ContractTestUtils.assertDeleted(fs, status.getPath(), true);
}
assertEquals("listStatus on empty root-directory returned a non-empty list",
0, fs.listStatus(root).length);
}
}

View File

@ -23,10 +23,7 @@ import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.io.RandomAccessFile;
import java.net.Socket;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
@ -34,8 +31,8 @@ import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.apache.hadoop.net.ServerSocketUtil;
import org.apache.hadoop.util.Time;
import org.apache.zookeeper.PortAssignment;
import org.apache.zookeeper.TestableZooKeeper;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
@ -167,10 +164,6 @@ public abstract class ClientBaseWithFixes extends ZKTestCase {
private LinkedList<ZooKeeper> allClients;
private boolean allClientsSetup = false;
private RandomAccessFile portNumLockFile;
private File portNumFile;
protected TestableZooKeeper createClient(CountdownWatcher watcher, String hp)
throws IOException, InterruptedException
{
@ -413,29 +406,11 @@ public abstract class ClientBaseWithFixes extends ZKTestCase {
private String initHostPort() {
BASETEST.mkdirs();
int port;
for (;;) {
port = PortAssignment.unique();
FileLock lock = null;
portNumLockFile = null;
try {
try {
portNumFile = new File(BASETEST, port + ".lock");
portNumLockFile = new RandomAccessFile(portNumFile, "rw");
try {
lock = portNumLockFile.getChannel().tryLock();
} catch (OverlappingFileLockException e) {
continue;
}
} finally {
if (lock != null)
break;
if (portNumLockFile != null)
portNumLockFile.close();
}
} catch (IOException e) {
throw new RuntimeException(e);
}
int port = 0;
try {
port = ServerSocketUtil.getPort(port, 100);
} catch (IOException e) {
throw new RuntimeException(e);
}
return "127.0.0.1:" + port;
}
@ -480,9 +455,6 @@ public abstract class ClientBaseWithFixes extends ZKTestCase {
stopServer();
portNumLockFile.close();
portNumFile.delete();
if (tmpDir != null) {
Assert.assertTrue("delete " + tmpDir.toString(), recursiveDelete(tmpDir));
}

View File

@ -113,10 +113,7 @@ public class TestAuthenticationSessionCookie {
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
Configuration sslConf = new Configuration(false);
sslConf.addResource("ssl-server.xml");
sslConf.addResource("ssl-client.xml");
Configuration sslConf = KeyStoreTestUtil.getSslConfig();
server = new HttpServer2.Builder()
.setName("test")

View File

@ -17,7 +17,6 @@ import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.security.ssl.SSLFactory;
@ -27,12 +26,10 @@ import org.junit.Test;
import javax.net.ssl.HttpsURLConnection;
import javax.servlet.*;
import javax.servlet.http.Cookie;
import javax.servlet.http.HttpServletResponse;
import java.io.File;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URL;
import java.security.GeneralSecurityException;
@ -89,9 +86,7 @@ public class TestHttpCookieFlag {
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
Configuration sslConf = new Configuration(false);
sslConf.addResource("ssl-server.xml");
sslConf.addResource("ssl-client.xml");
Configuration sslConf = KeyStoreTestUtil.getSslConfig();
clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
clientSslFactory.init();

View File

@ -65,9 +65,7 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
Configuration sslConf = new Configuration(false);
sslConf.addResource("ssl-server.xml");
sslConf.addResource("ssl-client.xml");
Configuration sslConf = KeyStoreTestUtil.getSslConfig();
clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, sslConf);
clientSslFactory.init();

View File

@ -1060,8 +1060,8 @@ public class TestRPC {
}));
}
while (server.getCallQueueLen() != 1
&& countThreads(CallQueueManager.class.getName()) != 1
&& countThreads(TestProtocol.class.getName()) != 1) {
|| countThreads(CallQueueManager.class.getName()) != 1
|| countThreads(TestImpl.class.getName()) != 1) {
Thread.sleep(100);
}
} finally {

View File

@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFactory;
public class ServerSocketUtil {
private static final Log LOG = LogFactory.getLog(ServerSocketUtil.class);
private static Random rand = new Random();
/**
* Port scan & allocate is how most other apps find ports
@ -38,13 +39,15 @@ public class ServerSocketUtil {
* @throws IOException
*/
public static int getPort(int port, int retries) throws IOException {
Random rand = new Random();
int tryPort = port;
int tries = 0;
while (true) {
if (tries > 0) {
if (tries > 0 || tryPort == 0) {
tryPort = port + rand.nextInt(65535 - port);
}
if (tryPort == 0) {
continue;
}
LOG.info("Using port " + tryPort);
try (ServerSocket s = new ServerSocket(tryPort)) {
return tryPort;
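
`ClientBaseWithFixes` above now delegates port selection to this helper instead of the file-lock scheme it removes. A minimal usage sketch: with the change above, passing 0 as the starting port makes every attempt pick a random candidate, and 100 bounds the number of bind attempts.

```java
import java.io.IOException;
import org.apache.hadoop.net.ServerSocketUtil;

public class FreePortSketch {
  public static void main(String[] args) throws IOException {
    // Ask for any free port (start at 0, up to 100 attempts).
    int port = ServerSocketUtil.getPort(0, 100);
    System.out.println("Bindable port: " + port);
  }
}
```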

View File

@ -18,6 +18,10 @@
package org.apache.hadoop.net;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.net.NetworkInterface;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.net.InetAddress;
@ -28,6 +32,9 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.util.Time;
import org.junit.Test;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.*;
/**
@ -38,6 +45,11 @@ public class TestDNS {
private static final Log LOG = LogFactory.getLog(TestDNS.class);
private static final String DEFAULT = "default";
// This is not a legal hostname (starts with a hyphen). It will never
// be returned on any test machine.
private static final String DUMMY_HOSTNAME = "-DUMMY_HOSTNAME";
private static final String INVALID_DNS_SERVER = "0.0.0.0";
/**
* Test that asking for the default hostname works
* @throws Exception if hostname lookups fail
@ -89,12 +101,8 @@ public class TestDNS {
*/
@Test
public void testNullInterface() throws Exception {
try {
String host = DNS.getDefaultHost(null);
fail("Expected a NullPointerException, got " + host);
} catch (NullPointerException npe) {
// Expected
}
String host = DNS.getDefaultHost(null); // should work.
assertThat(host, is(DNS.getDefaultHost(DEFAULT)));
try {
String ip = DNS.getDefaultIP(null);
fail("Expected a NullPointerException, got " + ip);
@ -103,6 +111,26 @@ public class TestDNS {
}
}
/**
* Test that 'null' DNS server gives the same result as if no DNS
* server was passed.
*/
@Test
public void testNullDnsServer() throws Exception {
String host = DNS.getDefaultHost(getLoopbackInterface(), null);
assertThat(host, is(DNS.getDefaultHost(getLoopbackInterface())));
}
/**
* Test that "default" DNS server gives the same result as if no DNS
* server was passed.
*/
@Test
public void testDefaultDnsServer() throws Exception {
String host = DNS.getDefaultHost(getLoopbackInterface(), DEFAULT);
assertThat(host, is(DNS.getDefaultHost(getLoopbackInterface())));
}
/**
* Get the IP addresses of an unknown interface
*/
@ -147,10 +175,80 @@ public class TestDNS {
+ " Loopback=" + localhost.isLoopbackAddress()
+ " Linklocal=" + localhost.isLinkLocalAddress());
}
}
}
/**
* Test that when using an invalid DNS server with hosts file fallback,
* we are able to get the hostname from the hosts file.
*
* This test may fail on some misconfigured test machines that don't have
* an entry for "localhost" in their hosts file. This entry is correctly
* configured out of the box on common Linux distributions, OS X and
* Windows.
*
* @throws Exception
*/
@Test (timeout=60000)
public void testLookupWithHostsFallback() throws Exception {
final String oldHostname = changeDnsCachedHostname(DUMMY_HOSTNAME);
try {
String hostname = DNS.getDefaultHost(
getLoopbackInterface(), INVALID_DNS_SERVER, true);
// Expect to get back something other than the cached host name.
assertThat(hostname, not(DUMMY_HOSTNAME));
} finally {
// Restore DNS#cachedHostname for subsequent tests.
changeDnsCachedHostname(oldHostname);
}
}
/**
* Test that when using an invalid DNS server without hosts file
* fallback, we get back the cached host name.
*
* @throws Exception
*/
@Test(timeout=60000)
public void testLookupWithoutHostsFallback() throws Exception {
final String oldHostname = changeDnsCachedHostname(DUMMY_HOSTNAME);
try {
String hostname = DNS.getDefaultHost(
getLoopbackInterface(), INVALID_DNS_SERVER, false);
// Expect to get back the cached host name since there was no hosts
// file lookup.
assertThat(hostname, is(DUMMY_HOSTNAME));
} finally {
// Restore DNS#cachedHostname for subsequent tests.
changeDnsCachedHostname(oldHostname);
}
}
private String getLoopbackInterface() throws SocketException {
return NetworkInterface.getByInetAddress(
InetAddress.getLoopbackAddress()).getDisplayName();
}
/**
* Change DNS#cachedHostName to something which cannot be a real
* host name. Uses reflection since it is a 'private final' field.
*/
private String changeDnsCachedHostname(final String newHostname)
throws Exception {
final String oldCachedHostname = DNS.getDefaultHost(DEFAULT);
Field field = DNS.class.getDeclaredField("cachedHostname");
field.setAccessible(true);
Field modifiersField = Field.class.getDeclaredField("modifiers");
modifiersField.setAccessible(true);
modifiersField.set(field, field.getModifiers() & ~Modifier.FINAL);
field.set(null, newHostname);
return oldCachedHostname;
}
/**
* Test that the name "localhost" resolves to something.
*
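
Both fallback tests above exercise a three-argument `DNS.getDefaultHost(interface, nameserver, tolerateFailures)` overload introduced alongside them. A hedged sketch of that call, assuming the signature the tests use; `0.0.0.0` mirrors the tests' deliberately unreachable name server:

```java
import org.apache.hadoop.net.DNS;

public class DnsFallbackSketch {
  public static void main(String[] args) throws Exception {
    // With fallback enabled, an unreachable name server still yields a host
    // name from the hosts file rather than the cached JVM host name.
    String host = DNS.getDefaultHost("default", "0.0.0.0", true);
    System.out.println("Resolved with hosts-file fallback: " + host);
  }
}
```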

View File

@ -111,7 +111,7 @@ public class TestSecurityUtil {
@Test
public void testLocalHostNameForNullOrWild() throws Exception {
String local = StringUtils.toLowerCase(SecurityUtil.getLocalHostName());
String local = StringUtils.toLowerCase(SecurityUtil.getLocalHostName(null));
assertEquals("hdfs/" + local + "@REALM",
SecurityUtil.getServerPrincipal("hdfs/_HOST@REALM", (String)null));
assertEquals("hdfs/" + local + "@REALM",

View File

@ -37,7 +37,6 @@ import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.KeyStore;
import java.security.NoSuchAlgorithmException;
import java.security.PrivateKey;
import java.security.SecureRandom;
import java.security.cert.Certificate;
import java.security.cert.X509Certificate;
@ -49,8 +48,6 @@ import java.security.InvalidKeyException;
import java.security.NoSuchProviderException;
import java.security.SignatureException;
import java.security.cert.CertificateEncodingException;
import java.security.cert.CertificateException;
import java.security.cert.CertificateFactory;
import javax.security.auth.x500.X500Principal;
import org.bouncycastle.x509.X509V1CertificateGenerator;
@ -233,8 +230,8 @@ public class KeyStoreTestUtil {
String trustKS = null;
String trustPassword = "trustP";
File sslClientConfFile = new File(sslConfDir + "/ssl-client.xml");
File sslServerConfFile = new File(sslConfDir + "/ssl-server.xml");
File sslClientConfFile = new File(sslConfDir, getClientSSLConfigFileName());
File sslServerConfFile = new File(sslConfDir, getServerSSLConfigFileName());
Map<String, X509Certificate> certs = new HashMap<String, X509Certificate>();
@ -311,9 +308,45 @@ public class KeyStoreTestUtil {
return serverSSLConf;
}
/**
* Returns the client SSL configuration file name. Under parallel test
* execution, this file name is parameterized by a unique ID to ensure that
* concurrent tests don't collide on an SSL configuration file.
*
* @return client SSL configuration file name
*/
public static String getClientSSLConfigFileName() {
return getSSLConfigFileName("ssl-client");
}
/**
* Returns the server SSL configuration file name. Under parallel test
* execution, this file name is parameterized by a unique ID to ensure that
* concurrent tests don't collide on an SSL configuration file.
*
* @return server SSL configuration file name
*/
public static String getServerSSLConfigFileName() {
return getSSLConfigFileName("ssl-server");
}
/**
* Returns an SSL configuration file name. Under parallel test
* execution, this file name is parameterized by a unique ID to ensure that
* concurrent tests don't collide on an SSL configuration file.
*
* @param base the base of the file name
* @return SSL configuration file name for base
*/
private static String getSSLConfigFileName(String base) {
String testUniqueForkId = System.getProperty("test.unique.fork.id");
String fileSuffix = testUniqueForkId != null ? "-" + testUniqueForkId : "";
return base + fileSuffix + ".xml";
}
/**
* Creates SSL configuration.
*
*
* @param mode SSLFactory.Mode mode to configure
* @param keystore String keystore file
* @param password String store password, or null to avoid setting store
@ -410,4 +443,19 @@ public class KeyStoreTestUtil {
throw e;
}
}
/**
* Get the SSL configuration
* @return {@link Configuration} instance with ssl configs loaded
*/
public static Configuration getSslConfig(){
Configuration sslConf = new Configuration(false);
String sslServerConfFile = KeyStoreTestUtil.getServerSSLConfigFileName();
String sslClientConfFile = KeyStoreTestUtil.getClientSSLConfigFileName();
sslConf.addResource(sslServerConfFile);
sslConf.addResource(sslClientConfFile);
sslConf.set(SSLFactory.SSL_SERVER_CONF_KEY, sslServerConfFile);
sslConf.set(SSLFactory.SSL_CLIENT_CONF_KEY, sslClientConfFile);
return sslConf;
}
}
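
The helpers added above keep concurrently running test forks from clobbering a shared SSL configuration file by folding the `test.unique.fork.id` system property into the file name. A sketch of the resulting behaviour; the fork id is set by hand here purely for illustration, whereas under the parallel-tests profile the build supplies it:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.security.ssl.SSLFactory;

public class SslConfigNamingSketch {
  public static void main(String[] args) {
    System.setProperty("test.unique.fork.id", "fork-3");

    // Resolve to ssl-client-fork-3.xml / ssl-server-fork-3.xml instead of the
    // shared ssl-client.xml / ssl-server.xml.
    System.out.println(KeyStoreTestUtil.getClientSSLConfigFileName());
    System.out.println(KeyStoreTestUtil.getServerSSLConfigFileName());

    // One-call replacement for the addResource("ssl-server.xml") /
    // addResource("ssl-client.xml") pattern removed from the HTTP tests above.
    Configuration sslConf = KeyStoreTestUtil.getSslConfig();
    System.out.println(sslConf.get(SSLFactory.SSL_SERVER_CONF_KEY));
  }
}
```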

View File

@ -19,9 +19,10 @@ package org.apache.hadoop.tracing;
import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.htrace.Span;
import org.apache.htrace.SpanReceiver;
import org.apache.htrace.HTraceConfiguration;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.SpanId;
import org.apache.htrace.core.SpanReceiver;
import org.apache.htrace.core.HTraceConfiguration;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedList;
@ -39,7 +40,7 @@ import org.junit.Assert;
* push all the metrics to a static place, and would make testing
* SpanReceiverHost harder.
*/
public class SetSpanReceiver implements SpanReceiver {
public class SetSpanReceiver extends SpanReceiver {
public SetSpanReceiver(HTraceConfiguration conf) {
}
@ -68,8 +69,8 @@ public class SetSpanReceiver implements SpanReceiver {
}
public static class SetHolder {
public static ConcurrentHashMap<Long, Span> spans =
new ConcurrentHashMap<Long, Span>();
public static ConcurrentHashMap<SpanId, Span> spans =
new ConcurrentHashMap<SpanId, Span>();
public static Map<String, List<Span>> getMap() {
Map<String, List<Span>> map = new HashMap<String, List<Span>>();

View File

@ -21,7 +21,7 @@ import static org.junit.Assert.assertEquals;
import java.util.LinkedList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tracing.SpanReceiverInfo.ConfigurationPair;
import org.apache.htrace.HTraceConfiguration;
import org.apache.htrace.core.HTraceConfiguration;
import org.junit.Test;
public class TestTraceUtils {

View File

@ -32,4 +32,28 @@
<Method name="allocSlot" />
<Bug pattern="UL_UNRELEASED_LOCK" />
</Match>
<Match>
<Class name="org.apache.hadoop.hdfs.DFSInputStream"/>
<Field name="tcpReadsDisabledForTesting"/>
<Bug pattern="MS_SHOULD_BE_FINAL"/>
</Match>
<!--
ResponseProcessor is a thread that is designed to catch RuntimeException.
-->
<Match>
<Class name="org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor" />
<Method name="run" />
<Bug pattern="REC_CATCH_EXCEPTION" />
</Match>
<!--
We use a separate lock to guard cachingStrategy in order to separate
locks for p-reads from seek + read invocations.
-->
<Match>
<Class name="org.apache.hadoop.hdfs.DFSInputStream" />
<Field name="cachingStrategy" />
<Bug pattern="IS2_INCONSISTENT_SYNC" />
</Match>
</FindBugsFilter>

View File

@ -31,8 +31,6 @@ import java.util.List;
import com.google.common.io.ByteArrayDataOutput;
import com.google.common.io.ByteStreams;
import org.apache.commons.lang.mutable.MutableBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
@ -56,7 +54,7 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplicaInfo;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.hdfs.util.IOUtilsClient;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.security.AccessControlException;
@ -68,6 +66,10 @@ import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.htrace.core.Tracer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
@ -75,7 +77,7 @@ import com.google.common.base.Preconditions;
*/
@InterfaceAudience.Private
public class BlockReaderFactory implements ShortCircuitReplicaCreator {
static final Log LOG = LogFactory.getLog(BlockReaderFactory.class);
static final Logger LOG = LoggerFactory.getLogger(BlockReaderFactory.class);
public static class FailureInjector {
public void injectRequestFileDescriptorsFailure() throws IOException {
@ -177,6 +179,11 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
*/
private Configuration configuration;
/**
* The HTrace tracer to use.
*/
private Tracer tracer;
/**
* Information about the domain socket path we should use to connect to the
* local peer-- or null if we haven't examined the local domain socket.
@ -281,6 +288,11 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
return this;
}
public BlockReaderFactory setTracer(Tracer tracer) {
this.tracer = tracer;
return this;
}
@VisibleForTesting
public static void setFailureInjectorForTesting(FailureInjector injector) {
failureInjector = injector;
@ -380,15 +392,17 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
Constructor<? extends ReplicaAccessorBuilder> ctor =
cls.getConstructor();
ReplicaAccessorBuilder builder = ctor.newInstance();
long visibleLength = startOffset + length;
ReplicaAccessor accessor = builder.
setAllowShortCircuitReads(allowShortCircuitLocalReads).
setBlock(block.getBlockId(), block.getBlockPoolId()).
setGenerationStamp(block.getGenerationStamp()).
setBlockAccessToken(tokenBytes).
setClientName(clientName).
setConfiguration(configuration).
setFileName(fileName).
setVerifyChecksum(verifyChecksum).
setVisibleLength(length).
setVisibleLength(visibleLength).
build();
if (accessor == null) {
if (LOG.isTraceEnabled()) {
@ -396,7 +410,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
cls.getName());
}
} else {
return new ExternalBlockReader(accessor, length, startOffset);
return new ExternalBlockReader(accessor, visibleLength, startOffset);
}
} catch (Throwable t) {
LOG.warn("Failed to construct new object of type " +
@ -433,7 +447,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
try {
return BlockReaderLocalLegacy.newBlockReader(conf,
userGroupInformation, configuration, fileName, block, token,
datanode, startOffset, length, storageType);
datanode, startOffset, length, storageType, tracer);
} catch (RemoteException remoteException) {
ioe = remoteException.unwrapRemoteException(
InvalidToken.class, AccessControlException.class);
@ -494,6 +508,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
setVerifyChecksum(verifyChecksum).
setCachingStrategy(cachingStrategy).
setStorageType(storageType).
setTracer(tracer).
build();
}
@ -550,14 +565,14 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
if (LOG.isDebugEnabled()) {
LOG.debug(this + ": closing stale domain peer " + peer, e);
}
IOUtils.cleanup(LOG, peer);
IOUtilsClient.cleanup(LOG, peer);
} else {
// Handle an I/O error we got when using a newly created socket.
// We temporarily disable the domain socket path for a few minutes in
// this case, to prevent wasting more time on it.
LOG.warn(this + ": I/O error requesting file descriptors. " +
"Disabling domain socket " + peer.getDomainSocket(), e);
IOUtils.cleanup(LOG, peer);
IOUtilsClient.cleanup(LOG, peer);
clientContext.getDomainSocketFactory()
.disableDomainSocketPath(pathInfo.getPath());
return null;
@ -616,7 +631,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
return null;
} finally {
if (replica == null) {
IOUtils.cleanup(DFSClient.LOG, fis[0], fis[1]);
IOUtilsClient.cleanup(DFSClient.LOG, fis[0], fis[1]);
}
}
case ERROR_UNSUPPORTED:
@ -684,7 +699,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
blockReader = getRemoteBlockReader(peer);
return blockReader;
} catch (IOException ioe) {
IOUtils.cleanup(LOG, peer);
IOUtilsClient.cleanup(LOG, peer);
if (isSecurityException(ioe)) {
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": got security exception while constructing " +
@ -711,7 +726,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
}
} finally {
if (blockReader == null) {
IOUtils.cleanup(LOG, peer);
IOUtilsClient.cleanup(LOG, peer);
}
}
}
@ -768,7 +783,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
}
} finally {
if (blockReader == null) {
IOUtils.cleanup(LOG, peer);
IOUtilsClient.cleanup(LOG, peer);
}
}
}
@ -863,12 +878,12 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
return RemoteBlockReader.newBlockReader(fileName,
block, token, startOffset, length, conf.getIoBufferSize(),
verifyChecksum, clientName, peer, datanode,
clientContext.getPeerCache(), cachingStrategy);
clientContext.getPeerCache(), cachingStrategy, tracer);
} else {
return RemoteBlockReader2.newBlockReader(
fileName, block, token, startOffset, length,
verifyChecksum, clientName, peer, datanode,
clientContext.getPeerCache(), cachingStrategy);
clientContext.getPeerCache(), cachingStrategy, tracer);
}
}
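
The `ExternalBlockReader` hunk above switches the value handed to `setVisibleLength` from the requested byte count to `startOffset + length`. A tiny worked example, with illustrative numbers only, makes the difference concrete:

```java
public class VisibleLengthSketch {
  public static void main(String[] args) {
    long startOffset = 1L << 20;                // read begins 1 MiB into the block
    long length = 2L << 20;                     // caller asks for 2 MiB
    long visibleLength = startOffset + length;  // 3 MiB: the end offset of the
                                                // range, not just its size
    System.out.println("visibleLength = " + visibleLength);
  }
}
```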

View File

@ -34,9 +34,8 @@ import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DirectBufferPool;
import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@ -75,6 +74,7 @@ class BlockReaderLocal implements BlockReader {
private long dataPos;
private ExtendedBlock block;
private StorageType storageType;
private Tracer tracer;
public Builder(ShortCircuitConf conf) {
this.maxReadahead = Integer.MAX_VALUE;
@ -120,6 +120,11 @@ class BlockReaderLocal implements BlockReader {
return this;
}
public Builder setTracer(Tracer tracer) {
this.tracer = tracer;
return this;
}
public BlockReaderLocal build() {
Preconditions.checkNotNull(replica);
return new BlockReaderLocal(this);
@ -228,6 +233,11 @@ class BlockReaderLocal implements BlockReader {
*/
private StorageType storageType;
/**
* The Tracer to use.
*/
private final Tracer tracer;
private BlockReaderLocal(Builder builder) {
this.replica = builder.replica;
this.dataIn = replica.getDataStream().getChannel();
@ -257,6 +267,7 @@ class BlockReaderLocal implements BlockReader {
}
this.maxReadaheadLength = maxReadaheadChunks * bytesPerChecksum;
this.storageType = builder.storageType;
this.tracer = builder.tracer;
}
private synchronized void createDataBufIfNeeded() {
@ -324,8 +335,8 @@ class BlockReaderLocal implements BlockReader {
*/
private synchronized int fillBuffer(ByteBuffer buf, boolean canSkipChecksum)
throws IOException {
TraceScope scope = Trace.startSpan("BlockReaderLocal#fillBuffer(" +
block.getBlockId() + ")", Sampler.NEVER);
TraceScope scope = tracer.newScope(
"BlockReaderLocal#fillBuffer(" + block.getBlockId() + ")");
try {
int total = 0;
long startDataPos = dataPos;
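
`fillBuffer` above is one of many call sites converted from `Trace.startSpan(name, Sampler.NEVER)` to `tracer.newScope(name)`, and the same try/finally shape recurs throughout the `DFSClient` changes below. A condensed sketch of the pattern, assuming a `Tracer` already built elsewhere (for example via `FsTracer.get(conf)`, as `DFSClient` does); the scope name is illustrative:

```java
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class ScopePatternSketch {
  // 'tracer' is assumed to be supplied by the caller, e.g. FsTracer.get(conf).
  static void tracedWork(Tracer tracer) {
    TraceScope scope = tracer.newScope("BlockReaderLocal#fillBuffer(123)");
    try {
      // ... the work being traced ...
    } finally {
      scope.close();  // always close the scope, even if the work throws
    }
  }
}
```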

View File

@ -50,10 +50,8 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DirectBufferPool;
import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -182,7 +180,8 @@ class BlockReaderLocalLegacy implements BlockReader {
private long startOffset;
private final String filename;
private long blockId;
private final Tracer tracer;
/**
* The only way this object can be instantiated.
*/
@ -190,8 +189,8 @@ class BlockReaderLocalLegacy implements BlockReader {
UserGroupInformation userGroupInformation,
Configuration configuration, String file, ExtendedBlock blk,
Token<BlockTokenIdentifier> token, DatanodeInfo node,
long startOffset, long length, StorageType storageType)
throws IOException {
long startOffset, long length, StorageType storageType,
Tracer tracer) throws IOException {
final ShortCircuitConf scConf = conf.getShortCircuitConf();
LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node
.getIpcPort());
@ -239,10 +238,10 @@ class BlockReaderLocalLegacy implements BlockReader {
- (startOffset % checksum.getBytesPerChecksum());
localBlockReader = new BlockReaderLocalLegacy(scConf, file, blk, token,
startOffset, length, pathinfo, checksum, true, dataIn,
firstChunkOffset, checksumIn);
firstChunkOffset, checksumIn, tracer);
} else {
localBlockReader = new BlockReaderLocalLegacy(scConf, file, blk, token,
startOffset, length, pathinfo, dataIn);
startOffset, length, pathinfo, dataIn, tracer);
}
} catch (IOException e) {
// remove from cache
@ -321,18 +320,18 @@ class BlockReaderLocalLegacy implements BlockReader {
private BlockReaderLocalLegacy(ShortCircuitConf conf, String hdfsfile,
ExtendedBlock block, Token<BlockTokenIdentifier> token, long startOffset,
long length, BlockLocalPathInfo pathinfo, FileInputStream dataIn)
throws IOException {
long length, BlockLocalPathInfo pathinfo, FileInputStream dataIn,
Tracer tracer) throws IOException {
this(conf, hdfsfile, block, token, startOffset, length, pathinfo,
DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 4), false,
dataIn, startOffset, null);
dataIn, startOffset, null, tracer);
}
private BlockReaderLocalLegacy(ShortCircuitConf conf, String hdfsfile,
ExtendedBlock block, Token<BlockTokenIdentifier> token, long startOffset,
long length, BlockLocalPathInfo pathinfo, DataChecksum checksum,
boolean verifyChecksum, FileInputStream dataIn, long firstChunkOffset,
FileInputStream checksumIn) throws IOException {
FileInputStream checksumIn, Tracer tracer) throws IOException {
this.filename = hdfsfile;
this.checksum = checksum;
this.verifyChecksum = verifyChecksum;
@ -368,6 +367,7 @@ class BlockReaderLocalLegacy implements BlockReader {
bufferPool.returnBuffer(checksumBuff);
}
}
this.tracer = tracer;
}
/**
@ -375,8 +375,8 @@ class BlockReaderLocalLegacy implements BlockReader {
*/
private int fillBuffer(FileInputStream stream, ByteBuffer buf)
throws IOException {
TraceScope scope = Trace.startSpan("BlockReaderLocalLegacy#fillBuffer(" +
blockId + ")", Sampler.NEVER);
TraceScope scope = tracer.
newScope("BlockReaderLocalLegacy#fillBuffer(" + blockId + ")");
try {
int bytesRead = stream.getChannel().read(buf);
if (bytesRead < 0) {

View File

@ -53,8 +53,6 @@ import java.util.concurrent.atomic.AtomicInteger;
import javax.net.SocketFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
@ -76,6 +74,7 @@ import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.FsStatus;
import org.apache.hadoop.fs.FsTracer;
import org.apache.hadoop.fs.HdfsBlockLocation;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
@ -94,6 +93,8 @@ import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
@ -147,10 +148,10 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.util.IOUtilsClient;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.IOUtils;
@ -167,24 +168,22 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenRenewer;
import org.apache.hadoop.tracing.SpanReceiverHost;
import org.apache.hadoop.tracing.TraceUtils;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DataChecksum.Type;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.Time;
import org.apache.htrace.Sampler;
import org.apache.htrace.SamplerBuilder;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.TraceScope;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.net.InetAddresses;
import org.apache.htrace.core.Tracer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/********************************************************
* DFSClient can connect to a Hadoop Filesystem and
@ -200,10 +199,11 @@ import com.google.common.net.InetAddresses;
@InterfaceAudience.Private
public class DFSClient implements java.io.Closeable, RemotePeerFactory,
DataEncryptionKeyFactory {
public static final Log LOG = LogFactory.getLog(DFSClient.class);
public static final Logger LOG = LoggerFactory.getLogger(DFSClient.class);
public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour
private final Configuration conf;
private final Tracer tracer;
private final DfsClientConf dfsClientConf;
final ClientProtocol namenode;
/* The service used for delegation tokens */
@ -231,7 +231,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
new DFSHedgedReadMetrics();
private static ThreadPoolExecutor HEDGED_READ_THREAD_POOL;
private static volatile ThreadPoolExecutor STRIPED_READ_THREAD_POOL;
private final Sampler<?> traceSampler;
private final int smallBufferSize;
public DfsClientConf getConf() {
@ -285,26 +284,24 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
/**
* Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
* If HA is enabled and a positive value is set for
* {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
* configuration, the DFSClient will use {@link LossyRetryInvocationHandler}
* as its RetryInvocationHandler. Otherwise one of nameNodeUri or rpcNamenode
* must be null.
* If HA is enabled and a positive value is set for
* {@link HdfsClientConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY}
* in the configuration, the DFSClient will use
* {@link LossyRetryInvocationHandler} as its RetryInvocationHandler.
* Otherwise one of nameNodeUri or rpcNamenode must be null.
*/
@VisibleForTesting
public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
Configuration conf, FileSystem.Statistics stats)
throws IOException {
SpanReceiverHost.get(conf, DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
traceSampler = new SamplerBuilder(TraceUtils.
wrapHadoopConf(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf)).build();
// Copy only the required DFSClient configuration
this.tracer = FsTracer.get(conf);
this.dfsClientConf = new DfsClientConf(conf);
this.conf = conf;
this.stats = stats;
this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
this.smallBufferSize = DFSUtil.getSmallBufferSize(conf);
this.smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
this.ugi = UserGroupInformation.getCurrentUser();
@ -313,16 +310,17 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
ThreadLocalRandom.current().nextInt() + "_" +
Thread.currentThread().getId();
int numResponseToDrop = conf.getInt(
DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo = null;
HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
ProxyAndInfo<ClientProtocol> proxyInfo = null;
AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);
if (numResponseToDrop > 0) {
// This case is used for testing.
LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
LOG.warn(HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
+ " is set to " + numResponseToDrop
+ ", this hacked client will proactively drop responses");
proxyInfo = NameNodeProxies.createProxyWithLossyRetryHandler(conf,
proxyInfo = NameNodeProxiesClient.createProxyWithLossyRetryHandler(conf,
nameNodeUri, ClientProtocol.class, numResponseToDrop,
nnFallbackToSimpleAuth);
}
@ -338,14 +336,14 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
} else {
Preconditions.checkArgument(nameNodeUri != null,
"null URI");
proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri,
ClientProtocol.class, nnFallbackToSimpleAuth);
proxyInfo = NameNodeProxiesClient.createProxyWithClientProtocol(conf,
nameNodeUri, nnFallbackToSimpleAuth);
this.dtService = proxyInfo.getDelegationTokenService();
this.namenode = proxyInfo.getProxy();
}
String localInterfaces[] =
conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
conf.getTrimmedStrings(HdfsClientConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
LOG.debug("Using local interfaces [" +
@ -544,10 +542,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
} catch (IOException e) {
// Abort if the lease has already expired.
final long elapsed = Time.monotonicNow() - getLastLeaseRenewal();
if (elapsed > HdfsServerConstants.LEASE_HARDLIMIT_PERIOD) {
if (elapsed > HdfsConstants.LEASE_HARDLIMIT_PERIOD) {
LOG.warn("Failed to renew lease for " + clientName + " for "
+ (elapsed/1000) + " seconds (>= hard-limit ="
+ (HdfsServerConstants.LEASE_HARDLIMIT_PERIOD/1000) + " seconds.) "
+ (HdfsConstants.LEASE_HARDLIMIT_PERIOD / 1000) + " seconds.) "
+ "Closing all files being written ...", e);
closeAllFilesBeingWritten(true);
} else {
@ -586,8 +584,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
out.close();
}
} catch(IOException ie) {
LOG.error("Failed to " + (abort? "abort": "close") +
" inode " + inodeId, ie);
LOG.error("Failed to " + (abort ? "abort" : "close") + " file: "
+ out.getSrc() + " with inode: " + inodeId, ie);
}
}
}
@ -624,7 +622,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
public long getBlockSize(String f) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("getBlockSize", f);
TraceScope scope = newPathTraceScope("getBlockSize", f);
try {
return namenode.getPreferredBlockSize(f);
} catch (IOException ie) {
@ -667,7 +665,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
throws IOException {
assert dtService != null;
TraceScope scope = Trace.startSpan("getDelegationToken", traceSampler);
TraceScope scope = tracer.newScope("getDelegationToken");
try {
Token<DelegationTokenIdentifier> token =
namenode.getDelegationToken(renewer);
@ -732,7 +730,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
static {
//Ensure that HDFS Configuration files are loaded before trying to use
// the renewer.
HdfsConfiguration.init();
HdfsConfigurationLoader.init();
}
@Override
@ -786,8 +784,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
"a failover proxy provider configured.");
}
NameNodeProxies.ProxyAndInfo<ClientProtocol> info =
NameNodeProxies.createProxy(conf, uri, ClientProtocol.class);
ProxyAndInfo<ClientProtocol> info =
NameNodeProxiesClient.createProxyWithClientProtocol(conf, uri, null);
assert info.getDelegationTokenService().equals(token.getService()) :
"Returned service '" + info.getDelegationTokenService().toString() +
"' doesn't match expected service '" +
@ -824,7 +822,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
@VisibleForTesting
public LocatedBlocks getLocatedBlocks(String src, long start, long length)
throws IOException {
TraceScope scope = getPathTraceScope("getBlockLocations", src);
TraceScope scope = newPathTraceScope("getBlockLocations", src);
try {
return callGetBlockLocations(namenode, src, start, length);
} finally {
@ -856,7 +854,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
boolean recoverLease(String src) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("recoverLease", src);
TraceScope scope = newPathTraceScope("recoverLease", src);
try {
return namenode.recoverLease(src, clientName);
} catch (RemoteException re) {
@ -883,7 +881,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public BlockLocation[] getBlockLocations(String src, long start,
long length) throws IOException, UnresolvedLinkException {
checkOpen();
TraceScope scope = getPathTraceScope("getBlockLocations", src);
TraceScope scope = newPathTraceScope("getBlockLocations", src);
try {
LocatedBlocks blocks = getLocatedBlocks(src, start, length);
BlockLocation[] locations = DFSUtilClient.locatedBlocks2Locations(blocks);
@ -902,7 +900,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
private KeyVersion decryptEncryptedDataEncryptionKey(FileEncryptionInfo
feInfo) throws IOException {
TraceScope scope = Trace.startSpan("decryptEDEK", traceSampler);
TraceScope scope = tracer.newScope("decryptEDEK");
try {
KeyProvider provider = getKeyProvider();
if (provider == null) {
@ -1058,7 +1056,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
throws IOException, UnresolvedLinkException {
checkOpen();
// Get block info from namenode
TraceScope scope = getPathTraceScope("newDFSInputStream", src);
TraceScope scope = newPathTraceScope("newDFSInputStream", src);
try {
LocatedBlocks locatedBlocks = getLocatedBlocks(src, 0);
if (locatedBlocks != null) {
@ -1314,7 +1312,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public void createSymlink(String target, String link, boolean createParent)
throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("createSymlink", target);
TraceScope scope = newPathTraceScope("createSymlink", target);
try {
final FsPermission dirPerm = applyUMask(null);
namenode.createSymlink(target, link, dirPerm, createParent);
@ -1340,7 +1338,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
public String getLinkTarget(String path) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("getLinkTarget", path);
TraceScope scope = newPathTraceScope("getLinkTarget", path);
try {
return namenode.getLinkTarget(path);
} catch (RemoteException re) {
@ -1437,7 +1435,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public boolean setReplication(String src, short replication)
throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("setReplication", src);
TraceScope scope = newPathTraceScope("setReplication", src);
try {
return namenode.setReplication(src, replication);
} catch(RemoteException re) {
@ -1461,7 +1459,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public void setStoragePolicy(String src, String policyName)
throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("setStoragePolicy", src);
TraceScope scope = newPathTraceScope("setStoragePolicy", src);
try {
namenode.setStoragePolicy(src, policyName);
} catch (RemoteException e) {
@ -1482,7 +1480,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
public BlockStoragePolicy getStoragePolicy(String path) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("getStoragePolicy", path);
TraceScope scope = newPathTraceScope("getStoragePolicy", path);
try {
return namenode.getStoragePolicy(path);
} catch (RemoteException e) {
@ -1500,7 +1498,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
public BlockStoragePolicy[] getStoragePolicies() throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("getStoragePolicies", traceSampler);
TraceScope scope = tracer.newScope("getStoragePolicies");
try {
return namenode.getStoragePolicies();
} finally {
@ -1516,7 +1514,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
@Deprecated
public boolean rename(String src, String dst) throws IOException {
checkOpen();
TraceScope scope = getSrcDstTraceScope("rename", src, dst);
TraceScope scope = newSrcDstTraceScope("rename", src, dst);
try {
return namenode.rename(src, dst);
} catch(RemoteException re) {
@ -1537,7 +1535,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
public void concat(String trg, String [] srcs) throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("concat", traceSampler);
TraceScope scope = tracer.newScope("concat");
try {
namenode.concat(trg, srcs);
} catch(RemoteException re) {
@ -1555,7 +1553,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public void rename(String src, String dst, Options.Rename... options)
throws IOException {
checkOpen();
TraceScope scope = getSrcDstTraceScope("rename2", src, dst);
TraceScope scope = newSrcDstTraceScope("rename2", src, dst);
try {
namenode.rename2(src, dst, options);
} catch(RemoteException re) {
@ -1584,7 +1582,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
throw new HadoopIllegalArgumentException(
"Cannot truncate to a negative file size: " + newLength + ".");
}
TraceScope scope = getPathTraceScope("truncate", src);
TraceScope scope = newPathTraceScope("truncate", src);
try {
return namenode.truncate(src, newLength, clientName);
} catch (RemoteException re) {
@ -1614,7 +1612,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
public boolean delete(String src, boolean recursive) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("delete", src);
TraceScope scope = newPathTraceScope("delete", src);
try {
return namenode.delete(src, recursive);
} catch(RemoteException re) {
@ -1656,7 +1654,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public DirectoryListing listPaths(String src, byte[] startAfter,
boolean needLocation) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("listPaths", src);
TraceScope scope = newPathTraceScope("listPaths", src);
try {
return namenode.getListing(src, startAfter, needLocation);
} catch(RemoteException re) {
@ -1678,7 +1676,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
public HdfsFileStatus getFileInfo(String src) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("getFileInfo", src);
TraceScope scope = newPathTraceScope("getFileInfo", src);
try {
return namenode.getFileInfo(src);
} catch(RemoteException re) {
@ -1696,7 +1694,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
public boolean isFileClosed(String src) throws IOException{
checkOpen();
TraceScope scope = getPathTraceScope("isFileClosed", src);
TraceScope scope = newPathTraceScope("isFileClosed", src);
try {
return namenode.isFileClosed(src);
} catch(RemoteException re) {
@ -1718,7 +1716,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("getFileLinkInfo", src);
TraceScope scope = newPathTraceScope("getFileLinkInfo", src);
try {
return namenode.getFileLinkInfo(src);
} catch(RemoteException re) {
@ -2007,7 +2005,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
return PBHelperClient.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
} finally {
IOUtils.cleanup(null, pair.in, pair.out);
IOUtilsClient.cleanup(null, pair.in, pair.out);
}
}
@ -2021,7 +2019,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public void setPermission(String src, FsPermission permission)
throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("setPermission", src);
TraceScope scope = newPathTraceScope("setPermission", src);
try {
namenode.setPermission(src, permission);
} catch(RemoteException re) {
@ -2046,7 +2044,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public void setOwner(String src, String username, String groupname)
throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("setOwner", src);
TraceScope scope = newPathTraceScope("setOwner", src);
try {
namenode.setOwner(src, username, groupname);
} catch(RemoteException re) {
@ -2062,7 +2060,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
private long[] callGetStats() throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("getStats", traceSampler);
TraceScope scope = tracer.newScope("getStats");
try {
return namenode.getStats();
} finally {
@ -2121,7 +2119,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
String cookie)
throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("listCorruptFileBlocks", path);
TraceScope scope = newPathTraceScope("listCorruptFileBlocks", path);
try {
return namenode.listCorruptFileBlocks(path, cookie);
} finally {
@ -2132,7 +2130,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public DatanodeInfo[] datanodeReport(DatanodeReportType type)
throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("datanodeReport", traceSampler);
TraceScope scope = tracer.newScope("datanodeReport");
try {
return namenode.getDatanodeReport(type);
} finally {
@ -2144,7 +2142,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
DatanodeReportType type) throws IOException {
checkOpen();
TraceScope scope =
Trace.startSpan("datanodeStorageReport", traceSampler);
tracer.newScope("datanodeStorageReport");
try {
return namenode.getDatanodeStorageReport(type);
} finally {
@ -2175,7 +2173,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
public boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException{
TraceScope scope =
Trace.startSpan("setSafeMode", traceSampler);
tracer.newScope("setSafeMode");
try {
return namenode.setSafeMode(action, isChecked);
} finally {
@ -2194,7 +2192,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public String createSnapshot(String snapshotRoot, String snapshotName)
throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("createSnapshot", traceSampler);
TraceScope scope = tracer.newScope("createSnapshot");
try {
return namenode.createSnapshot(snapshotRoot, snapshotName);
} catch(RemoteException re) {
@ -2216,7 +2214,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public void deleteSnapshot(String snapshotRoot, String snapshotName)
throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("deleteSnapshot", traceSampler);
TraceScope scope = tracer.newScope("deleteSnapshot");
try {
namenode.deleteSnapshot(snapshotRoot, snapshotName);
} catch(RemoteException re) {
@ -2237,7 +2235,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public void renameSnapshot(String snapshotDir, String snapshotOldName,
String snapshotNewName) throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("renameSnapshot", traceSampler);
TraceScope scope = tracer.newScope("renameSnapshot");
try {
namenode.renameSnapshot(snapshotDir, snapshotOldName, snapshotNewName);
} catch(RemoteException re) {
@ -2256,8 +2254,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("getSnapshottableDirListing",
traceSampler);
TraceScope scope = tracer.newScope("getSnapshottableDirListing");
try {
return namenode.getSnapshottableDirListing();
} catch(RemoteException re) {
@ -2274,7 +2271,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
public void allowSnapshot(String snapshotRoot) throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("allowSnapshot", traceSampler);
TraceScope scope = tracer.newScope("allowSnapshot");
try {
namenode.allowSnapshot(snapshotRoot);
} catch (RemoteException re) {
@ -2291,7 +2288,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
public void disallowSnapshot(String snapshotRoot) throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("disallowSnapshot", traceSampler);
TraceScope scope = tracer.newScope("disallowSnapshot");
try {
namenode.disallowSnapshot(snapshotRoot);
} catch (RemoteException re) {
@ -2309,7 +2306,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public SnapshotDiffReport getSnapshotDiffReport(String snapshotDir,
String fromSnapshot, String toSnapshot) throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("getSnapshotDiffReport", traceSampler);
TraceScope scope = tracer.newScope("getSnapshotDiffReport");
try {
return namenode.getSnapshotDiffReport(snapshotDir,
fromSnapshot, toSnapshot);
@ -2323,7 +2320,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public long addCacheDirective(
CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("addCacheDirective", traceSampler);
TraceScope scope = tracer.newScope("addCacheDirective");
try {
return namenode.addCacheDirective(info, flags);
} catch (RemoteException re) {
@ -2336,7 +2333,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public void modifyCacheDirective(
CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("modifyCacheDirective", traceSampler);
TraceScope scope = tracer.newScope("modifyCacheDirective");
try {
namenode.modifyCacheDirective(info, flags);
} catch (RemoteException re) {
@ -2349,7 +2346,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public void removeCacheDirective(long id)
throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("removeCacheDirective", traceSampler);
TraceScope scope = tracer.newScope("removeCacheDirective");
try {
namenode.removeCacheDirective(id);
} catch (RemoteException re) {
@ -2362,12 +2359,12 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
CacheDirectiveInfo filter) throws IOException {
checkOpen();
return new CacheDirectiveIterator(namenode, filter, traceSampler);
return new CacheDirectiveIterator(namenode, filter, tracer);
}
public void addCachePool(CachePoolInfo info) throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("addCachePool", traceSampler);
TraceScope scope = tracer.newScope("addCachePool");
try {
namenode.addCachePool(info);
} catch (RemoteException re) {
@ -2379,7 +2376,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public void modifyCachePool(CachePoolInfo info) throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("modifyCachePool", traceSampler);
TraceScope scope = tracer.newScope("modifyCachePool");
try {
namenode.modifyCachePool(info);
} catch (RemoteException re) {
@ -2391,7 +2388,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public void removeCachePool(String poolName) throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("removeCachePool", traceSampler);
TraceScope scope = tracer.newScope("removeCachePool");
try {
namenode.removeCachePool(poolName);
} catch (RemoteException re) {
@ -2403,7 +2400,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
checkOpen();
return new CachePoolIterator(namenode, traceSampler);
return new CachePoolIterator(namenode, tracer);
}
/**
@ -2413,7 +2410,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
boolean saveNamespace(long timeWindow, long txGap) throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("saveNamespace", traceSampler);
TraceScope scope = tracer.newScope("saveNamespace");
try {
return namenode.saveNamespace(timeWindow, txGap);
} catch(RemoteException re) {
@ -2431,7 +2428,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
long rollEdits() throws AccessControlException, IOException {
checkOpen();
TraceScope scope = Trace.startSpan("rollEdits", traceSampler);
TraceScope scope = tracer.newScope("rollEdits");
try {
return namenode.rollEdits();
} catch(RemoteException re) {
@ -2454,7 +2451,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
boolean restoreFailedStorage(String arg)
throws AccessControlException, IOException{
checkOpen();
TraceScope scope = Trace.startSpan("restoreFailedStorage", traceSampler);
TraceScope scope = tracer.newScope("restoreFailedStorage");
try {
return namenode.restoreFailedStorage(arg);
} finally {
@ -2471,7 +2468,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
public void refreshNodes() throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("refreshNodes", traceSampler);
TraceScope scope = tracer.newScope("refreshNodes");
try {
namenode.refreshNodes();
} finally {
@ -2486,7 +2483,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
public void metaSave(String pathname) throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("metaSave", traceSampler);
TraceScope scope = tracer.newScope("metaSave");
try {
namenode.metaSave(pathname);
} finally {
@ -2504,7 +2501,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
public void setBalancerBandwidth(long bandwidth) throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("setBalancerBandwidth", traceSampler);
TraceScope scope = tracer.newScope("setBalancerBandwidth");
try {
namenode.setBalancerBandwidth(bandwidth);
} finally {
@ -2517,7 +2514,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
public void finalizeUpgrade() throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("finalizeUpgrade", traceSampler);
TraceScope scope = tracer.newScope("finalizeUpgrade");
try {
namenode.finalizeUpgrade();
} finally {
@ -2527,7 +2524,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action) throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("rollingUpgrade", traceSampler);
TraceScope scope = tracer.newScope("rollingUpgrade");
try {
return namenode.rollingUpgrade(action);
} finally {
@ -2585,7 +2582,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
if(LOG.isDebugEnabled()) {
LOG.debug(src + ": masked=" + absPermission);
}
TraceScope scope = Trace.startSpan("mkdir", traceSampler);
TraceScope scope = tracer.newScope("mkdir");
try {
return namenode.mkdirs(src, absPermission, createParent);
} catch(RemoteException re) {
@ -2613,7 +2610,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
ContentSummary getContentSummary(String src) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("getContentSummary", src);
TraceScope scope = newPathTraceScope("getContentSummary", src);
try {
return namenode.getContentSummary(src);
} catch(RemoteException re) {
@ -2642,7 +2639,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
storagespaceQuota);
}
TraceScope scope = getPathTraceScope("setQuota", src);
TraceScope scope = newPathTraceScope("setQuota", src);
try {
// Pass null as storage type for traditional namespace/storagespace quota.
namenode.setQuota(src, namespaceQuota, storagespaceQuota, null);
@ -2678,7 +2675,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
throw new IllegalArgumentException("Don't support Quota for storage type : "
+ type.toString());
}
TraceScope scope = getPathTraceScope("setQuotaByStorageType", src);
TraceScope scope = newPathTraceScope("setQuotaByStorageType", src);
try {
namenode.setQuota(src, HdfsConstants.QUOTA_DONT_SET, quota, type);
} catch (RemoteException re) {
@ -2698,7 +2695,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
*/
public void setTimes(String src, long mtime, long atime) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("setTimes", src);
TraceScope scope = newPathTraceScope("setTimes", src);
try {
namenode.setTimes(src, mtime, atime);
} catch(RemoteException re) {
@ -2759,7 +2756,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public void modifyAclEntries(String src, List<AclEntry> aclSpec)
throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("modifyAclEntries", src);
TraceScope scope = newPathTraceScope("modifyAclEntries", src);
try {
namenode.modifyAclEntries(src, aclSpec);
} catch(RemoteException re) {
@ -2778,7 +2775,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public void removeAclEntries(String src, List<AclEntry> aclSpec)
throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("removeAclEntries", traceSampler);
TraceScope scope = tracer.newScope("removeAclEntries");
try {
namenode.removeAclEntries(src, aclSpec);
} catch(RemoteException re) {
@ -2796,7 +2793,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public void removeDefaultAcl(String src) throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("removeDefaultAcl", traceSampler);
TraceScope scope = tracer.newScope("removeDefaultAcl");
try {
namenode.removeDefaultAcl(src);
} catch(RemoteException re) {
@ -2814,7 +2811,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public void removeAcl(String src) throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("removeAcl", traceSampler);
TraceScope scope = tracer.newScope("removeAcl");
try {
namenode.removeAcl(src);
} catch(RemoteException re) {
@ -2832,7 +2829,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("setAcl", traceSampler);
TraceScope scope = tracer.newScope("setAcl");
try {
namenode.setAcl(src, aclSpec);
} catch(RemoteException re) {
@ -2850,7 +2847,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public AclStatus getAclStatus(String src) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("getAclStatus", src);
TraceScope scope = newPathTraceScope("getAclStatus", src);
try {
return namenode.getAclStatus(src);
} catch(RemoteException re) {
@ -2866,7 +2863,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public void createEncryptionZone(String src, String keyName)
throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("createEncryptionZone", src);
TraceScope scope = newPathTraceScope("createEncryptionZone", src);
try {
namenode.createEncryptionZone(src, keyName);
} catch (RemoteException re) {
@ -2881,7 +2878,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public EncryptionZone getEZForPath(String src)
throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("getEZForPath", src);
TraceScope scope = newPathTraceScope("getEZForPath", src);
try {
return namenode.getEZForPath(src);
} catch (RemoteException re) {
@ -2895,14 +2892,14 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public RemoteIterator<EncryptionZone> listEncryptionZones()
throws IOException {
checkOpen();
return new EncryptionZoneIterator(namenode, traceSampler);
return new EncryptionZoneIterator(namenode, tracer);
}
public void setErasureCodingPolicy(String src, ErasureCodingPolicy ecPolicy)
throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("setErasureCodingPolicy", src);
TraceScope scope = newPathTraceScope("setErasureCodingPolicy", src);
try {
namenode.setErasureCodingPolicy(src, ecPolicy);
} catch (RemoteException re) {
@ -2917,7 +2914,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public void setXAttr(String src, String name, byte[] value,
EnumSet<XAttrSetFlag> flag) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("setXAttr", src);
TraceScope scope = newPathTraceScope("setXAttr", src);
try {
namenode.setXAttr(src, XAttrHelper.buildXAttr(name, value), flag);
} catch (RemoteException re) {
@ -2934,7 +2931,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public byte[] getXAttr(String src, String name) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("getXAttr", src);
TraceScope scope = newPathTraceScope("getXAttr", src);
try {
final List<XAttr> xAttrs = XAttrHelper.buildXAttrAsList(name);
final List<XAttr> result = namenode.getXAttrs(src, xAttrs);
@ -2950,7 +2947,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public Map<String, byte[]> getXAttrs(String src) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("getXAttrs", src);
TraceScope scope = newPathTraceScope("getXAttrs", src);
try {
return XAttrHelper.buildXAttrMap(namenode.getXAttrs(src, null));
} catch(RemoteException re) {
@ -2965,7 +2962,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public Map<String, byte[]> getXAttrs(String src, List<String> names)
throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("getXAttrs", src);
TraceScope scope = newPathTraceScope("getXAttrs", src);
try {
return XAttrHelper.buildXAttrMap(namenode.getXAttrs(
src, XAttrHelper.buildXAttrs(names)));
@ -2981,7 +2978,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public List<String> listXAttrs(String src)
throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("listXAttrs", src);
TraceScope scope = newPathTraceScope("listXAttrs", src);
try {
final Map<String, byte[]> xattrs =
XAttrHelper.buildXAttrMap(namenode.listXAttrs(src));
@ -2997,7 +2994,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public void removeXAttr(String src, String name) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("removeXAttr", src);
TraceScope scope = newPathTraceScope("removeXAttr", src);
try {
namenode.removeXAttr(src, XAttrHelper.buildXAttr(name));
} catch(RemoteException re) {
@ -3014,7 +3011,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public void checkAccess(String src, FsAction mode) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("checkAccess", src);
TraceScope scope = newPathTraceScope("checkAccess", src);
try {
namenode.checkAccess(src, mode);
} catch (RemoteException re) {
@ -3028,7 +3025,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
checkOpen();
TraceScope scope = Trace.startSpan("getErasureCodingPolicies", traceSampler);
TraceScope scope = tracer.newScope("getErasureCodingPolicies");
try {
return namenode.getErasureCodingPolicies();
} finally {
@ -3038,13 +3035,14 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
checkOpen();
return new DFSInotifyEventInputStream(traceSampler, namenode);
return new DFSInotifyEventInputStream(namenode, tracer);
}
public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid)
throws IOException {
checkOpen();
return new DFSInotifyEventInputStream(traceSampler, namenode, lastReadTxid);
return new DFSInotifyEventInputStream(namenode, tracer,
lastReadTxid);
}
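For context, a minimal sketch of how an inotify stream obtained from these factory methods is typically consumed; the dfsClient variable and the println are illustrative only, and poll() returns null when no batch is currently available (it may also throw IOException or MissingEventsException):
import org.apache.hadoop.hdfs.inotify.Event;
import org.apache.hadoop.hdfs.inotify.EventBatch;
// Assumes an already-constructed DFSClient named dfsClient.
DFSInotifyEventInputStream stream = dfsClient.getInotifyEventStream();
EventBatch batch;
while ((batch = stream.poll()) != null) {   // non-blocking; null when nothing is ready
  for (Event event : batch.getEvents()) {
    System.out.println(batch.getTxid() + " " + event.getEventType());
  }
}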
@Override // RemotePeerFactory
@ -3066,7 +3064,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
return peer;
} finally {
if (!success) {
IOUtils.cleanup(LOG, peer);
IOUtilsClient.cleanup(LOG, peer);
IOUtils.closeSocket(sock);
}
}
@ -3179,11 +3177,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
/**
* Probe for encryption enabled on this filesystem.
* See {@link DFSUtil#isHDFSEncryptionEnabled(Configuration)}
* See {@link DFSUtilClient#isHDFSEncryptionEnabled(Configuration)}
* @return true if encryption is enabled
*/
public boolean isHDFSEncryptionEnabled() {
return DFSUtil.isHDFSEncryptionEnabled(this.conf);
return DFSUtilClient.isHDFSEncryptionEnabled(this.conf);
}
/**
@ -3195,27 +3193,21 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
return saslClient;
}
TraceScope getPathTraceScope(String description, String path) {
TraceScope scope = Trace.startSpan(description, traceSampler);
Span span = scope.getSpan();
if (span != null) {
if (path != null) {
span.addKVAnnotation("path", path);
}
TraceScope newPathTraceScope(String description, String path) {
TraceScope scope = tracer.newScope(description);
if (path != null) {
scope.addKVAnnotation("path", path);
}
return scope;
}
TraceScope getSrcDstTraceScope(String description, String src, String dst) {
TraceScope scope = Trace.startSpan(description, traceSampler);
Span span = scope.getSpan();
if (span != null) {
if (src != null) {
span.addKVAnnotation("src", src);
}
if (dst != null) {
span.addKVAnnotation("dst", dst);
}
TraceScope newSrcDstTraceScope(String description, String src, String dst) {
TraceScope scope = tracer.newScope(description);
if (src != null) {
scope.addKVAnnotation("src", src);
}
if (dst != null) {
scope.addKVAnnotation("dst", dst);
}
return scope;
}
@ -3231,7 +3223,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
public ErasureCodingPolicy getErasureCodingPolicy(String src) throws IOException {
checkOpen();
TraceScope scope = getPathTraceScope("getErasureCodingPolicy", src);
TraceScope scope = newPathTraceScope("getErasureCodingPolicy", src);
try {
return namenode.getErasureCodingPolicy(src);
} catch (RemoteException re) {
@ -3241,4 +3233,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
scope.close();
}
}
Tracer getTracer() {
return tracer;
}
}

View File

@ -30,12 +30,15 @@ import org.apache.hadoop.classification.InterfaceAudience;
@VisibleForTesting
@InterfaceAudience.Private
public class DFSClientFaultInjector {
public static DFSClientFaultInjector instance = new DFSClientFaultInjector();
private static DFSClientFaultInjector instance = new DFSClientFaultInjector();
public static AtomicLong exceptionNum = new AtomicLong(0);
public static DFSClientFaultInjector get() {
return instance;
}
public static void set(DFSClientFaultInjector instance) {
DFSClientFaultInjector.instance = instance;
}
public boolean corruptPacket() {
return false;

View File

@ -26,9 +26,8 @@ import org.apache.hadoop.hdfs.inotify.EventBatchList;
import org.apache.hadoop.hdfs.inotify.MissingEventsException;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.util.Time;
import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -44,13 +43,8 @@ import java.util.concurrent.TimeUnit;
@InterfaceAudience.Public
@InterfaceStability.Unstable
public class DFSInotifyEventInputStream {
public static Logger LOG = LoggerFactory.getLogger(DFSInotifyEventInputStream
.class);
/**
* The trace sampler to use when making RPCs to the NameNode.
*/
private final Sampler<?> traceSampler;
public static final Logger LOG = LoggerFactory.getLogger(
DFSInotifyEventInputStream.class);
private final ClientProtocol namenode;
private Iterator<EventBatch> it;
@ -65,20 +59,22 @@ public class DFSInotifyEventInputStream {
*/
private Random rng = new Random();
private final Tracer tracer;
private static final int INITIAL_WAIT_MS = 10;
DFSInotifyEventInputStream(Sampler<?> traceSampler, ClientProtocol namenode)
DFSInotifyEventInputStream(ClientProtocol namenode, Tracer tracer)
throws IOException {
// Only consider new transaction IDs.
this(traceSampler, namenode, namenode.getCurrentEditLogTxid());
this(namenode, tracer, namenode.getCurrentEditLogTxid());
}
DFSInotifyEventInputStream(Sampler traceSampler, ClientProtocol namenode,
long lastReadTxid) throws IOException {
this.traceSampler = traceSampler;
DFSInotifyEventInputStream(ClientProtocol namenode,
Tracer tracer, long lastReadTxid) throws IOException {
this.namenode = namenode;
this.it = Iterators.emptyIterator();
this.lastReadTxid = lastReadTxid;
this.tracer = tracer;
}
/**
@ -98,8 +94,7 @@ public class DFSInotifyEventInputStream {
* The next available batch of events will be returned.
*/
public EventBatch poll() throws IOException, MissingEventsException {
TraceScope scope =
Trace.startSpan("inotifyPoll", traceSampler);
TraceScope scope = tracer.newScope("inotifyPoll");
try {
// need to keep retrying until the NN sends us the latest committed txid
if (lastReadTxid == -1) {
@ -180,7 +175,7 @@ public class DFSInotifyEventInputStream {
*/
public EventBatch poll(long time, TimeUnit tu) throws IOException,
InterruptedException, MissingEventsException {
TraceScope scope = Trace.startSpan("inotifyPollWithTimeout", traceSampler);
TraceScope scope = tracer.newScope("inotifyPollWithTimeout");
EventBatch next = null;
try {
long initialTime = Time.monotonicNow();
@ -217,7 +212,7 @@ public class DFSInotifyEventInputStream {
*/
public EventBatch take() throws IOException, InterruptedException,
MissingEventsException {
TraceScope scope = Trace.startSpan("inotifyTake", traceSampler);
TraceScope scope = tracer.newScope("inotifyTake");
EventBatch next = null;
try {
int nextWaitMin = INITIAL_WAIT_MS;

View File

@ -54,6 +54,7 @@ import org.apache.hadoop.fs.CanUnbuffer;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.HasEnhancedByteBufferAccess;
import org.apache.hadoop.fs.ReadOption;
import org.apache.hadoop.fs.StorageType;
@ -77,9 +78,9 @@ import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.IdentityHashStore;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.SpanId;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import com.google.common.annotations.VisibleForTesting;
@ -677,6 +678,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
setClientCacheContext(dfsClient.getClientContext()).
setUserGroupInformation(dfsClient.ugi).
setConfiguration(dfsClient.getConfiguration()).
setTracer(dfsClient.getTracer()).
build();
}
@ -940,7 +942,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
public synchronized int read(final byte buf[], int off, int len) throws IOException {
ReaderStrategy byteArrayReader = new ByteArrayStrategy(buf);
TraceScope scope =
dfsClient.getPathTraceScope("DFSInputStream#byteArrayRead", src);
dfsClient.newPathTraceScope("DFSInputStream#byteArrayRead", src);
try {
return readWithStrategy(byteArrayReader, off, len);
} finally {
@ -952,7 +954,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
public synchronized int read(final ByteBuffer buf) throws IOException {
ReaderStrategy byteBufferReader = new ByteBufferStrategy(buf);
TraceScope scope =
dfsClient.getPathTraceScope("DFSInputStream#byteBufferRead", src);
dfsClient.newPathTraceScope("DFSInputStream#byteBufferRead", src);
try {
return readWithStrategy(byteBufferReader, 0, buf.remaining());
} finally {
@ -1128,14 +1130,14 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
final ByteBuffer bb,
final Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap,
final int hedgedReadId) {
final Span parentSpan = Trace.currentSpan();
final SpanId parentSpanId = Tracer.getCurrentSpanId();
return new Callable<ByteBuffer>() {
@Override
public ByteBuffer call() throws Exception {
byte[] buf = bb.array();
int offset = bb.position();
TraceScope scope =
Trace.startSpan("hedgedRead" + hedgedReadId, parentSpan);
TraceScope scope = dfsClient.getTracer().
newScope("hedgedRead" + hedgedReadId, parentSpanId);
try {
actualGetFromOneDataNode(datanode, block, start, end, buf,
offset, corruptedBlockMap);
@ -1421,8 +1423,8 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
@Override
public int read(long position, byte[] buffer, int offset, int length)
throws IOException {
TraceScope scope =
dfsClient.getPathTraceScope("DFSInputStream#byteArrayPread", src);
TraceScope scope = dfsClient.
newPathTraceScope("DFSInputStream#byteArrayPread", src);
try {
return pread(position, buffer, offset, length);
} finally {

View File

@ -32,6 +32,7 @@ import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSOutputSummer;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Syncable;
import org.apache.hadoop.fs.permission.FsPermission;
@ -63,9 +64,7 @@ import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.DataChecksum.Type;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.Time;
import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.TraceScope;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -231,7 +230,7 @@ public class DFSOutputStream extends FSOutputSummer
short replication, long blockSize, Progressable progress, int buffersize,
DataChecksum checksum, String[] favoredNodes) throws IOException {
TraceScope scope =
dfsClient.getPathTraceScope("newStreamForCreate", src);
dfsClient.newPathTraceScope("newStreamForCreate", src);
try {
HdfsFileStatus stat = null;
@ -360,7 +359,7 @@ public class DFSOutputStream extends FSOutputSummer
LocatedBlock lastBlock, HdfsFileStatus stat, DataChecksum checksum,
String[] favoredNodes) throws IOException {
TraceScope scope =
dfsClient.getPathTraceScope("newStreamForAppend", src);
dfsClient.newPathTraceScope("newStreamForAppend", src);
if(stat.getErasureCodingPolicy() != null) {
throw new IOException("Not support appending to a striping layout file yet.");
}
@ -388,7 +387,7 @@ public class DFSOutputStream extends FSOutputSummer
}
protected TraceScope createWriteTraceScope() {
return dfsClient.getPathTraceScope("DFSOutputStream#write", src);
return dfsClient.newPathTraceScope("DFSOutputStream#write", src);
}
// @see FSOutputSummer#writeChunk()
@ -502,7 +501,7 @@ public class DFSOutputStream extends FSOutputSummer
@Override
public void hflush() throws IOException {
TraceScope scope =
dfsClient.getPathTraceScope("hflush", src);
dfsClient.newPathTraceScope("hflush", src);
try {
flushOrSync(false, EnumSet.noneOf(SyncFlag.class));
} finally {
@ -513,7 +512,7 @@ public class DFSOutputStream extends FSOutputSummer
@Override
public void hsync() throws IOException {
TraceScope scope =
dfsClient.getPathTraceScope("hsync", src);
dfsClient.newPathTraceScope("hsync", src);
try {
flushOrSync(true, EnumSet.noneOf(SyncFlag.class));
} finally {
@ -536,7 +535,7 @@ public class DFSOutputStream extends FSOutputSummer
*/
public void hsync(EnumSet<SyncFlag> syncFlags) throws IOException {
TraceScope scope =
dfsClient.getPathTraceScope("hsync", src);
dfsClient.newPathTraceScope("hsync", src);
try {
flushOrSync(true, syncFlags);
} finally {
@ -777,7 +776,7 @@ public class DFSOutputStream extends FSOutputSummer
@Override
public synchronized void close() throws IOException {
TraceScope scope =
dfsClient.getPathTraceScope("DFSOutputStream#close", src);
dfsClient.newPathTraceScope("DFSOutputStream#close", src);
try {
closeImpl();
} finally {
@ -806,7 +805,7 @@ public class DFSOutputStream extends FSOutputSummer
// get last block before destroying the streamer
ExtendedBlock lastBlock = getStreamer().getBlock();
closeThreads(false);
TraceScope scope = Trace.startSpan("completeFile", Sampler.NEVER);
TraceScope scope = dfsClient.getTracer().newScope("completeFile");
try {
completeFile(lastBlock);
} finally {
@ -914,6 +913,13 @@ public class DFSOutputStream extends FSOutputSummer
return fileId;
}
/**
* Returns the source path of this stream.
*/
String getSrc() {
return src;
}
/**
* Returns the data streamer object.
*/

View File

@ -28,7 +28,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
import org.apache.hadoop.hdfs.util.ByteArrayManager;
import org.apache.htrace.Span;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.SpanId;
import org.apache.htrace.core.TraceScope;
/****************************************************************
* DFSPacket is used by DataStreamer and DFSOutputStream.
@ -39,7 +41,7 @@ import org.apache.htrace.Span;
@InterfaceAudience.Private
public class DFSPacket {
public static final long HEART_BEAT_SEQNO = -1L;
private static long[] EMPTY = new long[0];
private static SpanId[] EMPTY = new SpanId[0];
private final long seqno; // sequence number of buffer in block
private final long offsetInBlock; // offset in block
private boolean syncBlock; // this packet forces the current block to disk
@ -66,9 +68,9 @@ public class DFSPacket {
private int checksumPos;
private final int dataStart;
private int dataPos;
private long[] traceParents = EMPTY;
private SpanId[] traceParents = EMPTY;
private int traceParentsUsed;
private Span span;
private TraceScope scope;
/**
* Create a new packet.
@ -307,7 +309,10 @@ public class DFSPacket {
addTraceParent(span.getSpanId());
}
public void addTraceParent(long id) {
public void addTraceParent(SpanId id) {
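// An invalid SpanId means there is no active trace span, so there is nothing to record.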
if (!id.isValid()) {
return;
}
if (traceParentsUsed == traceParents.length) {
int newLength = (traceParents.length == 0) ? 8 :
traceParents.length * 2;
@ -324,18 +329,18 @@ public class DFSPacket {
*
* Protected by the DFSOutputStream dataQueue lock.
*/
public long[] getTraceParents() {
public SpanId[] getTraceParents() {
// Remove duplicates from the array.
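// Sort the first traceParentsUsed entries, then compact distinct values to the front in place.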
int len = traceParentsUsed;
Arrays.sort(traceParents, 0, len);
int i = 0, j = 0;
long prevVal = 0; // 0 is not a valid span id
SpanId prevVal = SpanId.INVALID;
while (true) {
if (i == len) {
break;
}
long val = traceParents[i];
if (val != prevVal) {
SpanId val = traceParents[i];
if (!val.equals(prevVal)) {
traceParents[j] = val;
j++;
prevVal = val;
@ -349,11 +354,11 @@ public class DFSPacket {
return traceParents;
}
public void setTraceSpan(Span span) {
this.span = span;
public void setTraceScope(TraceScope scope) {
this.scope = scope;
}
public Span getTraceSpan() {
return span;
public TraceScope getTraceScope() {
return scope;
}
}

View File

@ -26,7 +26,6 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
import org.apache.hadoop.hdfs.util.StripedBlockUtil;
import org.apache.hadoop.io.ByteBufferPool;
@ -260,7 +259,7 @@ public class DFSStripedInputStream extends DFSInputStream {
private void closeReader(BlockReaderInfo readerInfo) {
if (readerInfo != null) {
IOUtils.cleanup(DFSClient.LOG, readerInfo.reader);
// IOUtils.cleanup(null, readerInfo.reader);
readerInfo.skip();
}
}
@ -483,7 +482,7 @@ public class DFSStripedInputStream extends DFSInputStream {
@Override
protected LocatedBlock refreshLocatedBlock(LocatedBlock block)
throws IOException {
int idx = BlockIdManager.getBlockIndex(block.getBlock().getLocalBlock());
int idx = StripedBlockUtil.getBlockIndex(block.getBlock().getLocalBlock());
LocatedBlock lb = getBlockGroupAt(block.getStartOffset());
// If indexing information is returned, iterate through the index array
// to find the entry for position idx in the group

View File

@ -37,6 +37,7 @@ import java.util.concurrent.TimeUnit;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@ -53,11 +54,9 @@ import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.Time;
import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import com.google.common.base.Preconditions;
import org.apache.htrace.core.TraceScope;
/**
@ -87,7 +86,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
try {
return queues.get(i).take();
} catch(InterruptedException ie) {
throw DFSUtil.toInterruptedIOException("take interrupted, i=" + i, ie);
throw DFSUtilClient.toInterruptedIOException("take interrupted, i=" + i, ie);
}
}
@ -95,7 +94,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
try {
return queues.get(i).poll(100, TimeUnit.MILLISECONDS);
} catch (InterruptedException e) {
throw DFSUtil.toInterruptedIOException("take interrupted, i=" + i, e);
throw DFSUtilClient.toInterruptedIOException("take interrupted, i=" + i, e);
}
}
@ -187,7 +186,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
CellBuffers(int numParityBlocks) throws InterruptedException{
if (cellSize % bytesPerChecksum != 0) {
throw new HadoopIllegalArgumentException("Invalid values: "
+ DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY + " (="
+ HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY + " (="
+ bytesPerChecksum + ") must divide cell size (=" + cellSize + ").");
}
@ -280,7 +279,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
try {
cellBuffers = new CellBuffers(numParityBlocks);
} catch (InterruptedException ie) {
throw DFSUtil.toInterruptedIOException(
throw DFSUtilClient.toInterruptedIOException(
"Failed to create cell buffers", ie);
}
@ -621,7 +620,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
coordinator.wait(waitInterval);
remaingTime -= Time.monotonicNow() - start;
} catch (InterruptedException e) {
throw DFSUtil.toInterruptedIOException("Interrupted when waiting" +
throw DFSUtilClient.toInterruptedIOException("Interrupted when waiting" +
" for results of updating striped streamers", e);
}
}
@ -893,7 +892,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
}
closeThreads(false);
TraceScope scope = Trace.startSpan("completeFile", Sampler.NEVER);
TraceScope scope = dfsClient.getTracer().newScope("completeFile");
try {
completeFile(currentBlockGroup);
} finally {
@ -942,7 +941,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
try {
Thread.sleep(ms);
} catch(InterruptedException ie) {
throw DFSUtil.toInterruptedIOException(
throw DFSUtilClient.toInterruptedIOException(
"Sleep interrupted during " + op, ie);
}
}

View File

@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
@ -53,6 +54,7 @@ import org.slf4j.LoggerFactory;
import javax.net.SocketFactory;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.io.UnsupportedEncodingException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
@ -590,6 +592,29 @@ public class DFSUtilClient {
}
}
public static int getIoFileBufferSize(Configuration conf) {
return conf.getInt(
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
}
public static int getSmallBufferSize(Configuration conf) {
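// Half of io.file.buffer.size, capped at 512 bytes.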
return Math.min(getIoFileBufferSize(conf) / 2, 512);
}
/**
* Probe for HDFS Encryption being enabled; this uses the value of
* the option {@link HdfsClientConfigKeys#DFS_ENCRYPTION_KEY_PROVIDER_URI},
* returning true if that property contains a non-empty, non-whitespace
* string.
* @param conf configuration to probe
* @return true if encryption is considered enabled.
*/
public static boolean isHDFSEncryptionEnabled(Configuration conf) {
return !conf.getTrimmed(
HdfsClientConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "").isEmpty();
}
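A minimal sketch of the probe above, assuming no key provider is configured by default; the KMS address is a made-up example:
Configuration conf = new Configuration();
boolean before = DFSUtilClient.isHDFSEncryptionEnabled(conf);    // false while no provider is set
conf.set(HdfsClientConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
    "kms://http@kms.example.com:9600/kms");                      // hypothetical provider URI
boolean after = DFSUtilClient.isHDFSEncryptionEnabled(conf);     // true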
public static InetSocketAddress getNNAddress(String address) {
return NetUtils.createSocketAddr(address,
HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
@ -628,4 +653,11 @@ public class DFSUtilClient {
return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
+ namenode.getHostName() + portString);
}
public static InterruptedIOException toInterruptedIOException(String message,
InterruptedException e) {
final InterruptedIOException iioe = new InterruptedIOException(message);
iioe.initCause(e);
return iioe;
}
}

View File

@ -39,9 +39,8 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
@ -73,12 +72,11 @@ import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.Time;
import org.apache.htrace.NullScope;
import org.apache.htrace.Sampler;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceInfo;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.Sampler;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.SpanId;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
@ -86,6 +84,9 @@ import com.google.common.cache.LoadingCache;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/*********************************************************************
*
* The DataStreamer class is responsible for sending data packets to the
@ -109,7 +110,7 @@ import com.google.common.cache.RemovalNotification;
@InterfaceAudience.Private
class DataStreamer extends Daemon {
static final Log LOG = LogFactory.getLog(DataStreamer.class);
static final Logger LOG = LoggerFactory.getLogger(DataStreamer.class);
/**
* Create a socket for a write pipeline
@ -528,7 +529,7 @@ class DataStreamer extends Daemon {
@Override
public void run() {
long lastPacket = Time.monotonicNow();
TraceScope scope = NullScope.INSTANCE;
TraceScope scope = null;
while (!streamerClosed && dfsClient.clientRunning) {
// if the Responder encountered an error, shutdown Responder
if (errorState.hasError() && response != null) {
@ -579,12 +580,11 @@ class DataStreamer extends Daemon {
LOG.warn("Caught exception", e);
}
one = dataQueue.getFirst(); // regular data packet
long parents[] = one.getTraceParents();
SpanId[] parents = one.getTraceParents();
if (parents.length > 0) {
scope = Trace.startSpan("dataStreamer", new TraceInfo(0, parents[0]));
// TODO: use setParents API once it's available from HTrace 3.2
// scope = Trace.startSpan("dataStreamer", Sampler.ALWAYS);
// scope.getSpan().setParents(parents);
scope = dfsClient.getTracer().
newScope("dataStreamer", parents[0]);
scope.getSpan().setParents(parents);
}
}
}
@ -629,12 +629,16 @@ class DataStreamer extends Daemon {
}
// send the packet
Span span = null;
SpanId spanId = SpanId.INVALID;
synchronized (dataQueue) {
// move packet from dataQueue to ackQueue
if (!one.isHeartbeatPacket()) {
span = scope.detach();
one.setTraceSpan(span);
if (scope != null) {
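// Detach the trace scope from this thread and hand it to the packet; the
// ResponseProcessor reattaches and closes it once the ack arrives.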
spanId = scope.getSpanId();
scope.detach();
one.setTraceScope(scope);
}
scope = null;
dataQueue.removeFirst();
ackQueue.addLast(one);
dataQueue.notifyAll();
@ -646,7 +650,8 @@ class DataStreamer extends Daemon {
}
// write out data to remote datanode
TraceScope writeScope = Trace.startSpan("writeTo", span);
TraceScope writeScope = dfsClient.getTracer().
newScope("DataStreamer#writeTo", spanId);
try {
one.writeTo(blockStream);
blockStream.flush();
@ -713,7 +718,10 @@ class DataStreamer extends Daemon {
streamerClosed = true;
}
} finally {
scope.close();
if (scope != null) {
scope.close();
scope = null;
}
}
}
closeInternal();
@ -747,7 +755,8 @@ class DataStreamer extends Daemon {
* @throws IOException
*/
void waitForAckedSeqno(long seqno) throws IOException {
TraceScope scope = Trace.startSpan("waitForAckedSeqno", Sampler.NEVER);
TraceScope scope = dfsClient.getTracer().
newScope("waitForAckedSeqno");
try {
if (LOG.isDebugEnabled()) {
LOG.debug("Waiting for ack for: " + seqno);
@ -797,7 +806,7 @@ class DataStreamer extends Daemon {
while (!streamerClosed && dataQueue.size() + ackQueue.size() >
dfsClient.getConf().getWriteMaxPackets()) {
if (firstWait) {
Span span = Trace.currentSpan();
Span span = Tracer.getCurrentSpan();
if (span != null) {
span.addTimelineAnnotation("dataQueue.wait");
}
@ -818,7 +827,7 @@ class DataStreamer extends Daemon {
}
}
} finally {
Span span = Trace.currentSpan();
Span span = Tracer.getCurrentSpan();
if ((span != null) && (!firstWait)) {
span.addTimelineAnnotation("end.wait");
}
@ -953,7 +962,7 @@ class DataStreamer extends Daemon {
setName("ResponseProcessor for block " + block);
PipelineAck ack = new PipelineAck();
TraceScope scope = NullScope.INSTANCE;
TraceScope scope = null;
while (!responderClosed && dfsClient.clientRunning && !isLastPacketInBlock) {
// process responses from datanodes.
try {
@ -1040,8 +1049,11 @@ class DataStreamer extends Daemon {
block.setNumBytes(one.getLastByteOffsetBlock());
synchronized (dataQueue) {
scope = Trace.continueSpan(one.getTraceSpan());
one.setTraceSpan(null);
scope = one.getTraceScope();
if (scope != null) {
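// Reattach the span detached by the writer thread when this packet was queued;
// it is closed in the finally block below.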
scope.reattach();
one.setTraceScope(null);
}
lastAckedSeqno = seqno;
ackQueue.removeFirst();
dataQueue.notifyAll();
@ -1062,7 +1074,10 @@ class DataStreamer extends Daemon {
responderClosed = true;
}
} finally {
if (scope != null) {
scope.close();
}
scope = null;
}
}
}
@ -1133,11 +1148,12 @@ class DataStreamer extends Daemon {
// a client waiting on close() will be aware that the flush finished.
synchronized (dataQueue) {
DFSPacket endOfBlockPacket = dataQueue.remove(); // remove the end of block packet
Span span = endOfBlockPacket.getTraceSpan();
if (span != null) {
// Close any trace span associated with this Packet
TraceScope scope = Trace.continueSpan(span);
// Close any trace span associated with this Packet
TraceScope scope = endOfBlockPacket.getTraceScope();
if (scope != null) {
scope.reattach();
scope.close();
endOfBlockPacket.setTraceScope(null);
}
assert endOfBlockPacket.isLastPacketInBlock();
assert lastAckedSeqno == endOfBlockPacket.getSeqno() - 1;
@ -1217,22 +1233,46 @@ class DataStreamer extends Daemon {
return;
}
//get a new datanode
int tried = 0;
final DatanodeInfo[] original = nodes;
final LocatedBlock lb = dfsClient.namenode.getAdditionalDatanode(
src, stat.getFileId(), block, nodes, storageIDs,
failed.toArray(new DatanodeInfo[failed.size()]),
1, dfsClient.clientName);
setPipeline(lb);
final StorageType[] originalTypes = storageTypes;
final String[] originalIDs = storageIDs;
IOException caughtException = null;
ArrayList<DatanodeInfo> exclude = new ArrayList<DatanodeInfo>(failed);
while (tried < 3) {
LocatedBlock lb;
//get a new datanode
lb = dfsClient.namenode.getAdditionalDatanode(
src, stat.getFileId(), block, nodes, storageIDs,
exclude.toArray(new DatanodeInfo[exclude.size()]),
1, dfsClient.clientName);
// a new node was allocated by the namenode. Update nodes.
setPipeline(lb);
//find the new datanode
final int d = findNewDatanode(original);
//find the new datanode
final int d = findNewDatanode(original);
//transfer replica. pick a source from the original nodes
final DatanodeInfo src = original[tried % original.length];
final DatanodeInfo[] targets = {nodes[d]};
final StorageType[] targetStorageTypes = {storageTypes[d]};
//transfer replica
final DatanodeInfo src = d == 0? nodes[1]: nodes[d - 1];
final DatanodeInfo[] targets = {nodes[d]};
final StorageType[] targetStorageTypes = {storageTypes[d]};
transfer(src, targets, targetStorageTypes, lb.getBlockToken());
try {
transfer(src, targets, targetStorageTypes, lb.getBlockToken());
} catch (IOException ioe) {
DFSClient.LOG.warn("Error transferring data from " + src + " to " +
nodes[d] + ": " + ioe.getMessage());
caughtException = ioe;
// add the allocated node to the exclude list.
exclude.add(nodes[d]);
setPipeline(original, originalTypes, originalIDs);
tried++;
continue;
}
return; // finished successfully
}
// All retries failed
throw (caughtException != null) ? caughtException :
new IOException("Failed to add a node");
}
private void transfer(final DatanodeInfo src, final DatanodeInfo[] targets,
@ -1245,7 +1285,11 @@ class DataStreamer extends Daemon {
try {
sock = createSocketForPipeline(src, 2, dfsClient);
final long writeTimeout = dfsClient.getDatanodeWriteTimeout(2);
final long readTimeout = dfsClient.getDatanodeReadTimeout(2);
// transfer timeout multiplier based on the transfer size
// One per 200 packets = 12.8MB. Minimum is 2.
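// (With the default 64 KB write packet size, 200 packets is roughly 12.8 MB of data per extra timeout unit.)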
int multi = 2 + (int)(bytesSent/dfsClient.getConf().getWritePacketSize())/200;
final long readTimeout = dfsClient.getDatanodeReadTimeout(multi);
OutputStream unbufOut = NetUtils.getOutputStream(sock, writeTimeout);
InputStream unbufIn = NetUtils.getInputStream(sock, readTimeout);
@ -1254,7 +1298,7 @@ class DataStreamer extends Daemon {
unbufOut = saslStreams.out;
unbufIn = saslStreams.in;
out = new DataOutputStream(new BufferedOutputStream(unbufOut,
DFSUtil.getSmallBufferSize(dfsClient.getConfiguration())));
DFSUtilClient.getSmallBufferSize(dfsClient.getConfiguration())));
in = new DataInputStream(unbufIn);
//send the TRANSFER_BLOCK request
@ -1528,7 +1572,7 @@ class DataStreamer extends Daemon {
unbufOut = saslStreams.out;
unbufIn = saslStreams.in;
out = new DataOutputStream(new BufferedOutputStream(unbufOut,
DFSUtil.getSmallBufferSize(dfsClient.getConfiguration())));
DFSUtilClient.getSmallBufferSize(dfsClient.getConfiguration())));
blockReplyStream = new DataInputStream(unbufIn);
//
@ -1738,7 +1782,7 @@ class DataStreamer extends Daemon {
void queuePacket(DFSPacket packet) {
synchronized (dataQueue) {
if (packet == null) return;
packet.addTraceParent(Trace.currentSpan());
packet.addTraceParent(Tracer.getCurrentSpanId());
dataQueue.addLast(packet);
lastQueuedSeqno = packet.getSeqno();
if (LOG.isDebugEnabled()) {

View File

@ -62,7 +62,6 @@ import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
@ -110,13 +109,13 @@ public class DistributedFileSystem extends FileSystem {
private Path workingDir;
private URI uri;
private String homeDirPrefix =
DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT;
HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT;
DFSClient dfs;
private boolean verifyChecksum = true;
static{
HdfsConfiguration.init();
HdfsConfigurationLoader.init();
}
public DistributedFileSystem() {
@ -146,9 +145,9 @@ public class DistributedFileSystem extends FileSystem {
throw new IOException("Incomplete HDFS URI, no host: "+ uri);
}
homeDirPrefix = conf.get(
DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY,
DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT);
HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY,
HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT);
this.dfs = new DFSClient(uri, conf, statistics);
this.uri = URI.create(uri.getScheme()+"://"+uri.getAuthority());
this.workingDir = getHomeDirectory();
@ -172,7 +171,7 @@ public class DistributedFileSystem extends FileSystem {
@Override
public void setWorkingDirectory(Path dir) {
String result = fixRelativePart(dir).toUri().getPath();
if (!DFSUtil.isValidName(result)) {
if (!DFSUtilClient.isValidName(result)) {
throw new IllegalArgumentException("Invalid DFS directory name " +
result);
}
@ -196,7 +195,7 @@ public class DistributedFileSystem extends FileSystem {
private String getPathName(Path file) {
checkPath(file);
String result = file.toUri().getPath();
if (!DFSUtil.isValidName(result)) {
if (!DFSUtilClient.isValidName(result)) {
throw new IllegalArgumentException("Pathname " + result + " from " +
file+" is not a valid DFS filename.");
}
@ -219,8 +218,7 @@ public class DistributedFileSystem extends FileSystem {
final Path absF = fixRelativePart(p);
return new FileSystemLinkResolver<BlockLocation[]>() {
@Override
public BlockLocation[] doCall(final Path p)
throws IOException, UnresolvedLinkException {
public BlockLocation[] doCall(final Path p) throws IOException {
return dfs.getBlockLocations(getPathName(p), start, len);
}
@Override
@ -449,7 +447,6 @@ public class DistributedFileSystem extends FileSystem {
* Same as create(), except fails if parent directory doesn't already exist.
*/
@Override
@SuppressWarnings("deprecation")
public FSDataOutputStream createNonRecursive(final Path f,
final FsPermission permission, final EnumSet<CreateFlag> flag,
final int bufferSize, final short replication, final long blockSize,

View File

@ -46,6 +46,9 @@ public final class ExternalBlockReader implements BlockReader {
@Override
public int read(byte[] buf, int off, int len) throws IOException {
int nread = accessor.read(pos, buf, off, len);
if (nread < 0) {
return nread;
}
pos += nread;
return nread;
}
@ -53,6 +56,9 @@ public final class ExternalBlockReader implements BlockReader {
@Override
public int read(ByteBuffer buf) throws IOException {
int nread = accessor.read(pos, buf);
if (nread < 0) {
return nread;
}
pos += nread;
return nread;
}
@ -63,7 +69,8 @@ public final class ExternalBlockReader implements BlockReader {
if (n <= 0) {
return 0;
}
// You can't skip past the end of the replica.
// You can't skip past the last offset that we want to read with this
// block reader.
long oldPos = pos;
pos += n;
if (pos > visibleLength) {
@ -74,12 +81,11 @@ public final class ExternalBlockReader implements BlockReader {
@Override
public int available() throws IOException {
// We return the amount of bytes that we haven't read yet from the
// replica, based on our current position. Some of the other block
// readers return a shorter length than that. The only advantage to
// returning a shorter length is that the DFSInputStream will
// trash your block reader and create a new one if someone tries to
// seek() beyond the available() region.
// We return the amount of bytes between the current offset and the visible
// length. Some of the other block readers return a shorter length than
// that. The only advantage to returning a shorter length is that the
// DFSInputStream will trash your block reader and create a new one if
// someone tries to seek() beyond the available() region.
long diff = visibleLength - pos;
if (diff > Integer.MAX_VALUE) {
return Integer.MAX_VALUE;

View File

@ -0,0 +1,44 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
/**
* Load default HDFS configuration resources.
*/
@InterfaceAudience.Private
class HdfsConfigurationLoader {
static {
// adds the default resources
Configuration.addDefaultResource("hdfs-default.xml");
Configuration.addDefaultResource("hdfs-site.xml");
}
/**
* This method is here so that when invoked, default resources are added if
* they haven't already been loaded. Upon loading this class, the
* static initializer block above will be executed to add the default
* resources. It is safe for this method to be called multiple times
* as the static initializer block will only get invoked once.
*/
public static void init() {
}
}
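
The new class above leans on JVM class initialization to register hdfs-default.xml and hdfs-site.xml exactly once, with init() acting purely as a trigger. Below is a minimal sketch of the same load-once idiom, assuming hadoop-common is on the classpath; DefaultsLoader and DefaultsLoaderExample are illustrative names, not Hadoop classes.

import org.apache.hadoop.conf.Configuration;

/** Load-once idiom: the static block runs exactly once, when the JVM
 *  initializes this class; init() merely forces that initialization. */
class DefaultsLoader {
  static {
    Configuration.addDefaultResource("hdfs-default.xml");
    Configuration.addDefaultResource("hdfs-site.xml");
  }

  static void init() {
    // intentionally empty
  }
}

public class DefaultsLoaderExample {
  public static void main(String[] args) {
    DefaultsLoader.init();   // safe to call any number of times
    Configuration conf = new Configuration();
    // Resolved from hdfs-default.xml when that resource is on the classpath.
    System.out.println(conf.get("dfs.replication"));
  }
}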

View File

@ -0,0 +1,366 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.classification.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
import org.apache.hadoop.hdfs.server.namenode.ha.WrappedFailoverProxyProvider;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.retry.DefaultFailoverProxyProvider;
import org.apache.hadoop.io.retry.FailoverProxyProvider;
import org.apache.hadoop.io.retry.LossyRetryInvocationHandler;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.io.retry.RetryUtils;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
/**
* Create proxy objects with {@link ClientProtocol} to communicate with a remote
* NN. Generally use {@link NameNodeProxiesClient#createProxyWithClientProtocol(
* Configuration, URI, AtomicBoolean)}, which will create either an HA- or
* non-HA-enabled client proxy as appropriate.
*
* For creating proxy objects with other protocols, please see
* {@link NameNodeProxies#createProxy(Configuration, URI, Class)}.
*/
@InterfaceAudience.Private
public class NameNodeProxiesClient {
private static final Logger LOG = LoggerFactory.getLogger(
NameNodeProxiesClient.class);
/**
* Wrapper for a client proxy as well as its associated service ID.
* This is simply used as a tuple-like return type for a created NN proxy.
*/
public static class ProxyAndInfo<PROXYTYPE> {
private final PROXYTYPE proxy;
private final Text dtService;
private final InetSocketAddress address;
public ProxyAndInfo(PROXYTYPE proxy, Text dtService,
InetSocketAddress address) {
this.proxy = proxy;
this.dtService = dtService;
this.address = address;
}
public PROXYTYPE getProxy() {
return proxy;
}
public Text getDelegationTokenService() {
return dtService;
}
public InetSocketAddress getAddress() {
return address;
}
}
/**
* Creates the namenode proxy with the ClientProtocol. This will handle
* creation of either HA- or non-HA-enabled proxy objects, depending upon
* if the provided URI is a configured logical URI.
*
* @param conf the configuration containing the required IPC
* properties, client failover configurations, etc.
* @param nameNodeUri the URI pointing either to a specific NameNode
* or to a logical nameservice.
* @param fallbackToSimpleAuth set to true or false during calls to indicate
* if a secure client falls back to simple auth
* @return an object containing both the proxy and the associated
* delegation token service it corresponds to
* @throws IOException if there is an error creating the proxy
* @see {@link NameNodeProxies#createProxy(Configuration, URI, Class)}.
*/
public static ProxyAndInfo<ClientProtocol> createProxyWithClientProtocol(
Configuration conf, URI nameNodeUri, AtomicBoolean fallbackToSimpleAuth)
throws IOException {
AbstractNNFailoverProxyProvider<ClientProtocol> failoverProxyProvider =
createFailoverProxyProvider(conf, nameNodeUri, ClientProtocol.class,
true, fallbackToSimpleAuth);
if (failoverProxyProvider == null) {
InetSocketAddress nnAddr = DFSUtilClient.getNNAddress(nameNodeUri);
Text dtService = SecurityUtil.buildTokenService(nnAddr);
ClientProtocol proxy = createNonHAProxyWithClientProtocol(nnAddr, conf,
UserGroupInformation.getCurrentUser(), true, fallbackToSimpleAuth);
return new ProxyAndInfo<>(proxy, dtService, nnAddr);
} else {
return createHAProxy(conf, nameNodeUri, ClientProtocol.class,
failoverProxyProvider);
}
}
/**
* Generate a dummy namenode proxy instance that utilizes our hacked
* {@link LossyRetryInvocationHandler}. Proxy instance generated using this
* method will proactively drop RPC responses. Currently this method only
* supports an HA setup. null will be returned if the given configuration is
* not for HA.
*
* @param config the configuration containing the required IPC
* properties, client failover configurations, etc.
* @param nameNodeUri the URI pointing either to a specific NameNode
* or to a logical nameservice.
* @param xface the IPC interface which should be created
* @param numResponseToDrop The number of responses to drop for each RPC call
* @param fallbackToSimpleAuth set to true or false during calls to indicate
* if a secure client falls back to simple auth
* @return an object containing both the proxy and the associated
* delegation token service it corresponds to. Will return null if the
* given configuration does not support HA.
* @throws IOException if there is an error creating the proxy
*/
public static <T> ProxyAndInfo<T> createProxyWithLossyRetryHandler(
Configuration config, URI nameNodeUri, Class<T> xface,
int numResponseToDrop, AtomicBoolean fallbackToSimpleAuth)
throws IOException {
Preconditions.checkArgument(numResponseToDrop > 0);
AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
createFailoverProxyProvider(config, nameNodeUri, xface, true,
fallbackToSimpleAuth);
if (failoverProxyProvider != null) { // HA case
int delay = config.getInt(
HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_KEY,
HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_DEFAULT);
int maxCap = config.getInt(
HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_KEY,
HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_DEFAULT);
int maxFailoverAttempts = config.getInt(
HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_KEY,
HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_DEFAULT);
int maxRetryAttempts = config.getInt(
HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY,
HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_DEFAULT);
InvocationHandler dummyHandler = new LossyRetryInvocationHandler<>(
numResponseToDrop, failoverProxyProvider,
RetryPolicies.failoverOnNetworkException(
RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailoverAttempts,
Math.max(numResponseToDrop + 1, maxRetryAttempts), delay,
maxCap));
@SuppressWarnings("unchecked")
T proxy = (T) Proxy.newProxyInstance(
failoverProxyProvider.getInterface().getClassLoader(),
new Class[]{xface}, dummyHandler);
Text dtService;
if (failoverProxyProvider.useLogicalURI()) {
dtService = HAUtilClient.buildTokenServiceForLogicalUri(nameNodeUri,
HdfsConstants.HDFS_URI_SCHEME);
} else {
dtService = SecurityUtil.buildTokenService(
DFSUtilClient.getNNAddress(nameNodeUri));
}
return new ProxyAndInfo<>(proxy, dtService,
DFSUtilClient.getNNAddress(nameNodeUri));
} else {
LOG.warn("Currently creating proxy using " +
"LossyRetryInvocationHandler requires NN HA setup");
return null;
}
}
/** Creates the Failover proxy provider instance. */
@VisibleForTesting
public static <T> AbstractNNFailoverProxyProvider<T> createFailoverProxyProvider(
Configuration conf, URI nameNodeUri, Class<T> xface, boolean checkPort,
AtomicBoolean fallbackToSimpleAuth) throws IOException {
Class<FailoverProxyProvider<T>> failoverProxyProviderClass = null;
AbstractNNFailoverProxyProvider<T> providerNN;
try {
// Obtain the class of the proxy provider
failoverProxyProviderClass = getFailoverProxyProviderClass(conf,
nameNodeUri);
if (failoverProxyProviderClass == null) {
return null;
}
// Create a proxy provider instance.
Constructor<FailoverProxyProvider<T>> ctor = failoverProxyProviderClass
.getConstructor(Configuration.class, URI.class, Class.class);
FailoverProxyProvider<T> provider = ctor.newInstance(conf, nameNodeUri,
xface);
// If the proxy provider is of an old implementation, wrap it.
if (!(provider instanceof AbstractNNFailoverProxyProvider)) {
providerNN = new WrappedFailoverProxyProvider<>(provider);
} else {
providerNN = (AbstractNNFailoverProxyProvider<T>)provider;
}
} catch (Exception e) {
final String message = "Couldn't create proxy provider " +
failoverProxyProviderClass;
LOG.debug(message, e);
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
} else {
throw new IOException(message, e);
}
}
// Check the port in the URI, if it is logical.
if (checkPort && providerNN.useLogicalURI()) {
int port = nameNodeUri.getPort();
if (port > 0 &&
port != HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT) {
// Throwing here without any cleanup is fine since we have not
// actually created the underlying proxies yet.
throw new IOException("Port " + port + " specified in URI "
+ nameNodeUri + " but host '" + nameNodeUri.getHost()
+ "' is a logical (HA) namenode"
+ " and does not use port information.");
}
}
providerNN.setFallbackToSimpleAuth(fallbackToSimpleAuth);
return providerNN;
}
/** Gets the configured Failover proxy provider's class */
@VisibleForTesting
public static <T> Class<FailoverProxyProvider<T>> getFailoverProxyProviderClass(
Configuration conf, URI nameNodeUri) throws IOException {
if (nameNodeUri == null) {
return null;
}
String host = nameNodeUri.getHost();
String configKey = HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX
+ "." + host;
try {
@SuppressWarnings("unchecked")
Class<FailoverProxyProvider<T>> ret = (Class<FailoverProxyProvider<T>>)
conf.getClass(configKey, null, FailoverProxyProvider.class);
return ret;
} catch (RuntimeException e) {
if (e.getCause() instanceof ClassNotFoundException) {
throw new IOException("Could not load failover proxy provider class "
+ conf.get(configKey) + " which is configured for authority "
+ nameNodeUri, e);
} else {
throw e;
}
}
}
/**
* Creates an explicitly HA-enabled proxy object.
*
* @param conf the configuration object
* @param nameNodeUri the URI pointing either to a specific NameNode or to a
* logical nameservice.
* @param xface the IPC interface which should be created
* @param failoverProxyProvider Failover proxy provider
* @return an object containing both the proxy and the associated
* delegation token service it corresponds to
* @throws IOException
*/
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createHAProxy(
Configuration conf, URI nameNodeUri, Class<T> xface,
AbstractNNFailoverProxyProvider<T> failoverProxyProvider)
throws IOException {
Preconditions.checkNotNull(failoverProxyProvider);
// HA case
DfsClientConf config = new DfsClientConf(conf);
T proxy = (T) RetryProxy.create(xface, failoverProxyProvider,
RetryPolicies.failoverOnNetworkException(
RetryPolicies.TRY_ONCE_THEN_FAIL, config.getMaxFailoverAttempts(),
config.getMaxRetryAttempts(), config.getFailoverSleepBaseMillis(),
config.getFailoverSleepMaxMillis()));
Text dtService;
if (failoverProxyProvider.useLogicalURI()) {
dtService = HAUtilClient.buildTokenServiceForLogicalUri(nameNodeUri,
HdfsConstants.HDFS_URI_SCHEME);
} else {
dtService = SecurityUtil.buildTokenService(
DFSUtilClient.getNNAddress(nameNodeUri));
}
return new ProxyAndInfo<>(proxy, dtService,
DFSUtilClient.getNNAddress(nameNodeUri));
}
public static ClientProtocol createNonHAProxyWithClientProtocol(
InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
boolean withRetries, AtomicBoolean fallbackToSimpleAuth)
throws IOException {
RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
ProtobufRpcEngine.class);
final RetryPolicy defaultPolicy =
RetryUtils.getDefaultRetryPolicy(
conf,
HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY,
HdfsClientConfigKeys.Retry.POLICY_ENABLED_DEFAULT,
HdfsClientConfigKeys.Retry.POLICY_SPEC_KEY,
HdfsClientConfigKeys.Retry.POLICY_SPEC_DEFAULT,
SafeModeException.class.getName());
final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(
ClientNamenodeProtocolPB.class, version, address, ugi, conf,
NetUtils.getDefaultSocketFactory(conf),
org.apache.hadoop.ipc.Client.getTimeout(conf), defaultPolicy,
fallbackToSimpleAuth).getProxy();
if (withRetries) { // create the proxy with retries
Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<>();
ClientProtocol translatorProxy =
new ClientNamenodeProtocolTranslatorPB(proxy);
return (ClientProtocol) RetryProxy.create(
ClientProtocol.class,
new DefaultFailoverProxyProvider<>(ClientProtocol.class,
translatorProxy),
methodNameToPolicyMap,
defaultPolicy);
} else {
return new ClientNamenodeProtocolTranslatorPB(proxy);
}
}
}
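
A hedged usage sketch of the entry point defined above. It assumes hadoop-hdfs-client is on the classpath; the hdfs://mycluster URI, the ProxyExample class name, and the getFileInfo call are illustrative only, and a logical (HA) URI additionally needs a failover proxy provider configured under dfs.client.failover.proxy.provider.<nameservice>.

import java.net.URI;
import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.NameNodeProxiesClient;
import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class ProxyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Illustrative logical URI; an HA deployment also needs
    // dfs.client.failover.proxy.provider.mycluster set in conf.
    URI nnUri = URI.create("hdfs://mycluster");
    AtomicBoolean fellBackToSimpleAuth = new AtomicBoolean(false);

    ProxyAndInfo<ClientProtocol> proxyAndInfo =
        NameNodeProxiesClient.createProxyWithClientProtocol(
            conf, nnUri, fellBackToSimpleAuth);
    ClientProtocol namenode = proxyAndInfo.getProxy();

    // Any ClientProtocol call now goes through the (possibly HA) proxy.
    System.out.println(namenode.getFileInfo("/"));
  }
}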

View File

@ -47,9 +47,8 @@ import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -106,6 +105,8 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
int dataLeft = 0;
private final PeerCache peerCache;
private final Tracer tracer;
/* FSInputChecker interface */
@ -210,9 +211,8 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
protected synchronized int readChunk(long pos, byte[] buf, int offset,
int len, byte[] checksumBuf)
throws IOException {
TraceScope scope =
Trace.startSpan("RemoteBlockReader#readChunk(" + blockId + ")",
Sampler.NEVER);
TraceScope scope = tracer.
newScope("RemoteBlockReader#readChunk(" + blockId + ")");
try {
return readChunkImpl(pos, buf, offset, len, checksumBuf);
} finally {
@ -346,7 +346,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
private RemoteBlockReader(String file, String bpid, long blockId,
DataInputStream in, DataChecksum checksum, boolean verifyChecksum,
long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
DatanodeID datanodeID, PeerCache peerCache) {
DatanodeID datanodeID, PeerCache peerCache, Tracer tracer) {
// Path is used only for printing block and file information in debug
super(new Path("/" + Block.BLOCK_FILE_PREFIX + blockId +
":" + bpid + ":of:"+ file)/*too non path-like?*/,
@ -378,6 +378,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
bytesPerChecksum = this.checksum.getBytesPerChecksum();
checksumSize = this.checksum.getChecksumSize();
this.peerCache = peerCache;
this.tracer = tracer;
}
/**
@ -402,7 +403,8 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
String clientName, Peer peer,
DatanodeID datanodeID,
PeerCache peerCache,
CachingStrategy cachingStrategy)
CachingStrategy cachingStrategy,
Tracer tracer)
throws IOException {
// in and out will be closed when sock is closed (by the caller)
final DataOutputStream out =
@ -438,7 +440,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
return new RemoteBlockReader(file, block.getBlockPoolId(), block.getBlockId(),
in, checksum, verifyChecksum, startOffset, firstChunkOffset, len,
peer, datanodeID, peerCache);
peer, datanodeID, peerCache, tracer);
}
@Override

View File

@ -48,12 +48,11 @@ import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.TraceScope;
import com.google.common.annotations.VisibleForTesting;
import org.apache.htrace.core.Tracer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -126,6 +125,8 @@ public class RemoteBlockReader2 implements BlockReader {
private boolean sentStatusCode = false;
private final Tracer tracer;
@VisibleForTesting
public Peer getPeer() {
return peer;
@ -144,8 +145,8 @@ public class RemoteBlockReader2 implements BlockReader {
}
if (curDataSlice == null || curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) {
TraceScope scope = Trace.startSpan(
"RemoteBlockReader2#readNextPacket(" + blockId + ")", Sampler.NEVER);
TraceScope scope = tracer.newScope(
"RemoteBlockReader2#readNextPacket(" + blockId + ")");
try {
readNextPacket();
} finally {
@ -172,8 +173,8 @@ public class RemoteBlockReader2 implements BlockReader {
@Override
public synchronized int read(ByteBuffer buf) throws IOException {
if (curDataSlice == null || curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) {
TraceScope scope = Trace.startSpan(
"RemoteBlockReader2#readNextPacket(" + blockId + ")", Sampler.NEVER);
TraceScope scope = tracer.newScope(
"RemoteBlockReader2#readNextPacket(" + blockId + ")");
try {
readNextPacket();
} finally {
@ -292,7 +293,7 @@ public class RemoteBlockReader2 implements BlockReader {
protected RemoteBlockReader2(String file, String bpid, long blockId,
DataChecksum checksum, boolean verifyChecksum,
long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
DatanodeID datanodeID, PeerCache peerCache) {
DatanodeID datanodeID, PeerCache peerCache, Tracer tracer) {
this.isLocal = DFSUtilClient.isLocalAddress(NetUtils.
createSocketAddr(datanodeID.getXferAddr()));
// Path is used only for printing block and file information in debug
@ -313,6 +314,7 @@ public class RemoteBlockReader2 implements BlockReader {
this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
bytesPerChecksum = this.checksum.getBytesPerChecksum();
checksumSize = this.checksum.getChecksumSize();
this.tracer = tracer;
}
@ -407,7 +409,8 @@ public class RemoteBlockReader2 implements BlockReader {
String clientName,
Peer peer, DatanodeID datanodeID,
PeerCache peerCache,
CachingStrategy cachingStrategy) throws IOException {
CachingStrategy cachingStrategy,
Tracer tracer) throws IOException {
// in and out will be closed when sock is closed (by the caller)
final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
peer.getOutputStream()));
@ -440,7 +443,7 @@ public class RemoteBlockReader2 implements BlockReader {
return new RemoteBlockReader2(file, block.getBlockPoolId(), block.getBlockId(),
checksum, verifyChecksum, startOffset, firstChunkOffset, len, peer,
datanodeID, peerCache);
datanodeID, peerCache, tracer);
}
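
The RemoteBlockReader and RemoteBlockReader2 hunks above follow the same migration: the static htrace 3 calls (Trace.startSpan with Sampler.NEVER) give way to an htrace 4 Tracer that is passed into the constructor and used to open scopes. A minimal sketch of that pattern, assuming a Tracer instance is supplied by the caller; the class and method names here are illustrative.

import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

/** Sketch of the htrace 3 -> htrace 4 pattern used by both block readers. */
public class TracingPatternSketch {
  private final Tracer tracer;   // injected by the caller, as in the diff

  public TracingPatternSketch(Tracer tracer) {
    this.tracer = tracer;
  }

  void readNextPacketTraced(long blockId) {
    // Before: Trace.startSpan("...", Sampler.NEVER) with static state.
    // After:  an instance-level Tracer owns sampling and creates the scope.
    TraceScope scope = tracer.newScope("readNextPacket(" + blockId + ")");
    try {
      // ... read the next packet here ...
    } finally {
      scope.close();   // always close the scope so the span is reported
    }
  }
}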
static void checkSuccess(

View File

@ -40,8 +40,9 @@ public abstract class ReplicaAccessor {
*
* @return The number of bytes read. If the read extends past the end
* of the replica, a short read count will be returned. We
* will never return a negative number. We will never
* return a short read count unless EOF is reached.
* should return -1 if EOF is reached and no bytes
* can be returned. We will never return a short read
* count unless EOF is reached.
*/
public abstract int read(long pos, byte[] buf, int off, int len)
throws IOException;
@ -58,8 +59,9 @@ public abstract class ReplicaAccessor {
*
* @return The number of bytes read. If the read extends past the end
* of the replica, a short read count will be returned. We
* will never return a negative number. We will never return
* a short read count unless EOF is reached.
* should return -1 if EOF is reached and no bytes can be
* returned. We will never return a short read count unless
* EOF is reached.
*/
public abstract int read(long pos, ByteBuffer buf) throws IOException;
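
The revised contract above allows -1 only at EOF and a short count only when EOF is reached. A small self-contained illustration of those semantics against an in-memory byte array; it is not a real ReplicaAccessor subclass, just the read rule.

import java.nio.ByteBuffer;

/** Illustrative in-memory reader honoring the documented rule:
 *  -1 only at EOF, short counts only when EOF is reached. */
public class InMemoryReplicaRead {
  static int read(byte[] replica, long pos, ByteBuffer buf) {
    if (pos >= replica.length) {
      return -1;                          // EOF and no bytes to return
    }
    int toCopy = (int) Math.min(buf.remaining(), replica.length - pos);
    buf.put(replica, (int) pos, toCopy);  // may be short, but only when EOF is hit
    return toCopy;
  }

  public static void main(String[] args) {
    byte[] replica = "hello".getBytes();
    ByteBuffer buf = ByteBuffer.allocate(8);
    System.out.println(read(replica, 0, buf));  // 5: short read at EOF
    System.out.println(read(replica, 5, buf));  // -1: EOF, nothing left
  }
}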

View File

@ -36,6 +36,9 @@ public abstract class ReplicaAccessorBuilder {
public abstract ReplicaAccessorBuilder
setBlock(long blockId, String blockPoolId);
/** Set the genstamp of the block which is being opened. */
public abstract ReplicaAccessorBuilder setGenerationStamp(long genstamp);
/**
* Set whether checksums must be verified. Checksums should be skipped if
* the user has disabled checksum verification in the configuration. Users

View File

@ -143,6 +143,17 @@ public interface HdfsClientConfigKeys {
String REPLICA_ACCESSOR_BUILDER_CLASSES_KEY =
PREFIX + "replica.accessor.builder.classes";
// The number of NN response dropped by client proactively in each RPC call.
// For testing NN retry cache, we can set this property with positive value.
String DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY =
"dfs.client.test.drop.namenode.response.number";
int DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0;
String DFS_CLIENT_LOCAL_INTERFACES = "dfs.client.local.interfaces";
// HDFS client HTrace configuration.
String DFS_CLIENT_HTRACE_PREFIX = "dfs.client.htrace.";
String DFS_USER_HOME_DIR_PREFIX_KEY = "dfs.user.home.dir.prefix";
String DFS_USER_HOME_DIR_PREFIX_DEFAULT = "/user";
/** dfs.client.retry configuration properties */
interface Retry {
String PREFIX = HdfsClientConfigKeys.PREFIX + "retry.";
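
The keys added above include the test-only knob for dropping NameNode responses, which pairs with createProxyWithLossyRetryHandler to exercise the NameNode retry cache. A short sketch of setting it programmatically; the RetryCacheTestConfig class name is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class RetryCacheTestConfig {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Ask the client to proactively drop the first NN response of each RPC,
    // which exercises the NameNode retry cache (test-only knob).
    conf.setInt(
        HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 1);
    System.out.println(conf.getInt(
        HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
        HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT));
  }
}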

View File

@ -27,18 +27,18 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import com.google.common.annotations.VisibleForTesting;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* <p>
@ -73,7 +73,7 @@ import com.google.common.annotations.VisibleForTesting;
*/
@InterfaceAudience.Private
public class LeaseRenewer {
static final Log LOG = LogFactory.getLog(LeaseRenewer.class);
static final Logger LOG = LoggerFactory.getLogger(LeaseRenewer.class);
static final long LEASE_RENEWER_GRACE_DEFAULT = 60*1000L;
static final long LEASE_RENEWER_SLEEP_DEFAULT = 1000L;
@ -165,7 +165,7 @@ public class LeaseRenewer {
/** The time in milliseconds that the map became empty. */
private long emptyTime = Long.MAX_VALUE;
/** A fixed lease renewal time period in milliseconds */
private long renewal = HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD/2;
private long renewal = HdfsConstants.LEASE_SOFTLIMIT_PERIOD / 2;
/** A daemon for renewing lease */
private Daemon daemon = null;
@ -378,7 +378,7 @@ public class LeaseRenewer {
//update renewal time
if (renewal == dfsc.getConf().getHdfsTimeout()/2) {
long min = HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD;
long min = HdfsConstants.LEASE_SOFTLIMIT_PERIOD;
for(DFSClient c : dfsclients) {
final int timeout = c.getConf().getHdfsTimeout();
if (timeout > 0 && timeout < min) {

View File

@ -25,11 +25,10 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.BatchedRemoteIterator;
import org.apache.hadoop.fs.InvalidRequestException;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import com.google.common.base.Preconditions;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
/**
* CacheDirectiveIterator is a remote iterator that iterates cache directives.
@ -42,14 +41,14 @@ public class CacheDirectiveIterator
private CacheDirectiveInfo filter;
private final ClientProtocol namenode;
private final Sampler<?> traceSampler;
private final Tracer tracer;
public CacheDirectiveIterator(ClientProtocol namenode,
CacheDirectiveInfo filter, Sampler<?> traceSampler) {
CacheDirectiveInfo filter, Tracer tracer) {
super(0L);
this.namenode = namenode;
this.filter = filter;
this.traceSampler = traceSampler;
this.tracer = tracer;
}
private static CacheDirectiveInfo removeIdFromFilter(CacheDirectiveInfo filter) {
@ -94,7 +93,7 @@ public class CacheDirectiveIterator
public BatchedEntries<CacheDirectiveEntry> makeRequest(Long prevKey)
throws IOException {
BatchedEntries<CacheDirectiveEntry> entries = null;
TraceScope scope = Trace.startSpan("listCacheDirectives", traceSampler);
TraceScope scope = tracer.newScope("listCacheDirectives");
try {
entries = namenode.listCacheDirectives(prevKey, filter);
} catch (IOException e) {

View File

@ -23,9 +23,8 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.BatchedRemoteIterator;
import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
/**
* CachePoolIterator is a remote iterator that iterates cache pools.
@ -37,18 +36,18 @@ public class CachePoolIterator
extends BatchedRemoteIterator<String, CachePoolEntry> {
private final ClientProtocol namenode;
private final Sampler traceSampler;
private final Tracer tracer;
public CachePoolIterator(ClientProtocol namenode, Sampler traceSampler) {
public CachePoolIterator(ClientProtocol namenode, Tracer tracer) {
super("");
this.namenode = namenode;
this.traceSampler = traceSampler;
this.tracer = tracer;
}
@Override
public BatchedEntries<CachePoolEntry> makeRequest(String prevKey)
throws IOException {
TraceScope scope = Trace.startSpan("listCachePools", traceSampler);
TraceScope scope = tracer.newScope("listCachePools");
try {
return namenode.listCachePools(prevKey);
} finally {

View File

@ -23,9 +23,8 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.BatchedRemoteIterator;
import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
/**
* EncryptionZoneIterator is a remote iterator that iterates over encryption
@ -37,19 +36,18 @@ public class EncryptionZoneIterator
extends BatchedRemoteIterator<Long, EncryptionZone> {
private final ClientProtocol namenode;
private final Sampler<?> traceSampler;
private final Tracer tracer;
public EncryptionZoneIterator(ClientProtocol namenode,
Sampler<?> traceSampler) {
public EncryptionZoneIterator(ClientProtocol namenode, Tracer tracer) {
super(Long.valueOf(0));
this.namenode = namenode;
this.traceSampler = traceSampler;
this.tracer = tracer;
}
@Override
public BatchedEntries<EncryptionZone> makeRequest(Long prevId)
throws IOException {
TraceScope scope = Trace.startSpan("listEncryptionZones", traceSampler);
TraceScope scope = tracer.newScope("listEncryptionZones");
try {
return namenode.listEncryptionZones(prevId);
} finally {

View File

@ -93,6 +93,29 @@ public final class HdfsConstants {
//for write pipeline
public static final int WRITE_TIMEOUT_EXTENSION = 5 * 1000;
/**
* For an HDFS client to write to a file, a lease is granted; during the lease
* period, no other client can write to the file. The writing client can
* periodically renew the lease. When the file is closed, the lease is
* revoked. The lease duration is bound by this soft limit and a
* {@link HdfsConstants#LEASE_HARDLIMIT_PERIOD hard limit}. Until the
* soft limit expires, the writer has sole write access to the file. If the
* soft limit expires and the client fails to close the file or renew the
* lease, another client can preempt the lease.
*/
public static final long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
/**
* For an HDFS client to write to a file, a lease is granted; during the lease
* period, no other client can write to the file. The writing client can
* periodically renew the lease. When the file is closed, the lease is
* revoked. The lease duration is bound by a
* {@link HdfsConstants#LEASE_SOFTLIMIT_PERIOD soft limit} and this hard
* limit. If the hard limit expires and the client has failed to renew
* the lease, HDFS assumes that the client has quit and will automatically
* close the file on behalf of the writer, and recover the lease.
*/
public static final long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD;
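
As a quick sanity check of the constants just added: the soft limit works out to one minute and the hard limit to sixty minutes, and the LeaseRenewer shown later in this diff renews at half the soft limit. A tiny illustrative snippet:

public class LeaseLimits {
  public static void main(String[] args) {
    long softLimitMs = 60 * 1000L;        // LEASE_SOFTLIMIT_PERIOD: 1 minute
    long hardLimitMs = 60 * softLimitMs;  // LEASE_HARDLIMIT_PERIOD: 60 minutes
    System.out.println("soft limit: " + softLimitMs / 1000 + " s");
    System.out.println("hard limit: " + hardLimitMs / 60000 + " min");
    System.out.println("renewal interval: " + softLimitMs / 2 / 1000 + " s");
  }
}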
// SafeMode actions
public enum SafeModeAction {
SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET

View File

@ -35,10 +35,8 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceInfo;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.SpanId;
import org.apache.htrace.core.Tracer;
/**
* Static utilities for dealing with the protocol buffers used by the
@ -89,39 +87,21 @@ public abstract class DataTransferProtoUtil {
BaseHeaderProto.Builder builder = BaseHeaderProto.newBuilder()
.setBlock(PBHelperClient.convert(blk))
.setToken(PBHelperClient.convert(blockToken));
if (Trace.isTracing()) {
Span s = Trace.currentSpan();
SpanId spanId = Tracer.getCurrentSpanId();
if (spanId.isValid()) {
builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
.setTraceId(s.getTraceId())
.setParentId(s.getSpanId()));
.setTraceId(spanId.getHigh())
.setParentId(spanId.getLow()));
}
return builder.build();
}
public static TraceInfo fromProto(DataTransferTraceInfoProto proto) {
if (proto == null) return null;
if (!proto.hasTraceId()) return null;
return new TraceInfo(proto.getTraceId(), proto.getParentId());
}
public static TraceScope continueTraceSpan(ClientOperationHeaderProto header,
String description) {
return continueTraceSpan(header.getBaseHeader(), description);
}
public static TraceScope continueTraceSpan(BaseHeaderProto header,
String description) {
return continueTraceSpan(header.getTraceInfo(), description);
}
public static TraceScope continueTraceSpan(DataTransferTraceInfoProto proto,
String description) {
TraceScope scope = null;
TraceInfo info = fromProto(proto);
if (info != null) {
scope = Trace.startSpan(description, info);
public static SpanId fromProto(DataTransferTraceInfoProto proto) {
if ((proto != null) && proto.hasTraceId() &&
proto.hasParentId()) {
return new SpanId(proto.getTraceId(), proto.getParentId());
}
return scope;
return null;
}
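
The new fromProto() above, together with the builder code earlier in this file and in Sender, packs a 128-bit htrace SpanId into the existing pair of 64-bit proto fields: traceId carries the high half and parentId the low half. A small round-trip sketch of that packing; the SpanIdPacking class name and the sample values are illustrative.

import org.apache.htrace.core.SpanId;

/** Round-trip sketch of the packing used above. */
public class SpanIdPacking {
  public static void main(String[] args) {
    SpanId original = new SpanId(0x1234L, 0x5678L);
    // Serialize: what the proto builder receives via setTraceId/setParentId.
    long traceId = original.getHigh();
    long parentId = original.getLow();
    // Deserialize: what fromProto() reconstructs on the receiving side.
    SpanId roundTripped = new SpanId(traceId, parentId);
    System.out.println(roundTripped.getHigh() == original.getHigh()
        && roundTripped.getLow() == original.getLow());  // true
  }
}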
public static void checkBlockOpStatus(
@ -137,6 +117,7 @@ public abstract class DataTransferProtoUtil {
} else {
throw new IOException(
"Got error"
+ ", status=" + response.getStatus().name()
+ ", status message " + response.getMessage()
+ ", " + logInfo
);

View File

@ -48,8 +48,8 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
import org.apache.htrace.Trace;
import org.apache.htrace.Span;
import org.apache.htrace.core.SpanId;
import org.apache.htrace.core.Tracer;
import com.google.protobuf.Message;
@ -200,10 +200,11 @@ public class Sender implements DataTransferProtocol {
ReleaseShortCircuitAccessRequestProto.Builder builder =
ReleaseShortCircuitAccessRequestProto.newBuilder().
setSlotId(PBHelperClient.convert(slotId));
if (Trace.isTracing()) {
Span s = Trace.currentSpan();
builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
.setTraceId(s.getTraceId()).setParentId(s.getSpanId()));
SpanId spanId = Tracer.getCurrentSpanId();
if (spanId.isValid()) {
builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder().
setTraceId(spanId.getHigh()).
setParentId(spanId.getLow()));
}
ReleaseShortCircuitAccessRequestProto proto = builder.build();
send(out, Op.RELEASE_SHORT_CIRCUIT_FDS, proto);
@ -214,10 +215,11 @@ public class Sender implements DataTransferProtocol {
ShortCircuitShmRequestProto.Builder builder =
ShortCircuitShmRequestProto.newBuilder().
setClientName(clientName);
if (Trace.isTracing()) {
Span s = Trace.currentSpan();
builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
.setTraceId(s.getTraceId()).setParentId(s.getSpanId()));
SpanId spanId = Tracer.getCurrentSpanId();
if (spanId.isValid()) {
builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder().
setTraceId(spanId.getHigh()).
setParentId(spanId.getLow()));
}
ShortCircuitShmRequestProto proto = builder.build();
send(out, Op.REQUEST_SHORT_CIRCUIT_SHM, proto);

View File

@ -26,7 +26,7 @@ import org.apache.hadoop.io.retry.FailoverProxyProvider;
public abstract class AbstractNNFailoverProxyProvider<T> implements
FailoverProxyProvider <T> {
protected AtomicBoolean fallbackToSimpleAuth;
private AtomicBoolean fallbackToSimpleAuth;
/**
* Inquire whether logical HA URI is used for the implementation. If it is
@ -48,4 +48,8 @@ public abstract class AbstractNNFailoverProxyProvider<T> implements
AtomicBoolean fallbackToSimpleAuth) {
this.fallbackToSimpleAuth = fallbackToSimpleAuth;
}
public synchronized AtomicBoolean getFallbackToSimpleAuth() {
return fallbackToSimpleAuth;
}
}

View File

@ -17,18 +17,9 @@
*/
package org.apache.hadoop.hdfs.server.namenode.ha;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.retry.FailoverProxyProvider;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.UserGroupInformation;
import com.google.common.base.Preconditions;
/**
* An NNFailoverProxyProvider implementation which wraps old implementations

Some files were not shown because too many files have changed in this diff.