HDFS-9131. Move config keys used by hdfs-client to HdfsClientConfigKeys. Contributed by Mingliang Liu.

commit ead1b9e680 (parent 4893adff19)
Author: Haohui Mai
Date:   2015-09-24 00:30:01 -07:00

11 changed files with 63 additions and 40 deletions

File: HdfsClientConfigKeys.java

@@ -143,6 +143,17 @@ public interface HdfsClientConfigKeys {
   String REPLICA_ACCESSOR_BUILDER_CLASSES_KEY =
       PREFIX + "replica.accessor.builder.classes";
 
+  // The number of NN responses dropped by the client proactively in each
+  // RPC call. For testing the NN retry cache, set it to a positive value.
+  String DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY =
+      "dfs.client.test.drop.namenode.response.number";
+  int DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0;
+  String DFS_CLIENT_LOCAL_INTERFACES = "dfs.client.local.interfaces";
+  // HDFS client HTrace configuration.
+  String DFS_CLIENT_HTRACE_PREFIX = "dfs.client.htrace.";
+  String DFS_USER_HOME_DIR_PREFIX_KEY = "dfs.user.home.dir.prefix";
+  String DFS_USER_HOME_DIR_PREFIX_DEFAULT = "/user";
+
   /** dfs.client.retry configuration properties */
   interface Retry {
     String PREFIX = HdfsClientConfigKeys.PREFIX + "retry.";
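Note: the relocated keys are ordinary String constants, so client code reads
them through the standard Configuration API. A minimal illustrative sketch
(not part of this commit; the class name is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    // Hypothetical example class, for illustration only.
    public class ClientKeysExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Each getter falls back to the shipped default.
        int drops = conf.getInt(
            HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
            HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
        String homePrefix = conf.get(
            HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY,
            HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT);
        System.out.println("drops=" + drops + ", home prefix=" + homePrefix);
      }
    }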

File: CHANGES.txt

@@ -953,6 +953,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9130. Use GenericTestUtils#setLogLevel to the logging level.
     (Mingliang Liu via wheat9)
 
+    HDFS-9131. Move config keys used by hdfs-client to HdfsClientConfigKeys.
+    (Mingliang Liu via wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

File: DFSClient.java

@@ -95,6 +95,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
@@ -284,19 +285,20 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   /**
    * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
    * If HA is enabled and a positive value is set for
-   * {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
-   * configuration, the DFSClient will use {@link LossyRetryInvocationHandler}
-   * as its RetryInvocationHandler. Otherwise one of nameNodeUri or rpcNamenode
-   * must be null.
+   * {@link HdfsClientConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY}
+   * in the configuration, the DFSClient will use
+   * {@link LossyRetryInvocationHandler} as its RetryInvocationHandler.
+   * Otherwise one of nameNodeUri or rpcNamenode must be null.
    */
   @VisibleForTesting
   public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
       Configuration conf, FileSystem.Statistics stats)
       throws IOException {
-    SpanReceiverHost.get(conf, DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
+    SpanReceiverHost.get(conf, HdfsClientConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
     traceSampler = new SamplerBuilder(TraceUtils.
-        wrapHadoopConf(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf)).build();
+        wrapHadoopConf(HdfsClientConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf))
+        .build();
 
     // Copy only the required DFSClient configuration
     this.dfsClientConf = new DfsClientConf(conf);
     this.conf = conf;
@@ -312,13 +314,13 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
         ThreadLocalRandom.current().nextInt() + "_" +
         Thread.currentThread().getId();
     int numResponseToDrop = conf.getInt(
-        DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
-        DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
+        HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
+        HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
     ProxyAndInfo<ClientProtocol> proxyInfo = null;
     AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);
     if (numResponseToDrop > 0) {
       // This case is used for testing.
-      LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
+      LOG.warn(HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
           + " is set to " + numResponseToDrop
           + ", this hacked client will proactively drop responses");
       proxyInfo = NameNodeProxiesClient.createProxyWithLossyRetryHandler(conf,
@@ -344,7 +346,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
 
     String localInterfaces[] =
-        conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
+        conf.getTrimmedStrings(HdfsClientConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
     localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
     if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
       LOG.debug("Using local interfaces [" +

File: DFSConfigKeys.java

@@ -49,8 +49,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
   public static final int DFS_BYTES_PER_CHECKSUM_DEFAULT =
       HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
-  public static final String DFS_USER_HOME_DIR_PREFIX_KEY = "dfs.user.home.dir.prefix";
-  public static final String DFS_USER_HOME_DIR_PREFIX_DEFAULT = "/user";
+  @Deprecated
+  public static final String DFS_USER_HOME_DIR_PREFIX_KEY =
+      HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY;
+  @Deprecated
+  public static final String DFS_USER_HOME_DIR_PREFIX_DEFAULT =
+      HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT;
   public static final String DFS_CHECKSUM_TYPE_KEY = HdfsClientConfigKeys
       .DFS_CHECKSUM_TYPE_KEY;
   public static final String DFS_CHECKSUM_TYPE_DEFAULT =
@@ -65,9 +69,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   // HDFS HTrace configuration is controlled by dfs.htrace.spanreceiver.classes,
   // etc.
   public static final String DFS_SERVER_HTRACE_PREFIX = "dfs.htrace.";
-
-  // HDFS client HTrace configuration.
-  public static final String DFS_CLIENT_HTRACE_PREFIX = "dfs.client.htrace.";
+  @Deprecated
+  public static final String DFS_CLIENT_HTRACE_PREFIX =
+      HdfsClientConfigKeys.DFS_CLIENT_HTRACE_PREFIX;
 
   // HA related configuration
   public static final String DFS_DATANODE_RESTART_REPLICA_EXPIRY_KEY = "dfs.datanode.restart.replica.expiration";
@@ -1124,9 +1128,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   @Deprecated
   public static final boolean DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT
       = HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT;
 
-  public static final String DFS_CLIENT_LOCAL_INTERFACES = "dfs.client.local.interfaces";
+  @Deprecated
+  public static final String DFS_CLIENT_LOCAL_INTERFACES =
+      HdfsClientConfigKeys.DFS_CLIENT_LOCAL_INTERFACES;
 
   @Deprecated
   public static final String DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC =
@@ -1135,10 +1139,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT =
       HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT;
 
-  // The number of NN response dropped by client proactively in each RPC call.
-  // For testing NN retry cache, we can set this property with positive value.
-  public static final String DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY = "dfs.client.test.drop.namenode.response.number";
-  public static final int DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0;
+  @Deprecated
+  public static final String DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY =
+      HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY;
+  @Deprecated
+  public static final int DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT =
+      HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT;
 
   @Deprecated
   public static final String DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY =
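Note: each deprecated DFSConfigKeys constant is now a pure alias, so existing
code keeps resolving to the identical key string. An illustrative check
(assumes assertions are enabled with -ea):

    // Old and new names must point at the same configuration key.
    assert DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES
        .equals(HdfsClientConfigKeys.DFS_CLIENT_LOCAL_INTERFACES)
        : "deprecated alias diverged from the relocated key";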

File: DistributedFileSystem.java

@@ -109,7 +109,7 @@ public class DistributedFileSystem extends FileSystem {
   private Path workingDir;
   private URI uri;
   private String homeDirPrefix =
-      DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT;
+      HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT;
 
   DFSClient dfs;
   private boolean verifyChecksum = true;
@@ -145,9 +145,9 @@ public class DistributedFileSystem extends FileSystem {
       throw new IOException("Incomplete HDFS URI, no host: "+ uri);
     }
 
     homeDirPrefix = conf.get(
-        DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY,
-        DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT);
+        HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY,
+        HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT);
     this.dfs = new DFSClient(uri, conf, statistics);
     this.uri = URI.create(uri.getScheme()+"://"+uri.getAuthority());
     this.workingDir = getHomeDirectory();
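Note: the home-directory prefix stays overridable per cluster; for example
(illustrative value):

    // Resolve user homes under /home/users instead of the default /user.
    conf.set(HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY, "/home/users");
    // getHomeDirectory() then yields /home/users/<current user>.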

File: TestFileCreation.java

@@ -216,7 +216,7 @@ public class TestFileCreation {
       throws IOException {
     Configuration conf = new HdfsConfiguration();
     if (netIf != null) {
-      conf.set(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES, netIf);
+      conf.set(HdfsClientConfigKeys.DFS_CLIENT_LOCAL_INTERFACES, netIf);
     }
     conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, useDnHostname);
     if (useDnHostname) {

File: TestLocalDFS.java

@@ -27,6 +27,7 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.junit.Test;
 
 /**
@@ -91,8 +92,8 @@ public class TestLocalDFS {
       // test home directory
       Path home =
           fileSys.makeQualified(
-              new Path(DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT
+              new Path(HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT
                   + "/" + getUserName(fileSys)));
       Path fsHome = fileSys.getHomeDirectory();
       assertEquals(home, fsHome);
@@ -110,7 +111,7 @@ public class TestLocalDFS {
     final String[] homeBases = new String[] {"/home", "/home/user"};
     Configuration conf = new HdfsConfiguration();
     for (final String homeBase : homeBases) {
-      conf.set(DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY, homeBase);
+      conf.set(HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY, homeBase);
       MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
       FileSystem fileSys = cluster.getFileSystem();
       try {

File: TestNameNodeRetryCacheMetrics.java

@@ -20,11 +20,11 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.IOException;
 
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.ipc.metrics.RetryCacheMetrics;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -60,7 +60,7 @@ public class TestNameNodeRetryCacheMetrics {
   public void setup() throws Exception {
     conf = new HdfsConfiguration();
     conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
         .build();

File: TestLossyRetryInvocationHandler.java

@@ -18,15 +18,15 @@
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.junit.Test;
 
 /**
  * This test makes sure that when
- * {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} is set,
+ * {@link HdfsClientConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} is set,
  * DFSClient instances can still be created within NN/DN (e.g., the fs instance
  * used by the trash emptier thread in NN)
  */
@@ -39,8 +39,8 @@ public class TestLossyRetryInvocationHandler {
     // enable both trash emptier and dropping response
     conf.setLong("fs.trash.interval", 360);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
 
     try {
       cluster = new MiniDFSCluster.Builder(conf)
           .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0)

File: TestTracing.java

@@ -22,9 +22,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.htrace.Sampler;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
@@ -189,7 +189,7 @@ public class TestTracing {
   public static void setup() throws IOException {
     conf = new Configuration();
     conf.setLong("dfs.blocksize", 100 * 1024);
-    conf.set(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX +
+    conf.set(HdfsClientConfigKeys.DFS_CLIENT_HTRACE_PREFIX +
         SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX,
         SetSpanReceiver.class.getName());
   }
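Note: the client HTrace keys are composed by string concatenation; assuming
SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX is "spanreceiver.classes" (its
value is not shown in this diff), the full key works out as:

    String key = HdfsClientConfigKeys.DFS_CLIENT_HTRACE_PREFIX
        + SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX;
    // => "dfs.client.htrace.spanreceiver.classes"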

File: TestTracingShortCircuitLocalRead.java

@@ -64,7 +64,7 @@ public class TestTracingShortCircuitLocalRead {
   public void testShortCircuitTraceHooks() throws IOException {
     assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
     conf = new Configuration();
-    conf.set(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX +
+    conf.set(HdfsClientConfigKeys.DFS_CLIENT_HTRACE_PREFIX +
         SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX,
         SetSpanReceiver.class.getName());
     conf.setLong("dfs.blocksize", 100 * 1024);