diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index e417fbe2172..fe315316745 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -143,6 +143,17 @@ public interface HdfsClientConfigKeys {
   String REPLICA_ACCESSOR_BUILDER_CLASSES_KEY =
       PREFIX + "replica.accessor.builder.classes";
 
+  // The number of NN responses dropped by the client proactively in each RPC
+  // call. To test the NN retry cache, set this property to a positive value.
+  String DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY =
+      "dfs.client.test.drop.namenode.response.number";
+  int DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0;
+  String DFS_CLIENT_LOCAL_INTERFACES = "dfs.client.local.interfaces";
+  // HDFS client HTrace configuration.
+  String DFS_CLIENT_HTRACE_PREFIX = "dfs.client.htrace.";
+  String DFS_USER_HOME_DIR_PREFIX_KEY = "dfs.user.home.dir.prefix";
+  String DFS_USER_HOME_DIR_PREFIX_DEFAULT = "/user";
+
   /** dfs.client.retry configuration properties */
   interface Retry {
     String PREFIX = HdfsClientConfigKeys.PREFIX + "retry.";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6550113f7db..fdecdda18a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -953,6 +953,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9130. Use GenericTestUtils#setLogLevel to the logging level.
     (Mingliang Liu via wheat9)
 
+    HDFS-9131. Move config keys used by hdfs-client to HdfsClientConfigKeys.
+    (Mingliang Liu via wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 8f87895f813..d7751a5c775 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -95,6 +95,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
@@ -284,19 +285,20 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 
   /**
    * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
-   * If HA is enabled and a positive value is set for
-   * {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
-   * configuration, the DFSClient will use {@link LossyRetryInvocationHandler}
-   * as its RetryInvocationHandler. Otherwise one of nameNodeUri or rpcNamenode
-   * must be null.
+   * If HA is enabled and a positive value is set for
+   * {@link HdfsClientConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY}
+   * in the configuration, the DFSClient will use
+   * {@link LossyRetryInvocationHandler} as its RetryInvocationHandler.
+   * Otherwise one of nameNodeUri or rpcNamenode must be null.
    */
   @VisibleForTesting
   public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
       Configuration conf, FileSystem.Statistics stats)
     throws IOException {
-    SpanReceiverHost.get(conf, DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
+    SpanReceiverHost.get(conf, HdfsClientConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
     traceSampler = new SamplerBuilder(TraceUtils.
-        wrapHadoopConf(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf)).build();
+        wrapHadoopConf(HdfsClientConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf))
+        .build();
     // Copy only the required DFSClient configuration
     this.dfsClientConf = new DfsClientConf(conf);
     this.conf = conf;
@@ -312,13 +314,13 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
         ThreadLocalRandom.current().nextInt() + "_" +
         Thread.currentThread().getId();
     int numResponseToDrop = conf.getInt(
-        DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
-        DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
+        HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
+        HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
     ProxyAndInfo<ClientProtocol> proxyInfo = null;
     AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);
     if (numResponseToDrop > 0) {
       // This case is used for testing.
-      LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
+      LOG.warn(HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
           + " is set to " + numResponseToDrop
           + ", this hacked client will proactively drop responses");
       proxyInfo = NameNodeProxiesClient.createProxyWithLossyRetryHandler(conf,
@@ -344,7 +346,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
 
     String localInterfaces[] =
-      conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
+      conf.getTrimmedStrings(HdfsClientConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
     localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
     if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
       LOG.debug("Using local interfaces [" +
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 28ea8661a8a..780484ca2d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -49,8 +49,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
   public static final int DFS_BYTES_PER_CHECKSUM_DEFAULT =
       HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
-  public static final String DFS_USER_HOME_DIR_PREFIX_KEY = "dfs.user.home.dir.prefix";
-  public static final String DFS_USER_HOME_DIR_PREFIX_DEFAULT = "/user";
+  @Deprecated
+  public static final String DFS_USER_HOME_DIR_PREFIX_KEY =
+      HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY;
+  @Deprecated
+  public static final String DFS_USER_HOME_DIR_PREFIX_DEFAULT =
+      HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT;
   public static final String DFS_CHECKSUM_TYPE_KEY = HdfsClientConfigKeys
       .DFS_CHECKSUM_TYPE_KEY;
   public static final String DFS_CHECKSUM_TYPE_DEFAULT =
@@ -65,9 +69,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   // HDFS HTrace configuration is controlled by dfs.htrace.spanreceiver.classes,
   // etc.
   public static final String DFS_SERVER_HTRACE_PREFIX = "dfs.htrace.";
-
-  // HDFS client HTrace configuration.
-  public static final String DFS_CLIENT_HTRACE_PREFIX = "dfs.client.htrace.";
+  @Deprecated
+  public static final String DFS_CLIENT_HTRACE_PREFIX =
+      HdfsClientConfigKeys.DFS_CLIENT_HTRACE_PREFIX;
 
   // HA related configuration
   public static final String DFS_DATANODE_RESTART_REPLICA_EXPIRY_KEY = "dfs.datanode.restart.replica.expiration";
@@ -1124,9 +1128,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   @Deprecated
   public static final boolean DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT =
       HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT;
-
-  public static final String DFS_CLIENT_LOCAL_INTERFACES = "dfs.client.local.interfaces";
-
+  @Deprecated
+  public static final String DFS_CLIENT_LOCAL_INTERFACES =
+      HdfsClientConfigKeys.DFS_CLIENT_LOCAL_INTERFACES;
 
   @Deprecated
   public static final String DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC =
@@ -1135,10 +1139,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT =
       HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT;
 
-  // The number of NN response dropped by client proactively in each RPC call.
-  // For testing NN retry cache, we can set this property with positive value.
-  public static final String DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY = "dfs.client.test.drop.namenode.response.number";
-  public static final int DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0;
+  @Deprecated
+  public static final String DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY =
+      HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY;
+  @Deprecated
+  public static final int DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT =
+      HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT;
 
   @Deprecated
   public static final String DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY =
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index f4cf4c201ea..1d20f825a70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -109,7 +109,7 @@ public class DistributedFileSystem extends FileSystem {
   private Path workingDir;
   private URI uri;
   private String homeDirPrefix =
-      DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT;
+      HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT;
 
   DFSClient dfs;
   private boolean verifyChecksum = true;
@@ -145,9 +145,9 @@ public class DistributedFileSystem extends FileSystem {
       throw new IOException("Incomplete HDFS URI, no host: "+ uri);
     }
     homeDirPrefix = conf.get(
-        DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY,
-        DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT);
-
+        HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY,
+        HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT);
+
     this.dfs = new DFSClient(uri, conf, statistics);
     this.uri = URI.create(uri.getScheme()+"://"+uri.getAuthority());
     this.workingDir = getHomeDirectory();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index 85d079c3e0b..e59963b867f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -216,7 +216,7 @@ public class TestFileCreation {
       throws IOException {
     Configuration conf = new HdfsConfiguration();
     if (netIf != null) {
-      conf.set(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES, netIf);
+      conf.set(HdfsClientConfigKeys.DFS_CLIENT_LOCAL_INTERFACES, netIf);
     }
     conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, useDnHostname);
     if (useDnHostname) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java
index 15429627457..b353de1ac91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLocalDFS.java
@@ -27,6 +27,7 @@ import java.io.IOException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.junit.Test;
 
 /**
@@ -91,8 +92,8 @@ public class TestLocalDFS {
 
     // test home directory
     Path home = fileSys.makeQualified(
-        new Path(DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT
-            + "/" + getUserName(fileSys)));
+        new Path(HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT
+            + "/" + getUserName(fileSys)));
     Path fsHome = fileSys.getHomeDirectory();
     assertEquals(home, fsHome);
 
@@ -110,7 +111,7 @@ public class TestLocalDFS {
     final String[] homeBases = new String[] {"/home", "/home/user"};
     Configuration conf = new HdfsConfiguration();
     for (final String homeBase : homeBases) {
-      conf.set(DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY, homeBase);
+      conf.set(HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY, homeBase);
       MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
       FileSystem fileSys = cluster.getFileSystem();
       try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java
index e5d059e3a5b..97158108848 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRetryCacheMetrics.java
@@ -20,11 +20,11 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.IOException;
 
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.ipc.metrics.RetryCacheMetrics;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -60,7 +60,7 @@ public class TestNameNodeRetryCacheMetrics {
   public void setup() throws Exception {
     conf = new HdfsConfiguration();
     conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
         .build();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestLossyRetryInvocationHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestLossyRetryInvocationHandler.java
index 9434392ccb8..8cdd445b263 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestLossyRetryInvocationHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestLossyRetryInvocationHandler.java
@@ -18,15 +18,15 @@ package org.apache.hadoop.hdfs.server.namenode.ha;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.junit.Test;
 
 /**
  * This test makes sure that when
- * {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} is set,
+ * {@link HdfsClientConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} is set,
  * DFSClient instances can still be created within NN/DN (e.g., the fs instance
  * used by the trash emptier thread in NN)
  */
@@ -39,8 +39,8 @@ public class TestLossyRetryInvocationHandler {
 
     // enable both trash emptier and dropping response
     conf.setLong("fs.trash.interval", 360);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
-
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
+
     try {
       cluster = new MiniDFSCluster.Builder(conf)
           .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
index c3d2c73bde7..5b365ba4e57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
@@ -22,9 +22,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.htrace.Sampler;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
@@ -189,7 +189,7 @@ public class TestTracing {
   public static void setup() throws IOException {
     conf = new Configuration();
     conf.setLong("dfs.blocksize", 100 * 1024);
-    conf.set(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX +
+    conf.set(HdfsClientConfigKeys.DFS_CLIENT_HTRACE_PREFIX +
         SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX,
         SetSpanReceiver.class.getName());
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracingShortCircuitLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracingShortCircuitLocalRead.java
index 0804a057975..a34748d5a1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracingShortCircuitLocalRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracingShortCircuitLocalRead.java
@@ -64,7 +64,7 @@ public class TestTracingShortCircuitLocalRead {
   public void testShortCircuitTraceHooks() throws IOException {
     assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
     conf = new Configuration();
-    conf.set(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX +
+    conf.set(HdfsClientConfigKeys.DFS_CLIENT_HTRACE_PREFIX +
         SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX,
         SetSpanReceiver.class.getName());
     conf.setLong("dfs.blocksize", 100 * 1024);
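
After this patch, client-side code should reference these keys through HdfsClientConfigKeys; the DFSConfigKeys constants remain only as @Deprecated forwards for source compatibility. A minimal sketch of the intended usage, assuming hadoop-common and the hadoop-hdfs-client jar on the classpath (the HomeDirPrefixExample class name and the "/users" value are illustrative, not part of this patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class HomeDirPrefixExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Override the client-side home directory prefix via the relocated key.
    conf.set(HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY, "/users");
    // Resolve the prefix the same way DistributedFileSystem#initialize does:
    // read the key, falling back to the "/user" default when unset.
    String prefix = conf.get(
        HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY,
        HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT);
    System.out.println("home dir prefix = " + prefix);
  }
}

Because each deprecated DFSConfigKeys constant forwards to the identical HdfsClientConfigKeys value, existing callers keep compiling and resolve the same configuration property, while new client code no longer needs the server-side hadoop-hdfs jar.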