From 9b2a719031f1e2b097afb19e0994ae22ac7c2288 Mon Sep 17 00:00:00 2001
From: Mingliang Liu
Date: Wed, 12 Oct 2016 17:26:11 -0700
Subject: [PATCH] HDFS-10903. Replace config key literal strings with config
 key names II: hadoop hdfs. Contributed by Chen Liang

(cherry picked from commit b36aaa913c54829adbf5fa852af7f45be4db0f07)
---
 .../org/apache/hadoop/fs/http/server/FSOperations.java   | 9 +++++++--
 .../lib/service/hadoop/FileSystemAccessService.java      | 6 ++++--
 .../main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java  | 4 ++++
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml      | 7 +++++++
 .../java/org/apache/hadoop/hdfs/TestFileAppend4.java     | 3 ++-
 .../server/blockmanagement/TestBlockTokenWithDFS.java    | 3 ++-
 6 files changed, 26 insertions(+), 6 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 39597ebb120..2d17b7246c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -46,6 +46,9 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTPFS_BUFFER_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTP_BUFFER_SIZE_DEFAULT;
+
 /**
  * FileSystem operation executors used by {@link HttpFSServer}.
  */
@@ -439,7 +442,8 @@ public class FSOperations {
         blockSize = fs.getDefaultBlockSize(path);
       }
       FsPermission fsPermission = new FsPermission(permission);
-      int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);
+      int bufferSize = fs.getConf().getInt(HTTPFS_BUFFER_SIZE_KEY,
+          HTTP_BUFFER_SIZE_DEFAULT);
       OutputStream os = fs.create(path, fsPermission, override, bufferSize, replication, blockSize, null);
       IOUtils.copyBytes(is, os, bufferSize, true);
       os.close();
@@ -690,7 +694,8 @@
      */
     @Override
     public InputStream execute(FileSystem fs) throws IOException {
-      int bufferSize = HttpFSServerWebApp.get().getConfig().getInt("httpfs.buffer.size", 4096);
+      int bufferSize = HttpFSServerWebApp.get().getConfig().getInt(
+          HTTPFS_BUFFER_SIZE_KEY, HTTP_BUFFER_SIZE_DEFAULT);
       return fs.open(path, bufferSize);
     }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
index 88780cbebad..53f64d6e025 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/service/hadoop/FileSystemAccessService.java
@@ -50,6 +50,8 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+
 @InterfaceAudience.Private
 public class FileSystemAccessService extends BaseService implements FileSystemAccess {
   private static final Logger LOG = LoggerFactory.getLogger(FileSystemAccessService.class);
@@ -159,7 +161,7 @@ public class FileSystemAccessService extends BaseService implements FileSystemAc
         throw new ServiceException(FileSystemAccessException.ERROR.H01, KERBEROS_PRINCIPAL);
       }
       Configuration conf = new Configuration();
-      conf.set("hadoop.security.authentication", "kerberos");
+      conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
       UserGroupInformation.setConfiguration(conf);
       try {
         UserGroupInformation.loginUserFromKeytab(principal, keytab);
@@ -169,7 +171,7 @@ public class FileSystemAccessService extends BaseService implements FileSystemAc
       LOG.info("Using FileSystemAccess Kerberos authentication, principal [{}] keytab [{}]", principal, keytab);
     } else if (security.equals("simple")) {
       Configuration conf = new Configuration();
-      conf.set("hadoop.security.authentication", "simple");
+      conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
       UserGroupInformation.setConfiguration(conf);
       LOG.info("Using FileSystemAccess simple/pseudo authentication, principal [{}]", System.getProperty("user.name"));
     } else {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index f6e056f404f..15b2d151c14 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -909,6 +909,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_DEFAULT =
       HdfsConstants.DEFAULT_DATA_SOCKET_SIZE;
 
+  public static final String HTTPFS_BUFFER_SIZE_KEY =
+      "httpfs.buffer.size";
+  public static final int HTTP_BUFFER_SIZE_DEFAULT = 4096;
+
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry
   @Deprecated
   public static final String DFS_CLIENT_RETRY_POLICY_ENABLED_KEY
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 0dc497db9c4..9ec7ec33e55 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -3053,4 +3053,11 @@
     call queue
   </description>
 </property>
+<property>
+  <name>httpfs.buffer.size</name>
+  <value>4096</value>
+  <description>
+    The size of the buffer used when creating or opening httpfs filesystem IO streams.
+  </description>
+</property>
 </configuration>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
index 265b510ec10..01141efbc9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -89,7 +90,7 @@ public class TestFileAppend4 {
 
     // handle failures in the DFSClient pipeline quickly
     // (for cluster.shutdown(); fs.close() idiom)
-    conf.setInt("ipc.client.connect.max.retries", 1);
+    conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
   }
 
   /*
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
index 8ccbd483335..d8cb1da2780 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -215,7 +216,7 @@ public class TestBlockTokenWithDFS {
     conf.setInt("io.bytes.per.checksum", BLOCK_SIZE);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
-    conf.setInt("ipc.client.connect.max.retries", 0);
+    conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
     // Set short retry timeouts so this test runs faster
     conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     return conf;
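
The pattern this patch applies is mechanical: each string-literal configuration key (and its inline default value) is replaced by a named constant, either one that already exists in CommonConfigurationKeysPublic or one that the patch adds to DFSConfigKeys. Below is a minimal standalone sketch of why the constants are preferable. The ConfigKeyExample class name is invented for illustration; the two constants are the ones this patch adds.

    // Illustrative sketch only -- not part of the patch. ConfigKeyExample is
    // a made-up class; the constants are the ones added to DFSConfigKeys.
    import org.apache.hadoop.conf.Configuration;

    import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTPFS_BUFFER_SIZE_KEY;
    import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTP_BUFFER_SIZE_DEFAULT;

    public class ConfigKeyExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Before: the literal key and literal default are duplicated at each
        // call site. A typo such as "httpfs.bufer.size" still compiles and
        // silently returns the default instead of the configured value.
        int literal = conf.getInt("httpfs.buffer.size", 4096);

        // After: a misspelled constant name fails at compile time, and the
        // key string and its default each live in exactly one place.
        int named = conf.getInt(HTTPFS_BUFFER_SIZE_KEY, HTTP_BUFFER_SIZE_DEFAULT);

        System.out.println(literal == named);  // prints "true"
      }
    }

Note that the constants also make usages discoverable: an IDE or grep over HTTPFS_BUFFER_SIZE_KEY finds every call site, which a scattered string literal does not guarantee.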