HDFS-10903. Replace config key literal strings with config key names II: hadoop hdfs. Contributed by Chen Liang

(cherry picked from commit b36aaa913c)
Mingliang Liu 2016-10-12 17:26:11 -07:00
parent 9bde45d2fe
commit 9b2a719031
6 changed files with 26 additions and 6 deletions


@@ -46,6 +46,9 @@
import java.util.Map;
import java.util.Map.Entry;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTPFS_BUFFER_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTP_BUFFER_SIZE_DEFAULT;
/**
* FileSystem operation executors used by {@link HttpFSServer}.
*/
@@ -439,7 +442,8 @@ public Void execute(FileSystem fs) throws IOException {
blockSize = fs.getDefaultBlockSize(path);
}
FsPermission fsPermission = new FsPermission(permission);
-int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);
+int bufferSize = fs.getConf().getInt(HTTPFS_BUFFER_SIZE_KEY,
+    HTTP_BUFFER_SIZE_DEFAULT);
OutputStream os = fs.create(path, fsPermission, override, bufferSize, replication, blockSize, null);
IOUtils.copyBytes(is, os, bufferSize, true);
os.close();
@@ -690,7 +694,8 @@ public FSOpen(String path) {
*/
@Override
public InputStream execute(FileSystem fs) throws IOException {
-int bufferSize = HttpFSServerWebApp.get().getConfig().getInt("httpfs.buffer.size", 4096);
+int bufferSize = HttpFSServerWebApp.get().getConfig().getInt(
+    HTTPFS_BUFFER_SIZE_KEY, HTTP_BUFFER_SIZE_DEFAULT);
return fs.open(path, bufferSize);
}
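Taken together, the two FSOperations hunks above converge on a single lookup pattern for the buffer size. A minimal sketch of that pattern, not part of the commit itself, using only the constants this change introduces in DFSConfigKeys (shown further down):

    import org.apache.hadoop.conf.Configuration;
    import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTPFS_BUFFER_SIZE_KEY;
    import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTP_BUFFER_SIZE_DEFAULT;

    // Key name and default now live in one place; this resolves to 4096
    // unless httpfs.buffer.size is set explicitly in the configuration.
    Configuration conf = new Configuration();
    int bufferSize = conf.getInt(HTTPFS_BUFFER_SIZE_KEY, HTTP_BUFFER_SIZE_DEFAULT);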


@@ -50,6 +50,8 @@
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
@InterfaceAudience.Private
public class FileSystemAccessService extends BaseService implements FileSystemAccess {
private static final Logger LOG = LoggerFactory.getLogger(FileSystemAccessService.class);
@@ -159,7 +161,7 @@ protected void init() throws ServiceException {
throw new ServiceException(FileSystemAccessException.ERROR.H01, KERBEROS_PRINCIPAL);
}
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
UserGroupInformation.setConfiguration(conf);
try {
UserGroupInformation.loginUserFromKeytab(principal, keytab);
@@ -169,7 +171,7 @@ protected void init() throws ServiceException {
LOG.info("Using FileSystemAccess Kerberos authentication, principal [{}] keytab [{}]", principal, keytab);
} else if (security.equals("simple")) {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "simple");
conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
UserGroupInformation.setConfiguration(conf);
LOG.info("Using FileSystemAccess simple/pseudo authentication, principal [{}]", System.getProperty("user.name"));
} else {
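Both init() branches now go through the shared CommonConfigurationKeysPublic constant. A minimal sketch of the kerberos branch, not part of the commit, assuming only the import added in the hunk above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;
    import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;

    // Same runtime behavior as the old string literal, but a typo in the
    // key name now fails at compile time instead of silently at runtime.
    Configuration conf = new Configuration();
    conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);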


@@ -909,6 +909,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_DEFAULT =
HdfsConstants.DEFAULT_DATA_SOCKET_SIZE;
+public static final String HTTPFS_BUFFER_SIZE_KEY =
+    "httpfs.buffer.size";
+public static final int HTTP_BUFFER_SIZE_DEFAULT = 4096;
// dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry
@Deprecated
public static final String DFS_CLIENT_RETRY_POLICY_ENABLED_KEY


@@ -3053,4 +3053,11 @@
call queue</description>
</property>
+<property>
+  <name>httpfs.buffer.size</name>
+  <value>4096</value>
+  <description>
+    The size buffer to be used when creating or opening httpfs filesystem IO stream.
+  </description>
+</property>
</configuration>
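A plain Configuration does not load hdfs-default.xml, so the new <property> entry is only visible through HdfsConfiguration (or an explicitly added resource); the HTTP_BUFFER_SIZE_DEFAULT constant remains the in-code fallback. A hedged sketch of that interplay, not part of the commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTPFS_BUFFER_SIZE_KEY;
    import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTP_BUFFER_SIZE_DEFAULT;

    // HdfsConfiguration registers hdfs-default.xml and hdfs-site.xml, so the
    // XML entry above supplies the value; the constant only takes effect for
    // callers whose Configuration never loads that resource.
    Configuration conf = new HdfsConfiguration();
    int bufferSize = conf.getInt(HTTPFS_BUFFER_SIZE_KEY, HTTP_BUFFER_SIZE_DEFAULT);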


@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
@@ -89,7 +90,7 @@ public void setUp() throws Exception {
// handle failures in the DFSClient pipeline quickly
// (for cluster.shutdown(); fs.close() idiom)
conf.setInt("ipc.client.connect.max.retries", 1);
conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
}
/*


@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@@ -215,7 +216,7 @@ private static Configuration getConf(int numDataNodes) {
conf.setInt("io.bytes.per.checksum", BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
conf.setInt("ipc.client.connect.max.retries", 0);
conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
// Set short retry timeouts so this test runs faster
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
return conf;
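The two test hunks apply the same substitution to the IPC retry key. A minimal sketch of the test-side idiom, not part of the commit:

    import org.apache.hadoop.conf.Configuration;
    import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY;

    // Cap connect retries so failure-injection tests fail fast rather than
    // sitting through the production retry schedule.
    Configuration conf = new Configuration();
    conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);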