HDFS-2198. Remove hardcoded configuration keys. Contributed by Suresh Srinivas.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1151501 13f79535-47bb-0310-9956-ffa450edef68
Parent: 4a535823a7
Commit: b60772c47d
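The diff below replaces string-literal configuration keys with the named constants in DFSConfigKeys across DFSClient, JspHelper, DataNode, DirectoryScanner, NameNode, and SecondaryNameNode. As a minimal sketch of the pattern (not taken from the patch itself; the key name "dfs.replication" and its default of 3 are the values DFSConfigKeys pairs together), the before and after shapes look like this:

// Illustrative sketch only, not part of this patch.
import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;

public class ConfigKeyPattern {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Before: a hardcoded key and an inline default, repeated at every call site.
    int oldStyle = conf.getInt("dfs.replication", 3);

    // After: the constant pair from DFSConfigKeys keeps the key name and its
    // default in one place, so call sites cannot drift out of sync.
    int newStyle = conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);

    System.out.println(oldStyle + " == " + newStyle);
  }
}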
CHANGES.txt
@@ -601,6 +601,8 @@ Trunk (unreleased changes)
 
     HDFS-2180. Refactor NameNode HTTP server into new class. (todd)
 
+    HDFS-2198. Remove hardcoded configuration keys. (suresh)
+
     HDFS-2149. Move EditLogOp serialization formats into FsEditLogOp
     implementations. (Ivan Kelly via todd)
 
DFSClient.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -149,48 +150,38 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     Conf(Configuration conf) {
       maxBlockAcquireFailures = conf.getInt(
-          DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
-          DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
-      confTime = conf.getInt(
-          DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
+          DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
+          DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
+      confTime = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
           HdfsConstants.WRITE_TIMEOUT);
       ioBufferSize = conf.getInt(
           CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
           CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
-      bytesPerChecksum = conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,
-          DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
-      socketTimeout = conf.getInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
+      bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
+          DFS_BYTES_PER_CHECKSUM_DEFAULT);
+      socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
           HdfsConstants.READ_TIMEOUT);
       /** dfs.write.packet.size is an internal config variable */
-      writePacketSize = conf.getInt(
-          DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
-          DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
-      defaultBlockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+      writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
+          DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
+      defaultBlockSize = conf.getLong(DFS_BLOCK_SIZE_KEY,
           DEFAULT_BLOCK_SIZE);
       defaultReplication = (short) conf.getInt(
-          DFSConfigKeys.DFS_REPLICATION_KEY,
-          DFSConfigKeys.DFS_REPLICATION_DEFAULT);
+          DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
       taskId = conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE");
-      socketCacheCapacity = conf.getInt(
-          DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,
-          DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT);
-      prefetchSize = conf.getLong(
-          DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY,
+      socketCacheCapacity = conf.getInt(DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,
+          DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT);
+      prefetchSize = conf.getLong(DFS_CLIENT_READ_PREFETCH_SIZE_KEY,
           10 * defaultBlockSize);
       timeWindow = conf
-          .getInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 3000);
-      nCachedConnRetry = conf.getInt(
-          DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_KEY,
-          DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT);
-      nBlockWriteRetry = conf.getInt(
-          DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY,
-          DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT);
+          .getInt(DFS_CLIENT_RETRY_WINDOW_BASE, 3000);
+      nCachedConnRetry = conf.getInt(DFS_CLIENT_CACHED_CONN_RETRY_KEY,
+          DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT);
+      nBlockWriteRetry = conf.getInt(DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY,
+          DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT);
       nBlockWriteLocateFollowingRetry = conf
-          .getInt(
-              DFSConfigKeys
-                  .DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
-              DFSConfigKeys
-                  .DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
+          .getInt(DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
+          DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
       uMask = FsPermission.getUMask(conf);
     }
   }
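One practical consequence of the hunk above: callers can tune DFSClient behavior against the very constants the Conf constructor reads back, instead of retyping raw key strings. A hypothetical caller-side sketch (the class name, values, and the assumption that an HDFS default filesystem is configured are all illustrative):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;

public class ClientTuning {
  public static FileSystem openTunedFs() throws IOException {
    Configuration conf = new HdfsConfiguration();
    // Same constants the Conf constructor above resolves at client startup.
    conf.setInt(DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 5);
    conf.setInt(DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 32);
    return FileSystem.get(conf);
  }
}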
JspHelper.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.BlockReader;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -64,7 +65,7 @@ import org.apache.hadoop.util.VersionInfo;
 @InterfaceAudience.Private
 public class JspHelper {
   public static final String CURRENT_CONF = "current.conf";
-  final static public String WEB_UGI_PROPERTY_NAME = "dfs.web.ugi";
+  final static public String WEB_UGI_PROPERTY_NAME = DFSConfigKeys.DFS_WEB_UGI_KEY;
   public static final String DELEGATION_PARAMETER_NAME = "delegation";
   public static final String NAMENODE_ADDRESS = "nnaddr";
   static final String SET_DELEGATION = "&" + DELEGATION_PARAMETER_NAME +
DataNode.java
@@ -18,32 +18,7 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICES;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import static org.apache.hadoop.hdfs.server.common.Util.now;
 
 import java.io.BufferedOutputStream;
@@ -448,8 +423,11 @@ public class DataNode extends Configured
       name = config.get(DFS_DATANODE_HOST_NAME_KEY);
     }
     if (name == null) {
-      name = DNS.getDefaultHost(config.get("dfs.datanode.dns.interface",
-          "default"), config.get("dfs.datanode.dns.nameserver", "default"));
+      name = DNS
+          .getDefaultHost(config.get(DFS_DATANODE_DNS_INTERFACE_KEY,
+              DFS_DATANODE_DNS_INTERFACE_DEFAULT), config.get(
+              DFS_DATANODE_DNS_NAMESERVER_KEY,
+              DFS_DATANODE_DNS_NAMESERVER_DEFAULT));
     }
     return name;
   }
@@ -521,7 +499,7 @@ public class DataNode extends Configured
   }
 
   private void startPlugins(Configuration conf) {
-    plugins = conf.getInstances("dfs.datanode.plugins", ServicePlugin.class);
+    plugins = conf.getInstances(DFS_DATANODE_PLUGINS_KEY, ServicePlugin.class);
     for (ServicePlugin p: plugins) {
       try {
         p.start(this);
@@ -810,8 +788,9 @@ public class DataNode extends Configured
     StartupOption startOpt = getStartupOption(conf);
     assert startOpt != null : "Startup option must be set.";
 
-    boolean simulatedFSDataset =
-        conf.getBoolean("dfs.datanode.simulateddatastorage", false);
+    boolean simulatedFSDataset = conf.getBoolean(
+        DFS_DATANODE_SIMULATEDDATASTORAGE_KEY,
+        DFS_DATANODE_SIMULATEDDATASTORAGE_DEFAULT);
 
     if (simulatedFSDataset) {
       initFsDataSet(conf, dataDirs);
@@ -1455,8 +1434,9 @@ public class DataNode extends Configured
     }
 
     // get version and id info from the name-node
-    boolean simulatedFSDataset =
-        conf.getBoolean("dfs.datanode.simulateddatastorage", false);
+    boolean simulatedFSDataset = conf.getBoolean(
+        DFS_DATANODE_SIMULATEDDATASTORAGE_KEY,
+        DFS_DATANODE_SIMULATEDDATASTORAGE_DEFAULT);
 
     if (simulatedFSDataset) {
       storage.createStorageID(getPort());
@@ -1480,8 +1460,8 @@ public class DataNode extends Configured
    * Determine the http server's effective addr
    */
   public static InetSocketAddress getInfoAddr(Configuration conf) {
-    return NetUtils.createSocketAddr(
-        conf.get("dfs.datanode.http.address", "0.0.0.0:50075"));
+    return NetUtils.createSocketAddr(conf.get(DFS_DATANODE_HTTP_ADDRESS_KEY,
+        DFS_DATANODE_HTTP_ADDRESS_DEFAULT));
   }
 
   private void registerMXBean() {
@@ -2258,11 +2238,11 @@ public class DataNode extends Configured
   }
 
   private static void setStartupOption(Configuration conf, StartupOption opt) {
-    conf.set("dfs.datanode.startup", opt.toString());
+    conf.set(DFS_DATANODE_STARTUP_KEY, opt.toString());
   }
 
   static StartupOption getStartupOption(Configuration conf) {
-    return StartupOption.valueOf(conf.get("dfs.datanode.startup",
+    return StartupOption.valueOf(conf.get(DFS_DATANODE_STARTUP_KEY,
                                           StartupOption.REGULAR.toString()));
   }
 
@@ -2661,7 +2641,7 @@ public class DataNode extends Configured
   // Determine a Datanode's streaming address
   public static InetSocketAddress getStreamingAddr(Configuration conf) {
     return NetUtils.createSocketAddr(
-        conf.get("dfs.datanode.address", "0.0.0.0:50010"));
+        conf.get(DFS_DATANODE_ADDRESS_KEY, DFS_DATANODE_ADDRESS_DEFAULT));
   }
 
   @Override // DataNodeMXBean
@@ -2672,7 +2652,7 @@ public class DataNode extends Configured
   @Override // DataNodeMXBean
   public String getRpcPort(){
     InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
-        this.getConf().get("dfs.datanode.ipc.address"));
+        this.getConf().get(DFS_DATANODE_IPC_ADDRESS_KEY));
     return Integer.toString(ipcAddr.getPort());
   }
 
DirectoryScanner.java
@@ -54,7 +54,6 @@ import org.apache.hadoop.util.Daemon;
 @InterfaceAudience.Private
 public class DirectoryScanner implements Runnable {
   private static final Log LOG = LogFactory.getLog(DirectoryScanner.class);
-  private static final int DEFAULT_SCAN_INTERVAL = 21600;
 
   private final DataNode datanode;
   private final FSDataset dataset;
@@ -225,7 +224,7 @@ public class DirectoryScanner implements Runnable {
     this.datanode = dn;
     this.dataset = dataset;
     int interval = conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
-                               DEFAULT_SCAN_INTERVAL);
+                               DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT);
     scanPeriodMsecs = interval * 1000L; //msec
     int threads =
         conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
NameNode.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.fs.Trash;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -156,20 +156,20 @@ public class NameNode implements NamenodeProtocols, FSConstants {
    * Following are nameservice specific keys.
    */
   public static final String[] NAMESERVICE_SPECIFIC_KEYS = {
-    DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
-    DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
-    DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
-    DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
-    DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
-    DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
-    DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
-    DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
-    DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,
-    DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
-    DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
-    DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
-    DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
-    DFSConfigKeys.DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY
+    DFS_NAMENODE_RPC_ADDRESS_KEY,
+    DFS_NAMENODE_NAME_DIR_KEY,
+    DFS_NAMENODE_EDITS_DIR_KEY,
+    DFS_NAMENODE_CHECKPOINT_DIR_KEY,
+    DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
+    DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
+    DFS_NAMENODE_HTTP_ADDRESS_KEY,
+    DFS_NAMENODE_HTTPS_ADDRESS_KEY,
+    DFS_NAMENODE_KEYTAB_FILE_KEY,
+    DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+    DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
+    DFS_NAMENODE_BACKUP_ADDRESS_KEY,
+    DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
+    DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY
   };
 
   public long getProtocolVersion(String protocol,
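For context on the hunk above: in a federated deployment, each key in NAMESERVICE_SPECIFIC_KEYS may also be stored with a nameservice-id suffix, and DFSUtil.setGenericConf (invoked with this array later in the diff) copies the suffixed value over the generic key. A rough sketch of that resolution, under the assumption that the suffix convention is key + "." + nameserviceId:

import org.apache.hadoop.conf.Configuration;

class NameserviceKeyResolution {
  // Sketch only; the real logic lives in DFSUtil.setGenericConf.
  static void resolveForNameservice(Configuration conf, String nsId, String[] keys) {
    for (String key : keys) {
      String specific = conf.get(key + "." + nsId); // e.g. dfs.namenode.rpc-address.ns1
      if (specific != null) {
        conf.set(key, specific); // the generic key now holds this nameservice's value
      }
    }
  }
}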
@@ -264,7 +264,7 @@ public class NameNode implements NamenodeProtocols, FSConstants {
   public static void setServiceAddress(Configuration conf,
                                            String address) {
     LOG.info("Setting ADDRESS " + address);
-    conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, address);
+    conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, address);
   }
 
   /**
@@ -276,7 +276,7 @@ public class NameNode implements NamenodeProtocols, FSConstants {
    */
   public static InetSocketAddress getServiceAddress(Configuration conf,
                                         boolean fallback) {
-    String addr = conf.get(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
+    String addr = conf.get(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
     if (addr == null || addr.isEmpty()) {
       return fallback ? getAddress(conf) : null;
     }
@@ -362,11 +362,11 @@ public class NameNode implements NamenodeProtocols, FSConstants {
 
   protected InetSocketAddress getHttpServerAddress(Configuration conf) {
     return NetUtils.createSocketAddr(
-        conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:50070"));
+        conf.get(DFS_NAMENODE_HTTP_ADDRESS_KEY, DFS_NAMENODE_HTTP_ADDRESS_DEFAULT));
   }
 
   protected void setHttpServerAddress(Configuration conf) {
-    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
+    conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY,
         getHostPortString(getHttpAddress()));
   }
 
@@ -391,8 +391,8 @@ public class NameNode implements NamenodeProtocols, FSConstants {
    */
   void loginAsNameNodeUser(Configuration conf) throws IOException {
     InetSocketAddress socAddr = getRpcServerAddress(conf);
-    SecurityUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,
-        DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, socAddr.getHostName());
+    SecurityUtil.login(conf, DFS_NAMENODE_KEYTAB_FILE_KEY,
+        DFS_NAMENODE_USER_NAME_KEY, socAddr.getHostName());
   }
 
   /**
@@ -405,8 +405,8 @@ public class NameNode implements NamenodeProtocols, FSConstants {
     UserGroupInformation.setConfiguration(conf);
     loginAsNameNodeUser(conf);
     int handlerCount =
-      conf.getInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY,
-                  DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_DEFAULT);
+      conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY,
+                  DFS_DATANODE_HANDLER_COUNT_DEFAULT);
 
     NameNode.initMetrics(conf, this.getRole());
     loadNamesystem(conf);
@@ -414,8 +414,8 @@ public class NameNode implements NamenodeProtocols, FSConstants {
     InetSocketAddress dnSocketAddr = getServiceRpcServerAddress(conf);
     if (dnSocketAddr != null) {
       int serviceHandlerCount =
-        conf.getInt(DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
-                    DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
+        conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
+                    DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
       this.serviceRpcServer = RPC.getServer(NamenodeProtocols.class, this,
           dnSocketAddr.getHostName(), dnSocketAddr.getPort(), serviceHandlerCount,
           false, conf, namesystem.getDelegationTokenSecretManager());
@@ -493,7 +493,8 @@ public class NameNode implements NamenodeProtocols, FSConstants {
     }
     startTrashEmptier(conf);
 
-    plugins = conf.getInstances("dfs.namenode.plugins", ServicePlugin.class);
+    plugins = conf.getInstances(DFS_NAMENODE_PLUGINS_KEY,
+        ServicePlugin.class);
     for (ServicePlugin p: plugins) {
       try {
         p.start(this);
@@ -1338,12 +1339,12 @@ public class NameNode implements NamenodeProtocols, FSConstants {
   private static boolean format(Configuration conf,
                                 boolean isConfirmationNeeded)
       throws IOException {
-    if (!conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY,
-                         DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT)) {
-      throw new IOException("The option " + DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY
+    if (!conf.getBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY,
+                         DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_DEFAULT)) {
+      throw new IOException("The option " + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY
                             + " is set to false for this filesystem, so it "
                             + "cannot be formatted. You will need to set "
-                            + DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY +" parameter "
+                            + DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY +" parameter "
                             + "to true in order to format this filesystem");
     }
 
@@ -1490,11 +1491,11 @@ public class NameNode implements NamenodeProtocols, FSConstants {
   }
 
   private static void setStartupOption(Configuration conf, StartupOption opt) {
-    conf.set("dfs.namenode.startup", opt.toString());
+    conf.set(DFS_NAMENODE_STARTUP_KEY, opt.toString());
   }
 
   static StartupOption getStartupOption(Configuration conf) {
-    return StartupOption.valueOf(conf.get("dfs.namenode.startup",
+    return StartupOption.valueOf(conf.get(DFS_NAMENODE_STARTUP_KEY,
                                           StartupOption.REGULAR.toString()));
   }
 
@@ -1586,10 +1587,10 @@ public class NameNode implements NamenodeProtocols, FSConstants {
 
     DFSUtil.setGenericConf(conf, nameserviceId, NAMESERVICE_SPECIFIC_KEYS);
 
-    if (conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
+    if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
       URI defaultUri = URI.create(FSConstants.HDFS_URI_SCHEME + "://"
-          + conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY));
-      conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, defaultUri.toString());
+          + conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
+      conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
     }
   }
 
SecondaryNameNode.java
@@ -33,7 +33,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -137,8 +137,8 @@ public class SecondaryNameNode implements Runnable {
 
   public static InetSocketAddress getHttpAddress(Configuration conf) {
     return NetUtils.createSocketAddr(conf.get(
-        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
+        DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
+        DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT));
   }
 
   /**
@@ -149,15 +149,12 @@ public class SecondaryNameNode implements Runnable {
     infoBindAddress = infoSocAddr.getHostName();
     UserGroupInformation.setConfiguration(conf);
     if (UserGroupInformation.isSecurityEnabled()) {
-      SecurityUtil.login(conf,
-          DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
-          DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY,
-          infoBindAddress);
+      SecurityUtil.login(conf, DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
+          DFS_SECONDARY_NAMENODE_USER_NAME_KEY, infoBindAddress);
     }
     // initiate Java VM metrics
     JvmMetrics.create("SecondaryNameNode",
-        conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
-        DefaultMetricsSystem.instance());
+        conf.get(DFS_METRICS_SESSION_ID_KEY), DefaultMetricsSystem.instance());
 
     // Create connection to the namenode.
     shouldRun = true;
@@ -178,19 +175,19 @@ public class SecondaryNameNode implements Runnable {
     checkpointImage.recoverCreate(checkpointDirs, checkpointEditsDirs);
 
     // Initialize other scheduling parameters from the configuration
-    checkpointPeriod = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY,
-                                    DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT);
-    checkpointSize = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_KEY,
-                                  DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_DEFAULT);
+    checkpointPeriod = conf.getLong(DFS_NAMENODE_CHECKPOINT_PERIOD_KEY,
+                                    DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT);
+    checkpointSize = conf.getLong(DFS_NAMENODE_CHECKPOINT_SIZE_KEY,
+                                  DFS_NAMENODE_CHECKPOINT_SIZE_DEFAULT);
 
     // initialize the webserver for uploading files.
     // Kerberized SSL servers must be run from the host principal...
     UserGroupInformation httpUGI =
         UserGroupInformation.loginUserFromKeytabAndReturnUGI(
             SecurityUtil.getServerPrincipal(conf
-                .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
+                .get(DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
                 infoBindAddress),
-            conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY));
+            conf.get(DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY));
     try {
       infoServer = httpUGI.doAs(new PrivilegedExceptionAction<HttpServer>() {
         @Override
@@ -201,7 +198,7 @@ public class SecondaryNameNode implements Runnable {
           int tmpInfoPort = infoSocAddr.getPort();
           infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
               tmpInfoPort == 0, conf,
-              new AccessControlList(conf.get(DFSConfigKeys.DFS_ADMIN, " ")));
+              new AccessControlList(conf.get(DFS_ADMIN, " ")));
 
           if(UserGroupInformation.isSecurityEnabled()) {
             System.setProperty("https.cipherSuites",
@@ -232,7 +229,7 @@ public class SecondaryNameNode implements Runnable {
     infoPort = infoServer.getPort();
     if(!UserGroupInformation.isSecurityEnabled())
       imagePort = infoPort;
-    conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" +infoPort);
+    conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" +infoPort);
     LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" +infoPort);
     LOG.info("Secondary image servlet up at: " + infoBindAddress + ":" + imagePort);
     LOG.warn("Checkpoint Period :" + checkpointPeriod + " secs " +