HDFS-8803. Move DfsClientConf to hdfs-client. Contributed by Mingliang Liu.
parent c0a4cd978a
commit 535b6db74c
@@ -17,7 +17,12 @@
  */
 package org.apache.hadoop.hdfs.client;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import java.util.concurrent.TimeUnit;
+
 /** Client configuration properties */
+@InterfaceAudience.Private
 public interface HdfsClientConfigKeys {
   long SECOND = 1000L;
   long MINUTE = 60 * SECOND;
@@ -31,7 +36,7 @@ public interface HdfsClientConfigKeys {
   String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
       "^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$";
 
-  static final String PREFIX = "dfs.client.";
+  String PREFIX = "dfs.client.";
   String DFS_NAMESERVICES = "dfs.nameservices";
   int DFS_NAMENODE_HTTP_PORT_DEFAULT = 50070;
   String DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
@@ -45,6 +50,72 @@ public interface HdfsClientConfigKeys {
   int DFS_NAMENODE_RPC_PORT_DEFAULT = 8020;
   String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY =
       "dfs.namenode.kerberos.principal";
+  String DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";
+  int DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
+  String DFS_CLIENT_SOCKET_TIMEOUT_KEY = "dfs.client.socket-timeout";
+  String DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY =
+      "dfs.client.socketcache.capacity";
+  int DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT = 16;
+  String DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY =
+      "dfs.client.socketcache.expiryMsec";
+  long DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT = 3000;
+  String DFS_CLIENT_USE_DN_HOSTNAME = "dfs.client.use.datanode.hostname";
+  boolean DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT = false;
+  String DFS_CLIENT_CACHE_DROP_BEHIND_WRITES =
+      "dfs.client.cache.drop.behind.writes";
+  String DFS_CLIENT_CACHE_DROP_BEHIND_READS =
+      "dfs.client.cache.drop.behind.reads";
+  String DFS_CLIENT_CACHE_READAHEAD = "dfs.client.cache.readahead";
+  String DFS_CLIENT_CACHED_CONN_RETRY_KEY = "dfs.client.cached.conn.retry";
+  int DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT = 3;
+  String DFS_CLIENT_CONTEXT = "dfs.client.context";
+  String DFS_CLIENT_CONTEXT_DEFAULT = "default";
+  String DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS =
+      "dfs.client.file-block-storage-locations.num-threads";
+  int DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT = 10;
+  String DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS =
+      "dfs.client.file-block-storage-locations.timeout.millis";
+  int DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT = 1000;
+  String DFS_CLIENT_USE_LEGACY_BLOCKREADER =
+      "dfs.client.use.legacy.blockreader";
+  boolean DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT = false;
+  String DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL =
+      "dfs.client.use.legacy.blockreader.local";
+  boolean DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT = false;
+  String DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY =
+      "dfs.client.datanode-restart.timeout";
+  long DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT = 30;
+  // Much code in hdfs is not yet updated to use these keys.
+  // the initial delay (unit is ms) for locateFollowingBlock, the delay time
+  // will increase exponentially(double) for each retry.
+  String DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY =
+      "dfs.client.max.block.acquire.failures";
+  int DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT = 3;
+  String DFS_CHECKSUM_TYPE_KEY = "dfs.checksum.type";
+  String DFS_CHECKSUM_TYPE_DEFAULT = "CRC32C";
+  String DFS_BYTES_PER_CHECKSUM_KEY = "dfs.bytes-per-checksum";
+  int DFS_BYTES_PER_CHECKSUM_DEFAULT = 512;
+  String DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY =
+      "dfs.datanode.socket.write.timeout";
+  String DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC =
+      "dfs.client.domain.socket.data.traffic";
+  boolean DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT = false;
+  String DFS_DOMAIN_SOCKET_PATH_KEY = "dfs.domain.socket.path";
+  String DFS_DOMAIN_SOCKET_PATH_DEFAULT = "";
+  String DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS =
+      "dfs.short.circuit.shared.memory.watcher.interrupt.check.ms";
+  int DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT =
+      60000;
+  String DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY =
+      "dfs.client.slow.io.warning.threshold.ms";
+  long DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 30000;
+  String DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS =
+      "dfs.client.key.provider.cache.expiry";
+  long DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT =
+      TimeUnit.DAYS.toMillis(10); // 10 days
+  String DFS_HDFS_BLOCKS_METADATA_ENABLED =
+      "dfs.datanode.hdfs-blocks-metadata.enabled";
+  boolean DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT = false;
 
   /** dfs.client.retry configuration properties */
   interface Retry {
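Because HdfsClientConfigKeys is an interface, every constant above is implicitly public static final, so client code can read these settings without touching the server-side DFSConfigKeys class. A minimal sketch of reading one of the new keys (the value set here is illustrative, not a recommendation):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class ClientConfExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Override a client-side setting; the key resolves to
        // "dfs.client.socketcache.capacity".
        conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 32);

        // Read it back, falling back to the interface-provided default (16).
        int capacity = conf.getInt(
            HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,
            HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT);
        System.out.println("socket cache capacity = " + capacity);
      }
    }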
@@ -17,49 +17,73 @@
  */
 package org.apache.hadoop.hdfs.client.impl;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import com.google.common.annotations.VisibleForTesting;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.BlockReaderFactory;
-import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.util.ByteArrayManager;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.util.DataChecksum;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import com.google.common.annotations.VisibleForTesting;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BLOCK_SIZE_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CHECKSUM_TYPE_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_REPLICATION_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_REPLICATION_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Failover;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.HedgedRead;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Mmap;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Read;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Retry;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.ShortCircuit;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Write;
 
 /**
- * DFSClient configuration
+ * DFSClient configuration.
  */
 public class DfsClientConf {
+  private static final Logger LOG = LoggerFactory.getLogger(DfsClientConf
+      .class);
 
   private final int hdfsTimeout; // timeout value for a DFS operation.
 
@@ -76,7 +100,7 @@ public class DfsClientConf {
   private final ByteArrayManager.Conf writeByteArrayManagerConf;
   private final int socketTimeout;
   private final long excludedNodesCacheExpiry;
-  /** Wait time window (in msec) if BlockMissingException is caught */
+  /** Wait time window (in msec) if BlockMissingException is caught. */
   private final int timeWindow;
   private final int numCachedConnRetry;
   private final int numBlockWriteRetry;
@@ -106,62 +130,63 @@ public class DfsClientConf {
     hdfsTimeout = Client.getTimeout(conf);
 
     maxRetryAttempts = conf.getInt(
-        HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY,
-        HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_DEFAULT);
+        Retry.MAX_ATTEMPTS_KEY,
+        Retry.MAX_ATTEMPTS_DEFAULT);
     timeWindow = conf.getInt(
-        HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY,
-        HdfsClientConfigKeys.Retry.WINDOW_BASE_DEFAULT);
+        Retry.WINDOW_BASE_KEY,
+        Retry.WINDOW_BASE_DEFAULT);
     retryTimesForGetLastBlockLength = conf.getInt(
-        HdfsClientConfigKeys.Retry.TIMES_GET_LAST_BLOCK_LENGTH_KEY,
-        HdfsClientConfigKeys.Retry.TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT);
+        Retry.TIMES_GET_LAST_BLOCK_LENGTH_KEY,
+        Retry.TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT);
     retryIntervalForGetLastBlockLength = conf.getInt(
-        HdfsClientConfigKeys.Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_KEY,
-        HdfsClientConfigKeys.Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT);
+        Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_KEY,
+        Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT);
 
     maxFailoverAttempts = conf.getInt(
-        HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_KEY,
-        HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_DEFAULT);
+        Failover.MAX_ATTEMPTS_KEY,
+        Failover.MAX_ATTEMPTS_DEFAULT);
     failoverSleepBaseMillis = conf.getInt(
-        HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_KEY,
-        HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_DEFAULT);
+        Failover.SLEEPTIME_BASE_KEY,
+        Failover.SLEEPTIME_BASE_DEFAULT);
     failoverSleepMaxMillis = conf.getInt(
-        HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_KEY,
-        HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_DEFAULT);
+        Failover.SLEEPTIME_MAX_KEY,
+        Failover.SLEEPTIME_MAX_DEFAULT);
 
     maxBlockAcquireFailures = conf.getInt(
         DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
         DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
-    datanodeSocketWriteTimeout = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
-        HdfsServerConstants.WRITE_TIMEOUT);
+    datanodeSocketWriteTimeout = conf.getInt(
+        DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
+        HdfsConstants.WRITE_TIMEOUT);
     ioBufferSize = conf.getInt(
         CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
         CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
     defaultChecksumOpt = getChecksumOptFromConf(conf);
     socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
-        HdfsServerConstants.READ_TIMEOUT);
+        HdfsConstants.READ_TIMEOUT);
     /** dfs.write.packet.size is an internal config variable */
     writePacketSize = conf.getInt(
-        DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
-        DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
+        DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
+        DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
     writeMaxPackets = conf.getInt(
-        HdfsClientConfigKeys.Write.MAX_PACKETS_IN_FLIGHT_KEY,
-        HdfsClientConfigKeys.Write.MAX_PACKETS_IN_FLIGHT_DEFAULT);
+        Write.MAX_PACKETS_IN_FLIGHT_KEY,
+        Write.MAX_PACKETS_IN_FLIGHT_DEFAULT);
 
     final boolean byteArrayManagerEnabled = conf.getBoolean(
-        HdfsClientConfigKeys.Write.ByteArrayManager.ENABLED_KEY,
-        HdfsClientConfigKeys.Write.ByteArrayManager.ENABLED_DEFAULT);
+        Write.ByteArrayManager.ENABLED_KEY,
+        Write.ByteArrayManager.ENABLED_DEFAULT);
     if (!byteArrayManagerEnabled) {
       writeByteArrayManagerConf = null;
     } else {
       final int countThreshold = conf.getInt(
-          HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_THRESHOLD_KEY,
-          HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_THRESHOLD_DEFAULT);
+          Write.ByteArrayManager.COUNT_THRESHOLD_KEY,
+          Write.ByteArrayManager.COUNT_THRESHOLD_DEFAULT);
       final int countLimit = conf.getInt(
-          HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_LIMIT_KEY,
-          HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_LIMIT_DEFAULT);
+          Write.ByteArrayManager.COUNT_LIMIT_KEY,
+          Write.ByteArrayManager.COUNT_LIMIT_DEFAULT);
       final long countResetTimePeriodMs = conf.getLong(
-          HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_RESET_TIME_PERIOD_MS_KEY,
-          HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_RESET_TIME_PERIOD_MS_DEFAULT);
+          Write.ByteArrayManager.COUNT_RESET_TIME_PERIOD_MS_KEY,
+          Write.ByteArrayManager.COUNT_RESET_TIME_PERIOD_MS_DEFAULT);
       writeByteArrayManagerConf = new ByteArrayManager.Conf(
           countThreshold, countLimit, countResetTimePeriodMs);
     }
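The constructor above snapshots every client setting once, so DfsClientConf is an immutable value object: later changes to the Configuration are not observed. A usage sketch (getSocketTimeout is a getter referenced elsewhere in this commit; the timeout value is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
    import org.apache.hadoop.hdfs.client.impl.DfsClientConf;

    public class DfsClientConfExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Shrink the DataNode socket timeout to 30 seconds for this client.
        conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 30000);

        // All settings are captured at construction time.
        DfsClientConf clientConf = new DfsClientConf(conf);
        System.out.println("socket timeout = "
            + clientConf.getSocketTimeout() + " ms");
      }
    }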
@@ -172,62 +197,62 @@ public class DfsClientConf {
         DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
     taskId = conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE");
     excludedNodesCacheExpiry = conf.getLong(
-        HdfsClientConfigKeys.Write.EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_KEY,
-        HdfsClientConfigKeys.Write.EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT);
-    prefetchSize = conf.getLong(HdfsClientConfigKeys.Read.PREFETCH_SIZE_KEY,
+        Write.EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_KEY,
+        Write.EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT);
+    prefetchSize = conf.getLong(Read.PREFETCH_SIZE_KEY,
         10 * defaultBlockSize);
     numCachedConnRetry = conf.getInt(DFS_CLIENT_CACHED_CONN_RETRY_KEY,
         DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT);
     numBlockWriteRetry = conf.getInt(
-        HdfsClientConfigKeys.BlockWrite.RETRIES_KEY,
-        HdfsClientConfigKeys.BlockWrite.RETRIES_DEFAULT);
+        BlockWrite.RETRIES_KEY,
+        BlockWrite.RETRIES_DEFAULT);
     numBlockWriteLocateFollowingRetry = conf.getInt(
-        HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
-        HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
+        BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
+        BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
     blockWriteLocateFollowingInitialDelayMs = conf.getInt(
-        HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_KEY,
-        HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_DEFAULT);
+        BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_KEY,
+        BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_DEFAULT);
     uMask = FsPermission.getUMask(conf);
     connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME,
         DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
     hdfsBlocksMetadataEnabled = conf.getBoolean(
-        DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,
-        DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT);
+        HdfsClientConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,
+        HdfsClientConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT);
     fileBlockStorageLocationsNumThreads = conf.getInt(
-        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS,
-        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT);
+        HdfsClientConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS,
+        HdfsClientConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT);
     fileBlockStorageLocationsTimeoutMs = conf.getInt(
-        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS,
-        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT);
+        HdfsClientConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS,
+        HdfsClientConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT);
 
     datanodeRestartTimeout = conf.getLong(
         DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY,
         DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT) * 1000;
     slowIoWarningThresholdMs = conf.getLong(
-        DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY,
-        DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
+        DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY,
+        DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
 
     shortCircuitConf = new ShortCircuitConf(conf);
 
     hedgedReadThresholdMillis = conf.getLong(
-        HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_KEY,
-        HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_DEFAULT);
+        HedgedRead.THRESHOLD_MILLIS_KEY,
+        HedgedRead.THRESHOLD_MILLIS_DEFAULT);
     hedgedReadThreadpoolSize = conf.getInt(
-        HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_KEY,
-        HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_DEFAULT);
+        HedgedRead.THREADPOOL_SIZE_KEY,
+        HedgedRead.THREADPOOL_SIZE_DEFAULT);
   }
 
   private DataChecksum.Type getChecksumType(Configuration conf) {
     final String checksum = conf.get(
-        DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
-        DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
+        DFS_CHECKSUM_TYPE_KEY,
+        DFS_CHECKSUM_TYPE_DEFAULT);
     try {
       return DataChecksum.Type.valueOf(checksum);
     } catch(IllegalArgumentException iae) {
-      DFSClient.LOG.warn("Bad checksum type: " + checksum + ". Using default "
-          + DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
+      LOG.warn("Bad checksum type: {}. Using default {}", checksum,
+          DFS_CHECKSUM_TYPE_DEFAULT);
       return DataChecksum.Type.valueOf(
-          DFS_CHECKSUM_TYPE_DEFAULT);
+          DFS_CHECKSUM_TYPE_DEFAULT);
     }
   }
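The getChecksumType change above also switches from a borrowed DFSClient.LOG to a class-local SLF4J logger with {} placeholders, which defers argument formatting until the level check passes. A self-contained sketch of the style difference (the checksum value is illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class Slf4jStyleExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(Slf4jStyleExample.class);

      public static void main(String[] args) {
        String checksum = "CRC32Z"; // an invalid type, for illustration
        // Old commons-logging style: the message string is built eagerly,
        // even when WARN is disabled:
        //   LOG.warn("Bad checksum type: " + checksum + ". Using default CRC32C");

        // SLF4J style: placeholders are only substituted if WARN is enabled.
        LOG.warn("Bad checksum type: {}. Using default {}", checksum, "CRC32C");
      }
    }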
@@ -498,8 +523,11 @@ public class DfsClientConf {
     return shortCircuitConf;
   }
 
+  /**
+   * Configuration for short-circuit reads.
+   */
   public static class ShortCircuitConf {
-    private static final Log LOG = LogFactory.getLog(ShortCircuitConf.class);
+    private static final Logger LOG = DfsClientConf.LOG;
 
     private final int socketCacheCapacity;
     private final long socketCacheExpiry;
@@ -524,10 +552,6 @@ public class DfsClientConf {
 
     private final long keyProviderCacheExpiryMs;
 
-    @VisibleForTesting
-    public BlockReaderFactory.FailureInjector brfFailureInjector =
-        new BlockReaderFactory.FailureInjector();
-
     public ShortCircuitConf(Configuration conf) {
       socketCacheCapacity = conf.getInt(
           DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,
@@ -537,66 +561,64 @@ public class DfsClientConf {
           DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT);
 
       useLegacyBlockReader = conf.getBoolean(
-          DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER,
-          DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT);
+          DFS_CLIENT_USE_LEGACY_BLOCKREADER,
+          DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT);
       useLegacyBlockReaderLocal = conf.getBoolean(
-          DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
-          DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT);
+          DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
+          DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT);
       shortCircuitLocalReads = conf.getBoolean(
-          HdfsClientConfigKeys.Read.ShortCircuit.KEY,
-          HdfsClientConfigKeys.Read.ShortCircuit.DEFAULT);
+          Read.ShortCircuit.KEY,
+          Read.ShortCircuit.DEFAULT);
       domainSocketDataTraffic = conf.getBoolean(
-          DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
-          DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT);
+          DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
+          DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT);
       domainSocketPath = conf.getTrimmed(
-          DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
-          DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT);
+          DFS_DOMAIN_SOCKET_PATH_KEY,
+          DFS_DOMAIN_SOCKET_PATH_DEFAULT);
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL
-            + " = " + useLegacyBlockReaderLocal);
-        LOG.debug(HdfsClientConfigKeys.Read.ShortCircuit.KEY
-            + " = " + shortCircuitLocalReads);
-        LOG.debug(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC
-            + " = " + domainSocketDataTraffic);
-        LOG.debug(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY
-            + " = " + domainSocketPath);
-      }
+      LOG.debug(DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL
+          + " = {}", useLegacyBlockReaderLocal);
+      LOG.debug(Read.ShortCircuit.KEY
+          + " = {}", shortCircuitLocalReads);
+      LOG.debug(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC
+          + " = {}", domainSocketDataTraffic);
+      LOG.debug(DFS_DOMAIN_SOCKET_PATH_KEY
+          + " = {}", domainSocketPath);
 
       skipShortCircuitChecksums = conf.getBoolean(
-          HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
-          HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_DEFAULT);
+          Read.ShortCircuit.SKIP_CHECKSUM_KEY,
+          Read.ShortCircuit.SKIP_CHECKSUM_DEFAULT);
       shortCircuitBufferSize = conf.getInt(
-          HdfsClientConfigKeys.Read.ShortCircuit.BUFFER_SIZE_KEY,
-          HdfsClientConfigKeys.Read.ShortCircuit.BUFFER_SIZE_DEFAULT);
+          Read.ShortCircuit.BUFFER_SIZE_KEY,
+          Read.ShortCircuit.BUFFER_SIZE_DEFAULT);
       shortCircuitStreamsCacheSize = conf.getInt(
-          HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_SIZE_KEY,
-          HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_SIZE_DEFAULT);
+          Read.ShortCircuit.STREAMS_CACHE_SIZE_KEY,
+          Read.ShortCircuit.STREAMS_CACHE_SIZE_DEFAULT);
       shortCircuitStreamsCacheExpiryMs = conf.getLong(
-          HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_KEY,
-          HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_DEFAULT);
+          Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_KEY,
+          Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_DEFAULT);
       shortCircuitMmapEnabled = conf.getBoolean(
-          HdfsClientConfigKeys.Mmap.ENABLED_KEY,
-          HdfsClientConfigKeys.Mmap.ENABLED_DEFAULT);
+          Mmap.ENABLED_KEY,
+          Mmap.ENABLED_DEFAULT);
       shortCircuitMmapCacheSize = conf.getInt(
-          HdfsClientConfigKeys.Mmap.CACHE_SIZE_KEY,
-          HdfsClientConfigKeys.Mmap.CACHE_SIZE_DEFAULT);
+          Mmap.CACHE_SIZE_KEY,
+          Mmap.CACHE_SIZE_DEFAULT);
       shortCircuitMmapCacheExpiryMs = conf.getLong(
-          HdfsClientConfigKeys.Mmap.CACHE_TIMEOUT_MS_KEY,
-          HdfsClientConfigKeys.Mmap.CACHE_TIMEOUT_MS_DEFAULT);
+          Mmap.CACHE_TIMEOUT_MS_KEY,
+          Mmap.CACHE_TIMEOUT_MS_DEFAULT);
       shortCircuitMmapCacheRetryTimeout = conf.getLong(
-          HdfsClientConfigKeys.Mmap.RETRY_TIMEOUT_MS_KEY,
-          HdfsClientConfigKeys.Mmap.RETRY_TIMEOUT_MS_DEFAULT);
+          Mmap.RETRY_TIMEOUT_MS_KEY,
+          Mmap.RETRY_TIMEOUT_MS_DEFAULT);
       shortCircuitCacheStaleThresholdMs = conf.getLong(
-          HdfsClientConfigKeys.ShortCircuit.REPLICA_STALE_THRESHOLD_MS_KEY,
-          HdfsClientConfigKeys.ShortCircuit.REPLICA_STALE_THRESHOLD_MS_DEFAULT);
+          ShortCircuit.REPLICA_STALE_THRESHOLD_MS_KEY,
+          ShortCircuit.REPLICA_STALE_THRESHOLD_MS_DEFAULT);
       shortCircuitSharedMemoryWatcherInterruptCheckMs = conf.getInt(
-          DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,
-          DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT);
+          DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,
+          DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT);
 
       keyProviderCacheExpiryMs = conf.getLong(
-          DFSConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS,
-          DFSConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT);
+          DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS,
+          DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT);
     }
 
     /**
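ShortCircuitConf reads back exactly the keys consolidated into HdfsClientConfigKeys, so enabling short-circuit local reads from client code uses the same constants. A hedged sketch — the socket path is illustrative and must match the DataNode's own dfs.domain.socket.path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class ShortCircuitSetupExample {
      public static Configuration shortCircuitConf() {
        Configuration conf = new Configuration();
        // Enable short-circuit local reads ...
        conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
        // ... over a UNIX domain socket shared with the local DataNode
        // (illustrative path; it must exist on this machine).
        conf.set(HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
            "/var/lib/hadoop-hdfs/dn_socket");
        return conf;
      }
    }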
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client.impl;
@@ -78,6 +78,13 @@ public class HdfsConstants {
   public static final String CLIENT_NAMENODE_PROTOCOL_NAME =
       "org.apache.hadoop.hdfs.protocol.ClientProtocol";
 
+  // Timeouts for communicating with DataNode for streaming writes/reads
+  public static final int READ_TIMEOUT = 60 * 1000;
+  public static final int READ_TIMEOUT_EXTENSION = 5 * 1000;
+  public static final int WRITE_TIMEOUT = 8 * 60 * 1000;
+  //for write pipeline
+  public static final int WRITE_TIMEOUT_EXTENSION = 5 * 1000;
+
   // SafeMode actions
   public enum SafeModeAction {
     SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET
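These timeout constants scale with pipeline length in DFSClient (see the getDatanodeWriteTimeout hunk later in this commit): each extra DataNode adds one WRITE_TIMEOUT_EXTENSION. With the defaults above, a three-node pipeline gets 8*60*1000 + 3*5*1000 = 495,000 ms. A standalone sketch of the same formula:

    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    public class PipelineTimeoutExample {
      // Mirrors DFSClient#getDatanodeWriteTimeout: a base write timeout plus
      // one extension per DataNode in the pipeline; 0 means "disabled".
      static int datanodeWriteTimeout(int configuredTimeout, int numNodes) {
        return configuredTimeout > 0
            ? configuredTimeout + HdfsConstants.WRITE_TIMEOUT_EXTENSION * numNodes
            : 0;
      }

      public static void main(String[] args) {
        // Prints 495000 for a 3-node pipeline with the default base timeout.
        System.out.println(
            datanodeWriteTimeout(HdfsConstants.WRITE_TIMEOUT, 3));
      }
    }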
@@ -22,29 +22,30 @@ import java.util.LinkedList;
 import java.util.Map;
 import java.util.Queue;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Manage byte array creation and release.
  */
 @InterfaceAudience.Private
 public abstract class ByteArrayManager {
-  static final Log LOG = LogFactory.getLog(ByteArrayManager.class);
-  private static final ThreadLocal<StringBuilder> debugMessage = new ThreadLocal<StringBuilder>() {
+  static final Logger LOG = LoggerFactory.getLogger(ByteArrayManager.class);
+  private static final ThreadLocal<StringBuilder> DEBUG_MESSAGE =
+      new ThreadLocal<StringBuilder>() {
     protected StringBuilder initialValue() {
      return new StringBuilder();
    }
  };
 
   private static void logDebugMessage() {
-    final StringBuilder b = debugMessage.get();
-    LOG.debug(b);
+    final StringBuilder b = DEBUG_MESSAGE.get();
+    LOG.debug(b.toString());
     b.setLength(0);
   }
 
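The DEBUG_MESSAGE field is a pattern worth noting: a per-thread StringBuilder accumulates fragments across several calls and is flushed and reset in a single debug statement, so appends from different threads never interleave and no lock is needed. A self-contained sketch of the same idea (class and method names here are illustrative):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ThreadLocalDebugBuffer {
      private static final Logger LOG =
          LoggerFactory.getLogger(ThreadLocalDebugBuffer.class);

      // One builder per thread; each thread sees only its own buffer.
      private static final ThreadLocal<StringBuilder> BUF =
          ThreadLocal.withInitial(StringBuilder::new);

      static void trace(String fragment) {
        if (LOG.isDebugEnabled()) {
          BUF.get().append(fragment);
        }
      }

      static void flush() {
        if (LOG.isDebugEnabled()) {
          StringBuilder b = BUF.get();
          LOG.debug(b.toString()); // SLF4J takes a String, hence toString()
          b.setLength(0);          // reset for the next sequence of appends
        }
      }
    }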
@@ -97,7 +98,7 @@ public abstract class ByteArrayManager {
 
     /**
      * Increment the counter, and reset it if there is no increment
-     * for acertain time period.
+     * for a certain time period.
      *
      * @return the new count.
      */
@@ -112,10 +113,10 @@ public abstract class ByteArrayManager {
   }
 
   /** A map from integers to counters. */
-  static class CounterMap {
+  static final class CounterMap {
     /** @see ByteArrayManager.Conf#countResetTimePeriodMs */
     private final long countResetTimePeriodMs;
-    private final Map<Integer, Counter> map = new HashMap<Integer, Counter>();
+    private final Map<Integer, Counter> map = new HashMap<>();
 
     private CounterMap(long countResetTimePeriodMs) {
       this.countResetTimePeriodMs = countResetTimePeriodMs;
@@ -125,7 +126,8 @@ public abstract class ByteArrayManager {
      * @return the counter for the given key;
      *         and create a new counter if it does not exist.
      */
-    synchronized Counter get(final Integer key, final boolean createIfNotExist) {
+    synchronized Counter get(final Integer key, final boolean
+        createIfNotExist) {
       Counter count = map.get(key);
       if (count == null && createIfNotExist) {
         count = new Counter(countResetTimePeriodMs);
@@ -133,17 +135,13 @@ public abstract class ByteArrayManager {
       }
       return count;
     }
-
-    synchronized void clear() {
-      map.clear();
-    }
   }
 
   /** Manage byte arrays with the same fixed length. */
   static class FixedLengthManager {
     private final int byteArrayLength;
     private final int maxAllocated;
-    private final Queue<byte[]> freeQueue = new LinkedList<byte[]>();
+    private final Queue<byte[]> freeQueue = new LinkedList<>();
 
     private int numAllocated = 0;
 
@@ -163,25 +161,25 @@ public abstract class ByteArrayManager {
      */
     synchronized byte[] allocate() throws InterruptedException {
       if (LOG.isDebugEnabled()) {
-        debugMessage.get().append(", ").append(this);
+        DEBUG_MESSAGE.get().append(", ").append(this);
       }
       for(; numAllocated >= maxAllocated;) {
         if (LOG.isDebugEnabled()) {
-          debugMessage.get().append(": wait ...");
+          DEBUG_MESSAGE.get().append(": wait ...");
           logDebugMessage();
         }
 
         wait();
 
         if (LOG.isDebugEnabled()) {
-          debugMessage.get().append("wake up: ").append(this);
+          DEBUG_MESSAGE.get().append("wake up: ").append(this);
         }
       }
       numAllocated++;
 
       final byte[] array = freeQueue.poll();
       if (LOG.isDebugEnabled()) {
-        debugMessage.get().append(", recycled? ").append(array != null);
+        DEBUG_MESSAGE.get().append(", recycled? ").append(array != null);
       }
       return array != null? array : new byte[byteArrayLength];
     }
@@ -197,7 +195,7 @@ public abstract class ByteArrayManager {
       Preconditions.checkNotNull(array);
       Preconditions.checkArgument(array.length == byteArrayLength);
       if (LOG.isDebugEnabled()) {
-        debugMessage.get().append(", ").append(this);
+        DEBUG_MESSAGE.get().append(", ").append(this);
       }
 
       notify();
@@ -210,7 +208,7 @@ public abstract class ByteArrayManager {
 
       if (freeQueue.size() < maxAllocated - numAllocated) {
         if (LOG.isDebugEnabled()) {
-          debugMessage.get().append(", freeQueue.offer");
+          DEBUG_MESSAGE.get().append(", freeQueue.offer");
         }
         freeQueue.offer(array);
       }
@@ -227,7 +225,7 @@ public abstract class ByteArrayManager {
   /** A map from array lengths to byte array managers. */
   static class ManagerMap {
     private final int countLimit;
-    private final Map<Integer, FixedLengthManager> map = new HashMap<Integer, FixedLengthManager>();
+    private final Map<Integer, FixedLengthManager> map = new HashMap<>();
 
     ManagerMap(int countLimit) {
       this.countLimit = countLimit;
@@ -243,12 +241,11 @@ public abstract class ByteArrayManager {
       }
       return manager;
     }
-
-    synchronized void clear() {
-      map.clear();
-    }
   }
 
+  /**
+   * Configuration for ByteArrayManager.
+   */
   public static class Conf {
     /**
      * The count threshold for each array length so that a manager is created
@@ -265,7 +262,8 @@ public abstract class ByteArrayManager {
      */
     private final long countResetTimePeriodMs;
 
-    public Conf(int countThreshold, int countLimit, long countResetTimePeriodMs) {
+    public Conf(int countThreshold, int countLimit, long
+        countResetTimePeriodMs) {
       this.countThreshold = countThreshold;
       this.countLimit = countLimit;
       this.countResetTimePeriodMs = countResetTimePeriodMs;
@@ -334,7 +332,8 @@ public abstract class ByteArrayManager {
    * Allocate a byte array, where the length of the allocated array
    * is the least power of two of the given length
    * unless the given length is less than {@link #MIN_ARRAY_LENGTH}.
-   * In such case, the returned array length is equal to {@link #MIN_ARRAY_LENGTH}.
+   * In such case, the returned array length is equal to {@link
+   * #MIN_ARRAY_LENGTH}.
    *
    * If the number of allocated arrays exceeds the capacity,
    * the current thread is blocked until
@@ -343,13 +342,15 @@ public abstract class ByteArrayManager {
    * The byte array allocated by this method must be returned for recycling
    * via the {@link Impl#release(byte[])} method.
    *
-   * @return a byte array with length larger than or equal to the given length.
+   * @return a byte array with length larger than or equal to the given
+   *         length.
    */
   @Override
-  public byte[] newByteArray(final int arrayLength) throws InterruptedException {
+  public byte[] newByteArray(final int arrayLength)
+      throws InterruptedException {
     Preconditions.checkArgument(arrayLength >= 0);
     if (LOG.isDebugEnabled()) {
-      debugMessage.get().append("allocate(").append(arrayLength).append(")");
+      DEBUG_MESSAGE.get().append("allocate(").append(arrayLength).append(")");
     }
 
     final byte[] array;
@@ -361,17 +362,19 @@ public abstract class ByteArrayManager {
       final long count = counters.get(powerOfTwo, true).increment();
       final boolean aboveThreshold = count > conf.countThreshold;
       // create a new manager only if the count is above threshold.
-      final FixedLengthManager manager = managers.get(powerOfTwo, aboveThreshold);
+      final FixedLengthManager manager =
+          managers.get(powerOfTwo, aboveThreshold);
 
       if (LOG.isDebugEnabled()) {
-        debugMessage.get().append(": count=").append(count)
+        DEBUG_MESSAGE.get().append(": count=").append(count)
             .append(aboveThreshold? ", aboveThreshold": ", belowThreshold");
       }
       array = manager != null? manager.allocate(): new byte[powerOfTwo];
     }
 
     if (LOG.isDebugEnabled()) {
-      debugMessage.get().append(", return byte[").append(array.length).append("]");
+      DEBUG_MESSAGE.get().append(", return byte[")
+          .append(array.length).append("]");
       logDebugMessage();
     }
     return array;
@@ -389,7 +392,8 @@ public abstract class ByteArrayManager {
   public int release(final byte[] array) {
     Preconditions.checkNotNull(array);
     if (LOG.isDebugEnabled()) {
-      debugMessage.get().append("recycle: array.length=").append(array.length);
+      DEBUG_MESSAGE.get()
+          .append("recycle: array.length=").append(array.length);
     }
 
     final int freeQueueSize;
@@ -401,7 +405,7 @@ public abstract class ByteArrayManager {
     }
 
     if (LOG.isDebugEnabled()) {
-      debugMessage.get().append(", freeQueueSize=").append(freeQueueSize);
+      DEBUG_MESSAGE.get().append(", freeQueueSize=").append(freeQueueSize);
       logDebugMessage();
     }
     return freeQueueSize;
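Taken together, ByteArrayManager rounds each request up to a power of two, pools released arrays per length class once a class is allocated often enough, and blocks allocation when a class hits its limit. A hedged usage sketch — ByteArrayManager.newInstance(Conf) is assumed to be the factory (it does not appear in this diff); newByteArray and release are the methods shown above:

    import org.apache.hadoop.hdfs.util.ByteArrayManager;

    public class ByteArrayManagerExample {
      public static void main(String[] args) throws InterruptedException {
        // countThreshold=2: start pooling a length class after 2 allocations;
        // countLimit=4: at most 4 outstanding arrays per length class;
        // countResetTimePeriodMs=10000: idle counters reset after 10 seconds.
        ByteArrayManager.Conf conf = new ByteArrayManager.Conf(2, 4, 10000);
        // Assumption: newInstance(Conf) is the factory method; it is not
        // shown in this diff.
        ByteArrayManager bam = ByteArrayManager.newInstance(conf);

        byte[] buf = bam.newByteArray(3000); // rounded up to 4096
        try {
          // ... fill and use buf ...
        } finally {
          bam.release(buf); // required: returns the array for recycling
        }
      }
    }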
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
@@ -469,6 +469,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8911. NameNode Metric : Add Editlog counters as a JMX metric.
     (Anu Engineer via Arpit Agarwal)
 
+    HDFS-8803. Move DfsClientConf to hdfs-client. (Mingliang Liu via wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
@@ -91,7 +91,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
   /**
    * Injects failures into specific operations during unit tests.
    */
-  private final FailureInjector failureInjector;
+  private static FailureInjector failureInjector = new FailureInjector();
 
   /**
    * The file name, for logging and debugging purposes.
@@ -187,7 +187,6 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
 
   public BlockReaderFactory(DfsClientConf conf) {
     this.conf = conf;
-    this.failureInjector = conf.getShortCircuitConf().brfFailureInjector;
     this.remainingCacheTries = conf.getNumCachedConnRetry();
   }
 
@@ -278,6 +277,11 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
     return this;
   }
 
+  @VisibleForTesting
+  public static void setFailureInjectorForTesting(FailureInjector injector) {
+    failureInjector = injector;
+  }
+
   /**
    * Build a BlockReader with the given options.
    *
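Because the injector is now a static field rather than a per-ShortCircuitConf member, tests install it once and should restore the default afterwards so it cannot leak across tests. A hedged sketch of a JUnit usage pattern — the FailureInjector hooks one would override are not shown in this diff, so the subclass body is left empty:

    import org.apache.hadoop.hdfs.BlockReaderFactory;
    import org.junit.After;
    import org.junit.Test;

    public class TestWithFailureInjector {
      @Test
      public void testReadWithInjectedFailures() throws Exception {
        // Affects every BlockReaderFactory instance from here on.
        BlockReaderFactory.setFailureInjectorForTesting(
            new BlockReaderFactory.FailureInjector() {
              // ... override the relevant hooks here (not shown in this diff) ...
            });
        // ... exercise the code path that should hit the injected failure ...
      }

      @After
      public void restoreDefaultInjector() {
        // The field is static, so reset it after each test.
        BlockReaderFactory.setFailureInjectorForTesting(
            new BlockReaderFactory.FailureInjector());
      }
    }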
@@ -23,6 +23,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
 import org.apache.hadoop.hdfs.shortcircuit.DomainSocketFactory;
@@ -138,8 +139,8 @@ public class ClientContext {
    */
   @VisibleForTesting
   public static ClientContext getFromConf(Configuration conf) {
-    return get(conf.get(DFSConfigKeys.DFS_CLIENT_CONTEXT,
-        DFSConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT),
+    return get(conf.get(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT,
+        HdfsClientConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT),
         new DfsClientConf(conf));
   }
 
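ClientContext instances are shared by name: clients created with the same dfs.client.context value share one context (and therefore caches such as the short-circuit replica cache). A hedged sketch using getFromConf as changed above — the context name is illustrative, and the instance-sharing behavior is assumed from ClientContext.get, which is not shown in this diff:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.ClientContext;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class ClientContextExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // "etl-jobs" is illustrative; the default context name is "default".
        conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, "etl-jobs");

        ClientContext a = ClientContext.getFromConf(conf);
        ClientContext b = ClientContext.getFromConf(conf);
        // Same name is expected to resolve to the same shared instance.
        System.out.println(a == b);
      }
    }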
@@ -18,11 +18,11 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CACHE_READAHEAD;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CONTEXT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT;
 
 import java.io.BufferedOutputStream;
 import java.io.DataInputStream;
@ -443,12 +443,12 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
|
||||||
*/
|
*/
|
||||||
int getDatanodeWriteTimeout(int numNodes) {
|
int getDatanodeWriteTimeout(int numNodes) {
|
||||||
final int t = dfsClientConf.getDatanodeSocketWriteTimeout();
|
final int t = dfsClientConf.getDatanodeSocketWriteTimeout();
|
||||||
return t > 0? t + HdfsServerConstants.WRITE_TIMEOUT_EXTENSION*numNodes: 0;
|
return t > 0? t + HdfsConstants.WRITE_TIMEOUT_EXTENSION*numNodes: 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int getDatanodeReadTimeout(int numNodes) {
|
int getDatanodeReadTimeout(int numNodes) {
|
||||||
final int t = dfsClientConf.getSocketTimeout();
|
final int t = dfsClientConf.getSocketTimeout();
|
||||||
return t > 0? HdfsServerConstants.READ_TIMEOUT_EXTENSION*numNodes + t: 0;
|
return t > 0? HdfsConstants.READ_TIMEOUT_EXTENSION*numNodes + t: 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@VisibleForTesting
|
@VisibleForTesting
|
||||||
|
|
|
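Worked example (not in the patch) of the timeout formula above: the base socket timeout is extended once per pipeline node, and a non-positive base disables the timeout entirely. Using the constant values removed from HdfsServerConstants later in this commit (WRITE_TIMEOUT = 8*60*1000, WRITE_TIMEOUT_EXTENSION = 5*1000):

    int t = 8 * 60 * 1000;            // base write timeout, 480,000 ms
    int numNodes = 3;                 // pipeline length
    int writeTimeout = t > 0
        ? t + 5 * 1000 * numNodes     // 480,000 + 15,000 = 495,000 ms
        : 0;                          // 0 means "no timeout"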
@@ -44,14 +44,20 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
 
   public static final String DFS_STREAM_BUFFER_SIZE_KEY = "dfs.stream-buffer-size";
   public static final int DFS_STREAM_BUFFER_SIZE_DEFAULT = 4096;
-  public static final String DFS_BYTES_PER_CHECKSUM_KEY = "dfs.bytes-per-checksum";
-  public static final int DFS_BYTES_PER_CHECKSUM_DEFAULT = 512;
+  public static final String DFS_BYTES_PER_CHECKSUM_KEY =
+      HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
+  public static final int DFS_BYTES_PER_CHECKSUM_DEFAULT =
+      HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
   public static final String DFS_USER_HOME_DIR_PREFIX_KEY = "dfs.user.home.dir.prefix";
   public static final String DFS_USER_HOME_DIR_PREFIX_DEFAULT = "/user";
-  public static final String DFS_CHECKSUM_TYPE_KEY = "dfs.checksum.type";
-  public static final String DFS_CHECKSUM_TYPE_DEFAULT = "CRC32C";
-  public static final String DFS_HDFS_BLOCKS_METADATA_ENABLED = "dfs.datanode.hdfs-blocks-metadata.enabled";
-  public static final boolean DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT = false;
+  public static final String DFS_CHECKSUM_TYPE_KEY = HdfsClientConfigKeys
+      .DFS_CHECKSUM_TYPE_KEY;
+  public static final String DFS_CHECKSUM_TYPE_DEFAULT =
+      HdfsClientConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT;
+  public static final String DFS_HDFS_BLOCKS_METADATA_ENABLED =
+      HdfsClientConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED;
+  public static final boolean DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT =
+      HdfsClientConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT;
   public static final String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
       HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT;
 
@@ -489,7 +495,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_DEFAULT = 1024L * 1024L * 1024L * 10L; // 10 GB
   public static final String DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY = "dfs.datanode.available-space-volume-choosing-policy.balanced-space-preference-fraction";
   public static final float DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT = 0.75f;
-  public static final String DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY = "dfs.datanode.socket.write.timeout";
+  public static final String DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY =
+      HdfsClientConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
   public static final String DFS_DATANODE_STARTUP_KEY = "dfs.datanode.startup";
   public static final String DFS_NAMENODE_PLUGINS_KEY = "dfs.namenode.plugins";
   public static final String DFS_WEB_UGI_KEY = "dfs.web.ugi";
@@ -500,8 +507,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_DATANODE_USER_NAME_KEY = DFS_DATANODE_KERBEROS_PRINCIPAL_KEY;
   public static final String DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS = "dfs.datanode.shared.file.descriptor.paths";
   public static final String DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS_DEFAULT = "/dev/shm,/tmp";
-  public static final String DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS = "dfs.short.circuit.shared.memory.watcher.interrupt.check.ms";
-  public static final int DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT = 60000;
+  public static final String
+      DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS =
+      HdfsClientConfigKeys
+          .DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS;
+  public static final int
+      DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT =
+      HdfsClientConfigKeys
+          .DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT;
   public static final String DFS_NAMENODE_KEYTAB_FILE_KEY = "dfs.namenode.keytab.file";
   public static final String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY =
       HdfsClientConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY;
@@ -542,8 +555,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       0.6f;
 
   public static final String DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY = "dfs.block.local-path-access.user";
-  public static final String DFS_DOMAIN_SOCKET_PATH_KEY = "dfs.domain.socket.path";
-  public static final String DFS_DOMAIN_SOCKET_PATH_DEFAULT = "";
+  public static final String DFS_DOMAIN_SOCKET_PATH_KEY =
+      HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY;
+  public static final String DFS_DOMAIN_SOCKET_PATH_DEFAULT =
+      HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT;
 
   public static final String DFS_STORAGE_POLICY_ENABLED_KEY = "dfs.storage.policy.enabled";
   public static final boolean DFS_STORAGE_POLICY_ENABLED_DEFAULT = true;
@@ -962,64 +977,136 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       = HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_DEFAULT;
 
-  public static final String DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";
-  public static final int DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
-  public static final String DFS_CLIENT_SOCKET_TIMEOUT_KEY = "dfs.client.socket-timeout";
-  public static final String DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY = "dfs.client.socketcache.capacity";
-  public static final int DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT = 16;
-  public static final String DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY = "dfs.client.socketcache.expiryMsec";
-  public static final long DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT = 3000;
-  public static final String DFS_CLIENT_USE_DN_HOSTNAME = "dfs.client.use.datanode.hostname";
-  public static final boolean DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT = false;
-  public static final String DFS_CLIENT_CACHE_DROP_BEHIND_WRITES = "dfs.client.cache.drop.behind.writes";
-  public static final String DFS_CLIENT_CACHE_DROP_BEHIND_READS = "dfs.client.cache.drop.behind.reads";
-  public static final String DFS_CLIENT_CACHE_READAHEAD = "dfs.client.cache.readahead";
-  public static final String DFS_CLIENT_CACHED_CONN_RETRY_KEY = "dfs.client.cached.conn.retry";
-  public static final int DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT = 3;
-  public static final String DFS_CLIENT_CONTEXT = "dfs.client.context";
-  public static final String DFS_CLIENT_CONTEXT_DEFAULT = "default";
-  public static final String DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS = "dfs.client.file-block-storage-locations.num-threads";
-  public static final int DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT = 10;
-  public static final String DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS = "dfs.client.file-block-storage-locations.timeout.millis";
-  public static final int DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT = 1000;
-  public static final String DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY = "dfs.client.datanode-restart.timeout";
-  public static final long DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT = 30;
+  @Deprecated
+  public static final String DFS_CLIENT_WRITE_PACKET_SIZE_KEY =
+      HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
+  @Deprecated
+  public static final int DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT =
+      HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
+  @Deprecated
+  public static final String DFS_CLIENT_SOCKET_TIMEOUT_KEY =
+      HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
+  @Deprecated
+  public static final String DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY =
+      HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
+  @Deprecated
+  public static final int DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT =
+      HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
+  @Deprecated
+  public static final String DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY =
+      HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
+  @Deprecated
+  public static final long DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT =
+      HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT;
+  @Deprecated
+  public static final String DFS_CLIENT_USE_DN_HOSTNAME =
+      HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
+  @Deprecated
+  public static final boolean DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT =
+      HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
+  @Deprecated
+  public static final String DFS_CLIENT_CACHE_DROP_BEHIND_WRITES =
+      HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES;
+  @Deprecated
+  public static final String DFS_CLIENT_CACHE_DROP_BEHIND_READS =
+      HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS;
+  @Deprecated
+  public static final String DFS_CLIENT_CACHE_READAHEAD =
+      HdfsClientConfigKeys.DFS_CLIENT_CACHE_READAHEAD;
+  @Deprecated
+  public static final String DFS_CLIENT_CACHED_CONN_RETRY_KEY =
+      HdfsClientConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_KEY;
+  @Deprecated
+  public static final int DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT =
+      HdfsClientConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT;
+  @Deprecated
+  public static final String DFS_CLIENT_CONTEXT = HdfsClientConfigKeys
+      .DFS_CLIENT_CONTEXT;
+  @Deprecated
+  public static final String DFS_CLIENT_CONTEXT_DEFAULT =
+      HdfsClientConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT;
+  @Deprecated
+  public static final String
+      DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS =
+      HdfsClientConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS;
+  @Deprecated
+  public static final int
+      DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT =
+      HdfsClientConfigKeys
+          .DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT;
+  @Deprecated
+  public static final String
+      DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS =
+      HdfsClientConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS;
+  @Deprecated
+  public static final int
+      DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT =
+      HdfsClientConfigKeys
+          .DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT;
+  @Deprecated
+  public static final String DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY =
+      HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY;
+  @Deprecated
+  public static final long DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT =
+      HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT;
 
   public static final String DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.client.https.keystore.resource";
   public static final String DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-client.xml";
   public static final String DFS_CLIENT_HTTPS_NEED_AUTH_KEY = "dfs.client.https.need-auth";
   public static final boolean DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT = false;
 
   // Much code in hdfs is not yet updated to use these keys.
   // the initial delay (unit is ms) for locateFollowingBlock, the delay time will increase exponentially(double) for each retry.
-  public static final String DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY = "dfs.client.max.block.acquire.failures";
-  public static final int DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT = 3;
+  @Deprecated
+  public static final String DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY =
+      HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
+  @Deprecated
+  public static final int DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT =
+      HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
 
-  public static final String DFS_CLIENT_USE_LEGACY_BLOCKREADER = "dfs.client.use.legacy.blockreader";
-  public static final boolean DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT = false;
-  public static final String DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL = "dfs.client.use.legacy.blockreader.local";
-  public static final boolean DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT = false;
+  @Deprecated
+  public static final String DFS_CLIENT_USE_LEGACY_BLOCKREADER =
+      HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER;
+  @Deprecated
+  public static final boolean DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT =
+      HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT;
+  @Deprecated
+  public static final String DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL =
+      HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL;
+  @Deprecated
+  public static final boolean DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT
+      = HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT;
 
   public static final String DFS_CLIENT_LOCAL_INTERFACES = "dfs.client.local.interfaces";
 
-  public static final String DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC = "dfs.client.domain.socket.data.traffic";
-  public static final boolean DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT = false;
+  @Deprecated
+  public static final String DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC =
+      HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC;
+  @Deprecated
+  public static final boolean DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT =
+      HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT;
 
   // The number of NN response dropped by client proactively in each RPC call.
   // For testing NN retry cache, we can set this property with positive value.
   public static final String DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY = "dfs.client.test.drop.namenode.response.number";
   public static final int DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0;
 
+  @Deprecated
   public static final String DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY =
-      "dfs.client.slow.io.warning.threshold.ms";
-  public static final long DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 30000;
+      HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY;
+  @Deprecated
+  public static final long DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT =
+      HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT;
+  @Deprecated
   public static final String DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS =
-      "dfs.client.key.provider.cache.expiry";
+      HdfsClientConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS;
+  @Deprecated
   public static final long DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT =
-      TimeUnit.DAYS.toMillis(10); // 10 days
+      HdfsClientConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT;
 }
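Aside (a sketch): the pattern applied throughout DFSConfigKeys above keeps the old constant for source compatibility, marks it @Deprecated, and delegates to the hdfs-client definition so each literal key string lives in exactly one place:

    @Deprecated
    public static final String DFS_CLIENT_CONTEXT =
        HdfsClientConfigKeys.DFS_CLIENT_CONTEXT;   // "dfs.client.context"

Code compiled against DFSConfigKeys.DFS_CLIENT_CONTEXT sees the same string value as before; the only new effect is a deprecation warning at compile time.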
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Syncable;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
@@ -202,7 +203,7 @@ public class DFSOutputStream extends FSOutputSummer
     }
     if (blockSize % bytesPerChecksum != 0) {
       throw new HadoopIllegalArgumentException("Invalid values: "
-          + DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY + " (=" + bytesPerChecksum
+          + HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY + " (=" + bytesPerChecksum
           + ") must divide block size (=" + blockSize + ").");
     }
     this.byteArrayManager = dfsClient.getClientContext().getByteArrayManager();
@@ -93,7 +93,7 @@ public class HdfsConfiguration extends Configuration {
       new DeprecationDelta("dfs.secondary.http.address",
           DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY),
       new DeprecationDelta("dfs.socket.timeout",
-          DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY),
+          HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY),
       new DeprecationDelta("fs.checkpoint.dir",
           DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY),
       new DeprecationDelta("fs.checkpoint.edits.dir",
@@ -127,19 +127,19 @@ public class HdfsConfiguration extends Configuration {
       new DeprecationDelta("dfs.permissions.supergroup",
           DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY),
       new DeprecationDelta("dfs.write.packet.size",
-          DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY),
+          HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY),
       new DeprecationDelta("dfs.block.size",
           DFSConfigKeys.DFS_BLOCK_SIZE_KEY),
       new DeprecationDelta("dfs.datanode.max.xcievers",
           DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY),
       new DeprecationDelta("io.bytes.per.checksum",
-          DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY),
+          HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY),
       new DeprecationDelta("dfs.federation.nameservices",
           DFSConfigKeys.DFS_NAMESERVICES),
       new DeprecationDelta("dfs.federation.nameservice.id",
           DFSConfigKeys.DFS_NAMESERVICE_ID),
       new DeprecationDelta("dfs.client.file-block-storage-locations.timeout",
-          DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS),
+          HdfsClientConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS),
     });
   }
 
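For illustration, a minimal sketch of how these DeprecationDelta entries behave at runtime (assuming the standard Configuration deprecation machinery; not code from this patch):

    // Registering a delta maps the legacy name onto the new key.
    Configuration.addDeprecations(new Configuration.DeprecationDelta[] {
        new Configuration.DeprecationDelta("dfs.socket.timeout",
            HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY),
    });
    Configuration conf = new Configuration();
    conf.setInt("dfs.socket.timeout", 3000);       // legacy name, logs a warning
    int t = conf.getInt(
        HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 0);  // returns 3000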
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
@@ -63,7 +64,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode.StorageGroup;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
@@ -318,7 +318,7 @@ public class Dispatcher {
       try {
         sock.connect(
             NetUtils.createSocketAddr(target.getDatanodeInfo().getXferAddr()),
-            HdfsServerConstants.READ_TIMEOUT);
+            HdfsConstants.READ_TIMEOUT);
 
         sock.setKeepAlive(true);
 
@@ -279,12 +279,6 @@ public interface HdfsServerConstants {
     }
   }
 
-  // Timeouts for communicating with DataNode for streaming writes/reads
-  int READ_TIMEOUT = 60 * 1000;
-  int READ_TIMEOUT_EXTENSION = 5 * 1000;
-  int WRITE_TIMEOUT = 8 * 60 * 1000;
-  int WRITE_TIMEOUT_EXTENSION = 5 * 1000; //for write pipeline
-
   /**
    * Defines the NameNode role.
   */
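The four constants deleted here are referenced throughout this commit as HdfsConstants.*, i.e. they now live in the client-visible HdfsConstants class. A sketch of the usage pattern the rest of the diff follows (numTargets below is a placeholder, not from the patch):

    // one base timeout plus a fixed extension per node in the pipeline
    int numTargets = 2;   // hypothetical pipeline fan-out
    int readTimeout = HdfsConstants.READ_TIMEOUT
        + HdfsConstants.READ_TIMEOUT_EXTENSION * numTargets;  // 60s + 5s/node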
@@ -27,11 +27,11 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_SPLIT_THRESHO
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_SPLIT_THRESHOLD_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_NON_LOCAL_LAZY_PERSIST;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_NON_LOCAL_LAZY_PERSIST_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
@@ -55,9 +55,10 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTI
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.security.SaslPropertiesResolver;
 
 /**
@@ -107,9 +108,9 @@ public class DNConf {
   public DNConf(Configuration conf) {
     this.conf = conf;
     socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
-        HdfsServerConstants.READ_TIMEOUT);
+        HdfsConstants.READ_TIMEOUT);
     socketWriteTimeout = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
-        HdfsServerConstants.WRITE_TIMEOUT);
+        HdfsConstants.WRITE_TIMEOUT);
     socketKeepaliveTimeout = conf.getInt(
         DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
         DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT);
@@ -149,8 +150,8 @@ public class DNConf {
         DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT);
 
     this.dfsclientSlowIoWarningThresholdMs = conf.getLong(
-        DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY,
-        DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
+        HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY,
+        HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
     this.datanodeSlowIoWarningThresholdMs = conf.getLong(
         DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY,
         DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
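Sketch (not in the patch) of the effect of the DNConf changes above: the DataNode still honors the client-scoped timeout key, falling back to the shared HdfsConstants defaults when it is unset:

    Configuration conf = new HdfsConfiguration();
    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
        120 * 1000);                  // override the 60s READ_TIMEOUT default
    DNConf dnConf = new DNConf(conf); // dnConf.socketTimeout == 120,000 ms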
@@ -145,7 +145,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -921,8 +920,8 @@ public class DataNode extends ReconfigurableBase
 
     if (conf.getBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY,
                         HdfsClientConfigKeys.Read.ShortCircuit.DEFAULT) ||
-        conf.getBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
-                        DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT)) {
+        conf.getBoolean(HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
+                        HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT)) {
       DomainPeerServer domainPeerServer =
           getDomainPeerServer(conf, streamingAddr.getPort());
       if (domainPeerServer != null) {
@@ -943,8 +942,8 @@ public class DataNode extends ReconfigurableBase
     if (domainSocketPath.isEmpty()) {
       if (conf.getBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY,
                           HdfsClientConfigKeys.Read.ShortCircuit.DEFAULT) &&
-          (!conf.getBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
-                            DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT))) {
+          (!conf.getBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
+                            HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT))) {
         LOG.warn("Although short-circuit local reads are configured, " +
             "they are disabled because you didn't configure " +
             DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY);
@@ -2141,7 +2140,7 @@ public class DataNode extends ReconfigurableBase
     }
 
     long writeTimeout = dnConf.socketWriteTimeout +
-        HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1);
+        HdfsConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1);
     OutputStream unbufOut = NetUtils.getOutputStream(sock, writeTimeout);
     InputStream unbufIn = NetUtils.getInputStream(sock);
     DataEncryptionKeyFactory keyFactory =
@@ -72,7 +72,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmR
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsUnsupportedException;
 import org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsVersionException;
 import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.NewShmInfo;
@@ -705,9 +704,9 @@ class DataXceiver extends Receiver implements Runnable {
           mirrorSock = datanode.newSocket();
           try {
             int timeoutValue = dnConf.socketTimeout
-                + (HdfsServerConstants.READ_TIMEOUT_EXTENSION * targets.length);
+                + (HdfsConstants.READ_TIMEOUT_EXTENSION * targets.length);
             int writeTimeout = dnConf.socketWriteTimeout +
-                (HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
+                (HdfsConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
             NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
             mirrorSock.setSoTimeout(timeoutValue);
             mirrorSock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
@@ -23,7 +23,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -94,7 +94,7 @@ public class SecureDataNodeStarter implements Daemon {
     InetSocketAddress streamingAddr = DataNode.getStreamingAddr(conf);
     int socketWriteTimeout = conf.getInt(
         DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
-        HdfsServerConstants.WRITE_TIMEOUT);
+        HdfsConstants.WRITE_TIMEOUT);
 
     ServerSocket ss = (socketWriteTimeout > 0) ?
         ServerSocketChannel.open().socket() : new ServerSocket();
@@ -24,12 +24,12 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT;
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.RemotePeerFactory;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -71,7 +72,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.net.NetUtils;
@@ -813,7 +813,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
         chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
         targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr());
       } catch (IOException ie) {
-        if (failures >= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) {
+        if (failures >= HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) {
           throw new IOException("Could not obtain block " + lblock, ie);
         }
         LOG.info("Could not obtain block from any node: " + ie);
@@ -849,8 +849,8 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
         Peer peer = null;
         Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket();
         try {
-          s.connect(addr, HdfsServerConstants.READ_TIMEOUT);
-          s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
+          s.connect(addr, HdfsConstants.READ_TIMEOUT);
+          s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
           peer = TcpPeerServer.peerFromSocketAndKey(
               dfs.getSaslDataTransferClient(), s, NamenodeFsck.this,
               blockToken, datanodeId);
@@ -27,7 +27,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.util.PerformanceAdvisory;
@@ -112,7 +112,7 @@ public class DomainSocketFactory {
     } else {
       if (conf.getDomainSocketPath().isEmpty()) {
         throw new HadoopIllegalArgumentException(feature + " is enabled but "
-            + DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY + " is not set.");
+            + HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY + " is not set.");
       } else if (DomainSocket.getLoadingFailureReason() != null) {
         LOG.warn(feature + " cannot be used because "
             + DomainSocket.getLoadingFailureReason());
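For context, a hedged sketch of the configuration this check guards (the socket path below is hypothetical):

    Configuration conf = new Configuration();
    conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
    // without this, DomainSocketFactory throws HadoopIllegalArgumentException
    conf.set(HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
        "/var/run/hadoop-hdfs/dn_socket");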
@@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fi.FiTestUtil;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -226,11 +227,11 @@ public class TestFiPipelines {
     conf = new Configuration();
     int customPerChecksumSize = 700;
     int customBlockSize = customPerChecksumSize * 3;
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 100);
-    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 100);
+    conf.setInt(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, customBlockSize / 2);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 0);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, customBlockSize / 2);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 0);
   }
 
   private static void initLoggers() {
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.log4j.Level;
@@ -52,7 +53,7 @@ public class TestFiDataTransferProtocol {
   static {
     conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, REPLICATION);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
   }
 
   static private FSDataOutputStream createFile(FileSystem fs, Path p
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.log4j.Level;
 import org.junit.Assert;
@@ -57,8 +58,8 @@ public class TestFiDataTransferProtocol2 {
   static {
     conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, REPLICATION);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, PACKET_SIZE);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, PACKET_SIZE);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
   }
 
   static final byte[] bytes = new byte[MAX_N_PACKET * PACKET_SIZE];
@@ -337,7 +337,7 @@ public class TestEnhancedByteBufferAccess {
     ByteBuffer results[] = { null, null, null, null };
 
     DistributedFileSystem fs = null;
-    conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
+    conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
@@ -599,7 +599,7 @@ public class TestEnhancedByteBufferAccess {
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
         false);
     final String CONTEXT = "testZeroCopyReadOfCachedData";
-    conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
+    conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
     conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
         DFSTestUtil.roundUpToMultiple(TEST_FILE_LENGTH,
             (int) NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize()));
@@ -722,7 +722,7 @@ public class TestEnhancedByteBufferAccess {
     final String CONTEXT = "testClientMmapDisable";
     FSDataInputStream fsIn = null;
     DistributedFileSystem fs = null;
-    conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
+    conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
 
     try {
       // With HdfsClientConfigKeys.Mmap.ENABLED_KEY set to false,
@@ -753,7 +753,7 @@ public class TestEnhancedByteBufferAccess {
       // Now try again with HdfsClientConfigKeys.Mmap.CACHE_SIZE_KEY == 0.
       conf.setBoolean(HdfsClientConfigKeys.Mmap.ENABLED_KEY, true);
       conf.setInt(HdfsClientConfigKeys.Mmap.CACHE_SIZE_KEY, 0);
-      conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT + ".1");
+      conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT + ".1");
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
       cluster.waitActive();
       fs = cluster.getFileSystem();
@@ -784,7 +784,7 @@ public class TestEnhancedByteBufferAccess {
     MiniDFSCluster cluster = null;
     final Path TEST_PATH = new Path("/a");
     final String CONTEXT = "test2GBMmapLimit";
-    conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
+    conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);
 
     FSDataInputStream fsIn = null, fsIn2 = null;
     ByteBuffer buf1 = null, buf2 = null;
@@ -20,7 +20,6 @@ package org.apache.hadoop.fs;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -42,7 +41,7 @@ public class TestUnbuffer {
     Configuration conf = new Configuration();
     // Set a new ClientContext.  This way, we will have our own PeerCache,
     // rather than sharing one with other unit tests.
-    conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,
+    conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT,
         "testUnbufferClosesSocketsContext");
 
     // Disable short-circuit reads.  With short-circuit, we wouldn't hold open a
@@ -50,9 +49,9 @@ public class TestUnbuffer {
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
 
     // Set a really long socket timeout to avoid test timing issues.
-    conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
+    conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
         100000000L);
-    conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
+    conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
         100000000L);
 
     MiniDFSCluster cluster = null;
@ -36,10 +36,10 @@ import org.apache.hadoop.hdfs.net.TcpPeerServer;
|
||||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||||
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
|
||||||
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
||||||
|
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
|
||||||
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
|
||||||
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
|
||||||
import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
|
import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
|
||||||
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
|
|
||||||
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
|
import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
|
||||||
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
import org.apache.hadoop.hdfs.server.datanode.DataNode;
|
||||||
import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry;
|
import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry;
|
||||||
|
@ -202,8 +202,8 @@ public class BlockReaderTestUtil {
|
||||||
Socket sock = NetUtils.
|
Socket sock = NetUtils.
|
||||||
getDefaultSocketFactory(fs.getConf()).createSocket();
|
getDefaultSocketFactory(fs.getConf()).createSocket();
|
||||||
try {
|
try {
|
||||||
sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
|
sock.connect(addr, HdfsConstants.READ_TIMEOUT);
|
||||||
sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
|
sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
|
||||||
peer = TcpPeerServer.peerFromSocket(sock);
|
peer = TcpPeerServer.peerFromSocket(sock);
|
||||||
} finally {
|
} finally {
|
||||||
if (peer == null) {
|
if (peer == null) {
|
||||||
|
|
|
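The READ_TIMEOUT change above swaps the server-side HdfsServerConstants class for the client-visible HdfsConstants, so test utilities no longer pull in a server package just for a timeout constant. A sketch of the connect pattern after the move, mirroring the BlockReaderTestUtil hunk (the helper class name is illustrative):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.Socket;

    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    public class PeerSocketSketch {
      // Connect and read with the client-side READ_TIMEOUT constant,
      // the same value the tests in this commit switch to.
      static Socket open(InetSocketAddress addr) throws IOException {
        Socket sock = new Socket();
        sock.connect(addr, HdfsConstants.READ_TIMEOUT);
        sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
        return sock;
      }
    }
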
@@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -52,9 +53,9 @@ public class FileAppendTest4 {
 private static DistributedFileSystem fs;

 private static void init(Configuration conf) {
-conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
+conf.setInt(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
 conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, PACKET_SIZE);
+conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, PACKET_SIZE);
 }

 @BeforeClass

@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs;

 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CONTEXT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS;
 import static org.hamcrest.CoreMatchers.equalTo;
@@ -126,10 +126,10 @@ public class TestBlockReaderLocal {
 HdfsConfiguration conf = new HdfsConfiguration();
 conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
 !checksum);
-conf.setLong(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,
+conf.setLong(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY,
 BlockReaderLocalTest.BYTES_PER_CHECKSUM);
 conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "CRC32C");
-conf.setLong(DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD, readahead);
+conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_CACHE_READAHEAD, readahead);
 test.setConfiguration(conf);
 FileInputStream dataIn = null, metaIn = null;
 final Path TEST_PATH = new Path("/a");

@@ -64,12 +64,12 @@ public class TestBlockReaderLocalLegacy {
 getAbsolutePath());
 }
 conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
-conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
+conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
 conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
 false);
 conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
 UserGroupInformation.getCurrentUser().getShortUserName());
-conf.setBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
+conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
 // Set short retry timeouts so this test runs faster
 conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
 return conf;
@@ -164,7 +164,7 @@ public class TestBlockReaderLocalLegacy {
 public void testBlockReaderLocalLegacyWithAppend() throws Exception {
 final short REPL_FACTOR = 1;
 final HdfsConfiguration conf = getConfiguration(null);
-conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
+conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);

 final MiniDFSCluster cluster =
 new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

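The short-circuit keys already lived under HdfsClientConfigKeys; this commit only moves the legacy block-reader flag to join them. A minimal sketch of a test configuration after the move (class and method names are illustrative):

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class LegacyShortCircuitSketch {
      static HdfsConfiguration legacyScrConf() {
        HdfsConfiguration conf = new HdfsConfiguration();
        // Enable short-circuit local reads...
        conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
        // ...and select the legacy (pre-HDFS-347) local block reader,
        // whose flag now lives in HdfsClientConfigKeys as well.
        conf.setBoolean(
            HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
        return conf;
      }
    }
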
@@ -169,7 +169,7 @@ public class TestClientProtocolForPipelineRecovery {
 @Test
 public void testPipelineRecoveryOnOOB() throws Exception {
 Configuration conf = new HdfsConfiguration();
-conf.set(DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, "15");
+conf.set(HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, "15");
 MiniDFSCluster cluster = null;
 try {
 int numDataNodes = 1;
@@ -207,7 +207,7 @@ public class TestClientProtocolForPipelineRecovery {
 @Test
 public void testPipelineRecoveryOnRestartFailure() throws Exception {
 Configuration conf = new HdfsConfiguration();
-conf.set(DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, "5");
+conf.set(HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, "5");
 MiniDFSCluster cluster = null;
 try {
 int numDataNodes = 2;

@@ -25,6 +25,7 @@ import java.net.InetSocketAddress;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.junit.Assert;
 import org.junit.Test;

@@ -89,8 +90,8 @@ public class TestConnCache {
 // instances. Also use a really long socket timeout so that nothing
 // gets closed before we get around to checking the cache size at the end.
 final String contextName = "testReadFromOneDNContext";
-configuration.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, contextName);
-configuration.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
+configuration.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, contextName);
+configuration.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
 100000000L);
 BlockReaderTestUtil util = new BlockReaderTestUtil(1, configuration);
 final Path testFile = new Path("/testConnCache.dat");

@@ -17,7 +17,7 @@
 */
 package org.apache.hadoop.hdfs;

-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -177,11 +177,11 @@ public class TestDFSClientRetries {
 InterruptedException {
 final int writeTimeout = 100; //milliseconds.
 // set a very short write timeout for datanode, so that tests runs fast.
-conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, writeTimeout);
+conf.setInt(HdfsClientConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, writeTimeout);
 // set a smaller block size
 final int blockSize = 10*1024*1024;
 conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
-conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 1);
+conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 1);
 // set a small buffer size
 final int bufferSize = 4096;
 conf.setInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, bufferSize);
@@ -639,9 +639,10 @@ public class TestDFSClientRetries {
 long blockSize = 128*1024*1024; // DFS block size
 int bufferSize = 4096;

-conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY, xcievers);
-conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
-retries);
+conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
+xcievers);
+conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
+retries);
 conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, timeWin);
 // Disable keepalive
 conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, 0);

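The retry-related keys touched above follow the same split: the block-acquire limit moves to HdfsClientConfigKeys, while Retry.WINDOW_BASE_KEY was already nested there. A sketch of how a test might tighten both for speed, as the hunks above do (class name and values are illustrative; the default for the block-acquire limit is 3 in DFSConfigKeys, if memory serves):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class RetryTuningSketch {
      static void tuneForFastTests(Configuration conf) {
        // Give up after a single failed block-acquire attempt.
        conf.setInt(
            HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 1);
        // Shrink the base of the retry backoff window so retries come quickly.
        conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
      }
    }
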
@@ -73,7 +73,7 @@ public class TestDFSInputStream {
 @Test(timeout=60000)
 public void testSkipWithRemoteBlockReader() throws IOException {
 Configuration conf = new Configuration();
-conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
+conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
 try {
 testSkipInner(cluster);

@@ -17,12 +17,12 @@
 */
 package org.apache.hadoop.hdfs;

-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CONTEXT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;

@@ -44,6 +44,7 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
@@ -58,7 +59,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.io.IOUtils;
@@ -108,8 +108,8 @@ public class TestDataTransferProtocol {
 StringUtils.byteToHexString(sendBuf.toByteArray()));

 sock = new Socket();
-sock.connect(dnAddr, HdfsServerConstants.READ_TIMEOUT);
-sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
+sock.connect(dnAddr, HdfsConstants.READ_TIMEOUT);
+sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);

 OutputStream out = sock.getOutputStream();
 // Should we excuse

@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
@@ -287,7 +288,7 @@ public class TestDatanodeDeath {
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 2);
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2);
-conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
+conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(numDatanodes).build();
 cluster.waitActive();
@@ -343,7 +344,7 @@ public class TestDatanodeDeath {
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2);
-conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
+conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
 int myMaxNodes = 5;
 System.out.println("SimpleTest starting with DataNode to Kill " +
 datanodeToKill);

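A recurring pair in the hunks above is the two socket timeouts: dfs.client.socket-timeout and the write timeout whose constant name still says "datanode" even though it governs the client side of the pipeline. Both now live in HdfsClientConfigKeys. A sketch (class and parameter names are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class SocketTimeoutSketch {
      static void setTimeouts(Configuration conf, int readMs, int writeMs) {
        // Read timeout for client connections to DataNodes.
        conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, readMs);
        // Write timeout; despite the "DATANODE" in the constant name, the
        // key is consumed by the client and now lives with the client keys.
        conf.setInt(
            HdfsClientConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, writeMs);
      }
    }
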
@@ -23,6 +23,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.junit.Test;

 /**
@@ -46,7 +47,7 @@ public class TestDisableConnCache {
 // Configure a new instance with no peer caching, ensure that it doesn't
 // cache anything
 confWithoutCache.setInt(
-DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0);
+HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0);
 BlockReaderTestUtil util = new BlockReaderTestUtil(1, confWithoutCache);
 final Path testFile = new Path("/testConnCache.dat");
 util.writeFile(testFile, FILE_SIZE / 1024);

@@ -572,7 +572,7 @@ public class TestDistributedFileSystem {
 final Path dir = new Path("/filechecksum");
 final int block_size = 1024;
 final int buffer_size = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);
-conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
+conf.setInt(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);

 //try different number of blocks
 for(int n = 0; n < 5; n++) {
@@ -1075,7 +1075,7 @@ public class TestDistributedFileSystem {
 public void testDFSClientPeerReadTimeout() throws IOException {
 final int timeout = 1000;
 final Configuration conf = new HdfsConfiguration();
-conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);
+conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);

 // only need cluster to create a dfs client to get a peer
 final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
@@ -1120,7 +1120,7 @@ public class TestDistributedFileSystem {
 public void testDFSClientPeerWriteTimeout() throws IOException {
 final int timeout = 1000;
 final Configuration conf = new HdfsConfiguration();
-conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);
+conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout);

 // only need cluster to create a dfs client to get a peer
 final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

@@ -28,13 +28,13 @@ import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;

-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
@@ -489,8 +489,8 @@ public class TestFileAppend2 {
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 2);
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2);
-conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 30000);
-conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, 30000);
+conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 30000);
+conf.setInt(HdfsClientConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, 30000);
 conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);

 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)

@@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -84,7 +85,7 @@ public class TestFileAppend4 {
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
 1000);
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
-conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
+conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
 // handle under-replicated blocks quickly (for replication asserts)
 conf.setInt(
 DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 5);
@@ -339,7 +340,7 @@ public class TestFileAppend4 {
 conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
 1000);
 conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
-conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);
+conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);

 cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4)
 .build();

@@ -20,10 +20,10 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
@@ -66,6 +66,7 @@ import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -220,7 +221,7 @@ public class TestFileCreation {
 if (netIf != null) {
 conf.set(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES, netIf);
 }
-conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, useDnHostname);
+conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, useDnHostname);
 if (useDnHostname) {
 // Since the mini cluster only listens on the loopback we have to
 // ensure the hostname used to access DNs maps to the loopback. We

@@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -386,7 +387,7 @@ public class TestHFlush {
 final int fileLen = 6;
 Configuration conf = new HdfsConfiguration();
 final int timeout = 2000;
-conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
+conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
 timeout);

 final Path p = new Path("/pipelineHeartbeat/foo");

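The TestFileCreation hunk above migrates the write-packet-size key and its default together. Per the new HdfsClientConfigKeys, the key string is "dfs.client-write-packet-size" with a 64 KB default (DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024). A sketch of overriding it (class name and the 8 KB value are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class WritePacketSketch {
      static void shrinkPackets(Configuration conf) {
        // Smaller packets mean more pipeline round trips per block;
        // tests use this to exercise the write path more often.
        conf.setInt(
            HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 8 * 1024);
      }
    }
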
@@ -28,7 +28,7 @@ public class TestParallelRead extends TestParallelReadUtil {
 // off both short-circuit local reads and UNIX domain socket data traffic.
 HdfsConfiguration conf = new HdfsConfiguration();
 conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
-conf.setBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
+conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
 false);
 // dfs.domain.socket.path should be ignored because the previous two keys
 // were set to false. This is a regression test for HDFS-4473.

@@ -29,8 +29,8 @@ public class TestParallelShortCircuitLegacyRead extends TestParallelReadUtil {
 DFSInputStream.tcpReadsDisabledForTesting = true;
 HdfsConfiguration conf = new HdfsConfiguration();
 conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, "");
-conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
-conf.setBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
+conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
+conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
 conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
 conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
 false);

@@ -51,13 +51,13 @@ public class TestParallelShortCircuitReadUnCached extends TestParallelReadUtil {
 conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
 conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
 false);
-conf.setBoolean(DFSConfigKeys.
+conf.setBoolean(HdfsClientConfigKeys.
 DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
 // We want to test reading from stale sockets.
 conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, 1);
-conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
+conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
 5 * 60 * 1000);
-conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 32);
+conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 32);
 // Avoid using the FileInputStreamCache.
 conf.setInt(HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_SIZE_KEY,
 0);

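The socket-cache keys tuned above also move in this commit; per HdfsClientConfigKeys they map to "dfs.client.socketcache.capacity" (default 16) and "dfs.client.socketcache.expiryMsec" (default 3000). A sketch of keeping cached DataNode sockets around longer, as the uncached-read test does (class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class SocketCacheSketch {
      static void keepSocketsLonger(Configuration conf) {
        // Double the default capacity of 16 cached sockets...
        conf.setInt(
            HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 32);
        // ...and stretch expiry from the 3000 ms default to five minutes.
        conf.setLong(
            HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
            5 * 60 * 1000);
      }
    }
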
@@ -41,7 +41,7 @@ public class TestParallelUnixDomainRead extends TestParallelReadUtil {
 conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
 new File(sockDir.getDir(), "TestParallelLocalRead.%d.sock").getAbsolutePath());
 conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
-conf.setBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
+conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
 DomainSocket.disableBindPathValidation();
 setupCluster(1, conf);
 }

@@ -29,6 +29,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -148,11 +149,11 @@ public class TestPipelines {
 conf = new Configuration();
 int customPerChecksumSize = 700;
 int customBlockSize = customPerChecksumSize * 3;
-conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 100);
-conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 100);
+conf.setInt(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
 conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
-conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, customBlockSize / 2);
-conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 0);
+conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, customBlockSize / 2);
+conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 0);
 }

 private static void initLoggers() {

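The TestPipelines setup above illustrates why bytes-per-checksum is a client key: the client computes checksums as it writes, and the block size (a server-side key in DFSConfigKeys) must be a multiple of the checksum chunk. A sketch of the same pairing (class name and the 700-byte chunk mirror the test, not production defaults):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class ChecksumChunkSketch {
      static void customChunks(Configuration conf) {
        int bytesPerChecksum = 700;
        // Client-side checksum chunk size...
        conf.setInt(
            HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, bytesPerChecksum);
        // ...with a block size chosen as an exact multiple of it.
        conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, bytesPerChecksum * 3);
      }
    }
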
@@ -212,7 +212,7 @@ public class TestPread {
 return;
 }
 int numBlocks = 1;
-assertTrue(numBlocks <= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
+assertTrue(numBlocks <= HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
 byte[] expected = new byte[numBlocks * blockSize];
 Random rand = new Random(seed);
 rand.nextBytes(expected);

@@ -21,6 +21,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.nio.ByteBuffer;

+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.junit.Assert;

 import org.apache.hadoop.conf.Configuration;
@@ -58,7 +59,7 @@ public class TestRead {
 new ShortCircuitTestContext("testEOFWithBlockReaderLocal");
 try {
 final Configuration conf = testContext.newConfiguration();
-conf.setLong(DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD, BLOCK_SIZE);
+conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_CACHE_READAHEAD, BLOCK_SIZE);
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
 .format(true).build();
 testEOF(cluster, 1);
@@ -73,7 +74,7 @@ public class TestRead {
 @Test(timeout=60000)
 public void testEOFWithRemoteBlockReader() throws Exception {
 final Configuration conf = new Configuration();
-conf.setLong(DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD, BLOCK_SIZE);
+conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_CACHE_READAHEAD, BLOCK_SIZE);
 MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
 .format(true).build();
 testEOF(cluster, 1);

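The readahead key set in both TestRead hunks, "dfs.client.cache.readahead" per HdfsClientConfigKeys, lets a client request that DataNodes read ahead on its behalf where the platform supports it. A sketch (class name is illustrative, and the OS-level readahead behavior is an assumption about platform support, not guaranteed by the key alone):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class ReadaheadSketch {
      static void setReadahead(Configuration conf, long bytes) {
        // Hint to DataNodes to prefetch this many bytes ahead of the
        // client's current read position.
        conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_CACHE_READAHEAD, bytes);
      }
    }
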
@@ -17,11 +17,13 @@
 */
 package org.apache.hadoop.hdfs;

+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+
 public class TestRemoteBlockReader extends TestBlockReaderBase {

 HdfsConfiguration createConf() {
 HdfsConfiguration conf = new HdfsConfiguration();
-conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
+conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
 return conf;
 }
 }

@@ -50,13 +50,13 @@ import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.block.SecurityTestUtil;
 import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -169,8 +169,8 @@ public class TestBlockTokenWithDFS {
 Peer peer = null;
 Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
 try {
-sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
-sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
+sock.connect(addr, HdfsConstants.READ_TIMEOUT);
+sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
 peer = TcpPeerServer.peerFromSocket(sock);
 } finally {
 if (peer == null) {

@@ -45,8 +45,10 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.client.BlockReportOptions;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -55,7 +57,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
@@ -106,7 +107,7 @@ public class TestBlockReplacement {
 final Random r = new Random();

 CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
-CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE/2);
+CONF.setInt(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE/2);
 CONF.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,500);
 cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(REPLICATION_FACTOR)
 .racks(INITIAL_RACKS).build();
@@ -325,7 +326,7 @@ public class TestBlockReplacement {
 Socket sock = new Socket();
 try {
 sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
-HdfsServerConstants.READ_TIMEOUT);
+HdfsConstants.READ_TIMEOUT);
 sock.setKeepAlive(true);
 // sendRequest
 DataOutputStream out = new DataOutputStream(sock.getOutputStream());

@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.io.IOUtils;
@@ -49,7 +50,7 @@ import org.junit.Test;
 public class TestCachingStrategy {
 private static final Log LOG = LogFactory.getLog(TestCachingStrategy.class);
 private static final int MAX_TEST_FILE_LEN = 1024 * 1024;
-private static final int WRITE_PACKET_SIZE = DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
+private static final int WRITE_PACKET_SIZE = HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;

 private final static TestRecordingCacheTracker tracker =
 new TestRecordingCacheTracker();
@@ -259,8 +260,8 @@ public class TestCachingStrategy {
 Configuration conf = new HdfsConfiguration();
 conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY, false);
 conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY, false);
-conf.setBoolean(DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS, true);
-conf.setBoolean(DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, true);
+conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS, true);
+conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, true);
 MiniDFSCluster cluster = null;
 String TEST_PATH = "/test";
 int TEST_PATH_LEN = MAX_TEST_FILE_LEN;

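The TestCachingStrategy hunk above shows the client/server split clearly: the DataNode-wide drop-cache-behind defaults stay in DFSConfigKeys, while the per-client overrides ("dfs.client.cache.drop.behind.reads"/"...writes" per HdfsClientConfigKeys) move to the client side. A sketch of the client-side overrides (class name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class DropBehindSketch {
      static void enableDropBehind(Configuration conf) {
        // Ask DataNodes to evict pages from the OS cache after serving
        // this client's reads and after completing its writes.
        conf.setBoolean(
            HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS, true);
        conf.setBoolean(
            HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, true);
      }
    }
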
@@ -54,11 +54,11 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -523,8 +523,8 @@ public class TestDataNodeVolumeFailure {
     Peer peer = null;
     Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
     try {
-      sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
-      sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
+      sock.connect(addr, HdfsConstants.READ_TIMEOUT);
+      sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
       peer = TcpPeerServer.peerFromSocket(sock);
     } finally {
       if (peer == null) {
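For context, a sketch of the socket-timeout pattern the hunk above migrates: the read timeout constant moves to the client-visible HdfsConstants. Only HdfsConstants.READ_TIMEOUT is taken from the patch; the surrounding class is illustrative.

import java.net.InetSocketAddress;
import java.net.Socket;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class ReadTimeoutSketch {
  /** Open a raw socket to a DataNode with the standard read timeout. */
  public static Socket connect(InetSocketAddress addr) throws Exception {
    Socket sock = new Socket();
    sock.connect(addr, HdfsConstants.READ_TIMEOUT);  // bound connect time
    sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);   // bound per-read time
    return sock;
  }
}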
@@ -264,11 +264,12 @@ public abstract class LazyPersistTestCase {
     if (useSCR) {
       conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
       // Do not share a client context across tests.
-      conf.set(DFS_CLIENT_CONTEXT, UUID.randomUUID().toString());
+      conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, UUID.randomUUID().toString());
       conf.set(DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
           UserGroupInformation.getCurrentUser().getShortUserName());
       if (useLegacyBlockReaderLocal) {
-        conf.setBoolean(DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
+        conf.setBoolean(
+            HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
       } else {
         sockDir = new TemporarySocketDirectory();
         conf.set(DFS_DOMAIN_SOCKET_PATH_KEY, new File(sockDir.getDir(),
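A minimal sketch of the short-circuit-read setup shown above, using the relocated keys. Giving each test a unique dfs.client.context prevents tests from sharing one ClientContext cache. The helper class and method are illustrative.

import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class ShortCircuitConfSketch {
  /** Enable short-circuit reads with a per-test client context. */
  public static void enableShortCircuit(Configuration conf) {
    conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
    // A unique context keeps this test's client caches isolated.
    conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT,
        UUID.randomUUID().toString());
  }
}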
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -53,7 +54,7 @@ public class TestDatanodeRestart {
     // bring up a cluster of 3
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
@@ -77,7 +78,7 @@ public class TestDatanodeRestart {
   public void testRbwReplicas() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     cluster.waitActive();
     try {
@@ -146,7 +147,7 @@ public class TestDatanodeRestart {
   @Test public void testRecoverReplicas() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     cluster.waitActive();
     try {
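The three hunks above repeat one configuration idiom, shown here as a small sketch: tiny blocks and write packets so a test exercises multi-block, multi-packet paths with little data. Only the keys and values come from the patch; the class is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class SmallBlockConfSketch {
  /** Configuration with 1 KB blocks and 512 B client write packets. */
  public static Configuration newSmallBlockConf() {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
    // The write-packet-size key is a client setting, hence its new home.
    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
    return conf;
  }
}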
@@ -18,8 +18,8 @@
 package org.apache.hadoop.hdfs.shortcircuit;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CONTEXT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY;
 import static org.hamcrest.CoreMatchers.equalTo;
 
@@ -71,6 +71,7 @@ import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Test;
 import org.mockito.Mockito;
+import org.mockito.internal.util.reflection.Whitebox;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -671,8 +672,8 @@ public class TestShortCircuitCache {
 
     // The second read should fail, and we should only have 1 segment and 1 slot
     // left.
-    fs.getClient().getConf().getShortCircuitConf().brfFailureInjector =
-        new TestCleanupFailureInjector();
+    BlockReaderFactory.setFailureInjectorForTesting(
+        new TestCleanupFailureInjector());
     try {
       DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
     } catch (Throwable t) {
@@ -766,8 +767,8 @@ public class TestShortCircuitCache {
         new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
     DistributedFileSystem fs = cluster.getFileSystem();
-    fs.getClient().getConf().getShortCircuitConf().brfFailureInjector =
-        new TestPreReceiptVerificationFailureInjector();
+    BlockReaderFactory.setFailureInjectorForTesting(
+        new TestPreReceiptVerificationFailureInjector());
     final Path TEST_PATH1 = new Path("/test_file1");
     DFSTestUtil.createFile(fs, TEST_PATH1, 4096, (short)1, 0xFADE2);
     final Path TEST_PATH2 = new Path("/test_file2");
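The two hunks above replace direct writes to the client's internal ShortCircuitConf field with a static test hook. A sketch of how a test might use it, assuming (as the injector classes in the hunks suggest) that the setter accepts a BlockReaderFactory.FailureInjector:

import org.apache.hadoop.hdfs.BlockReaderFactory;

public class InjectorSketch {
  /** Install a failure injector for short-circuit read tests. */
  static void install(BlockReaderFactory.FailureInjector injector) {
    // A static setter means tests no longer reach into
    // DfsClientConf's ShortCircuitConf to plant the injector.
    BlockReaderFactory.setFailureInjectorForTesting(injector);
  }
}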
@@ -253,7 +253,7 @@ public class TestShortCircuitLocalRead {
         ignoreChecksum);
     // Set a random client context name so that we don't share a cache with
     // other invocations of this function.
-    conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,
+    conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT,
         UUID.randomUUID().toString());
     conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
         new File(sockDir.getDir(),
@@ -261,7 +261,7 @@ public class TestShortCircuitLocalRead {
     if (shortCircuitUser != null) {
       conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
           shortCircuitUser);
-      conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
+      conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
     }
     if (simulatedStorage) {
       SimulatedFSDataset.setFactory(conf);
@@ -592,7 +592,7 @@ public class TestShortCircuitLocalRead {
   public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum, int size, String shortCircuitUser,
       int readOffset, boolean shortCircuitFails) throws IOException, InterruptedException {
     Configuration conf = new Configuration();
-    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
 
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
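A closing sketch of the legacy-block-reader toggles exercised above, written against the keys in their post-patch location. The helper class and method are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class LegacyReaderConfSketch {
  /** Opt into the legacy local block reader alongside short-circuit reads. */
  static void useLegacyLocalReader(Configuration conf) {
    conf.setBoolean(
        HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
    conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
  }
}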