From 535b6db74c1d39eab1fabf7c036c5e08689dbed6 Mon Sep 17 00:00:00 2001
From: Haohui Mai
Date: Wed, 19 Aug 2015 11:28:05 -0700
Subject: [PATCH] HDFS-8803. Move DfsClientConf to hdfs-client. Contributed by
 Mingliang Liu.

---
 .../hdfs/client/HdfsClientConfigKeys.java     |  73 ++++-
 .../hdfs/client/impl/DfsClientConf.java       | 302 ++++++++++--------
 .../hadoop/hdfs/client/impl/package-info.java |  18 ++
 .../hadoop/hdfs/protocol/HdfsConstants.java   |   7 +
 .../hadoop/hdfs/util/ByteArrayManager.java    | 118 +++----
 .../apache/hadoop/hdfs/util/package-info.java |  18 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |   2 +
 .../hadoop/hdfs/BlockReaderFactory.java       |   8 +-
 .../org/apache/hadoop/hdfs/ClientContext.java |   5 +-
 .../org/apache/hadoop/hdfs/DFSClient.java     |  14 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 179 ++++++++---
 .../apache/hadoop/hdfs/DFSOutputStream.java   |   3 +-
 .../apache/hadoop/hdfs/HdfsConfiguration.java |   8 +-
 .../hdfs/server/balancer/Dispatcher.java      |   4 +-
 .../server/common/HdfsServerConstants.java    |   6 -
 .../hadoop/hdfs/server/datanode/DNConf.java   |  17 +-
 .../hadoop/hdfs/server/datanode/DataNode.java |  11 +-
 .../hdfs/server/datanode/DataXceiver.java     |   5 +-
 .../datanode/SecureDataNodeStarter.java       |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java    |   8 +-
 .../hdfs/server/namenode/NamenodeFsck.java    |   8 +-
 .../shortcircuit/DomainSocketFactory.java     |   4 +-
 .../apache/hadoop/hdfs/TestFiPipelines.java   |   9 +-
 .../datanode/TestFiDataTransferProtocol.java  |   3 +-
 .../datanode/TestFiDataTransferProtocol2.java |   5 +-
 .../fs/TestEnhancedByteBufferAccess.java      |  10 +-
 .../org/apache/hadoop/fs/TestUnbuffer.java    |   7 +-
 .../hadoop/hdfs/BlockReaderTestUtil.java      |   6 +-
 .../apache/hadoop/hdfs/FileAppendTest4.java   |   5 +-
 .../hadoop/hdfs/TestBlockReaderFactory.java   |   4 +-
 .../hadoop/hdfs/TestBlockReaderLocal.java     |   4 +-
 .../hdfs/TestBlockReaderLocalLegacy.java      |   6 +-
 ...TestClientProtocolForPipelineRecovery.java |   4 +-
 .../org/apache/hadoop/hdfs/TestConnCache.java |   5 +-
 .../hadoop/hdfs/TestDFSClientRetries.java     |  15 +-
 .../hadoop/hdfs/TestDFSInputStream.java       |   2 +-
 .../hdfs/TestDataTransferKeepalive.java       |   8 +-
 .../hadoop/hdfs/TestDataTransferProtocol.java |   6 +-
 .../apache/hadoop/hdfs/TestDatanodeDeath.java |   5 +-
 .../hadoop/hdfs/TestDisableConnCache.java     |   3 +-
 .../hdfs/TestDistributedFileSystem.java       |   6 +-
 .../apache/hadoop/hdfs/TestFileAppend2.java   |   6 +-
 .../apache/hadoop/hdfs/TestFileAppend4.java   |   5 +-
 .../apache/hadoop/hdfs/TestFileCreation.java  |  11 +-
 .../org/apache/hadoop/hdfs/TestHFlush.java    |   3 +-
 .../apache/hadoop/hdfs/TestParallelRead.java  |   2 +-
 .../TestParallelShortCircuitLegacyRead.java   |   4 +-
 .../TestParallelShortCircuitReadUnCached.java |   6 +-
 .../hdfs/TestParallelUnixDomainRead.java      |   2 +-
 .../org/apache/hadoop/hdfs/TestPipelines.java |   9 +-
 .../org/apache/hadoop/hdfs/TestPread.java     |   2 +-
 .../java/org/apache/hadoop/hdfs/TestRead.java |   5 +-
 .../hadoop/hdfs/TestRemoteBlockReader.java    |   4 +-
 .../TestBlockTokenWithDFS.java                |   6 +-
 .../server/datanode/TestBlockReplacement.java |   7 +-
 .../server/datanode/TestCachingStrategy.java  |   7 +-
 .../datanode/TestDataNodeVolumeFailure.java   |   6 +-
 .../fsdataset/impl/LazyPersistTestCase.java   |   5 +-
 .../fsdataset/impl/TestDatanodeRestart.java   |   7 +-
 .../shortcircuit/TestShortCircuitCache.java   |  13 +-
 .../TestShortCircuitLocalRead.java            |   6 +-
 61 files changed, 649 insertions(+), 402 deletions(-)
 rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java (64%)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/package-info.java
 rename hadoop-hdfs-project/{hadoop-hdfs => hadoop-hdfs-client}/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java (85%)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/package-info.java

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 9146057da9b..557ec5a64ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -17,7 +17,12 @@
  */
 package org.apache.hadoop.hdfs.client;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+
+import java.util.concurrent.TimeUnit;
+
 /** Client configuration properties */
+@InterfaceAudience.Private
 public interface HdfsClientConfigKeys {
   long SECOND = 1000L;
   long MINUTE = 60 * SECOND;
@@ -31,7 +36,7 @@ public interface HdfsClientConfigKeys {
   String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
      "^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$";
 
-  static final String PREFIX = "dfs.client.";
+  String PREFIX = "dfs.client.";
   String DFS_NAMESERVICES = "dfs.nameservices";
   int DFS_NAMENODE_HTTP_PORT_DEFAULT = 50070;
   String DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
@@ -45,6 +50,72 @@ public interface HdfsClientConfigKeys {
   int DFS_NAMENODE_RPC_PORT_DEFAULT = 8020;
   String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY =
       "dfs.namenode.kerberos.principal";
+  String DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";
+  int DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
+  String DFS_CLIENT_SOCKET_TIMEOUT_KEY = "dfs.client.socket-timeout";
+  String DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY =
+      "dfs.client.socketcache.capacity";
+  int DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT = 16;
+  String DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY =
+      "dfs.client.socketcache.expiryMsec";
+  long DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT = 3000;
+  String DFS_CLIENT_USE_DN_HOSTNAME = "dfs.client.use.datanode.hostname";
+  boolean DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT = false;
+  String DFS_CLIENT_CACHE_DROP_BEHIND_WRITES =
+      "dfs.client.cache.drop.behind.writes";
+  String DFS_CLIENT_CACHE_DROP_BEHIND_READS =
+      "dfs.client.cache.drop.behind.reads";
+  String DFS_CLIENT_CACHE_READAHEAD = "dfs.client.cache.readahead";
+  String DFS_CLIENT_CACHED_CONN_RETRY_KEY = "dfs.client.cached.conn.retry";
+  int DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT = 3;
+  String DFS_CLIENT_CONTEXT = "dfs.client.context";
+  String DFS_CLIENT_CONTEXT_DEFAULT = "default";
+  String DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS =
+      "dfs.client.file-block-storage-locations.num-threads";
+  int DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT = 10;
+  String DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS =
+      "dfs.client.file-block-storage-locations.timeout.millis";
+  int DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT = 1000;
+  String DFS_CLIENT_USE_LEGACY_BLOCKREADER =
+      "dfs.client.use.legacy.blockreader";
+  boolean DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT = false;
+  String DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL =
+      "dfs.client.use.legacy.blockreader.local";
+  boolean DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT = false;
+  String DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY =
+      "dfs.client.datanode-restart.timeout";
+  long DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT = 30;
+  // Much code in hdfs is not yet updated to use these keys.
+  // the initial delay (unit is ms) for locateFollowingBlock, the delay time
+  // will increase exponentially(double) for each retry.
+  String DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY =
+      "dfs.client.max.block.acquire.failures";
+  int DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT = 3;
+  String DFS_CHECKSUM_TYPE_KEY = "dfs.checksum.type";
+  String DFS_CHECKSUM_TYPE_DEFAULT = "CRC32C";
+  String DFS_BYTES_PER_CHECKSUM_KEY = "dfs.bytes-per-checksum";
+  int DFS_BYTES_PER_CHECKSUM_DEFAULT = 512;
+  String DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY =
+      "dfs.datanode.socket.write.timeout";
+  String DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC =
+      "dfs.client.domain.socket.data.traffic";
+  boolean DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT = false;
+  String DFS_DOMAIN_SOCKET_PATH_KEY = "dfs.domain.socket.path";
+  String DFS_DOMAIN_SOCKET_PATH_DEFAULT = "";
+  String DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS =
+      "dfs.short.circuit.shared.memory.watcher.interrupt.check.ms";
+  int DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT =
+      60000;
+  String DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY =
+      "dfs.client.slow.io.warning.threshold.ms";
+  long DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 30000;
+  String DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS =
+      "dfs.client.key.provider.cache.expiry";
+  long DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT =
+      TimeUnit.DAYS.toMillis(10); // 10 days
+  String DFS_HDFS_BLOCKS_METADATA_ENABLED =
+      "dfs.datanode.hdfs-blocks-metadata.enabled";
+  boolean DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT = false;
 
   /** dfs.client.retry configuration properties */
   interface Retry {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
similarity index 64%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
index a257e32bed0..d67af0eef3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
@@ -17,49 +17,73 @@
  */
 package org.apache.hadoop.hdfs.client.impl;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.BlockReaderFactory;
-import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.util.ByteArrayManager;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.util.DataChecksum;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import com.google.common.annotations.VisibleForTesting;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BLOCK_SIZE_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CHECKSUM_TYPE_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_REPLICATION_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_REPLICATION_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Failover;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.HedgedRead;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Mmap;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Read;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Retry;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.ShortCircuit;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.Write;
 
 /**
- * DFSClient configuration
+ * DFSClient configuration.
  */
 public class DfsClientConf {
+  private static final Logger LOG = LoggerFactory.getLogger(DfsClientConf
+      .class);
+
   private final int hdfsTimeout;    // timeout value for a DFS operation.
@@ -76,7 +100,7 @@ public class DfsClientConf {
   private final ByteArrayManager.Conf writeByteArrayManagerConf;
   private final int socketTimeout;
   private final long excludedNodesCacheExpiry;
-  /** Wait time window (in msec) if BlockMissingException is caught */
+  /** Wait time window (in msec) if BlockMissingException is caught. */
   private final int timeWindow;
   private final int numCachedConnRetry;
   private final int numBlockWriteRetry;
@@ -97,137 +121,138 @@ public class DfsClientConf {
   private final long slowIoWarningThresholdMs;
 
   private final ShortCircuitConf shortCircuitConf;
-  
+
   private final long hedgedReadThresholdMillis;
   private final int hedgedReadThreadpoolSize;
 
   public DfsClientConf(Configuration conf) {
-    // The hdfsTimeout is currently the same as the ipc timeout 
+    // The hdfsTimeout is currently the same as the ipc timeout
     hdfsTimeout = Client.getTimeout(conf);
     maxRetryAttempts = conf.getInt(
-        HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY,
-        HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_DEFAULT);
+        Retry.MAX_ATTEMPTS_KEY,
+        Retry.MAX_ATTEMPTS_DEFAULT);
     timeWindow = conf.getInt(
-        HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY,
-        HdfsClientConfigKeys.Retry.WINDOW_BASE_DEFAULT);
+        Retry.WINDOW_BASE_KEY,
+        Retry.WINDOW_BASE_DEFAULT);
     retryTimesForGetLastBlockLength = conf.getInt(
-        HdfsClientConfigKeys.Retry.TIMES_GET_LAST_BLOCK_LENGTH_KEY,
-        HdfsClientConfigKeys.Retry.TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT);
+        Retry.TIMES_GET_LAST_BLOCK_LENGTH_KEY,
+        Retry.TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT);
     retryIntervalForGetLastBlockLength = conf.getInt(
-        HdfsClientConfigKeys.Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_KEY,
-        HdfsClientConfigKeys.Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT);
+        Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_KEY,
+        Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT);
 
     maxFailoverAttempts = conf.getInt(
-        HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_KEY,
-        HdfsClientConfigKeys.Failover.MAX_ATTEMPTS_DEFAULT);
+        Failover.MAX_ATTEMPTS_KEY,
+        Failover.MAX_ATTEMPTS_DEFAULT);
     failoverSleepBaseMillis = conf.getInt(
-        HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_KEY,
-        HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_DEFAULT);
+        Failover.SLEEPTIME_BASE_KEY,
+        Failover.SLEEPTIME_BASE_DEFAULT);
     failoverSleepMaxMillis = conf.getInt(
-        HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_KEY,
-        HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_DEFAULT);
+        Failover.SLEEPTIME_MAX_KEY,
+        Failover.SLEEPTIME_MAX_DEFAULT);
 
     maxBlockAcquireFailures = conf.getInt(
         DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
         DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
-    datanodeSocketWriteTimeout = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
-        HdfsServerConstants.WRITE_TIMEOUT);
+    datanodeSocketWriteTimeout = conf.getInt(
+        DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
+        HdfsConstants.WRITE_TIMEOUT);
     ioBufferSize = conf.getInt(
         CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
         CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
     defaultChecksumOpt = getChecksumOptFromConf(conf);
     socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
-        HdfsServerConstants.READ_TIMEOUT);
+        HdfsConstants.READ_TIMEOUT);
     /** dfs.write.packet.size is an internal config variable */
     writePacketSize = conf.getInt(
-        DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
-        DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
+        DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
+        DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
     writeMaxPackets = conf.getInt(
-        HdfsClientConfigKeys.Write.MAX_PACKETS_IN_FLIGHT_KEY,
-        HdfsClientConfigKeys.Write.MAX_PACKETS_IN_FLIGHT_DEFAULT);
-    
+        Write.MAX_PACKETS_IN_FLIGHT_KEY,
+        Write.MAX_PACKETS_IN_FLIGHT_DEFAULT);
+
     final boolean byteArrayManagerEnabled = conf.getBoolean(
-        HdfsClientConfigKeys.Write.ByteArrayManager.ENABLED_KEY,
-        HdfsClientConfigKeys.Write.ByteArrayManager.ENABLED_DEFAULT);
+        Write.ByteArrayManager.ENABLED_KEY,
+        Write.ByteArrayManager.ENABLED_DEFAULT);
     if (!byteArrayManagerEnabled) {
       writeByteArrayManagerConf = null;
     } else {
       final int countThreshold = conf.getInt(
-          HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_THRESHOLD_KEY,
-          HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_THRESHOLD_DEFAULT);
+          Write.ByteArrayManager.COUNT_THRESHOLD_KEY,
+          Write.ByteArrayManager.COUNT_THRESHOLD_DEFAULT);
       final int countLimit = conf.getInt(
-          HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_LIMIT_KEY,
-          HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_LIMIT_DEFAULT);
+          Write.ByteArrayManager.COUNT_LIMIT_KEY,
+          Write.ByteArrayManager.COUNT_LIMIT_DEFAULT);
       final long countResetTimePeriodMs = conf.getLong(
-          HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_RESET_TIME_PERIOD_MS_KEY,
-          HdfsClientConfigKeys.Write.ByteArrayManager.COUNT_RESET_TIME_PERIOD_MS_DEFAULT);
+          Write.ByteArrayManager.COUNT_RESET_TIME_PERIOD_MS_KEY,
+          Write.ByteArrayManager.COUNT_RESET_TIME_PERIOD_MS_DEFAULT);
       writeByteArrayManagerConf = new ByteArrayManager.Conf(
-          countThreshold, countLimit, countResetTimePeriodMs); 
+          countThreshold, countLimit, countResetTimePeriodMs);
     }
-    
+
     defaultBlockSize = conf.getLongBytes(DFS_BLOCK_SIZE_KEY,
         DFS_BLOCK_SIZE_DEFAULT);
     defaultReplication = (short) conf.getInt(
         DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
     taskId = conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE");
     excludedNodesCacheExpiry = conf.getLong(
-        HdfsClientConfigKeys.Write.EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_KEY,
-        HdfsClientConfigKeys.Write.EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT);
-    prefetchSize = conf.getLong(HdfsClientConfigKeys.Read.PREFETCH_SIZE_KEY,
+        Write.EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_KEY,
+        Write.EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT);
+    prefetchSize = conf.getLong(Read.PREFETCH_SIZE_KEY,
         10 * defaultBlockSize);
     numCachedConnRetry = conf.getInt(DFS_CLIENT_CACHED_CONN_RETRY_KEY,
         DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT);
     numBlockWriteRetry = conf.getInt(
-        HdfsClientConfigKeys.BlockWrite.RETRIES_KEY,
-        HdfsClientConfigKeys.BlockWrite.RETRIES_DEFAULT);
+        BlockWrite.RETRIES_KEY,
+        BlockWrite.RETRIES_DEFAULT);
     numBlockWriteLocateFollowingRetry = conf.getInt(
-        HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
-        HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
+        BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
+        BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
     blockWriteLocateFollowingInitialDelayMs = conf.getInt(
-        HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_KEY,
-        HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_DEFAULT);
+        BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_KEY,
+        BlockWrite.LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_MS_DEFAULT);
     uMask = FsPermission.getUMask(conf);
     connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME,
         DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
     hdfsBlocksMetadataEnabled = conf.getBoolean(
-        DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,
-        DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT);
+        HdfsClientConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,
+        HdfsClientConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT);
     fileBlockStorageLocationsNumThreads = conf.getInt(
-        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS,
-        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT);
+        HdfsClientConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS,
+        HdfsClientConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT);
     fileBlockStorageLocationsTimeoutMs = conf.getInt(
-        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS,
-        DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT);
+        HdfsClientConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS,
+        HdfsClientConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT);
     datanodeRestartTimeout = conf.getLong(
         DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY,
         DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT) * 1000;
     slowIoWarningThresholdMs = conf.getLong(
-        DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY,
-        DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
-    
+        DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY,
+        DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT);
+
     shortCircuitConf = new ShortCircuitConf(conf);
 
     hedgedReadThresholdMillis = conf.getLong(
-        HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_KEY,
-        HdfsClientConfigKeys.HedgedRead.THRESHOLD_MILLIS_DEFAULT);
+        HedgedRead.THRESHOLD_MILLIS_KEY,
+        HedgedRead.THRESHOLD_MILLIS_DEFAULT);
     hedgedReadThreadpoolSize = conf.getInt(
-        HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_KEY,
-        HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_DEFAULT);
+        HedgedRead.THREADPOOL_SIZE_KEY,
+        HedgedRead.THREADPOOL_SIZE_DEFAULT);
   }
 
   private DataChecksum.Type getChecksumType(Configuration conf) {
     final String checksum = conf.get(
-        DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
-        DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
+        DFS_CHECKSUM_TYPE_KEY,
+        DFS_CHECKSUM_TYPE_DEFAULT);
     try {
       return DataChecksum.Type.valueOf(checksum);
     } catch(IllegalArgumentException iae) {
-      DFSClient.LOG.warn("Bad checksum type: " + checksum + ". Using default "
-          + DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
+      LOG.warn("Bad checksum type: {}. Using default {}", checksum,
+          DFS_CHECKSUM_TYPE_DEFAULT);
       return DataChecksum.Type.valueOf(
-          DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
+          DFS_CHECKSUM_TYPE_DEFAULT);
     }
   }
 
@@ -498,8 +523,11 @@ public class DfsClientConf {
     return shortCircuitConf;
   }
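One behavioural detail worth noting in getChecksumType() above: the warning now goes through DfsClientConf's own SLF4J logger instead of DFSClient.LOG, removing the last reference to DFSClient from this class. The fallback itself is unchanged; a condensed sketch of it (the method name is hypothetical):

    import org.apache.hadoop.util.DataChecksum;

    // An unrecognized dfs.checksum.type value (anything other than a
    // DataChecksum.Type enum name such as CRC32, CRC32C or NULL) logs a
    // warning and falls back to the default, CRC32C.
    static DataChecksum.Type parseChecksumType(String name) {
      try {
        return DataChecksum.Type.valueOf(name);
      } catch (IllegalArgumentException iae) {
        return DataChecksum.Type.CRC32C; // DFS_CHECKSUM_TYPE_DEFAULT
      }
    }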
+  /**
+   * Configuration for short-circuit reads.
+   */
   public static class ShortCircuitConf {
-    private static final Log LOG = LogFactory.getLog(ShortCircuitConf.class);
+    private static final Logger LOG = DfsClientConf.LOG;
 
     private final int socketCacheCapacity;
     private final long socketCacheExpiry;
@@ -513,9 +541,9 @@ public class DfsClientConf {
     private final boolean shortCircuitLocalReads;
     private final boolean domainSocketDataTraffic;
     private final int shortCircuitStreamsCacheSize;
-    private final long shortCircuitStreamsCacheExpiryMs; 
+    private final long shortCircuitStreamsCacheExpiryMs;
     private final int shortCircuitSharedMemoryWatcherInterruptCheckMs;
-    
+
     private final boolean shortCircuitMmapEnabled;
     private final int shortCircuitMmapCacheSize;
     private final long shortCircuitMmapCacheExpiryMs;
@@ -524,10 +552,6 @@ public class DfsClientConf {
 
     private final long keyProviderCacheExpiryMs;
 
-    @VisibleForTesting
-    public BlockReaderFactory.FailureInjector brfFailureInjector =
-        new BlockReaderFactory.FailureInjector();
-
     public ShortCircuitConf(Configuration conf) {
       socketCacheCapacity = conf.getInt(
           DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,
@@ -537,66 +561,64 @@ public class DfsClientConf {
           DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT);
 
       useLegacyBlockReader = conf.getBoolean(
-          DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER,
-          DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT);
+          DFS_CLIENT_USE_LEGACY_BLOCKREADER,
+          DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT);
       useLegacyBlockReaderLocal = conf.getBoolean(
-          DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
-          DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT);
+          DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
+          DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT);
       shortCircuitLocalReads = conf.getBoolean(
-          HdfsClientConfigKeys.Read.ShortCircuit.KEY,
-          HdfsClientConfigKeys.Read.ShortCircuit.DEFAULT);
+          Read.ShortCircuit.KEY,
+          Read.ShortCircuit.DEFAULT);
       domainSocketDataTraffic = conf.getBoolean(
-          DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
-          DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT);
+          DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
+          DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT);
       domainSocketPath = conf.getTrimmed(
-          DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
-          DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT);
+          DFS_DOMAIN_SOCKET_PATH_KEY,
+          DFS_DOMAIN_SOCKET_PATH_DEFAULT);
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL
-            + " = " + useLegacyBlockReaderLocal);
-        LOG.debug(HdfsClientConfigKeys.Read.ShortCircuit.KEY
-            + " = " + shortCircuitLocalReads);
-        LOG.debug(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC
-            + " = " + domainSocketDataTraffic);
-        LOG.debug(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY
-            + " = " + domainSocketPath);
-      }
+      LOG.debug(DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL
+          + " = {}", useLegacyBlockReaderLocal);
+      LOG.debug(Read.ShortCircuit.KEY
+          + " = {}", shortCircuitLocalReads);
+      LOG.debug(DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC
+          + " = {}", domainSocketDataTraffic);
+      LOG.debug(DFS_DOMAIN_SOCKET_PATH_KEY
+          + " = {}", domainSocketPath);
 
       skipShortCircuitChecksums = conf.getBoolean(
-          HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
-          HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_DEFAULT);
+          Read.ShortCircuit.SKIP_CHECKSUM_KEY,
+          Read.ShortCircuit.SKIP_CHECKSUM_DEFAULT);
       shortCircuitBufferSize = conf.getInt(
-          HdfsClientConfigKeys.Read.ShortCircuit.BUFFER_SIZE_KEY,
-          HdfsClientConfigKeys.Read.ShortCircuit.BUFFER_SIZE_DEFAULT);
+          Read.ShortCircuit.BUFFER_SIZE_KEY,
+          Read.ShortCircuit.BUFFER_SIZE_DEFAULT);
       shortCircuitStreamsCacheSize = conf.getInt(
-          HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_SIZE_KEY,
-          HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_SIZE_DEFAULT);
+          Read.ShortCircuit.STREAMS_CACHE_SIZE_KEY,
+          Read.ShortCircuit.STREAMS_CACHE_SIZE_DEFAULT);
       shortCircuitStreamsCacheExpiryMs = conf.getLong(
-          HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_KEY,
-          HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_DEFAULT);
+          Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_KEY,
+          Read.ShortCircuit.STREAMS_CACHE_EXPIRY_MS_DEFAULT);
       shortCircuitMmapEnabled = conf.getBoolean(
-          HdfsClientConfigKeys.Mmap.ENABLED_KEY,
-          HdfsClientConfigKeys.Mmap.ENABLED_DEFAULT);
+          Mmap.ENABLED_KEY,
+          Mmap.ENABLED_DEFAULT);
       shortCircuitMmapCacheSize = conf.getInt(
-          HdfsClientConfigKeys.Mmap.CACHE_SIZE_KEY,
-          HdfsClientConfigKeys.Mmap.CACHE_SIZE_DEFAULT);
+          Mmap.CACHE_SIZE_KEY,
+          Mmap.CACHE_SIZE_DEFAULT);
       shortCircuitMmapCacheExpiryMs = conf.getLong(
-          HdfsClientConfigKeys.Mmap.CACHE_TIMEOUT_MS_KEY,
-          HdfsClientConfigKeys.Mmap.CACHE_TIMEOUT_MS_DEFAULT);
+          Mmap.CACHE_TIMEOUT_MS_KEY,
+          Mmap.CACHE_TIMEOUT_MS_DEFAULT);
       shortCircuitMmapCacheRetryTimeout = conf.getLong(
-          HdfsClientConfigKeys.Mmap.RETRY_TIMEOUT_MS_KEY,
-          HdfsClientConfigKeys.Mmap.RETRY_TIMEOUT_MS_DEFAULT);
+          Mmap.RETRY_TIMEOUT_MS_KEY,
+          Mmap.RETRY_TIMEOUT_MS_DEFAULT);
       shortCircuitCacheStaleThresholdMs = conf.getLong(
-          HdfsClientConfigKeys.ShortCircuit.REPLICA_STALE_THRESHOLD_MS_KEY,
-          HdfsClientConfigKeys.ShortCircuit.REPLICA_STALE_THRESHOLD_MS_DEFAULT);
+          ShortCircuit.REPLICA_STALE_THRESHOLD_MS_KEY,
+          ShortCircuit.REPLICA_STALE_THRESHOLD_MS_DEFAULT);
       shortCircuitSharedMemoryWatcherInterruptCheckMs = conf.getInt(
-          DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,
-          DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT);
+          DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,
+          DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT);
 
       keyProviderCacheExpiryMs = conf.getLong(
-          DFSConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS,
-          DFSConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT);
+          DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS,
+          DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT);
     }
 
     /**
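The logging rewrite visible in ShortCircuitConf above is part of this patch's commons-logging-to-SLF4J migration: parameterized messages make most isDebugEnabled() guards unnecessary, because argument formatting is deferred until the level check inside debug() passes. A before/after sketch (the class and names are placeholders):

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class LoggingSketch {
      private static final Logger LOG = LoggerFactory.getLogger(LoggingSketch.class);

      void log(String key, Object value) {
        // commons-logging style: the string is concatenated even when
        // DEBUG is off, hence the explicit guard.
        if (LOG.isDebugEnabled()) {
          LOG.debug(key + " = " + value);
        }
        // SLF4J style: "{}" is rendered only after the level check passes,
        // so the guard can be dropped when the arguments are cheap to pass.
        LOG.debug("{} = {}", key, value);
      }
    }

Guards are still worthwhile when computing an argument is itself expensive, which is why the DEBUG_MESSAGE StringBuilder paths in ByteArrayManager below keep them.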
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/package-info.java
new file mode 100644
index 00000000000..44a8b452b27
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client.impl;
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index d4fb8608ad2..db1bf0170b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -78,6 +78,13 @@ public class HdfsConstants {
   public static final String CLIENT_NAMENODE_PROTOCOL_NAME =
       "org.apache.hadoop.hdfs.protocol.ClientProtocol";
 
+  // Timeouts for communicating with DataNode for streaming writes/reads
+  public static final int READ_TIMEOUT = 60 * 1000;
+  public static final int READ_TIMEOUT_EXTENSION = 5 * 1000;
+  public static final int WRITE_TIMEOUT = 8 * 60 * 1000;
+  //for write pipeline
+  public static final int WRITE_TIMEOUT_EXTENSION = 5 * 1000;
+
   // SafeMode actions
   public enum SafeModeAction {
     SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET
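Copying these timeout constants into HdfsConstants lets DfsClientConf and DFSClient stop depending on the server-side HdfsServerConstants (which this patch trims by 6 lines, per the diffstat). The *_EXTENSION constants exist to scale a base socket timeout by pipeline length; a sketch of the derivation, mirroring the DFSClient.getDatanodeWriteTimeout() change later in this patch:

    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    // Base timeout plus a fixed extension per datanode in the pipeline;
    // a non-positive base means "no timeout".
    static int datanodeWriteTimeout(int baseWriteTimeout, int numNodes) {
      return baseWriteTimeout > 0
          ? baseWriteTimeout + HdfsConstants.WRITE_TIMEOUT_EXTENSION * numNodes
          : 0;
    }
    // e.g. with the 8-minute WRITE_TIMEOUT default and a 3-node pipeline:
    // 480_000 + 5_000 * 3 = 495_000 ms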
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java
similarity index 85%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java
rename to hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java
index ea5e39d0ea5..a9adb7e2f71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java
@@ -22,29 +22,30 @@ import java.util.LinkedList;
 import java.util.Map;
 import java.util.Queue;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
- * Manage byte array creation and release. 
+ * Manage byte array creation and release.
  */
 @InterfaceAudience.Private
 public abstract class ByteArrayManager {
-  static final Log LOG = LogFactory.getLog(ByteArrayManager.class);
-  private static final ThreadLocal<StringBuilder> debugMessage = new ThreadLocal<StringBuilder>() {
+  static final Logger LOG = LoggerFactory.getLogger(ByteArrayManager.class);
+  private static final ThreadLocal<StringBuilder> DEBUG_MESSAGE =
+      new ThreadLocal<StringBuilder>() {
     protected StringBuilder initialValue() {
      return new StringBuilder();
    }
  };
 
   private static void logDebugMessage() {
-    final StringBuilder b = debugMessage.get();
-    LOG.debug(b);
+    final StringBuilder b = DEBUG_MESSAGE.get();
+    LOG.debug(b.toString());
     b.setLength(0);
   }
 
@@ -97,7 +98,7 @@ public abstract class ByteArrayManager {
 
     /**
      * Increment the counter, and reset it if there is no increment
-     * for acertain time period.
+     * for a certain time period.
      *
      * @return the new count.
      */
@@ -112,10 +113,10 @@ public abstract class ByteArrayManager {
   }
 
   /** A map from integers to counters. */
-  static class CounterMap {
+  static final class CounterMap {
     /** @see ByteArrayManager.Conf#countResetTimePeriodMs */
     private final long countResetTimePeriodMs;
-    private final Map<Integer, Counter> map = new HashMap<Integer, Counter>();
+    private final Map<Integer, Counter> map = new HashMap<>();
 
     private CounterMap(long countResetTimePeriodMs) {
       this.countResetTimePeriodMs = countResetTimePeriodMs;
@@ -125,7 +126,8 @@ public abstract class ByteArrayManager {
      * @return the counter for the given key;
      *         and create a new counter if it does not exist.
      */
-    synchronized Counter get(final Integer key, final boolean createIfNotExist) {
+    synchronized Counter get(final Integer key, final boolean
+        createIfNotExist) {
       Counter count = map.get(key);
       if (count == null && createIfNotExist) {
         count = new Counter(countResetTimePeriodMs);
@@ -133,17 +135,13 @@ public abstract class ByteArrayManager {
       }
       return count;
     }
-
-    synchronized void clear() {
-      map.clear();
-    }
   }
 
   /** Manage byte arrays with the same fixed length. */
   static class FixedLengthManager {
     private final int byteArrayLength;
     private final int maxAllocated;
-    private final Queue<byte[]> freeQueue = new LinkedList<byte[]>();
+    private final Queue<byte[]> freeQueue = new LinkedList<>();
 
     private int numAllocated = 0;
 
@@ -157,31 +155,31 @@ public abstract class ByteArrayManager {
      *
      * If the number of allocated arrays >= maximum, the current thread is
      * blocked until the number of allocated arrays drops to below the maximum.
-     * 
+     *
      * The byte array allocated by this method must be returned for recycling
      * via the {@link FixedLengthManager#recycle(byte[])} method.
      */
     synchronized byte[] allocate() throws InterruptedException {
       if (LOG.isDebugEnabled()) {
-        debugMessage.get().append(", ").append(this);
+        DEBUG_MESSAGE.get().append(", ").append(this);
       }
       for(; numAllocated >= maxAllocated;) {
         if (LOG.isDebugEnabled()) {
-          debugMessage.get().append(": wait ...");
+          DEBUG_MESSAGE.get().append(": wait ...");
           logDebugMessage();
         }
 
         wait();
 
         if (LOG.isDebugEnabled()) {
-          debugMessage.get().append("wake up: ").append(this);
+          DEBUG_MESSAGE.get().append("wake up: ").append(this);
         }
       }
       numAllocated++;
 
       final byte[] array = freeQueue.poll();
       if (LOG.isDebugEnabled()) {
-        debugMessage.get().append(", recycled? ").append(array != null);
+        DEBUG_MESSAGE.get().append(", recycled? ").append(array != null);
       }
       return array != null? array : new byte[byteArrayLength];
     }
@@ -197,7 +195,7 @@ public abstract class ByteArrayManager {
       Preconditions.checkNotNull(array);
       Preconditions.checkArgument(array.length == byteArrayLength);
       if (LOG.isDebugEnabled()) {
-        debugMessage.get().append(", ").append(this);
+        DEBUG_MESSAGE.get().append(", ").append(this);
       }
 
       notify();
@@ -210,7 +208,7 @@ public abstract class ByteArrayManager {
 
       if (freeQueue.size() < maxAllocated - numAllocated) {
         if (LOG.isDebugEnabled()) {
-          debugMessage.get().append(", freeQueue.offer");
+          DEBUG_MESSAGE.get().append(", freeQueue.offer");
         }
         freeQueue.offer(array);
       }
@@ -227,7 +225,7 @@ public abstract class ByteArrayManager {
   /** A map from array lengths to byte array managers. */
   static class ManagerMap {
     private final int countLimit;
-    private final Map<Integer, FixedLengthManager> map = new HashMap<Integer, FixedLengthManager>();
+    private final Map<Integer, FixedLengthManager> map = new HashMap<>();
 
     ManagerMap(int countLimit) {
       this.countLimit = countLimit;
@@ -243,12 +241,11 @@ public abstract class ByteArrayManager {
       }
       return manager;
     }
-
-    synchronized void clear() {
-      map.clear();
-    }
   }
 
+  /**
+   * Configuration for ByteArrayManager.
+   */
   public static class Conf {
     /**
      * The count threshold for each array length so that a manager is created
@@ -265,7 +262,8 @@ public abstract class ByteArrayManager {
      */
     private final long countResetTimePeriodMs;
 
-    public Conf(int countThreshold, int countLimit, long countResetTimePeriodMs) {
+    public Conf(int countThreshold, int countLimit, long
+        countResetTimePeriodMs) {
       this.countThreshold = countThreshold;
       this.countLimit = countLimit;
       this.countResetTimePeriodMs = countResetTimePeriodMs;
@@ -277,20 +275,20 @@ public abstract class ByteArrayManager {
   * the returned array is larger than or equal to the given length.
   *
   * The current thread may be blocked if some resource is unavailable.
-   * 
+   *
   * The byte array created by this method must be released
   * via the {@link ByteArrayManager#release(byte[])} method.
   *
   * @return a byte array with length larger than or equal to the given length.
   */
  public abstract byte[] newByteArray(int size) throws InterruptedException;
-  
+
  /**
   * Release the given byte array.
-   * 
+   *
   * The byte array may or may not be created
   * by the {@link ByteArrayManager#newByteArray(int)} method.
-   * 
+   *
   * @return the number of free array.
   */
  public abstract int release(byte[] array);
@@ -307,7 +305,7 @@ public abstract class ByteArrayManager {
    public byte[] newByteArray(int size) throws InterruptedException {
      return new byte[size];
    }
-    
+
    @Override
    public int release(byte[] array) {
      return 0;
@@ -320,38 +318,41 @@ public abstract class ByteArrayManager {
   */
  static class Impl extends ByteArrayManager {
    private final Conf conf;
-    
+
    private final CounterMap counters;
    private final ManagerMap managers;
-    
+
    Impl(Conf conf) {
      this.conf = conf;
      this.counters = new CounterMap(conf.countResetTimePeriodMs);
      this.managers = new ManagerMap(conf.countLimit);
    }
-    
+
    /**
     * Allocate a byte array, where the length of the allocated array
     * is the least power of two of the given length
     * unless the given length is less than {@link #MIN_ARRAY_LENGTH}.
-     * In such case, the returned array length is equal to {@link #MIN_ARRAY_LENGTH}.
+     * In such case, the returned array length is equal to {@link
+     * #MIN_ARRAY_LENGTH}.
     *
     * If the number of allocated arrays exceeds the capacity,
     * the current thread is blocked until
     * the number of allocated arrays drops to below the capacity.
-     * 
+     *
     * The byte array allocated by this method must be returned for recycling
     * via the {@link Impl#release(byte[])} method.
     *
-     * @return a byte array with length larger than or equal to the given length.
+     * @return a byte array with length larger than or equal to the given
+     *         length.
     */
    @Override
-    public byte[] newByteArray(final int arrayLength) throws InterruptedException {
+    public byte[] newByteArray(final int arrayLength)
+        throws InterruptedException {
      Preconditions.checkArgument(arrayLength >= 0);
      if (LOG.isDebugEnabled()) {
-        debugMessage.get().append("allocate(").append(arrayLength).append(")");
+        DEBUG_MESSAGE.get().append("allocate(").append(arrayLength).append(")");
      }
-      
+
      final byte[] array;
      if (arrayLength == 0) {
        array = EMPTY_BYTE_ARRAY;
@@ -361,37 +362,40 @@ public abstract class ByteArrayManager {
        final long count = counters.get(powerOfTwo, true).increment();
        final boolean aboveThreshold = count > conf.countThreshold;
        // create a new manager only if the count is above threshold.
-        final FixedLengthManager manager = managers.get(powerOfTwo, aboveThreshold);
-        
+        final FixedLengthManager manager =
+            managers.get(powerOfTwo, aboveThreshold);
+
        if (LOG.isDebugEnabled()) {
-          debugMessage.get().append(": count=").append(count)
+          DEBUG_MESSAGE.get().append(": count=").append(count)
              .append(aboveThreshold? ", aboveThreshold": ", belowThreshold");
        }
        array = manager != null? manager.allocate(): new byte[powerOfTwo];
      }
-      
+
      if (LOG.isDebugEnabled()) {
-        debugMessage.get().append(", return byte[").append(array.length).append("]");
+        DEBUG_MESSAGE.get().append(", return byte[")
+            .append(array.length).append("]");
        logDebugMessage();
      }
      return array;
    }
-    
+
    /**
     * Recycle the given byte array.
-     * 
+     *
     * The byte array may or may not be allocated
     * by the {@link Impl#newByteArray(int)} method.
-     * 
+     *
     * This is a non-blocking call.
     */
    @Override
    public int release(final byte[] array) {
      Preconditions.checkNotNull(array);
      if (LOG.isDebugEnabled()) {
-        debugMessage.get().append("recycle: array.length=").append(array.length);
+        DEBUG_MESSAGE.get()
+            .append("recycle: array.length=").append(array.length);
      }
-      
+
      final int freeQueueSize;
      if (array.length == 0) {
        freeQueueSize = -1;
@@ -399,18 +403,18 @@ public abstract class ByteArrayManager {
        final FixedLengthManager manager = managers.get(array.length, false);
        freeQueueSize = manager == null? -1: manager.recycle(array);
      }
-      
+
      if (LOG.isDebugEnabled()) {
-        debugMessage.get().append(", freeQueueSize=").append(freeQueueSize);
+        DEBUG_MESSAGE.get().append(", freeQueueSize=").append(freeQueueSize);
        logDebugMessage();
      }
      return freeQueueSize;
    }
-    
+
    CounterMap getCounters() {
      return counters;
    }
-    
+
    ManagerMap getManagers() {
      return managers;
    }
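ByteArrayManager pools the packet buffers used by DFSOutputStream: each request is rounded up to a power of two, and a FixedLengthManager is created for a size only after that size has been requested more than countThreshold times within the reset period. A sketch of the round trip, assuming caller code in org.apache.hadoop.hdfs.util (the Impl constructor shown above is package-private) and with illustrative Conf values rather than the HDFS defaults:

    import org.apache.hadoop.hdfs.util.ByteArrayManager;

    // Conf(countThreshold, countLimit, countResetTimePeriodMs): manage a
    // length once requested more than 128 times, allow at most 2048
    // outstanding arrays per length, reset counts after 10 seconds idle.
    static void sendOnePacket() throws InterruptedException {
      ByteArrayManager bam = new ByteArrayManager.Impl(
          new ByteArrayManager.Conf(128, 2048, 10_000L));
      // 3000 is rounded up to the next power of two, 4096. Below the
      // threshold this is still a fresh new byte[4096], just unpooled;
      // above it, newByteArray() may block once 2048 arrays of that
      // length are outstanding.
      byte[] buf = bam.newByteArray(3000);
      try {
        // ... fill buf and write it to the pipeline ...
      } finally {
        bam.release(buf); // non-blocking; may park the array for reuse
      }
    }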
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/package-info.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/package-info.java
new file mode 100644
index 00000000000..5ba3de012c3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/package-info.java
@@ -0,0 +1,18 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0d54014a387..47cf4091c6c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -469,6 +469,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8911. NameNode Metric : Add Editlog counters as a JMX metric.
     (Anu Engineer via Arpit Agarwal)
 
+    HDFS-8803. Move DfsClientConf to hdfs-client. (Mingliang Liu via wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index 714cd68817c..96044e9e1ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -91,7 +91,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
   /**
    * Injects failures into specific operations during unit tests.
    */
-  private final FailureInjector failureInjector;
+  private static FailureInjector failureInjector = new FailureInjector();
 
   /**
    * The file name, for logging and debugging purposes.
@@ -187,7 +187,6 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
 
   public BlockReaderFactory(DfsClientConf conf) {
     this.conf = conf;
-    this.failureInjector = conf.getShortCircuitConf().brfFailureInjector;
     this.remainingCacheTries = conf.getNumCachedConnRetry();
   }
 
@@ -278,6 +277,11 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
     return this;
   }
 
+  @VisibleForTesting
+  public static void setFailureInjectorForTesting(FailureInjector injector) {
+    failureInjector = injector;
+  }
+
   /**
    * Build a BlockReader with the given options.
    *
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ClientContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
index 6359def4e2f..bf114631cf2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
@@ -23,6 +23,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf;
 import org.apache.hadoop.hdfs.shortcircuit.DomainSocketFactory;
@@ -138,8 +139,8 @@ public class ClientContext {
    */
   @VisibleForTesting
   public static ClientContext getFromConf(Configuration conf) {
-    return get(conf.get(DFSConfigKeys.DFS_CLIENT_CONTEXT,
-        DFSConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT),
+    return get(conf.get(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT,
+        HdfsClientConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT),
         new DfsClientConf(conf));
   }
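With DfsClientConf in another module, BlockReaderFactory's failure injector can no longer ride along on ShortCircuitConf (the removed brfFailureInjector field), so it becomes static state with an explicit test hook. The trade-off is that the injector is now global rather than per-client; a sketch of how a test might use the new hook, with the overridden behaviour elided:

    // Install a custom injector before exercising the read path, then
    // restore the no-op default so other tests are unaffected.
    BlockReaderFactory.setFailureInjectorForTesting(
        new BlockReaderFactory.FailureInjector() {
          // override injection points here to simulate
          // short-circuit read failures
        });
    try {
      // ... run the read under test ...
    } finally {
      BlockReaderFactory.setFailureInjectorForTesting(
          new BlockReaderFactory.FailureInjector());
    }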
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 4e1d6978cad..a7b518ee07c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -18,11 +18,11 @@
 package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_CODEC_CLASSES_KEY_PREFIX;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CACHE_READAHEAD;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CONTEXT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT;
 
 import java.io.BufferedOutputStream;
 import java.io.DataInputStream;
@@ -443,12 +443,12 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   int getDatanodeWriteTimeout(int numNodes) {
     final int t = dfsClientConf.getDatanodeSocketWriteTimeout();
-    return t > 0? t + HdfsServerConstants.WRITE_TIMEOUT_EXTENSION*numNodes: 0;
+    return t > 0? t + HdfsConstants.WRITE_TIMEOUT_EXTENSION*numNodes: 0;
   }
 
   int getDatanodeReadTimeout(int numNodes) {
     final int t = dfsClientConf.getSocketTimeout();
-    return t > 0? HdfsServerConstants.READ_TIMEOUT_EXTENSION*numNodes + t: 0;
+    return t > 0? HdfsConstants.READ_TIMEOUT_EXTENSION*numNodes + t: 0;
   }
 
   @VisibleForTesting
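The remainder of the patch rewires DFSConfigKeys so the old server-side constants become @Deprecated aliases that delegate to HdfsClientConfigKeys. Since both constants hold the same property string, old code keeps compiling (with a deprecation warning) and old XML configurations keep resolving; a sketch (class name hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class DeprecatedKeySketch {
      public static void main(String[] args) {
        // The deprecated alias and the client-side constant are the same
        // String, so either resolves the same property.
        System.out.println(
            DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY.equals(
                HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY)); // true
        Configuration conf = new HdfsConfiguration();
        int capacity = conf.getInt(
            HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,
            HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT);
        System.out.println("socket cache capacity = " + capacity);
      }
    }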
DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_DEFAULT = 1024L * 1024L * 1024L * 10L; // 10 GB public static final String DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY = "dfs.datanode.available-space-volume-choosing-policy.balanced-space-preference-fraction"; public static final float DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_DEFAULT = 0.75f; - public static final String DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY = "dfs.datanode.socket.write.timeout"; + public static final String DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY = + HdfsClientConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY; public static final String DFS_DATANODE_STARTUP_KEY = "dfs.datanode.startup"; public static final String DFS_NAMENODE_PLUGINS_KEY = "dfs.namenode.plugins"; public static final String DFS_WEB_UGI_KEY = "dfs.web.ugi"; @@ -500,8 +507,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_DATANODE_USER_NAME_KEY = DFS_DATANODE_KERBEROS_PRINCIPAL_KEY; public static final String DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS = "dfs.datanode.shared.file.descriptor.paths"; public static final String DFS_DATANODE_SHARED_FILE_DESCRIPTOR_PATHS_DEFAULT = "/dev/shm,/tmp"; - public static final String DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS = "dfs.short.circuit.shared.memory.watcher.interrupt.check.ms"; - public static final int DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT = 60000; + public static final String + DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS = + HdfsClientConfigKeys + .DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS; + public static final int + DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT = + HdfsClientConfigKeys + .DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT; public static final String DFS_NAMENODE_KEYTAB_FILE_KEY = "dfs.namenode.keytab.file"; public static final String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY = HdfsClientConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY; @@ -542,8 +555,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys { 0.6f; public static final String DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY = "dfs.block.local-path-access.user"; - public static final String DFS_DOMAIN_SOCKET_PATH_KEY = "dfs.domain.socket.path"; - public static final String DFS_DOMAIN_SOCKET_PATH_DEFAULT = ""; + public static final String DFS_DOMAIN_SOCKET_PATH_KEY = + HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY; + public static final String DFS_DOMAIN_SOCKET_PATH_DEFAULT = + HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT; public static final String DFS_STORAGE_POLICY_ENABLED_KEY = "dfs.storage.policy.enabled"; public static final boolean DFS_STORAGE_POLICY_ENABLED_DEFAULT = true; @@ -962,64 +977,136 @@ public class DFSConfigKeys extends CommonConfigurationKeys { = HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_DEFAULT; + @Deprecated + public static final String DFS_CLIENT_WRITE_PACKET_SIZE_KEY = + HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY; + @Deprecated + public static final int DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT = + HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT; - public static final String DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size"; - public static final int DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024; + @Deprecated + public static final String DFS_CLIENT_SOCKET_TIMEOUT_KEY = + 
HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY; + @Deprecated + public static final String DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY = + HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY; + @Deprecated + public static final int DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT = + HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT; + @Deprecated + public static final String DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY = + HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY; + @Deprecated + public static final long DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT = + HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT; - public static final String DFS_CLIENT_SOCKET_TIMEOUT_KEY = "dfs.client.socket-timeout"; - public static final String DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY = "dfs.client.socketcache.capacity"; - public static final int DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT = 16; - public static final String DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY = "dfs.client.socketcache.expiryMsec"; - public static final long DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT = 3000; + @Deprecated + public static final String DFS_CLIENT_USE_DN_HOSTNAME = + HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME; + @Deprecated + public static final boolean DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT = + HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT; + @Deprecated + public static final String DFS_CLIENT_CACHE_DROP_BEHIND_WRITES = + HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES; + @Deprecated + public static final String DFS_CLIENT_CACHE_DROP_BEHIND_READS = + HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS; + @Deprecated + public static final String DFS_CLIENT_CACHE_READAHEAD = + HdfsClientConfigKeys.DFS_CLIENT_CACHE_READAHEAD; + @Deprecated + public static final String DFS_CLIENT_CACHED_CONN_RETRY_KEY = + HdfsClientConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_KEY; + @Deprecated + public static final int DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT = + HdfsClientConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT; - public static final String DFS_CLIENT_USE_DN_HOSTNAME = "dfs.client.use.datanode.hostname"; - public static final boolean DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT = false; - public static final String DFS_CLIENT_CACHE_DROP_BEHIND_WRITES = "dfs.client.cache.drop.behind.writes"; - public static final String DFS_CLIENT_CACHE_DROP_BEHIND_READS = "dfs.client.cache.drop.behind.reads"; - public static final String DFS_CLIENT_CACHE_READAHEAD = "dfs.client.cache.readahead"; - public static final String DFS_CLIENT_CACHED_CONN_RETRY_KEY = "dfs.client.cached.conn.retry"; - public static final int DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT = 3; + @Deprecated + public static final String DFS_CLIENT_CONTEXT = HdfsClientConfigKeys + .DFS_CLIENT_CONTEXT; + @Deprecated + public static final String DFS_CLIENT_CONTEXT_DEFAULT = + HdfsClientConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT; + @Deprecated + public static final String + DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS = + HdfsClientConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS; + @Deprecated + public static final int + DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT = + HdfsClientConfigKeys + .DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT; + @Deprecated + public static final String + DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS = + HdfsClientConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS; + @Deprecated + public static final int + DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT = + 
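
Because each @Deprecated field above forwards to the same property string, configurations written against either class keep working; the annotation only nudges callers toward HdfsClientConfigKeys at compile time. A sketch with the real Configuration API (both constants exist after this patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

// Sketch: a value set through the deprecated name is visible through the new one.
public class AliasCheck {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 32); // deprecated
    System.out.println(conf.getInt(
        HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,
        HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT)); // 32
  }
}
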
HdfsClientConfigKeys + .DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT; - public static final String DFS_CLIENT_CONTEXT = "dfs.client.context"; - public static final String DFS_CLIENT_CONTEXT_DEFAULT = "default"; - public static final String DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS = "dfs.client.file-block-storage-locations.num-threads"; - public static final int DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT = 10; - public static final String DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS = "dfs.client.file-block-storage-locations.timeout.millis"; - public static final int DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT = 1000; - - public static final String DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY = "dfs.client.datanode-restart.timeout"; - public static final long DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT = 30; + @Deprecated + public static final String DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY = + HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY; + @Deprecated + public static final long DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT = + HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT; public static final String DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.client.https.keystore.resource"; public static final String DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-client.xml"; public static final String DFS_CLIENT_HTTPS_NEED_AUTH_KEY = "dfs.client.https.need-auth"; public static final boolean DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT = false; + // Much code in hdfs is not yet updated to use these keys. // the initial delay (unit is ms) for locateFollowingBlock, the delay time will increase exponentially(double) for each retry. - public static final String DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY = "dfs.client.max.block.acquire.failures"; - public static final int DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT = 3; + @Deprecated + public static final String DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY = + HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY; + @Deprecated + public static final int DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT = + HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT; - public static final String DFS_CLIENT_USE_LEGACY_BLOCKREADER = "dfs.client.use.legacy.blockreader"; - public static final boolean DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT = false; - public static final String DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL = "dfs.client.use.legacy.blockreader.local"; - public static final boolean DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT = false; + @Deprecated + public static final String DFS_CLIENT_USE_LEGACY_BLOCKREADER = + HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER; + @Deprecated + public static final boolean DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT = + HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT; + @Deprecated + public static final String DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL = + HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL; + @Deprecated + public static final boolean DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT + = HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT; public static final String DFS_CLIENT_LOCAL_INTERFACES = "dfs.client.local.interfaces"; - public static final String DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC = "dfs.client.domain.socket.data.traffic"; - public static final boolean DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT = false; + @Deprecated + public static final String 
DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC = + HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC; + @Deprecated + public static final boolean DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT = + HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT; // The number of NN response dropped by client proactively in each RPC call. // For testing NN retry cache, we can set this property with positive value. public static final String DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY = "dfs.client.test.drop.namenode.response.number"; public static final int DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0; + @Deprecated public static final String DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY = - "dfs.client.slow.io.warning.threshold.ms"; - public static final long DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 30000; + HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY; + + @Deprecated + public static final long DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT = + HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT; + @Deprecated public static final String DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS = - "dfs.client.key.provider.cache.expiry"; + HdfsClientConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS; + @Deprecated public static final long DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT = - TimeUnit.DAYS.toMillis(10); // 10 days + HdfsClientConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java index d160b2b5a1d..5cd40a53d43 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileEncryptionInfo; import org.apache.hadoop.fs.ParentNotDirectoryException; import org.apache.hadoop.fs.Syncable; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag; import org.apache.hadoop.hdfs.client.impl.DfsClientConf; @@ -202,7 +203,7 @@ public class DFSOutputStream extends FSOutputSummer } if (blockSize % bytesPerChecksum != 0) { throw new HadoopIllegalArgumentException("Invalid values: " - + DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY + " (=" + bytesPerChecksum + + HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY + " (=" + bytesPerChecksum + ") must divide block size (=" + blockSize + ")."); } this.byteArrayManager = dfsClient.getClientContext().getByteArrayManager(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java index cdb937128d5..ef9f27a16c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java @@ -93,7 +93,7 @@ public class HdfsConfiguration extends Configuration { new DeprecationDelta("dfs.secondary.http.address", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY), new DeprecationDelta("dfs.socket.timeout", - DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY), + HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY), new 
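
The DFSOutputStream hunk only changes which class names the key in the error message, but the invariant it guards is worth spelling out: the checksum chunk size must evenly divide the block size, otherwise every block would end in a partial chunk. A self-contained restatement (plain IllegalArgumentException stands in for Hadoop's HadoopIllegalArgumentException):

// Restatement of the check in the DFSOutputStream hunk above.
class ChunkAlignment {
  static void check(long blockSize, int bytesPerChecksum) {
    if (blockSize % bytesPerChecksum != 0) {
      throw new IllegalArgumentException("Invalid values: dfs.bytes-per-checksum (="
          + bytesPerChecksum + ") must divide block size (=" + blockSize + ").");
    }
  }

  public static void main(String[] args) {
    check(128L * 1024 * 1024, 512);   // passes: 512 divides 128 MB
    check(128L * 1024 * 1024, 1000);  // throws: 1000 does not
  }
}
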
DeprecationDelta("fs.checkpoint.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY), new DeprecationDelta("fs.checkpoint.edits.dir", @@ -127,19 +127,19 @@ public class HdfsConfiguration extends Configuration { new DeprecationDelta("dfs.permissions.supergroup", DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY), new DeprecationDelta("dfs.write.packet.size", - DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY), + HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY), new DeprecationDelta("dfs.block.size", DFSConfigKeys.DFS_BLOCK_SIZE_KEY), new DeprecationDelta("dfs.datanode.max.xcievers", DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY), new DeprecationDelta("io.bytes.per.checksum", - DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY), + HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY), new DeprecationDelta("dfs.federation.nameservices", DFSConfigKeys.DFS_NAMESERVICES), new DeprecationDelta("dfs.federation.nameservice.id", DFSConfigKeys.DFS_NAMESERVICE_ID), new DeprecationDelta("dfs.client.file-block-storage-locations.timeout", - DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS), + HdfsClientConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS), }); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java index bd497b8c70f..40564dfab6d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair; import org.apache.hadoop.hdfs.protocol.datatransfer.Sender; @@ -63,7 +64,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.balancer.Dispatcher.DDatanode.StorageGroup; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations; import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport; @@ -318,7 +318,7 @@ public class Dispatcher { try { sock.connect( NetUtils.createSocketAddr(target.getDatanodeInfo().getXferAddr()), - HdfsServerConstants.READ_TIMEOUT); + HdfsConstants.READ_TIMEOUT); sock.setKeepAlive(true); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java index 1259f4198d0..d8a60aa0c40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java @@ -279,12 +279,6 @@ public interface HdfsServerConstants { } } - // Timeouts for 
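
The HdfsConfiguration hunk retargets each DeprecationDelta at the client-module constant; nothing changes functionally, since the property strings are identical. For readers unfamiliar with the mechanism, a sketch using the same Configuration.addDeprecations API this file calls:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configuration.DeprecationDelta;

// Sketch of what the deprecation table buys: setting a legacy property name
// transparently feeds the current key.
public class DeprecationDemo {
  public static void main(String[] args) {
    Configuration.addDeprecations(new DeprecationDelta[] {
        new DeprecationDelta("dfs.socket.timeout", "dfs.client.socket-timeout"),
    });
    Configuration conf = new Configuration(false);
    conf.setInt("dfs.socket.timeout", 5000);                          // legacy name
    System.out.println(conf.getInt("dfs.client.socket-timeout", -1)); // 5000
  }
}
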
communicating with DataNode for streaming writes/reads - int READ_TIMEOUT = 60 * 1000; - int READ_TIMEOUT_EXTENSION = 5 * 1000; - int WRITE_TIMEOUT = 8 * 60 * 1000; - int WRITE_TIMEOUT_EXTENSION = 5 * 1000; //for write pipeline - /** * Defines the NameNode role. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java index abc9390dc88..66fd5671ddd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java @@ -27,11 +27,11 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_SPLIT_THRESHO import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_SPLIT_THRESHOLD_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CACHEREPORT_INTERVAL_MSEC_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_NON_LOCAL_LAZY_PERSIST; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_NON_LOCAL_LAZY_PERSIST_DEFAULT; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY; @@ -55,9 +55,10 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTI import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver; import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.security.SaslPropertiesResolver; /** @@ -107,9 +108,9 @@ public class DNConf { public DNConf(Configuration conf) { this.conf = conf; socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, - HdfsServerConstants.READ_TIMEOUT); + HdfsConstants.READ_TIMEOUT); socketWriteTimeout = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, - HdfsServerConstants.WRITE_TIMEOUT); + HdfsConstants.WRITE_TIMEOUT); socketKeepaliveTimeout = conf.getInt( DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT); @@ -149,8 +150,8 @@ public class DNConf { DFS_CACHEREPORT_INTERVAL_MSEC_DEFAULT); this.dfsclientSlowIoWarningThresholdMs = conf.getLong( - DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY, - DFSConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT); + HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY, + 
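
The four streaming timeouts deleted from HdfsServerConstants here are not dropped: per this patch's file list, HdfsConstants in the client module gains them with the same values, which is why DNConf below simply switches imports. The defaulting pattern DNConf ends up with, as a sketch (property strings and literals taken from this patch):

import org.apache.hadoop.conf.Configuration;

// Sketch of DNConf's defaulting after the move; the literals are the values
// the deleted HdfsServerConstants block carried.
public class DnTimeouts {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    int socketTimeout = conf.getInt(
        "dfs.client.socket-timeout", 60 * 1000);             // HdfsConstants.READ_TIMEOUT
    int socketWriteTimeout = conf.getInt(
        "dfs.datanode.socket.write.timeout", 8 * 60 * 1000); // HdfsConstants.WRITE_TIMEOUT
    System.out.println(socketTimeout + " / " + socketWriteTimeout);
  }
}
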
HdfsClientConfigKeys.DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT); this.datanodeSlowIoWarningThresholdMs = conf.getLong( DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY, DFSConfigKeys.DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 2a026af6b74..fa3b78c508f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -145,7 +145,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys; import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; @@ -921,8 +920,8 @@ public class DataNode extends ReconfigurableBase if (conf.getBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, HdfsClientConfigKeys.Read.ShortCircuit.DEFAULT) || - conf.getBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, - DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT)) { + conf.getBoolean(HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, + HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT)) { DomainPeerServer domainPeerServer = getDomainPeerServer(conf, streamingAddr.getPort()); if (domainPeerServer != null) { @@ -943,8 +942,8 @@ public class DataNode extends ReconfigurableBase if (domainSocketPath.isEmpty()) { if (conf.getBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, HdfsClientConfigKeys.Read.ShortCircuit.DEFAULT) && - (!conf.getBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, - DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT))) { + (!conf.getBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, + HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT))) { LOG.warn("Although short-circuit local reads are configured, " + "they are disabled because you didn't configure " + DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY); @@ -2141,7 +2140,7 @@ public class DataNode extends ReconfigurableBase } long writeTimeout = dnConf.socketWriteTimeout + - HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1); + HdfsConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1); OutputStream unbufOut = NetUtils.getOutputStream(sock, writeTimeout); InputStream unbufIn = NetUtils.getInputStream(sock); DataEncryptionKeyFactory keyFactory = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index 089b7cddfed..e9cf4362942 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -72,7 +72,6 @@ import 
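
The DataNode hunk swaps DFSConfigKeys for HdfsClientConfigKeys in two predicates. Condensed, the logic reads: start a domain peer server if short-circuit reads or domain-socket data traffic is enabled, and warn when short-circuit is configured without a domain socket path. A sketch of the first predicate using the constants this patch introduces:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

// Sketch of the "do we need a DomainPeerServer?" test in the hunk above.
public class DomainPeerCheck {
  static boolean wantDomainPeerServer(Configuration conf) {
    return conf.getBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY,
            HdfsClientConfigKeys.Read.ShortCircuit.DEFAULT)
        || conf.getBoolean(
            HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
            HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT);
  }

  public static void main(String[] args) {
    System.out.println(wantDomainPeerServer(new Configuration(false))); // false: both default off
  }
}
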
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmR import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.protocolPB.PBHelper; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsUnsupportedException; import org.apache.hadoop.hdfs.server.datanode.DataNode.ShortCircuitFdsVersionException; import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry.NewShmInfo; @@ -705,9 +704,9 @@ class DataXceiver extends Receiver implements Runnable { mirrorSock = datanode.newSocket(); try { int timeoutValue = dnConf.socketTimeout - + (HdfsServerConstants.READ_TIMEOUT_EXTENSION * targets.length); + + (HdfsConstants.READ_TIMEOUT_EXTENSION * targets.length); int writeTimeout = dnConf.socketWriteTimeout + - (HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * targets.length); + (HdfsConstants.WRITE_TIMEOUT_EXTENSION * targets.length); NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue); mirrorSock.setSoTimeout(timeoutValue); mirrorSock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java index c0df244e2c4..1436adc3325 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java @@ -23,7 +23,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; @@ -94,7 +94,7 @@ public class SecureDataNodeStarter implements Daemon { InetSocketAddress streamingAddr = DataNode.getStreamingAddr(conf); int socketWriteTimeout = conf.getInt( DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, - HdfsServerConstants.WRITE_TIMEOUT); + HdfsConstants.WRITE_TIMEOUT); ServerSocket ss = (socketWriteTimeout > 0) ? 
ServerSocketChannel.open().socket() : new ServerSocket(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 7c5a89c9317..51d87d9655e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -24,12 +24,12 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java index ca0ef85f656..e96e674788f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.RemotePeerFactory; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.net.TcpPeerServer; import org.apache.hadoop.hdfs.protocol.Block; @@ -71,7 +72,6 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo; import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; import org.apache.hadoop.hdfs.util.LightWeightLinkedSet; import org.apache.hadoop.net.NetUtils; @@ -813,7 +813,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory { chosenNode = bestNode(dfs, lblock.getLocations(), 
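
The SecureDataNodeStarter change above is import-only, but the line it touches encodes a real constraint: Hadoop can only enforce write timeouts on sockets backed by an NIO channel, so a positive dfs.datanode.socket.write.timeout forces a ServerSocketChannel. A sketch (the rationale is a reading of the code, not stated in the patch):

import java.io.IOException;
import java.net.ServerSocket;
import java.nio.channels.ServerSocketChannel;

// Sketch: the flavor of listening socket follows the configured write timeout.
public class SocketChoice {
  static ServerSocket open(int socketWriteTimeout) throws IOException {
    return (socketWriteTimeout > 0)
        ? ServerSocketChannel.open().socket()  // channel-backed: timed writes possible
        : new ServerSocket();                  // plain blocking socket
  }
}
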
deadNodes); targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr()); } catch (IOException ie) { - if (failures >= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) { + if (failures >= HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) { throw new IOException("Could not obtain block " + lblock, ie); } LOG.info("Could not obtain block from any node: " + ie); @@ -849,8 +849,8 @@ public class NamenodeFsck implements DataEncryptionKeyFactory { Peer peer = null; Socket s = NetUtils.getDefaultSocketFactory(conf).createSocket(); try { - s.connect(addr, HdfsServerConstants.READ_TIMEOUT); - s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT); + s.connect(addr, HdfsConstants.READ_TIMEOUT); + s.setSoTimeout(HdfsConstants.READ_TIMEOUT); peer = TcpPeerServer.peerFromSocketAndKey( dfs.getSaslDataTransferClient(), s, NamenodeFsck.this, blockToken, datanodeId); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java index 7a5b39ac6c8..992d8b41add 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java @@ -27,7 +27,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.hdfs.DFSClient; -import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.client.impl.DfsClientConf.ShortCircuitConf; import org.apache.hadoop.net.unix.DomainSocket; import org.apache.hadoop.util.PerformanceAdvisory; @@ -112,7 +112,7 @@ public class DomainSocketFactory { } else { if (conf.getDomainSocketPath().isEmpty()) { throw new HadoopIllegalArgumentException(feature + " is enabled but " - + DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY + " is not set."); + + HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY + " is not set."); } else if (DomainSocket.getLoadingFailureReason() != null) { LOG.warn(feature + " cannot be used because " + DomainSocket.getLoadingFailureReason()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/TestFiPipelines.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/TestFiPipelines.java index ddeb57d3ff3..7cc78987f51 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/TestFiPipelines.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/TestFiPipelines.java @@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fi.FiTestUtil; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.server.datanode.BlockReceiverAspects; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; @@ -226,11 +227,11 @@ public class TestFiPipelines { conf = new Configuration(); int customPerChecksumSize = 700; int customBlockSize = customPerChecksumSize * 3; - conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 100); - conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize); + 
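
In the fsck hunk above, the failure budget now comes from HdfsClientConfigKeys, but the behavior is unchanged: keep picking other datanodes until dfs.client.max.block.acquire.failures (default 3) selections have failed, then surface the last IOException. A minimal sketch of that loop; connect() is a hypothetical placeholder for bestNode plus the socket dial:

import java.io.IOException;

// Sketch of the retry budget NamenodeFsck relies on above.
public class AcquireRetry {
  static final int MAX_BLOCK_ACQUIRE_FAILURES = 3; // the client-key default

  static void readBlock() throws IOException {
    int failures = 0;
    while (true) {
      try {
        connect(); // hypothetical: pick a datanode and dial it
        return;
      } catch (IOException ie) {
        if (failures >= MAX_BLOCK_ACQUIRE_FAILURES) {
          throw new IOException("Could not obtain block", ie);
        }
        failures++;
      }
    }
  }

  static void connect() throws IOException {
    throw new IOException("placeholder: no datanode reachable");
  }

  public static void main(String[] args) {
    try {
      readBlock();
    } catch (IOException e) {
      System.out.println(e.getMessage()); // gives up after 4 attempts
    }
  }
}
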
conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 100); + conf.setInt(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize); conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize); - conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, customBlockSize / 2); - conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 0); + conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, customBlockSize / 2); + conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 0); } private static void initLoggers() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java index fcad32e0b4f..7a9a76f9f2b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol.java @@ -37,6 +37,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.log4j.Level; @@ -52,7 +53,7 @@ public class TestFiDataTransferProtocol { static { conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 1); conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, REPLICATION); - conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000); + conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000); } static private FSDataOutputStream createFile(FileSystem fs, Path p diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java index 5832bf04914..deda317b207 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java @@ -38,6 +38,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; import org.apache.log4j.Level; import org.junit.Assert; @@ -57,8 +58,8 @@ public class TestFiDataTransferProtocol2 { static { conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 1); conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, REPLICATION); - conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, PACKET_SIZE); - conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000); + conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, PACKET_SIZE); + conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000); } static final byte[] bytes = new byte[MAX_N_PACKET * PACKET_SIZE]; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java 
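
A small pre-existing quirk in the TestFiPipelines hunk, untouched by this rename: the packet size is set twice (first 100, then customBlockSize / 2 = 1050), and the later call wins, since Configuration.setInt simply overwrites. Sketch:

import org.apache.hadoop.conf.Configuration;

// Last write wins for repeated sets of the same property.
public class LastWriteWins {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.setInt("dfs.client-write-packet-size", 100);
    conf.setInt("dfs.client-write-packet-size", 2100 / 2); // customBlockSize / 2
    System.out.println(conf.getInt("dfs.client-write-packet-size", -1)); // 1050
  }
}
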
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java index e4380c76ca5..3455f55a392 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java @@ -337,7 +337,7 @@ public class TestEnhancedByteBufferAccess { ByteBuffer results[] = { null, null, null, null }; DistributedFileSystem fs = null; - conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT); + conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); fs = cluster.getFileSystem(); @@ -599,7 +599,7 @@ public class TestEnhancedByteBufferAccess { conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY, false); final String CONTEXT = "testZeroCopyReadOfCachedData"; - conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT); + conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT); conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, DFSTestUtil.roundUpToMultiple(TEST_FILE_LENGTH, (int) NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize())); @@ -722,7 +722,7 @@ public class TestEnhancedByteBufferAccess { final String CONTEXT = "testClientMmapDisable"; FSDataInputStream fsIn = null; DistributedFileSystem fs = null; - conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT); + conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT); try { // With HdfsClientConfigKeys.Mmap.ENABLED_KEY set to false, @@ -753,7 +753,7 @@ public class TestEnhancedByteBufferAccess { // Now try again with HdfsClientConfigKeys.Mmap.CACHE_SIZE_KEY == 0. conf.setBoolean(HdfsClientConfigKeys.Mmap.ENABLED_KEY, true); conf.setInt(HdfsClientConfigKeys.Mmap.CACHE_SIZE_KEY, 0); - conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT + ".1"); + conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT + ".1"); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster.waitActive(); fs = cluster.getFileSystem(); @@ -784,7 +784,7 @@ public class TestEnhancedByteBufferAccess { MiniDFSCluster cluster = null; final Path TEST_PATH = new Path("/a"); final String CONTEXT = "test2GBMmapLimit"; - conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT); + conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT); FSDataInputStream fsIn = null, fsIn2 = null; ByteBuffer buf1 = null, buf2 = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java index 7193fe209a3..e25a7548852 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java @@ -20,7 +20,6 @@ package org.apache.hadoop.fs; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -42,7 +41,7 @@ public class TestUnbuffer { Configuration conf = new Configuration(); // Set a new ClientContext. This way, we will have our own PeerCache, // rather than sharing one with other unit tests. 
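
Several test hunks here set dfs.client.context, and the TestUnbuffer comment explains why: DFSClient instances sharing a context name share one ClientContext, and with it the PeerCache, so tests that assert on cache contents need a private context. Sketch (the context name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

// A unique context name isolates this client's caches from other clients
// in the same JVM.
public class ContextSetup {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, "myIsolatedContext");
    System.out.println(conf.get(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT));
  }
}
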
- conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, + conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, "testUnbufferClosesSocketsContext"); // Disable short-circuit reads. With short-circuit, we wouldn't hold open a @@ -50,9 +49,9 @@ public class TestUnbuffer { conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false); // Set a really long socket timeout to avoid test timing issues. - conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, + conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 100000000L); - conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, + conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, 100000000L); MiniDFSCluster cluster = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java index 88b7f37dcce..6d8bec323eb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java @@ -36,10 +36,10 @@ import org.apache.hadoop.hdfs.net.TcpPeerServer; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.ShortCircuitRegistry; @@ -202,8 +202,8 @@ public class BlockReaderTestUtil { Socket sock = NetUtils. 
getDefaultSocketFactory(fs.getConf()).createSocket(); try { - sock.connect(addr, HdfsServerConstants.READ_TIMEOUT); - sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT); + sock.connect(addr, HdfsConstants.READ_TIMEOUT); + sock.setSoTimeout(HdfsConstants.READ_TIMEOUT); peer = TcpPeerServer.peerFromSocket(sock); } finally { if (peer == null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java index 486024474f5..6b9c138f66b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java @@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -52,9 +53,9 @@ public class FileAppendTest4 { private static DistributedFileSystem fs; private static void init(Configuration conf) { - conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM); + conf.setInt(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE); - conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, PACKET_SIZE); + conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, PACKET_SIZE); } @BeforeClass diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java index d8acefff24e..a8ca9c7bf52 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CONTEXT; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS; import static org.hamcrest.CoreMatchers.equalTo; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java index c69774c2212..aad670a6717 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java @@ -126,10 +126,10 @@ public class TestBlockReaderLocal { HdfsConfiguration conf = new HdfsConfiguration(); conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY, !checksum); - conf.setLong(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, + 
conf.setLong(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BlockReaderLocalTest.BYTES_PER_CHECKSUM); conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "CRC32C"); - conf.setLong(DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD, readahead); + conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_CACHE_READAHEAD, readahead); test.setConfiguration(conf); FileInputStream dataIn = null, metaIn = null; final Path TEST_PATH = new Path("/a"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java index 760a61f59b5..3deca170481 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java @@ -64,12 +64,12 @@ public class TestBlockReaderLocalLegacy { getAbsolutePath()); } conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true); - conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true); + conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true); conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY, false); conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY, UserGroupInformation.getCurrentUser().getShortUserName()); - conf.setBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false); + conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false); // Set short retry timeouts so this test runs faster conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10); return conf; @@ -164,7 +164,7 @@ public class TestBlockReaderLocalLegacy { public void testBlockReaderLocalLegacyWithAppend() throws Exception { final short REPL_FACTOR = 1; final HdfsConfiguration conf = getConfiguration(null); - conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true); + conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true); final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java index 91de2b8741c..ca98441e8ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java @@ -169,7 +169,7 @@ public class TestClientProtocolForPipelineRecovery { @Test public void testPipelineRecoveryOnOOB() throws Exception { Configuration conf = new HdfsConfiguration(); - conf.set(DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, "15"); + conf.set(HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, "15"); MiniDFSCluster cluster = null; try { int numDataNodes = 1; @@ -207,7 +207,7 @@ public class TestClientProtocolForPipelineRecovery { @Test public void testPipelineRecoveryOnRestartFailure() throws Exception { Configuration conf = new HdfsConfiguration(); - conf.set(DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, "5"); + conf.set(HdfsClientConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY, "5"); MiniDFSCluster cluster = null; try { int numDataNodes = 2; diff --git 
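
TestBlockReaderLocal exercises the readahead knob via its new home; the drop-behind keys moved the same way. For completeness, setting the whole cache-hint family through the client class (values are arbitrary examples):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

// Example values only; these hints tune OS page-cache behavior per client.
public class CacheHints {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_CACHE_READAHEAD, 4096L);
    conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS, true);
    conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, false);
  }
}
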
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java index c518c8c5aec..afa5d271fd1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java @@ -25,6 +25,7 @@ import java.net.InetSocketAddress; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.junit.Assert; import org.junit.Test; @@ -89,8 +90,8 @@ public class TestConnCache { // instances. Also use a really long socket timeout so that nothing // gets closed before we get around to checking the cache size at the end. final String contextName = "testReadFromOneDNContext"; - configuration.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, contextName); - configuration.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, + configuration.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, contextName); + configuration.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 100000000L); BlockReaderTestUtil util = new BlockReaderTestUtil(1, configuration); final Path testFile = new Path("/testConnCache.dat"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index ad9f1098870..813b886b59c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; @@ -177,11 +177,11 @@ public class TestDFSClientRetries { InterruptedException { final int writeTimeout = 100; //milliseconds. // set a very short write timeout for datanode, so that tests runs fast. 
- conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, writeTimeout); + conf.setInt(HdfsClientConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, writeTimeout); // set a smaller block size final int blockSize = 10*1024*1024; conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize); - conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 1); + conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 1); // set a small buffer size final int bufferSize = 4096; conf.setInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, bufferSize); @@ -638,10 +638,11 @@ public class TestDFSClientRetries { short replicationFactor = 1; long blockSize = 128*1024*1024; // DFS block size int bufferSize = 4096; - - conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY, xcievers); - conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, - retries); + + conf.setInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY, + xcievers); + conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, + retries); conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, timeWin); // Disable keepalive conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, 0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java index 26412c817c0..9d88384dfe6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java @@ -73,7 +73,7 @@ public class TestDFSInputStream { @Test(timeout=60000) public void testSkipWithRemoteBlockReader() throws IOException { Configuration conf = new Configuration(); - conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true); + conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); try { testSkipInner(cluster); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java index 1563b722608..dbe2f1e5d17 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java @@ -17,12 +17,12 @@ */ package org.apache.hadoop.hdfs; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CONTEXT; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY; +import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY; +import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java index c5d889c1432..5d2c48e5712 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java @@ -44,6 +44,7 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage; import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil; @@ -58,7 +59,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto; import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.datanode.CachingStrategy; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.io.IOUtils; @@ -108,8 +108,8 @@ public class TestDataTransferProtocol { StringUtils.byteToHexString(sendBuf.toByteArray())); sock = new Socket(); - sock.connect(dnAddr, HdfsServerConstants.READ_TIMEOUT); - sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT); + sock.connect(dnAddr, HdfsConstants.READ_TIMEOUT); + sock.setSoTimeout(HdfsConstants.READ_TIMEOUT); OutputStream out = sock.getOutputStream(); // Should we excuse diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java index 5cbc2167713..cd82b2d7c27 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol; @@ -287,7 +288,7 @@ public class TestDatanodeDeath { conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 2); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2); - conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000); + conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(numDatanodes).build(); cluster.waitActive(); @@ -343,7 
+344,7 @@ public class TestDatanodeDeath { conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000); conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1); conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2); - conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000); + conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000); int myMaxNodes = 5; System.out.println("SimpleTest starting with DataNode to Kill " + datanodeToKill); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java index 44c3f144a11..f2043fbddb2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDisableConnCache.java @@ -23,6 +23,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.junit.Test; /** @@ -46,7 +47,7 @@ public class TestDisableConnCache { // Configure a new instance with no peer caching, ensure that it doesn't // cache anything confWithoutCache.setInt( - DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0); + HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0); BlockReaderTestUtil util = new BlockReaderTestUtil(1, confWithoutCache); final Path testFile = new Path("/testConnCache.dat"); util.writeFile(testFile, FILE_SIZE / 1024); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java index e474acdc097..91e78e19058 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java @@ -572,7 +572,7 @@ public class TestDistributedFileSystem { final Path dir = new Path("/filechecksum"); final int block_size = 1024; final int buffer_size = conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096); - conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512); + conf.setInt(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512); //try different number of blocks for(int n = 0; n < 5; n++) { @@ -1075,7 +1075,7 @@ public class TestDistributedFileSystem { public void testDFSClientPeerReadTimeout() throws IOException { final int timeout = 1000; final Configuration conf = new HdfsConfiguration(); - conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout); + conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout); // only need cluster to create a dfs client to get a peer final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); @@ -1120,7 +1120,7 @@ public class TestDistributedFileSystem { public void testDFSClientPeerWriteTimeout() throws IOException { final int timeout = 1000; final Configuration conf = new HdfsConfiguration(); - conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout); + conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, timeout); // only need cluster to create a dfs client to get a peer final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); diff --git 
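
The TestDisableConnCache hunk above doubles as documentation of an operational knob: a socket cache capacity of 0 disables the client PeerCache outright, so every read opens a fresh connection. Sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

// Capacity 0 turns peer caching off entirely (what the test verifies).
public class NoPeerCache {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0);
    System.out.println(conf.getInt(
        HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 16)); // 0
  }
}
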
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
index dd4fe144406..8a950272b9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
@@ -28,13 +28,13 @@ import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
@@ -489,8 +489,8 @@ public class TestFileAppend2 {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 2);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 30000);
-    conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, 30000);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 30000);
+    conf.setInt(HdfsClientConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY, 30000);
     conf.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 50);
 
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
index 969f2b584ee..284ed753d94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -84,7 +85,7 @@ public class TestFileAppend4 {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
     // handle under-replicated blocks quickly (for replication asserts)
     conf.setInt(
         DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 5);
@@ -339,7 +340,7 @@ public class TestFileAppend4 {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);
 
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4)
         .build();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index 24ec9884b1e..3824311a395 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -20,10 +20,10 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
@@ -66,6 +66,7 @@ import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -220,7 +221,7 @@ public class TestFileCreation {
     if (netIf != null) {
       conf.set(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES, netIf);
     }
-    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, useDnHostname);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, useDnHostname);
     if (useDnHostname) {
       // Since the mini cluster only listens on the loopback we have to
       // ensure the hostname used to access DNs maps to the loopback. We
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java
index a33ad18c6b3..0fcae5005b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHFlush.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -386,7 +387,7 @@ public class TestHFlush {
     final int fileLen = 6;
     Configuration conf = new HdfsConfiguration();
     final int timeout = 2000;
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY,
         timeout);
     final Path p = new Path("/pipelineHeartbeat/foo");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java
index 2c187b1e83f..26aa28e6e8e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java
@@ -28,7 +28,7 @@ public class TestParallelRead extends TestParallelReadUtil {
     // off both short-circuit local reads and UNIX domain socket data traffic.
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
-    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
+    conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
         false);
     // dfs.domain.socket.path should be ignored because the previous two keys
     // were set to false. This is a regression test for HDFS-4473.
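The TestParallelRead hunk above exercises the relocated domain-socket key. A minimal sketch of the same configuration outside the test harness (the class name is invented; only the two key constants shown in the hunk are assumed):

    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class TcpOnlyReadConfig {
      /** Disable both short-circuit reads and domain-socket data traffic. */
      public static HdfsConfiguration create() {
        HdfsConfiguration conf = new HdfsConfiguration();
        conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
        conf.setBoolean(
            HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
        return conf;
      }
    }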
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitLegacyRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitLegacyRead.java
index c3a3fb64739..220e45bc691 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitLegacyRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitLegacyRead.java
@@ -29,8 +29,8 @@ public class TestParallelShortCircuitLegacyRead extends TestParallelReadUtil {
     DFSInputStream.tcpReadsDisabledForTesting = true;
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, "");
-    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
-    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
         false);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadUnCached.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadUnCached.java
index 7485fa729a4..ad26e183352 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadUnCached.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadUnCached.java
@@ -51,13 +51,13 @@ public class TestParallelShortCircuitReadUnCached extends TestParallelReadUtil {
     conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
         false);
-    conf.setBoolean(DFSConfigKeys.
+    conf.setBoolean(HdfsClientConfigKeys.
         DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
     // We want to test reading from stale sockets.
     conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, 1);
-    conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
+    conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
         5 * 60 * 1000);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 32);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 32);
     // Avoid using the FileInputStreamCache.
     conf.setInt(HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_SIZE_KEY, 0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelUnixDomainRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelUnixDomainRead.java
index 9db8503044b..872ac38c560 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelUnixDomainRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelUnixDomainRead.java
@@ -41,7 +41,7 @@ public class TestParallelUnixDomainRead extends TestParallelReadUtil {
     conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY, new File(sockDir.getDir(),
         "TestParallelLocalRead.%d.sock").getAbsolutePath());
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
-    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
     DomainSocket.disableBindPathValidation();
     setupCluster(1, conf);
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
index a82f04ff529..e4fea607499 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPipelines.java
@@ -29,6 +29,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -148,11 +149,11 @@ public class TestPipelines {
     conf = new Configuration();
     int customPerChecksumSize = 700;
     int customBlockSize = customPerChecksumSize * 3;
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 100);
-    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 100);
+    conf.setInt(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, customBlockSize / 2);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 0);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, customBlockSize / 2);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 0);
   }
 
   private static void initLoggers() {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
index f1a921dadbf..3c9582b2ac3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
@@ -212,7 +212,7 @@ public class TestPread {
       return;
     }
     int numBlocks = 1;
-    assertTrue(numBlocks <= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
+    assertTrue(numBlocks <= HdfsClientConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
     byte[] expected = new byte[numBlocks * blockSize];
     Random rand = new Random(seed);
     rand.nextBytes(expected);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java
index 378d8bba153..9d38fd77c19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRead.java
@@ -21,6 +21,7 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.junit.Assert;
 
 import org.apache.hadoop.conf.Configuration;
@@ -58,7 +59,7 @@ public class TestRead {
         new ShortCircuitTestContext("testEOFWithBlockReaderLocal");
     try {
       final Configuration conf = testContext.newConfiguration();
-      conf.setLong(DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD, BLOCK_SIZE);
+      conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_CACHE_READAHEAD, BLOCK_SIZE);
       MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
           .format(true).build();
       testEOF(cluster, 1);
@@ -73,7 +74,7 @@ public class TestRead {
   @Test(timeout=60000)
   public void testEOFWithRemoteBlockReader() throws Exception {
     final Configuration conf = new Configuration();
-    conf.setLong(DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD, BLOCK_SIZE);
+    conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_CACHE_READAHEAD, BLOCK_SIZE);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
         .format(true).build();
     testEOF(cluster, 1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRemoteBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRemoteBlockReader.java
index 8ab110d5aad..cef1d6d8528 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRemoteBlockReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRemoteBlockReader.java
@@ -17,11 +17,13 @@
  */
 package org.apache.hadoop.hdfs;
 
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+
 public class TestRemoteBlockReader extends TestBlockReaderBase {
   HdfsConfiguration createConf() {
     HdfsConfiguration conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
     return conf;
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
index 43f29923432..234eb22621b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
@@ -50,13 +50,13 @@ import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.block.SecurityTestUtil;
 import org.apache.hadoop.hdfs.server.balancer.TestBalancer;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -169,8 +169,8 @@ public class TestBlockTokenWithDFS {
     Peer peer = null;
     Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
     try {
-      sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
-      sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
+      sock.connect(addr, HdfsConstants.READ_TIMEOUT);
+      sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
       peer = TcpPeerServer.peerFromSocket(sock);
     } finally {
       if (peer == null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
index 70c2a4e84f7..d2bf7341014 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
@@ -45,8 +45,10 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.client.BlockReportOptions;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -55,7 +57,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
@@ -106,7 +107,7 @@ public class TestBlockReplacement {
     final Random r = new Random();
     CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
-    CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE/2);
+    CONF.setInt(HdfsClientConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE/2);
     CONF.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,500);
     cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(REPLICATION_FACTOR)
         .racks(INITIAL_RACKS).build();
@@ -325,7 +326,7 @@ public class TestBlockReplacement {
     Socket sock = new Socket();
     try {
       sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
-          HdfsServerConstants.READ_TIMEOUT);
+          HdfsConstants.READ_TIMEOUT);
       sock.setKeepAlive(true);
       // sendRequest
       DataOutputStream out = new DataOutputStream(sock.getOutputStream());
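The hunks above replace HdfsServerConstants.READ_TIMEOUT with HdfsConstants.READ_TIMEOUT, since the constant is now client-visible. A minimal sketch of the connect idiom these tests share (class and method names are invented for illustration; it assumes READ_TIMEOUT exists on HdfsConstants, as this patch arranges):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.Socket;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;

    public class DatanodeSocketExample {
      /** Open a socket to a datanode using the relocated READ_TIMEOUT. */
      public static Socket connect(InetSocketAddress addr) throws IOException {
        Socket sock = new Socket();
        sock.connect(addr, HdfsConstants.READ_TIMEOUT);
        sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
        return sock;
      }
    }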
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
index bd1a7771a14..a352f5fb9d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.io.IOUtils;
@@ -49,7 +50,7 @@ import org.junit.Test;
 public class TestCachingStrategy {
   private static final Log LOG = LogFactory.getLog(TestCachingStrategy.class);
   private static final int MAX_TEST_FILE_LEN = 1024 * 1024;
-  private static final int WRITE_PACKET_SIZE = DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
+  private static final int WRITE_PACKET_SIZE = HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
 
   private final static TestRecordingCacheTracker tracker =
       new TestRecordingCacheTracker();
@@ -259,8 +260,8 @@ public class TestCachingStrategy {
     Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY, false);
     conf.setBoolean(DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY, false);
-    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS, true);
-    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, true);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS, true);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, true);
     MiniDFSCluster cluster = null;
     String TEST_PATH = "/test";
     int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index b9c769134aa..4e371619108 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -54,11 +54,11 @@ import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -523,8 +523,8 @@ public class TestDataNodeVolumeFailure {
     Peer peer = null;
     Socket sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
     try {
-      sock.connect(addr, HdfsServerConstants.READ_TIMEOUT);
-      sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
+      sock.connect(addr, HdfsConstants.READ_TIMEOUT);
+      sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
       peer = TcpPeerServer.peerFromSocket(sock);
     } finally {
       if (peer == null) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
index ce29fc89d87..273babbff8c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
@@ -264,11 +264,12 @@ public abstract class LazyPersistTestCase {
     if (useSCR) {
       conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
       // Do not share a client context across tests.
-      conf.set(DFS_CLIENT_CONTEXT, UUID.randomUUID().toString());
+      conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT, UUID.randomUUID().toString());
       conf.set(DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
           UserGroupInformation.getCurrentUser().getShortUserName());
       if (useLegacyBlockReaderLocal) {
-        conf.setBoolean(DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
+        conf.setBoolean(
+            HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
       } else {
         sockDir = new TemporarySocketDirectory();
         conf.set(DFS_DOMAIN_SOCKET_PATH_KEY, new File(sockDir.getDir(),
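The LazyPersistTestCase hunk above now qualifies DFS_CLIENT_CONTEXT with its new home in HdfsClientConfigKeys. A minimal sketch of the per-test isolation idiom it implements (the wrapper class is invented for illustration):

    import java.util.UUID;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class IsolatedClientContext {
      /** Give a test its own client context so ClientContext caches are not shared. */
      public static void apply(Configuration conf) {
        conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT,
            UUID.randomUUID().toString());
      }
    }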
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
index a5a09fa2890..4516696f1fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -53,7 +54,7 @@ public class TestDatanodeRestart {
     // bring up a cluster of 3
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
@@ -77,7 +78,7 @@ public class TestDatanodeRestart {
   public void testRbwReplicas() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     cluster.waitActive();
     try {
@@ -146,7 +147,7 @@ public class TestDatanodeRestart {
   @Test public void testRecoverReplicas() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
+    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     cluster.waitActive();
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
index 20c54fe8e52..ae61bbf3490 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
@@ -18,8 +18,8 @@ package org.apache.hadoop.hdfs.shortcircuit;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_CONTEXT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY;
 import static org.hamcrest.CoreMatchers.equalTo;
@@ -71,6 +71,7 @@ import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Test;
 import org.mockito.Mockito;
+import org.mockito.internal.util.reflection.Whitebox;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -671,8 +672,8 @@ public class TestShortCircuitCache {
     // The second read should fail, and we should only have 1 segment and 1 slot
     // left.
-    fs.getClient().getConf().getShortCircuitConf().brfFailureInjector =
-        new TestCleanupFailureInjector();
+    BlockReaderFactory.setFailureInjectorForTesting(
+        new TestCleanupFailureInjector());
     try {
       DFSTestUtil.readFileBuffer(fs, TEST_PATH2);
     } catch (Throwable t) {
@@ -766,8 +767,8 @@ public class TestShortCircuitCache {
         new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     cluster.waitActive();
     DistributedFileSystem fs = cluster.getFileSystem();
-    fs.getClient().getConf().getShortCircuitConf().brfFailureInjector =
-        new TestPreReceiptVerificationFailureInjector();
+    BlockReaderFactory.setFailureInjectorForTesting(
+        new TestPreReceiptVerificationFailureInjector());
     final Path TEST_PATH1 = new Path("/test_file1");
     DFSTestUtil.createFile(fs, TEST_PATH1, 4096, (short)1, 0xFADE2);
     final Path TEST_PATH2 = new Path("/test_file2");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
index c24eb33d76c..8eed330430d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitLocalRead.java
@@ -253,7 +253,7 @@ public class TestShortCircuitLocalRead {
         ignoreChecksum);
     // Set a random client context name so that we don't share a cache with
     // other invocations of this function.
-    conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT,
+    conf.set(HdfsClientConfigKeys.DFS_CLIENT_CONTEXT,
         UUID.randomUUID().toString());
     conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
         new File(sockDir.getDir(),
@@ -261,7 +261,7 @@ public class TestShortCircuitLocalRead {
     if (shortCircuitUser != null) {
       conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
           shortCircuitUser);
-      conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
+      conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
     }
     if (simulatedStorage) {
       SimulatedFSDataset.setFactory(conf);
@@ -592,7 +592,7 @@ public class TestShortCircuitLocalRead {
   public void doTestShortCircuitReadWithRemoteBlockReader(boolean ignoreChecksum, int size,
       String shortCircuitUser, int readOffset, boolean shortCircuitFails)
       throws IOException, InterruptedException {
     Configuration conf = new Configuration();
-    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
+    conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER, true);
     conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)