diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bbb2df7bed7..79c92667c3a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -375,6 +375,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6904. YARN unable to renew delegation token fetched via webhdfs due
     to incorrect service port. (jitendra)
 
+    HDFS-6988. Improve HDFS-6581 eviction configuration (Xiaoyu Yao via Colin
+    P. McCabe)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index e5e8e9078c8..50ea800ccd1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -135,9 +135,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_DATANODE_RAM_DISK_REPLICA_TRACKER_KEY = "dfs.datanode.ram.disk.replica.tracker";
   public static final Class DFS_DATANODE_RAM_DISK_REPLICA_TRACKER_DEFAULT = RamDiskReplicaLruTracker.class;
   public static final String DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT = "dfs.datanode.ram.disk.low.watermark.percent";
-  public static final int DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT_DEFAULT = 10;
-  public static final String DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS = "dfs.datanode.ram.disk.low.watermark.replicas";
-  public static final int DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS_DEFAULT = 3;
+  public static final float DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT_DEFAULT = 10.0f;
+  public static final String DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES = "dfs.datanode.ram.disk.low.watermark.bytes";
+  public static final long DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES_DEFAULT = DFS_BLOCK_SIZE_DEFAULT;
 
   // This setting is for testing/internal use only.
   public static final String DFS_DATANODE_DUPLICATE_REPLICA_DELETION = "dfs.datanode.duplicate.replica.deletion";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 1709066ef48..e77ea34c553 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -46,7 +46,6 @@ import javax.management.StandardMBean;
 
 import com.google.common.collect.Lists;
 import com.google.common.base.Preconditions;
-import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -2398,24 +2397,20 @@ class FsDatasetImpl implements FsDatasetSpi {
   class LazyWriter implements Runnable {
     private volatile boolean shouldRun = true;
     final int checkpointerInterval;
-    final long estimateBlockSize;
-    final int lowWatermarkFreeSpacePercentage;
-    final int lowWatermarkFreeSpaceReplicas;
+    final float lowWatermarkFreeSpacePercentage;
+    final long lowWatermarkFreeSpaceBytes;
 
     public LazyWriter(Configuration conf) {
       this.checkpointerInterval = conf.getInt(
           DFSConfigKeys.DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC,
           DFSConfigKeys.DFS_DATANODE_LAZY_WRITER_INTERVAL_DEFAULT_SEC);
-      this.estimateBlockSize = conf.getLongBytes(
-          DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
-          DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
-      this.lowWatermarkFreeSpacePercentage = conf.getInt(
+      this.lowWatermarkFreeSpacePercentage = conf.getFloat(
          DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT,
          DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT_DEFAULT);
-      this.lowWatermarkFreeSpaceReplicas = conf.getInt(
-          DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS,
-          DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS_DEFAULT);
+      this.lowWatermarkFreeSpaceBytes = conf.getLong(
+          DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
+          DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES_DEFAULT);
     }
 
     /**
@@ -2478,6 +2473,7 @@ class FsDatasetImpl implements FsDatasetSpi {
     private boolean transientFreeSpaceBelowThreshold() throws IOException {
       long free = 0;
       long capacity = 0;
+      float percentFree = 0.0f;
 
       // Don't worry about fragmentation for now. We don't expect more than one
       // transient volume per DN.
@@ -2492,9 +2488,9 @@ class FsDatasetImpl implements FsDatasetSpi {
         return false;
       }
 
-      int percentFree = (int) (free * 100 / capacity);
-      return percentFree < lowWatermarkFreeSpacePercentage ||
-             free < (estimateBlockSize * lowWatermarkFreeSpaceReplicas);
+      percentFree = (float) ((double)free * 100 / capacity);
+      return (percentFree < lowWatermarkFreeSpacePercentage) ||
+             (free < lowWatermarkFreeSpaceBytes);
     }
 
     /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 831e2c639ea..91374060523 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -114,7 +114,7 @@ public class TestBalancer {
     conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
     conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC, 1);
-    conf.setInt(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS, 1);
+    conf.setInt(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES, DEFAULT_RAM_DISK_BLOCK_SIZE);
   }
 
   /* create a file with a length of fileLen */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
index 9f1d50a1e32..444afed2a5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
@@ -766,8 +766,8 @@ public class TestLazyPersistFiles {
                 HEARTBEAT_RECHECK_INTERVAL_MSEC);
     conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC,
                 LAZY_WRITER_INTERVAL_SEC);
-    conf.setInt(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS,
-                EVICTION_LOW_WATERMARK);
+    conf.setInt(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
+                EVICTION_LOW_WATERMARK * BLOCK_SIZE);
     conf.setBoolean(DFS_CLIENT_READ_SHORTCIRCUIT_KEY, useSCR);
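
Note on the change above: eviction from the RAM disk now starts when free space falls below either a percentage of the volume (dfs.datanode.ram.disk.low.watermark.percent, now a float) or an absolute byte floor (dfs.datanode.ram.disk.low.watermark.bytes, defaulting to one block), replacing the old estimate of "block size times a replica count". The snippet below is a minimal illustrative sketch only, not part of the patch; the 4 GB capacity, 100 MB free figure, 128 MB block size, and the WatermarkCheckExample class name are assumed values chosen for the example.

    // Illustrative sketch (not from the patch): how the two watermarks combine.
    public class WatermarkCheckExample {
      public static void main(String[] args) {
        long capacity = 4L * 1024 * 1024 * 1024;   // hypothetical 4 GB RAM disk
        long free = 100L * 1024 * 1024;            // hypothetical 100 MB currently free

        // Values mirroring the new defaults in DFSConfigKeys:
        float lowWatermarkFreeSpacePercentage = 10.0f;         // dfs.datanode.ram.disk.low.watermark.percent
        long lowWatermarkFreeSpaceBytes = 128L * 1024 * 1024;  // dfs.datanode.ram.disk.low.watermark.bytes (one block, assumed 128 MB)

        // Same predicate as the patched transientFreeSpaceBelowThreshold():
        float percentFree = (float) ((double) free * 100 / capacity);
        boolean belowThreshold = (percentFree < lowWatermarkFreeSpacePercentage)
            || (free < lowWatermarkFreeSpaceBytes);

        // 100 MB is roughly 2.4% of 4 GB and also below the 128 MB byte floor,
        // so the lazy writer would begin evicting persisted replicas.
        System.out.println("percentFree=" + percentFree + " evict=" + belowThreshold);
      }
    }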