HDFS-6988. Improve HDFS-6581 eviction configuration (Xiaoyu Yao via Colin P. McCabe)

(cherry picked from commit a52eb4bc5f)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
(cherry picked from commit 2bcda17ee7)
Colin Patrick Mccabe 2014-10-24 13:08:59 -07:00
parent f236e17216
commit 62f23d0b7d
5 changed files with 19 additions and 20 deletions
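In short, the patch replaces the replica-count based RAM disk eviction watermark (dfs.datanode.ram.disk.low.watermark.replicas) with a byte-based one (dfs.datanode.ram.disk.low.watermark.bytes, defaulting to one block size) and turns the percentage watermark default from the int 10 into the float 10.0f. The sketch below shows how the new keys could be set through the standard Hadoop Configuration API; the class name and the chosen values are illustrative assumptions, not part of the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class RamDiskWatermarkConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // The percentage watermark is now read as a float (default 10.0f).
    conf.setFloat(DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT, 12.5f);

    // The replica-count watermark is gone; an absolute byte count replaces it
    // (default: one block size). Example: keep about two 128 MB blocks free.
    conf.setLong(DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
        2L * 128 * 1024 * 1024);
  }
}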

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -289,6 +289,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6904. YARN unable to renew delegation token fetched via webhdfs
     due to incorrect service port. (jitendra)
 
+    HDFS-6988. Improve HDFS-6581 eviction configuration (Xiaoyu Yao via Colin
+    P. McCabe)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -135,9 +135,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_DATANODE_RAM_DISK_REPLICA_TRACKER_KEY = "dfs.datanode.ram.disk.replica.tracker";
   public static final Class<RamDiskReplicaLruTracker> DFS_DATANODE_RAM_DISK_REPLICA_TRACKER_DEFAULT = RamDiskReplicaLruTracker.class;
   public static final String DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT = "dfs.datanode.ram.disk.low.watermark.percent";
-  public static final int DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT_DEFAULT = 10;
-  public static final String DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS = "dfs.datanode.ram.disk.low.watermark.replicas";
-  public static final int DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS_DEFAULT = 3;
+  public static final float DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT_DEFAULT = 10.0f;
+  public static final String DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES = "dfs.datanode.ram.disk.low.watermark.bytes";
+  public static final long DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES_DEFAULT = DFS_BLOCK_SIZE_DEFAULT;
   // This setting is for testing/internal use only.
   public static final String DFS_DATANODE_DUPLICATE_REPLICA_DELETION = "dfs.datanode.duplicate.replica.deletion";

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -46,7 +46,6 @@ import javax.management.StandardMBean;
import com.google.common.collect.Lists;
import com.google.common.base.Preconditions;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -2398,24 +2397,20 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   class LazyWriter implements Runnable {
     private volatile boolean shouldRun = true;
     final int checkpointerInterval;
-    final long estimateBlockSize;
-    final int lowWatermarkFreeSpacePercentage;
-    final int lowWatermarkFreeSpaceReplicas;
+    final float lowWatermarkFreeSpacePercentage;
+    final long lowWatermarkFreeSpaceBytes;
 
     public LazyWriter(Configuration conf) {
       this.checkpointerInterval = conf.getInt(
           DFSConfigKeys.DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC,
           DFSConfigKeys.DFS_DATANODE_LAZY_WRITER_INTERVAL_DEFAULT_SEC);
-      this.estimateBlockSize = conf.getLongBytes(
-          DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
-          DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
-      this.lowWatermarkFreeSpacePercentage = conf.getInt(
+      this.lowWatermarkFreeSpacePercentage = conf.getFloat(
           DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT,
           DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT_DEFAULT);
-      this.lowWatermarkFreeSpaceReplicas = conf.getInt(
-          DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS,
-          DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS_DEFAULT);
+      this.lowWatermarkFreeSpaceBytes = conf.getLong(
+          DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
+          DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES_DEFAULT);
     }
 
     /**
@@ -2478,6 +2473,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     private boolean transientFreeSpaceBelowThreshold() throws IOException {
       long free = 0;
       long capacity = 0;
+      float percentFree = 0.0f;
 
       // Don't worry about fragmentation for now. We don't expect more than one
       // transient volume per DN.
@@ -2492,9 +2488,9 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
         return false;
       }
 
-      int percentFree = (int) (free * 100 / capacity);
-      return percentFree < lowWatermarkFreeSpacePercentage ||
-          free < (estimateBlockSize * lowWatermarkFreeSpaceReplicas);
+      percentFree = (float) ((double)free * 100 / capacity);
+      return (percentFree < lowWatermarkFreeSpacePercentage) ||
+          (free < lowWatermarkFreeSpaceBytes);
     }
 
     /**
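Net effect of the FsDatasetImpl hunks above: the lazy writer now evicts lazy-persist replicas when free transient space drops below either a float percentage of capacity or an absolute byte count, rather than a multiple of the estimated block size. A standalone sketch of that check follows; the class name and the sample values are assumptions for illustration, not code from the patch.

/**
 * Minimal illustration of the reworked low-watermark check: evict when free
 * RAM disk space falls below a percentage of capacity OR an absolute byte count.
 */
public class RamDiskWatermarkCheck {
  private final float lowWatermarkFreeSpacePercentage; // e.g. 10.0f
  private final long lowWatermarkFreeSpaceBytes;       // e.g. one block size

  public RamDiskWatermarkCheck(float percent, long bytes) {
    this.lowWatermarkFreeSpacePercentage = percent;
    this.lowWatermarkFreeSpaceBytes = bytes;
  }

  /** Returns true when eviction should kick in. */
  public boolean freeSpaceBelowThreshold(long free, long capacity) {
    if (capacity == 0) {
      return false; // no transient volumes to evict from
    }
    // Compute the free percentage in double precision, then narrow to float,
    // mirroring the patched transientFreeSpaceBelowThreshold().
    float percentFree = (float) ((double) free * 100 / capacity);
    return percentFree < lowWatermarkFreeSpacePercentage
        || free < lowWatermarkFreeSpaceBytes;
  }

  public static void main(String[] args) {
    RamDiskWatermarkCheck check =
        new RamDiskWatermarkCheck(10.0f, 128L * 1024 * 1024);
    // 512 MB free out of 8 GB is 6.25%, which is below the 10% watermark.
    System.out.println(check.freeSpaceBelowThreshold(
        512L * 1024 * 1024, 8L * 1024 * 1024 * 1024)); // prints true
  }
}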

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java

@@ -114,7 +114,7 @@ public class TestBalancer {
     conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
     conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC, 1);
-    conf.setInt(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS, 1);
+    conf.setInt(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES, DEFAULT_RAM_DISK_BLOCK_SIZE);
   }
 
   /* create a file with a length of <code>fileLen</code> */

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java

@@ -766,8 +766,8 @@ public class TestLazyPersistFiles {
                 HEARTBEAT_RECHECK_INTERVAL_MSEC);
     conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC,
                 LAZY_WRITER_INTERVAL_SEC);
-    conf.setInt(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS,
-                EVICTION_LOW_WATERMARK);
+    conf.setInt(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
+                EVICTION_LOW_WATERMARK * BLOCK_SIZE);
     conf.setBoolean(DFS_CLIENT_READ_SHORTCIRCUIT_KEY, useSCR);
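Both test updates translate the old replica-count watermark into bytes by multiplying the desired replica count by the block size. A tiny sketch of that conversion; the helper name is hypothetical, not from the patch.

public final class WatermarkMigration {
  private WatermarkMigration() {}

  /** Old-style "keep room for N replicas" expressed as the new byte watermark. */
  public static long replicasToBytes(int lowWatermarkReplicas, long blockSize) {
    return lowWatermarkReplicas * blockSize;
  }

  public static void main(String[] args) {
    // Keeping room for 3 replicas of 128 MB blocks means a 384 MB watermark.
    System.out.println(replicasToBytes(3, 128L * 1024 * 1024)); // 402653184
  }
}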