HDFS-6988. Improve HDFS-6581 eviction configuration (Xiaoyu Yao via Colin P. McCabe)

(cherry picked from commit a52eb4bc5f)

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
Colin Patrick Mccabe 2014-10-24 13:08:59 -07:00
parent 299c9b456d
commit 2bcda17ee7
5 changed files with 19 additions and 20 deletions


@@ -375,6 +375,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6904. YARN unable to renew delegation token fetched via webhdfs
     due to incorrect service port. (jitendra)
 
+    HDFS-6988. Improve HDFS-6581 eviction configuration (Xiaoyu Yao via Colin
+    P. McCabe)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)


@@ -135,9 +135,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_DATANODE_RAM_DISK_REPLICA_TRACKER_KEY = "dfs.datanode.ram.disk.replica.tracker";
   public static final Class<RamDiskReplicaLruTracker> DFS_DATANODE_RAM_DISK_REPLICA_TRACKER_DEFAULT = RamDiskReplicaLruTracker.class;
   public static final String DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT = "dfs.datanode.ram.disk.low.watermark.percent";
-  public static final int DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT_DEFAULT = 10;
-  public static final String DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS = "dfs.datanode.ram.disk.low.watermark.replicas";
-  public static final int DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS_DEFAULT = 3;
+  public static final float DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT_DEFAULT = 10.0f;
+  public static final String DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES = "dfs.datanode.ram.disk.low.watermark.bytes";
+  public static final long DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES_DEFAULT = DFS_BLOCK_SIZE_DEFAULT;
   // This setting is for testing/internal use only.
   public static final String DFS_DATANODE_DUPLICATE_REPLICA_DELETION = "dfs.datanode.duplicate.replica.deletion";
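For context, the two new keys above can be overridden like any other DataNode setting. A minimal sketch, not part of this commit; the class name and the 5% / 256 MB values are illustrative only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class RamDiskWatermarkConfigSketch {
  public static Configuration example() {
    Configuration conf = new Configuration();
    // Start eviction when less than 5% of the RAM disk is free...
    conf.setFloat(DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT, 5.0f);
    // ...or when less than 256 MB of absolute free space remains.
    conf.setLong(DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES, 256L * 1024 * 1024);
    return conf;
  }
}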


@@ -46,7 +46,6 @@
 import com.google.common.collect.Lists;
 import com.google.common.base.Preconditions;
-import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -2398,24 +2397,20 @@ private void setupAsyncLazyPersistThreads() {
   class LazyWriter implements Runnable {
     private volatile boolean shouldRun = true;
     final int checkpointerInterval;
-    final long estimateBlockSize;
-    final int lowWatermarkFreeSpacePercentage;
-    final int lowWatermarkFreeSpaceReplicas;
+    final float lowWatermarkFreeSpacePercentage;
+    final long lowWatermarkFreeSpaceBytes;
 
     public LazyWriter(Configuration conf) {
       this.checkpointerInterval = conf.getInt(
           DFSConfigKeys.DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC,
           DFSConfigKeys.DFS_DATANODE_LAZY_WRITER_INTERVAL_DEFAULT_SEC);
-      this.estimateBlockSize = conf.getLongBytes(
-          DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
-          DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
-      this.lowWatermarkFreeSpacePercentage = conf.getInt(
+      this.lowWatermarkFreeSpacePercentage = conf.getFloat(
           DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT,
           DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT_DEFAULT);
-      this.lowWatermarkFreeSpaceReplicas = conf.getInt(
-          DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS,
-          DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS_DEFAULT);
+      this.lowWatermarkFreeSpaceBytes = conf.getLong(
+          DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
+          DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES_DEFAULT);
     }
 
     /**
@@ -2478,6 +2473,7 @@ private boolean saveNextReplica() {
     private boolean transientFreeSpaceBelowThreshold() throws IOException {
       long free = 0;
       long capacity = 0;
+      float percentFree = 0.0f;
 
       // Don't worry about fragmentation for now. We don't expect more than one
       // transient volume per DN.
@@ -2492,9 +2488,9 @@ private boolean transientFreeSpaceBelowThreshold() throws IOException {
         return false;
       }
 
-      int percentFree = (int) (free * 100 / capacity);
-      return percentFree < lowWatermarkFreeSpacePercentage ||
-          free < (estimateBlockSize * lowWatermarkFreeSpaceReplicas);
+      percentFree = (float) ((double)free * 100 / capacity);
+      return (percentFree < lowWatermarkFreeSpacePercentage) ||
+        (free < lowWatermarkFreeSpaceBytes);
     }
 
     /**
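To make the new trigger concrete, here is a hedged, self-contained sketch of the condition above (class, method, and numeric values are illustrative and not taken from FsDatasetImpl): eviction begins when either the free-space percentage or the absolute free bytes on the transient volume drops below its watermark.

public final class WatermarkCheckSketch {
  static boolean belowThreshold(long freeBytes, long capacityBytes,
      float lowWatermarkPercent, long lowWatermarkBytes) {
    if (capacityBytes == 0) {
      return false; // no transient storage to evict from
    }
    // Compute free space as a percentage of the volume's capacity.
    float percentFree = (float) ((double) freeBytes * 100 / capacityBytes);
    return percentFree < lowWatermarkPercent || freeBytes < lowWatermarkBytes;
  }

  public static void main(String[] args) {
    // Example: a 4 GB RAM disk with 300 MB free, watermarks of 10% / 128 MB.
    long capacity = 4L << 30;
    long free = 300L << 20;
    System.out.println(belowThreshold(free, capacity, 10.0f, 128L << 20)); // true: ~7.3% < 10%
  }
}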


@@ -114,7 +114,7 @@ static void initConfWithRamDisk(Configuration conf) {
     conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
     conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC, 1);
-    conf.setInt(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS, 1);
+    conf.setInt(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES, DEFAULT_RAM_DISK_BLOCK_SIZE);
   }
 
   /* create a file with a length of <code>fileLen</code> */


@@ -766,8 +766,8 @@ private void startUpCluster(boolean hasTransientStorage,
         HEARTBEAT_RECHECK_INTERVAL_MSEC);
     conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC,
         LAZY_WRITER_INTERVAL_SEC);
-    conf.setInt(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS,
-        EVICTION_LOW_WATERMARK);
+    conf.setInt(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
+        EVICTION_LOW_WATERMARK * BLOCK_SIZE);
     conf.setBoolean(DFS_CLIENT_READ_SHORTCIRCUIT_KEY, useSCR);
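The test hunks above convert the old replica-count watermark into bytes by multiplying by the test block size. A hedged, self-contained illustration of that conversion (the class name and constant values are made up for the example, not taken from the tests):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class WatermarkUnitConversionSketch {
  public static void main(String[] args) {
    final int evictionLowWatermarkReplicas = 3;   // old unit: RAM-disk replicas
    final int blockSize = 5 * 1024 * 1024;        // hypothetical test block size in bytes
    Configuration conf = new Configuration();
    // The same threshold, now expressed in bytes rather than replicas.
    conf.setLong(DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
        (long) evictionLowWatermarkReplicas * blockSize);
    // Prints 15728640, i.e. three blocks' worth of bytes.
    System.out.println(conf.get(DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES));
  }
}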