diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 73514b6ecf4..68e2537c39f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.CloseableReferenceCount;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Timer;
import org.codehaus.jackson.annotate.JsonProperty;
@@ -118,9 +119,12 @@ public class FsVolumeImpl implements FsVolumeSpi {
Configuration conf, StorageType storageType) throws IOException {
this.dataset = dataset;
this.storageID = storageID;
- this.reserved = conf.getLong(
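+    // Prefer the storage-type-specific key, e.g.
+    // "dfs.datanode.du.reserved.ram_disk"; fall back to the generic key.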
+ this.reserved = conf.getLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY
+ + "." + StringUtils.toLowerCase(storageType.toString()), conf.getLong(
DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
- DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT);
+ DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT));
this.reservedForReplicas = new AtomicLong(0L);
this.currentDir = currentDir;
File parent = currentDir.getParentFile();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 842ccbf20d6..79f7911c38e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -321,6 +321,11 @@
 <property>
   <name>dfs.datanode.du.reserved</name>
   <value>0</value>
   <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+  Storage-type-specific reservation is also supported. The property can be suffixed with a
+  corresponding storage type ([ssd]/[disk]/[archive]/[ram_disk]) for clusters with heterogeneous storage.
+  For example, reserved space for RAM_DISK storage can be configured using the property
+  'dfs.datanode.du.reserved.ram_disk'. If a storage-type-specific reservation is not configured,
+  then dfs.datanode.du.reserved will be used.
   </description>
 </property>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
index e24c72541d2..796d2490ac9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.StringUtils;
import org.junit.Before;
import org.junit.Test;
@@ -36,7 +37,8 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeoutException;
 
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.fail;
@@ -143,4 +144,38 @@ public class TestFsVolumeList {
volumeList.addVolume(ref);
assertNull(ref.getVolume());
}
+
+ @Test
+ public void testDfsReservedForDifferentStorageTypes() throws IOException {
+ Configuration conf = new Configuration();
+ conf.setLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY, 100L);
+
+ File volDir = new File(baseDir, "volume-0");
+ volDir.mkdirs();
+    // When no storage-type-specific reservation is configured, fall back
+    // to dfs.datanode.du.reserved.
+    FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", volDir, conf,
+        StorageType.RAM_DISK);
+    assertEquals(100L, volume.getReserved());
+    // When a storage-type-specific reservation is configured, it wins.
+ conf.setLong(
+ DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY + "."
+ + StringUtils.toLowerCase(StorageType.RAM_DISK.toString()), 1L);
+ conf.setLong(
+ DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY + "."
+ + StringUtils.toLowerCase(StorageType.SSD.toString()), 2L);
+ FsVolumeImpl volume1 = new FsVolumeImpl(dataset, "storage-id", volDir,
+ conf, StorageType.RAM_DISK);
+ assertEquals("", 1L, volume1.getReserved());
+ FsVolumeImpl volume2 = new FsVolumeImpl(dataset, "storage-id", volDir,
+ conf, StorageType.SSD);
+ assertEquals("", 2L, volume2.getReserved());
+ FsVolumeImpl volume3 = new FsVolumeImpl(dataset, "storage-id", volDir,
+ conf, StorageType.DISK);
+ assertEquals("", 100L, volume3.getReserved());
+ FsVolumeImpl volume4 = new FsVolumeImpl(dataset, "storage-id", volDir,
+ conf, StorageType.DEFAULT);
+ assertEquals("", 100L, volume4.getReserved());
+ }
}
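
For illustration, per-storage-type reservations could then be set in
hdfs-site.xml as sketched below; the byte values are hypothetical examples,
not recommendations. Any storage type without its own key falls back to the
generic dfs.datanode.du.reserved value:

<property>
  <name>dfs.datanode.du.reserved</name>
  <value>10737418240</value> <!-- 10 GB fallback for DISK/ARCHIVE volumes -->
</property>
<property>
  <name>dfs.datanode.du.reserved.ram_disk</name>
  <value>1073741824</value> <!-- 1 GB for RAM_DISK volumes -->
</property>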