From 313b4dbf282c830fd947959abaacb95da71a0fad Mon Sep 17 00:00:00 2001
From: Arpit Agarwal
Date: Tue, 3 May 2016 16:52:43 -0700
Subject: [PATCH] HDFS-9902. Support different values of
 dfs.datanode.du.reserved per storage type. (Contributed by Brahma Reddy
 Battula)

---
 .../datanode/fsdataset/impl/FsVolumeImpl.java |  6 ++--
 .../src/main/resources/hdfs-default.xml       |  5 +++
 .../fsdataset/impl/TestFsVolumeList.java      | 36 ++++++++++++++++++-
 3 files changed, 44 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 57c39e777a0..bcff3b70aac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.CloseableReferenceCount;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Timer;
 import org.codehaus.jackson.annotate.JsonProperty;
@@ -118,9 +119,10 @@ public class FsVolumeImpl implements FsVolumeSpi {
       Configuration conf, StorageType storageType) throws IOException {
     this.dataset = dataset;
     this.storageID = storageID;
-    this.reserved = conf.getLong(
+    this.reserved = conf.getLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY
+        + "." + StringUtils.toLowerCase(storageType.toString()), conf.getLong(
         DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
-        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT);
+        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT));
     this.reservedForReplicas = new AtomicLong(0L);
     this.currentDir = currentDir;
     File parent = currentDir.getParentFile();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 36ed9b0845c..c229abd70cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -321,6 +321,11 @@
   <name>dfs.datanode.du.reserved</name>
   <value>0</value>
   <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+    Specific storage type based reservation is also supported. The property can be followed by a
+    corresponding storage type ([ssd]/[disk]/[archive]/[ram_disk]) for clusters with heterogeneous storage.
+    For example, reserved space for RAM_DISK storage can be configured using the property
+    'dfs.datanode.du.reserved.ram_disk'. If a specific storage type reservation is not configured
+    then dfs.datanode.du.reserved will be used.
   </description>
 </property>
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
index e24c72541d2..796d2490ac9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.StringUtils;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -36,7 +37,7 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.TimeoutException;
-
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.fail;
@@ -143,4 +144,37 @@ public class TestFsVolumeList {
     volumeList.addVolume(ref);
     assertNull(ref.getVolume());
   }
+
+  @Test
+  public void testDfsReservedForDifferentStorageTypes() throws IOException {
+    Configuration conf = new Configuration();
+    conf.setLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY, 100L);
+
+    File volDir = new File(baseDir, "volume-0");
+    volDir.mkdirs();
+    // when storage type reserved is not configured, should consider
+    // dfs.datanode.du.reserved.
+    FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", volDir, conf,
+        StorageType.RAM_DISK);
+    assertEquals("", 100L, volume.getReserved());
+    // when storage type reserved is configured.
+    conf.setLong(
+        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY + "."
+            + StringUtils.toLowerCase(StorageType.RAM_DISK.toString()), 1L);
+    conf.setLong(
+        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY + "."
+            + StringUtils.toLowerCase(StorageType.SSD.toString()), 2L);
+    FsVolumeImpl volume1 = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.RAM_DISK);
+    assertEquals("", 1L, volume1.getReserved());
+    FsVolumeImpl volume2 = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.SSD);
+    assertEquals("", 2L, volume2.getReserved());
+    FsVolumeImpl volume3 = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.DISK);
+    assertEquals("", 100L, volume3.getReserved());
+    FsVolumeImpl volume4 = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.DEFAULT);
+    assertEquals("", 100L, volume4.getReserved());
+  }
 }
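
Usage note (not part of the patch): with this change, the DataNode first looks up
dfs.datanode.du.reserved.<storage type in lower case> and falls back to plain
dfs.datanode.du.reserved when no type-specific key is set, so the valid suffixes
are ram_disk, ssd, disk and archive. A minimal hdfs-site.xml sketch, assuming a
DataNode with SSD and DISK volumes; the byte values are illustrative only:

  <property>
    <!-- Fallback reservation, used by storage types without a specific key
         (here: the DISK volumes). 1 GB, illustrative value. -->
    <name>dfs.datanode.du.reserved</name>
    <value>1073741824</value>
  </property>
  <property>
    <!-- Type-specific reservation for SSD volumes; overrides the fallback
         on SSD only. 10 GB, illustrative value. -->
    <name>dfs.datanode.du.reserved.ssd</name>
    <value>10737418240</value>
  </property>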