From bf03b25f4b940d9ee8507795fb85b2b6f36e2cf7 Mon Sep 17 00:00:00 2001
From: Xiao Chen
Date: Tue, 7 Aug 2018 22:04:41 -0700
Subject: [PATCH] HDFS-13728. Disk Balancer should not fail if volume usage
 is greater than capacity. Contributed by Stephen O'Donnell.

(cherry picked from commit 6677717c689cc94a15f14c3466242e23652d473b)
---
 .../datamodel/DiskBalancerVolume.java       | 17 ++++++++++++-----
 .../server/diskbalancer/TestDataModels.java | 16 ++++++++++++++++
 2 files changed, 28 insertions(+), 5 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
index a9fd7f0e22a..e43b83e39ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
@@ -21,9 +21,10 @@
 import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.ObjectReader;
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdfs.web.JsonUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 
@@ -35,6 +36,9 @@ public class DiskBalancerVolume {
   private static final ObjectReader READER =
       new ObjectMapper().readerFor(DiskBalancerVolume.class);
 
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DiskBalancerVolume.class);
+
   private String path;
   private long capacity;
   private String storageType;
@@ -269,10 +273,13 @@ public class DiskBalancerVolume {
    * @param dfsUsedSpace - dfsUsedSpace for this volume.
    */
   public void setUsed(long dfsUsedSpace) {
-    Preconditions.checkArgument(dfsUsedSpace < this.getCapacity(),
-        "DiskBalancerVolume.setUsed: dfsUsedSpace(%s) < capacity(%s)",
-        dfsUsedSpace, getCapacity());
-    this.used = dfsUsedSpace;
+    if (dfsUsedSpace > this.getCapacity()) {
+      LOG.warn("Volume usage (" + dfsUsedSpace + ") is greater than capacity ("
+          + this.getCapacity() + "). Setting volume usage to the capacity");
+      this.used = this.getCapacity();
+    } else {
+      this.used = dfsUsedSpace;
+    }
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
index ace8212fd85..12fbcf1d0d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDataModels.java
@@ -224,4 +224,20 @@ public class TestDataModels {
     Assert
         .assertEquals(cluster.getNodes().size(), newCluster.getNodes().size());
   }
+
+  @Test
+  public void testUsageLimitedToCapacity() throws Exception {
+    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
+
+    // If usage is greater than capacity, then it should be set to capacity
+    DiskBalancerVolume v1 = util.createRandomVolume(StorageType.DISK);
+    v1.setCapacity(DiskBalancerTestUtil.GB);
+    v1.setUsed(2 * DiskBalancerTestUtil.GB);
+    Assert.assertEquals(v1.getCapacity(), v1.getUsed());
+    // If usage is less than capacity, usage should be set to the real usage
+    DiskBalancerVolume v2 = util.createRandomVolume(StorageType.DISK);
+    v2.setCapacity(2 * DiskBalancerTestUtil.GB);
+    v2.setUsed(DiskBalancerTestUtil.GB);
+    Assert.assertEquals(DiskBalancerTestUtil.GB, v2.getUsed());
+  }
 }
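
Reviewer note (illustration only, not part of the patch): a minimal sketch of
the setUsed() contract after this change. It assumes DiskBalancerVolume's
no-argument constructor (the class is already instantiated by Jackson via
readerFor(DiskBalancerVolume.class)), uses made-up byte values, and SketchMain
is a hypothetical driver class, not anything in the Hadoop tree.

    import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;

    public class SketchMain {
      public static void main(String[] args) {
        DiskBalancerVolume vol = new DiskBalancerVolume();
        vol.setCapacity(100L);

        // Over-capacity usage no longer throws IllegalArgumentException;
        // it is clamped to the capacity and a warning is logged via LOG.
        vol.setUsed(150L);
        System.out.println(vol.getUsed()); // prints 100

        // In-range usage is stored unchanged, as before.
        vol.setUsed(50L);
        System.out.println(vol.getUsed()); // prints 50
      }
    }

One nuance visible in the diff itself: the old Preconditions check required
dfsUsedSpace to be strictly less than capacity, so a completely full volume
(used == capacity) was rejected; the new code accepts it, since the clamp only
triggers when usage exceeds capacity.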