HDFS-13283. Percentage based Reserved Space Calculation for DataNode. Contributed by Lukas Majercak.

(cherry picked from commit fc074a359c)
Inigo Goiri 2018-04-30 13:28:33 -07:00
parent 9d2967098d
commit 4c13e7e3a0
8 changed files with 561 additions and 20 deletions

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator;
import org.apache.hadoop.http.HttpConfig;
/**
@@ -647,8 +648,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_DATANODE_DNS_INTERFACE_DEFAULT = "default";
public static final String DFS_DATANODE_DNS_NAMESERVER_KEY = "dfs.datanode.dns.nameserver";
public static final String DFS_DATANODE_DNS_NAMESERVER_DEFAULT = "default";
+ public static final String DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY =
+ "dfs.datanode.du.reserved.calculator";
+ public static final Class<? extends ReservedSpaceCalculator>
+ DFS_DATANODE_DU_RESERVED_CALCULATOR_DEFAULT =
+ ReservedSpaceCalculator.ReservedSpaceCalculatorAbsolute.class;
public static final String DFS_DATANODE_DU_RESERVED_KEY = "dfs.datanode.du.reserved";
public static final long DFS_DATANODE_DU_RESERVED_DEFAULT = 0;
+ public static final String DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY =
+ "dfs.datanode.du.reserved.pct";
+ public static final int DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT = 0;
public static final String DFS_DATANODE_HANDLER_COUNT_KEY = "dfs.datanode.handler.count";
public static final int DFS_DATANODE_HANDLER_COUNT_DEFAULT = 10;
public static final String DFS_DATANODE_HTTP_ADDRESS_KEY = "dfs.datanode.http.address";

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java

@@ -78,7 +78,6 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaTrack
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.util.CloseableReferenceCount;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
- import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.Timer;
import org.slf4j.Logger;
@@ -121,7 +120,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
private final File currentDir; // <StorageDirectory>/current
private final DF usage;
- private final long reserved;
+ private final ReservedSpaceCalculator reserved;
private CloseableReferenceCount reference = new CloseableReferenceCount();
// Disk space reserved for blocks (RBW or Re-replicating) open for write.
@@ -142,10 +141,16 @@ public class FsVolumeImpl implements FsVolumeSpi {
* contention.
*/
protected ThreadPoolExecutor cacheExecutor;
- FsVolumeImpl(
- FsDatasetImpl dataset, String storageID, StorageDirectory sd,
+ FsVolumeImpl(FsDatasetImpl dataset, String storageID, StorageDirectory sd,
FileIoProvider fileIoProvider, Configuration conf) throws IOException {
+ // outside tests, usage created in ReservedSpaceCalculator.Builder
+ this(dataset, storageID, sd, fileIoProvider, conf, null);
+ }
+ FsVolumeImpl(FsDatasetImpl dataset, String storageID, StorageDirectory sd,
+ FileIoProvider fileIoProvider, Configuration conf, DF usage)
+ throws IOException {
if (sd.getStorageLocation() == null) {
throw new IOException("StorageLocation specified for storage directory " +
@@ -157,23 +162,20 @@ public class FsVolumeImpl implements FsVolumeSpi {
this.storageLocation = sd.getStorageLocation();
this.currentDir = sd.getCurrentDir();
this.storageType = storageLocation.getStorageType();
- this.reserved = conf.getLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY
- + "." + StringUtils.toLowerCase(storageType.toString()), conf.getLong(
- DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
- DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT));
this.configuredCapacity = -1;
+ this.usage = usage;
if (currentDir != null) {
File parent = currentDir.getParentFile();
- this.usage = new DF(parent, conf);
cacheExecutor = initializeCacheExecutor(parent);
this.metrics = DataNodeVolumeMetrics.create(conf, parent.getPath());
} else {
- this.usage = null;
cacheExecutor = null;
this.metrics = null;
}
this.conf = conf;
this.fileIoProvider = fileIoProvider;
+ this.reserved = new ReservedSpaceCalculator.Builder(conf)
+ .setUsage(usage).setStorageType(storageType).build();
}
protected ThreadPoolExecutor initializeCacheExecutor(File parent) {
@@ -399,7 +401,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
@VisibleForTesting
public long getCapacity() {
if (configuredCapacity < 0) {
- long remaining = usage.getCapacity() - reserved;
+ long remaining = usage.getCapacity() - getReserved();
return remaining > 0 ? remaining : 0;
}
@@ -439,8 +441,9 @@
private long getRemainingReserved() throws IOException {
long actualNonDfsUsed = getActualNonDfsUsed();
- if (actualNonDfsUsed < reserved) {
- return reserved - actualNonDfsUsed;
+ long actualReserved = getReserved();
+ if (actualNonDfsUsed < actualReserved) {
+ return actualReserved - actualNonDfsUsed;
}
return 0L;
}
@@ -454,10 +457,11 @@
*/
public long getNonDfsUsed() throws IOException {
long actualNonDfsUsed = getActualNonDfsUsed();
- if (actualNonDfsUsed < reserved) {
+ long actualReserved = getReserved();
+ if (actualNonDfsUsed < actualReserved) {
return 0L;
}
- return actualNonDfsUsed - reserved;
+ return actualNonDfsUsed - actualReserved;
}
@VisibleForTesting
@@ -476,7 +480,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
}
long getReserved(){
- return reserved;
+ return reserved.getReserved();
}
@VisibleForTesting

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImplBuilder.java

@@ -19,7 +19,9 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import java.io.IOException;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.DF;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
@@ -34,12 +36,14 @@ public class FsVolumeImplBuilder {
private StorageDirectory sd;
private Configuration conf;
private FileIoProvider fileIoProvider;
+ private DF usage;
public FsVolumeImplBuilder() {
dataset = null;
storageID = null;
sd = null;
conf = null;
+ usage = null;
}
FsVolumeImplBuilder setDataset(FsDatasetImpl dataset) {
@@ -67,15 +71,25 @@
return this;
}
+ @VisibleForTesting
+ FsVolumeImplBuilder setUsage(DF newUsage) {
+ this.usage = newUsage;
+ return this;
+ }
FsVolumeImpl build() throws IOException {
if (sd.getStorageLocation().getStorageType() == StorageType.PROVIDED) {
return new ProvidedVolumeImpl(dataset, storageID, sd,
fileIoProvider != null ? fileIoProvider :
new FileIoProvider(null, null), conf);
}
+ if (null == usage) {
+ // set usage unless overridden by unit tests
+ usage = new DF(sd.getCurrentDir().getParentFile(), conf);
+ }
return new FsVolumeImpl(
dataset, storageID, sd,
fileIoProvider != null ? fileIoProvider :
- new FileIoProvider(null, null), conf);
+ new FileIoProvider(null, null), conf, usage);
}
}

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ProvidedVolumeImpl.java

@@ -270,7 +270,7 @@ class ProvidedVolumeImpl extends FsVolumeImpl {
ProvidedVolumeImpl(FsDatasetImpl dataset, String storageID,
StorageDirectory sd, FileIoProvider fileIoProvider,
Configuration conf) throws IOException {
- super(dataset, storageID, sd, fileIoProvider, conf);
+ super(dataset, storageID, sd, fileIoProvider, conf, null);
assert getStorageLocation().getStorageType() == StorageType.PROVIDED:
"Only provided storages must use ProvidedVolume";

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReservedSpaceCalculator.java

@@ -0,0 +1,227 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.util.StringUtils;
import java.lang.reflect.Constructor;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_CALCULATOR_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY;
/**
* Used for calculating file system space reserved for non-HDFS data.
*/
public abstract class ReservedSpaceCalculator {
/**
* Used for creating instances of ReservedSpaceCalculator.
*/
public static class Builder {
private final Configuration conf;
private DF usage;
private StorageType storageType;
public Builder(Configuration conf) {
this.conf = conf;
}
public Builder setUsage(DF newUsage) {
this.usage = newUsage;
return this;
}
public Builder setStorageType(
StorageType newStorageType) {
this.storageType = newStorageType;
return this;
}
ReservedSpaceCalculator build() {
try {
Class<? extends ReservedSpaceCalculator> clazz = conf.getClass(
DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
DFS_DATANODE_DU_RESERVED_CALCULATOR_DEFAULT,
ReservedSpaceCalculator.class);
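// Note: any configured implementation must expose a public
// (Configuration, DF, StorageType) constructor, otherwise build()
// fails with the IllegalStateException below.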
Constructor constructor = clazz.getConstructor(
Configuration.class, DF.class, StorageType.class);
return (ReservedSpaceCalculator) constructor.newInstance(
conf, usage, storageType);
} catch (Exception e) {
throw new IllegalStateException(
"Error instantiating ReservedSpaceCalculator", e);
}
}
}
private final DF usage;
private final Configuration conf;
private final StorageType storageType;
ReservedSpaceCalculator(Configuration conf, DF usage,
StorageType storageType) {
this.usage = usage;
this.conf = conf;
this.storageType = storageType;
}
DF getUsage() {
return usage;
}
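// Resolution order: the storage-type-specific key, e.g.
// "dfs.datanode.du.reserved.ssd" (type name lowercased), wins over
// the plain key, which in turn falls back to defaultValue.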
long getReservedFromConf(String key, long defaultValue) {
return conf.getLong(key + "." + StringUtils.toLowerCase(
storageType.toString()), conf.getLong(key, defaultValue));
}
/**
* Return the capacity of the file system space reserved for non-HDFS.
*
* @return the number of bytes reserved for non-HDFS.
*/
abstract long getReserved();
/**
* Based on absolute number of reserved bytes.
*/
public static class ReservedSpaceCalculatorAbsolute extends
ReservedSpaceCalculator {
private final long reservedBytes;
public ReservedSpaceCalculatorAbsolute(Configuration conf, DF usage,
StorageType storageType) {
super(conf, usage, storageType);
this.reservedBytes = getReservedFromConf(DFS_DATANODE_DU_RESERVED_KEY,
DFS_DATANODE_DU_RESERVED_DEFAULT);
}
@Override
long getReserved() {
return reservedBytes;
}
}
/**
* Based on percentage of total capacity in the storage.
*/
public static class ReservedSpaceCalculatorPercentage extends
ReservedSpaceCalculator {
private final long reservedPct;
public ReservedSpaceCalculatorPercentage(Configuration conf, DF usage,
StorageType storageType) {
super(conf, usage, storageType);
this.reservedPct = getReservedFromConf(
DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY,
DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT);
}
@Override
long getReserved() {
return getPercentage(getUsage().getCapacity(), reservedPct);
}
}
/**
* Calculates absolute and percentage based reserved space and
* picks the one that will yield more reserved space.
*/
public static class ReservedSpaceCalculatorConservative extends
ReservedSpaceCalculator {
private final long reservedBytes;
private final long reservedPct;
public ReservedSpaceCalculatorConservative(Configuration conf, DF usage,
StorageType storageType) {
super(conf, usage, storageType);
this.reservedBytes = getReservedFromConf(DFS_DATANODE_DU_RESERVED_KEY,
DFS_DATANODE_DU_RESERVED_DEFAULT);
this.reservedPct = getReservedFromConf(
DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY,
DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT);
}
long getReservedBytes() {
return reservedBytes;
}
long getReservedPct() {
return reservedPct;
}
@Override
long getReserved() {
return Math.max(getReservedBytes(),
getPercentage(getUsage().getCapacity(), getReservedPct()));
}
}
/**
* Calculates absolute and percentage based reserved space and
* picks the one that will yield less reserved space.
*/
public static class ReservedSpaceCalculatorAggressive extends
ReservedSpaceCalculator {
private final long reservedBytes;
private final long reservedPct;
public ReservedSpaceCalculatorAggressive(Configuration conf, DF usage,
StorageType storageType) {
super(conf, usage, storageType);
this.reservedBytes = getReservedFromConf(DFS_DATANODE_DU_RESERVED_KEY,
DFS_DATANODE_DU_RESERVED_DEFAULT);
this.reservedPct = getReservedFromConf(
DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY,
DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT);
}
long getReservedBytes() {
return reservedBytes;
}
long getReservedPct() {
return reservedPct;
}
@Override
long getReserved() {
return Math.min(getReservedBytes(),
getPercentage(getUsage().getCapacity(), getReservedPct()));
}
}
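// Integer arithmetic: any fractional byte in the product is truncated.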
private static long getPercentage(long total, long percentage) {
return (total * percentage) / 100;
}
}
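As a worked example of the max/min semantics above (values borrowed from the tests below): with dfs.datanode.du.reserved.disk = 800 and dfs.datanode.du.reserved.pct.disk = 20 on a DISK volume of capacity 1600 bytes, the percentage branch yields 1600 * 20 / 100 = 320, so ReservedSpaceCalculatorConservative reserves max(800, 320) = 800 bytes, while ReservedSpaceCalculatorAggressive would reserve min(800, 320) = 320.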

hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -326,6 +326,20 @@
</description>
</property>
+ <property>
+ <name>dfs.datanode.du.reserved.calculator</name>
+ <value>org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator$ReservedSpaceCalculatorAbsolute</value>
+ <description>Determines the class of ReservedSpaceCalculator to be used for
+ calculating disk space reserved for non-HDFS data. The default calculator is
+ ReservedSpaceCalculatorAbsolute, which uses dfs.datanode.du.reserved
+ as a static reserved number of bytes. ReservedSpaceCalculatorPercentage
+ uses dfs.datanode.du.reserved.pct to calculate the reserved number
+ of bytes based on the size of the storage. ReservedSpaceCalculatorConservative and
+ ReservedSpaceCalculatorAggressive combine the two: Conservative takes the
+ maximum, Aggressive the minimum. For more details see ReservedSpaceCalculator.
+ </description>
+ </property>
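For illustration, a minimal sketch of selecting the percentage-based calculator programmatically through the same keys (the class and method names here are hypothetical wrappers; the Configuration calls mirror how the unit tests below drive the Builder):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator;

public class PercentageReservationExample { // hypothetical helper, not part of this patch
  public static Configuration percentageConf() {
    Configuration conf = new Configuration();
    // Switch from the default absolute strategy to the percentage-based one.
    conf.setClass(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
        ReservedSpaceCalculator.ReservedSpaceCalculatorPercentage.class,
        ReservedSpaceCalculator.class);
    // Reserve 10% of each volume's capacity for non-HDFS data.
    conf.setLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY, 10);
    return conf;
  }
}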
<property>
<name>dfs.datanode.du.reserved</name>
<value>0</value>
@@ -338,6 +352,20 @@
</description>
</property>
+ <property>
+ <name>dfs.datanode.du.reserved.pct</name>
+ <value>0</value>
+ <description>Reserved space as a percentage. Read dfs.datanode.du.reserved.calculator
+ to see when this takes effect. The actual number of bytes reserved is calculated
+ from the total capacity of the data directory in question. Specific storage type
+ based reservation is also supported. The property can be followed by a
+ corresponding storage type ([ssd]/[disk]/[archive]/[ram_disk]) for clusters with
+ heterogeneous storage. For example, the reserved percentage for RAM_DISK storage
+ can be configured using the property 'dfs.datanode.du.reserved.pct.ram_disk'. If a
+ specific storage type reservation is not configured, then
+ dfs.datanode.du.reserved.pct will be used.
+ </description>
+ </property>
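Continuing the sketch above, the same Configuration calls support the per-storage-type override this property describes; the suffix is the storage type lowercased, exactly as getReservedFromConf derives it (the values are illustrative):

// SSD volumes reserve 50%; every other storage type falls back to the global 10%.
conf.setLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY, 10);
conf.setLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".ssd", 50);
// On an SSD volume of 4000 bytes capacity this reserves 4000 * 50 / 100 = 2000 bytes.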
<property>
<name>dfs.namenode.name.dir</name>
<value>file://${hadoop.tmp.dir}/dfs/name</value>

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import com.google.common.base.Supplier;
import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.DF;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -40,15 +41,18 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeoutException;
+ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestFsVolumeList {
private final Configuration conf = new Configuration();
private Configuration conf;
private VolumeChoosingPolicy<FsVolumeImpl> blockChooser =
new RoundRobinVolumeChoosingPolicy<>();
private FsDatasetImpl dataset = null;
@@ -63,6 +67,7 @@
blockScannerConf.setInt(DFSConfigKeys.
DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
blockScanner = new BlockScanner(null, blockScannerConf);
+ conf = new Configuration();
}
@Test(timeout=30000)
@@ -227,4 +232,87 @@
actualNonDfsUsage - duReserved;
assertEquals(expectedNonDfsUsage, spyVolume.getNonDfsUsed());
}
@Test
public void testDfsReservedPercentageForDifferentStorageTypes()
throws IOException {
conf.setClass(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
ReservedSpaceCalculator.ReservedSpaceCalculatorPercentage.class,
ReservedSpaceCalculator.class);
conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY, 15);
File volDir = new File(baseDir, "volume-0");
volDir.mkdirs();
DF usage = mock(DF.class);
when(usage.getCapacity()).thenReturn(4000L);
when(usage.getAvailable()).thenReturn(1000L);
// when storage type reserved is not configured, should consider
// dfs.datanode.du.reserved.pct
FsVolumeImpl volume = new FsVolumeImplBuilder()
.setConf(conf)
.setDataset(dataset)
.setStorageID("storage-id")
.setStorageDirectory(
new StorageDirectory(StorageLocation.parse(
"[RAM_DISK]" + volDir.getPath())))
.setUsage(usage)
.build();
assertEquals(600, volume.getReserved());
assertEquals(3400, volume.getCapacity());
assertEquals(400, volume.getAvailable());
// when storage type reserved is configured.
conf.setLong(
DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + "."
+ StringUtils.toLowerCase(StorageType.RAM_DISK.toString()), 10);
conf.setLong(
DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + "."
+ StringUtils.toLowerCase(StorageType.SSD.toString()), 50);
FsVolumeImpl volume1 = new FsVolumeImplBuilder()
.setConf(conf)
.setDataset(dataset)
.setStorageID("storage-id")
.setStorageDirectory(
new StorageDirectory(StorageLocation.parse(
"[RAM_DISK]" + volDir.getPath())))
.setUsage(usage)
.build();
assertEquals(400, volume1.getReserved());
assertEquals(3600, volume1.getCapacity());
assertEquals(600, volume1.getAvailable());
FsVolumeImpl volume2 = new FsVolumeImplBuilder()
.setConf(conf)
.setDataset(dataset)
.setStorageID("storage-id")
.setStorageDirectory(
new StorageDirectory(StorageLocation.parse(
"[SSD]" + volDir.getPath())))
.setUsage(usage)
.build();
assertEquals(2000, volume2.getReserved());
assertEquals(2000, volume2.getCapacity());
assertEquals(0, volume2.getAvailable());
FsVolumeImpl volume3 = new FsVolumeImplBuilder()
.setConf(conf)
.setDataset(dataset)
.setStorageID("storage-id")
.setStorageDirectory(
new StorageDirectory(StorageLocation.parse(
"[DISK]" + volDir.getPath())))
.setUsage(usage)
.build();
assertEquals(600, volume3.getReserved());
FsVolumeImpl volume4 = new FsVolumeImplBuilder()
.setConf(conf)
.setDataset(dataset)
.setStorageID("storage-id")
.setStorageDirectory(
new StorageDirectory(StorageLocation.parse(volDir.getPath())))
.setUsage(usage)
.build();
assertEquals(600, volume4.getReserved());
}
}

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReservedSpaceCalculator.java

@@ -0,0 +1,171 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;
import org.apache.hadoop.fs.StorageType;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY;
import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorAbsolute;
import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorAggressive;
import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorConservative;
import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorPercentage;
import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.when;
/**
* Unit testing for different types of ReservedSpace calculators.
*/
public class TestReservedSpaceCalculator {
private Configuration conf;
private DF usage;
private ReservedSpaceCalculator reserved;
@Before
public void setUp() {
conf = new Configuration();
usage = Mockito.mock(DF.class);
}
@Test
public void testReservedSpaceAbsolute() {
conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
ReservedSpaceCalculatorAbsolute.class,
ReservedSpaceCalculator.class);
// Test both using global configuration
conf.setLong(DFS_DATANODE_DU_RESERVED_KEY, 900);
checkReserved(StorageType.DISK, 10000, 900);
checkReserved(StorageType.SSD, 10000, 900);
checkReserved(StorageType.ARCHIVE, 10000, 900);
}
@Test
public void testReservedSpaceAbsolutePerStorageType() {
conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
ReservedSpaceCalculatorAbsolute.class,
ReservedSpaceCalculator.class);
// Test DISK
conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".disk", 500);
checkReserved(StorageType.DISK, 2300, 500);
// Test SSD
conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".ssd", 750);
checkReserved(StorageType.SSD, 1550, 750);
}
@Test
public void testReservedSpacePercentage() {
conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
ReservedSpaceCalculatorPercentage.class,
ReservedSpaceCalculator.class);
// Test both using global configuration
conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY, 10);
checkReserved(StorageType.DISK, 10000, 1000);
checkReserved(StorageType.SSD, 10000, 1000);
checkReserved(StorageType.ARCHIVE, 10000, 1000);
conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY, 50);
checkReserved(StorageType.DISK, 4000, 2000);
checkReserved(StorageType.SSD, 4000, 2000);
checkReserved(StorageType.ARCHIVE, 4000, 2000);
}
@Test
public void testReservedSpacePercentagePerStorageType() {
conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
ReservedSpaceCalculatorPercentage.class,
ReservedSpaceCalculator.class);
// Test DISK
conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".disk", 20);
checkReserved(StorageType.DISK, 1600, 320);
// Test SSD
conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".ssd", 50);
checkReserved(StorageType.SSD, 8001, 4000);
}
@Test
public void testReservedSpaceConservativePerStorageType() {
// This policy should take the maximum of the two
conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
ReservedSpaceCalculatorConservative.class,
ReservedSpaceCalculator.class);
// Test DISK + taking the reserved bytes over percentage,
// as that gives more reserved space
conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".disk", 800);
conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".disk", 20);
checkReserved(StorageType.DISK, 1600, 800);
// Test ARCHIVE + taking reserved space based on the percentage,
// as that gives more reserved space
conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".archive", 1300);
conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".archive", 50);
checkReserved(StorageType.ARCHIVE, 6200, 3100);
}
@Test
public void testReservedSpaceAggresivePerStorageType() {
// This policy should take the minimum of the two
conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
ReservedSpaceCalculatorAggressive.class,
ReservedSpaceCalculator.class);
// Test RAM_DISK + taking the reserved bytes over percentage,
// as that gives less reserved space
conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".ram_disk", 100);
conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".ram_disk", 10);
checkReserved(StorageType.RAM_DISK, 1600, 100);
// Test ARCHIVE + taking reserved space based on the percentage,
// as that gives less reserved space
conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".archive", 20000);
conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".archive", 5);
checkReserved(StorageType.ARCHIVE, 100000, 5000);
}
@Test(expected = IllegalStateException.class)
public void testInvalidCalculator() {
conf.set(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY, "INVALIDTYPE");
reserved = new ReservedSpaceCalculator.Builder(conf)
.setUsage(usage)
.setStorageType(StorageType.DISK)
.build();
}
private void checkReserved(StorageType storageType,
long totalCapacity, long reservedExpected) {
when(usage.getCapacity()).thenReturn(totalCapacity);
reserved = new ReservedSpaceCalculator.Builder(conf).setUsage(usage)
.setStorageType(storageType).build();
assertEquals(reservedExpected, reserved.getReserved());
}
}