HDFS-10949. DiskBalancer: deprecate TestDiskBalancer#setVolumeCapacity. Contributed by Xiaobing Zhou.

Anu Engineer 2016-10-13 10:26:07 -07:00
parent 1291254042
commit b371c56365
1 changed file with 11 additions and 33 deletions


@@ -44,7 +44,6 @@ import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
-import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -137,6 +136,7 @@ public class TestDiskBalancer {
     final int dataNodeCount = 1;
     final int dataNodeIndex = 0;
     final int sourceDiskIndex = 0;
+    final long cap = blockSize * 2L * blockCount;
 
     MiniDFSCluster cluster = new ClusterBuilder()
         .setBlockCount(blockCount)
@@ -144,6 +144,7 @@ public class TestDiskBalancer {
         .setDiskCount(diskCount)
         .setNumDatanodes(dataNodeCount)
         .setConf(conf)
+        .setCapacities(new long[] {cap, cap})
         .build();
     try {
       DataMover dataMover = new DataMover(cluster, dataNodeIndex,
@@ -174,7 +175,7 @@ public class TestDiskBalancer {
     final int dataNodeCount = 1;
     final int dataNodeIndex = 0;
     final int sourceDiskIndex = 0;
-
+    final long cap = blockSize * 2L * blockCount;
 
     MiniDFSCluster cluster = new ClusterBuilder()
         .setBlockCount(blockCount)
@@ -182,9 +183,9 @@ public class TestDiskBalancer {
         .setDiskCount(diskCount)
         .setNumDatanodes(dataNodeCount)
         .setConf(conf)
-
+        .setCapacities(new long[] {cap, cap, cap})
         .build();
     try {
       DataMover dataMover = new DataMover(cluster, dataNodeIndex,
           sourceDiskIndex, conf, blockSize, blockCount);
 
@@ -221,6 +222,7 @@ public class TestDiskBalancer {
     final int dataNodeCount = 1;
     final int dataNodeIndex = 0;
     final int sourceDiskIndex = 0;
+    final long cap = blockSize * 2L * blockCount;
 
     MiniDFSCluster cluster = new ClusterBuilder()
         .setBlockCount(blockCount)
@@ -228,6 +230,7 @@ public class TestDiskBalancer {
         .setDiskCount(diskCount)
         .setNumDatanodes(dataNodeCount)
         .setConf(conf)
+        .setCapacities(new long[] {cap, cap})
         .build();
     try {
 
@@ -245,24 +248,6 @@ public class TestDiskBalancer {
     }
   }
 
-  /**
-   * Sets alll Disks capacity to size specified.
-   *
-   * @param cluster - DiskBalancerCluster
-   * @param size - new size of the disk
-   */
-  private void setVolumeCapacity(DiskBalancerCluster cluster, long size,
-                                 String diskType) {
-    Preconditions.checkNotNull(cluster);
-    for (DiskBalancerDataNode node : cluster.getNodes()) {
-      for (DiskBalancerVolume vol :
-          node.getVolumeSets().get(diskType).getVolumes()) {
-        vol.setCapacity(size);
-      }
-      node.getVolumeSets().get(diskType).computeVolumeDataDensity();
-    }
-  }
-
   /**
    * Helper class that allows us to create different kinds of MiniDFSClusters
    * and populate data.
@@ -274,6 +259,7 @@ public class TestDiskBalancer {
    private int fileLen;
    private int blockCount;
    private int diskCount;
+    private long[] capacities;
 
    public ClusterBuilder setConf(Configuration conf) {
      this.conf = conf;
@@ -300,13 +286,9 @@ public class TestDiskBalancer {
      return this;
    }
 
-    private long[] getCapacities(int diskCount, int bSize, int fSize) {
-      Preconditions.checkState(diskCount > 0);
-      long[] capacities = new long[diskCount];
-      for (int x = 0; x < diskCount; x++) {
-        capacities[x] = diskCount * bSize * fSize * 2L;
-      }
-      return capacities;
+    private ClusterBuilder setCapacities(final long[] caps) {
+      this.capacities = caps;
+      return this;
    }
 
    private StorageType[] getStorageTypes(int diskCount) {
@@ -338,7 +320,7 @@ public class TestDiskBalancer {
      // Write a file and restart the cluster
      MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
          .numDataNodes(numDatanodes)
-          .storageCapacities(getCapacities(diskCount, blockSize, fileLen))
+          .storageCapacities(capacities)
          .storageTypes(getStorageTypes(diskCount))
          .storagesPerDatanode(diskCount)
          .build();
@@ -447,10 +429,6 @@ public class TestDiskBalancer {
      diskBalancerCluster.readClusterInfo();
      List<DiskBalancerDataNode> nodesToProcess = new LinkedList<>();
 
-      // Rewrite the capacity in the model to show that disks need
-      // re-balancing.
-      setVolumeCapacity(diskBalancerCluster, blockSize * 2L * blockCount,
-          "DISK");
      // Pick a node to process.
      nodesToProcess.add(diskBalancerCluster.getNodeByUUID(
          node.getDatanodeUuid()));
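
In short: instead of rewriting volume capacities in the DiskBalancerCluster model after the fact (the deprecated setVolumeCapacity helper), each test now sizes the disks up front through ClusterBuilder, so the model read from the MiniDFSCluster already reflects them. A minimal sketch of the new pattern, assuming the test-local ClusterBuilder from TestDiskBalancer and illustrative values for blockCount, blockSize, and diskCount:

    // Illustrative values; the real tests derive these from test parameters.
    final int blockCount = 100;
    final int blockSize = 1024;
    final int diskCount = 2;
    // Size each disk at twice the test data so the planner sees head room
    // for moving blocks between volumes (same formula as in the diff).
    final long cap = blockSize * 2L * blockCount;

    MiniDFSCluster cluster = new ClusterBuilder()
        .setBlockCount(blockCount)
        .setDiskCount(diskCount)
        .setNumDatanodes(1)
        .setConf(conf)                          // Configuration prepared by the test
        .setCapacities(new long[] {cap, cap})   // one entry per disk
        .build();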