HDFS-10520. DiskBalancer: Fix Checkstyle issues in test code. Contributed by Anu Engineer.

Anu Engineer 2016-06-15 15:28:22 -07:00 committed by Arpit Agarwal
parent 7e2be5c4a0
commit 3225c24e0e
10 changed files with 120 additions and 90 deletions
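For reference, the recurring Checkstyle fixes this commit applies to the test code are collected in the sketch below. This is not a literal file from the patch, and the rule names are my own labels inferred from the changes:

import org.apache.hadoop.conf.Configuration;

public class CheckstyleFixPatterns {
  // VisibilityModifier: fixture fields become private (was: "Configuration conf;").
  private Configuration conf;

  // MethodName: JUnit methods use camelCase (was: "public void TestJsonConnector()").
  public void testJsonConnector() {
    // ArrayTypeStyle: brackets belong on the type (was: "char hashArray[]").
    char[] hashArray = "abc".toCharArray();

    // LocalFinalVariableName: locals stay camelCase even when final
    // (was: "final int DEFAULT_BLOCK_SIZE = 100;").
    final int defaultBlockSize = 100;
  }
}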

View File

@@ -22,6 +22,9 @@ import java.io.IOException;
* Disk Balancer Exceptions.
*/
public class DiskBalancerException extends IOException {
/**
* Results returned by the RPC layer of DiskBalancer.
*/
public enum Result {
DISK_BALANCER_NOT_ENABLED,
INVALID_PLAN_VERSION,
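The Result enum lets RPC callers branch on the failure cause instead of parsing messages. A minimal, hypothetical sketch of how the exception is thrown; the (String, Result) constructor and a getResult() accessor are assumptions, inferred from how the test matcher below consumes the class:

// Hypothetical guard method; the constructor shape is an assumption.
void checkEnabled(boolean diskBalancerEnabled) throws DiskBalancerException {
  if (!diskBalancerEnabled) {
    throw new DiskBalancerException("Disk balancer is not enabled.",
        DiskBalancerException.Result.DISK_BALANCER_NOT_ENABLED);
  }
}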

View File

@@ -1362,7 +1362,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
@Override
public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block,
FsVolumeSpi destination) throws IOException {
FsVolumeSpi destination) throws IOException {
return null;
}

View File

@@ -20,6 +20,9 @@ package org.apache.hadoop.hdfs.server.diskbalancer;
import org.hamcrest.Description;
import org.hamcrest.TypeSafeMatcher;
/**
* Helps in verifying test results.
*/
public class DiskBalancerResultVerifier
extends TypeSafeMatcher<DiskBalancerException> {
private final DiskBalancerException.Result expectedResult;
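The hunk shows only the declaration and the expectedResult field; a hedged sketch of the rest of the matcher, assuming DiskBalancerException exposes a getResult() accessor:

@Override
protected boolean matchesSafely(DiskBalancerException exception) {
  // Match only when the RPC layer reported the result we expected.
  return expectedResult == exception.getResult();
}

@Override
public void describeTo(Description description) {
  description.appendText("expects result: ").appendValue(expectedResult);
}

In the tests below this plugs into JUnit's ExpectedException rule, e.g. thrown.expect(new DiskBalancerResultVerifier(Result.INVALID_PLAN)).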

View File

@@ -42,7 +42,7 @@ public class DiskBalancerTestUtil {
public static final long TB = GB * 1024L;
private static int[] diskSizes =
{1, 2, 3, 4, 5, 6, 7, 8, 9, 100, 200, 300, 400, 500, 600, 700, 800, 900};
Random rand;
private Random rand;
private String stringTable =
"ABCDEDFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0987654321";
@@ -251,7 +251,7 @@ public class DiskBalancerTestUtil {
* @throws IOException
*/
public static void moveAllDataToDestVolume(FsDatasetSpi fsDataset,
FsVolumeSpi source, FsVolumeSpi dest) throws IOException {
FsVolumeSpi source, FsVolumeSpi dest) throws IOException {
for (String blockPoolID : source.getBlockPoolList()) {
FsVolumeSpi.BlockIterator sourceIter =
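The hunk cuts off inside moveAllDataToDestVolume; a hedged sketch of the drain loop it is setting up. BlockIterator's atEnd()/nextBlock() contract is an assumption here, while moveBlockAcrossVolumes is the FsDatasetSpi method whose SimulatedFSDataset override appears earlier in this commit:

while (!sourceIter.atEnd()) {
  ExtendedBlock block = sourceIter.nextBlock();
  if (block != null) {
    // Relocate each replica from the source volume to dest.
    fsDataset.moveBlockAcrossVolumes(block, dest);
  }
}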

View File

@@ -30,11 +30,14 @@ import org.junit.Test;
import java.io.IOException;
/**
* Test Class that tests connectors.
*/
public class TestConnectors {
private MiniDFSCluster cluster;
final int numDatanodes = 3;
final int volumeCount = 2; // default volumes in MiniDFSCluster.
Configuration conf;
private final int numDatanodes = 3;
private final int volumeCount = 2; // default volumes in MiniDFSCluster.
private Configuration conf;
@Before
public void setup() throws IOException {
@@ -51,12 +54,12 @@ public class TestConnectors {
}
@Test
public void TestNameNodeConnector() throws Exception {
public void testNameNodeConnector() throws Exception {
cluster.waitActive();
ClusterConnector nameNodeConnector =
ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);
DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster
(nameNodeConnector);
DiskBalancerCluster diskBalancerCluster =
new DiskBalancerCluster(nameNodeConnector);
diskBalancerCluster.readClusterInfo();
Assert.assertEquals("Expected number of Datanodes not found.",
numDatanodes, diskBalancerCluster.getNodes().size());
@@ -65,18 +68,18 @@ public class TestConnectors {
}
@Test
public void TestJsonConnector() throws Exception {
public void testJsonConnector() throws Exception {
cluster.waitActive();
ClusterConnector nameNodeConnector =
ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);
DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster
(nameNodeConnector);
DiskBalancerCluster diskBalancerCluster =
new DiskBalancerCluster(nameNodeConnector);
diskBalancerCluster.readClusterInfo();
String diskBalancerJson = diskBalancerCluster.toJson();
DiskBalancerCluster serializedCluster = DiskBalancerCluster.parseJson
(diskBalancerJson);
DiskBalancerCluster serializedCluster =
DiskBalancerCluster.parseJson(diskBalancerJson);
Assert.assertEquals("Parsed cluster is not equal to persisted info.",
diskBalancerCluster.getNodes().size(), serializedCluster.getNodes()
.size());
diskBalancerCluster.getNodes().size(),
serializedCluster.getNodes().size());
}
}

View File

@@ -30,9 +30,12 @@ import java.util.List;
import java.util.TreeSet;
import java.util.UUID;
/**
* Tests DiskBalancer Data models.
*/
public class TestDataModels {
@Test
public void TestCreateRandomVolume() throws Exception {
public void testCreateRandomVolume() throws Exception {
DiskBalancerTestUtil util = new DiskBalancerTestUtil();
DiskBalancerVolume vol = util.createRandomVolume(StorageType.DISK);
Assert.assertNotNull(vol.getUuid());
@@ -46,7 +49,7 @@ public class TestDataModels {
}
@Test
public void TestCreateRandomVolumeSet() throws Exception {
public void testCreateRandomVolumeSet() throws Exception {
DiskBalancerTestUtil util = new DiskBalancerTestUtil();
DiskBalancerVolumeSet vSet =
util.createRandomVolumeSet(StorageType.SSD, 10);
@@ -57,7 +60,7 @@ public class TestDataModels {
}
@Test
public void TestCreateRandomDataNode() throws Exception {
public void testCreateRandomDataNode() throws Exception {
DiskBalancerTestUtil util = new DiskBalancerTestUtil();
DiskBalancerDataNode node = util.createRandomDataNode(
new StorageType[]{StorageType.DISK, StorageType.RAM_DISK}, 10);
@@ -65,7 +68,7 @@ public class TestDataModels {
}
@Test
public void TestDiskQueues() throws Exception {
public void testDiskQueues() throws Exception {
DiskBalancerTestUtil util = new DiskBalancerTestUtil();
DiskBalancerDataNode node = util.createRandomDataNode(
new StorageType[]{StorageType.DISK, StorageType.RAM_DISK}, 3);
@@ -93,7 +96,7 @@ public class TestDataModels {
}
@Test
public void TestNoBalancingNeededEvenDataSpread() throws Exception {
public void testNoBalancingNeededEvenDataSpread() throws Exception {
DiskBalancerTestUtil util = new DiskBalancerTestUtil();
DiskBalancerDataNode node =
new DiskBalancerDataNode(UUID.randomUUID().toString());
@@ -119,7 +122,7 @@ public class TestDataModels {
}
@Test
public void TestNoBalancingNeededTransientDisks() throws Exception {
public void testNoBalancingNeededTransientDisks() throws Exception {
DiskBalancerTestUtil util = new DiskBalancerTestUtil();
DiskBalancerDataNode node =
new DiskBalancerDataNode(UUID.randomUUID().toString());
@@ -145,7 +148,7 @@ public class TestDataModels {
}
@Test
public void TestNoBalancingNeededFailedDisks() throws Exception {
public void testNoBalancingNeededFailedDisks() throws Exception {
DiskBalancerTestUtil util = new DiskBalancerTestUtil();
DiskBalancerDataNode node =
new DiskBalancerDataNode(UUID.randomUUID().toString());
@@ -172,7 +175,7 @@ public class TestDataModels {
}
@Test
public void TestNeedBalancingUnevenDataSpread() throws Exception {
public void testNeedBalancingUnevenDataSpread() throws Exception {
DiskBalancerTestUtil util = new DiskBalancerTestUtil();
DiskBalancerDataNode node =
new DiskBalancerDataNode(UUID.randomUUID().toString());
@@ -196,7 +199,7 @@ public class TestDataModels {
}
@Test
public void TestVolumeSerialize() throws Exception {
public void testVolumeSerialize() throws Exception {
DiskBalancerTestUtil util = new DiskBalancerTestUtil();
DiskBalancerVolume volume = util.createRandomVolume(StorageType.DISK);
String originalString = volume.toJson();
@@ -207,7 +210,7 @@ public class TestDataModels {
}
@Test
public void TestClusterSerialize() throws Exception {
public void testClusterSerialize() throws Exception {
DiskBalancerTestUtil util = new DiskBalancerTestUtil();
// Create a Cluster with 3 datanodes, 3 disk types and 3 disks in each type
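The serialize tests all lean on a JSON round-trip; a hedged sketch of the volume-level pattern, where a DiskBalancerVolume.parseJson mirroring the cluster-level API shown in TestConnectors is an assumption:

DiskBalancerTestUtil util = new DiskBalancerTestUtil();
DiskBalancerVolume volume = util.createRandomVolume(StorageType.DISK);
String json = volume.toJson();
// parseJson is assumed to be the inverse of toJson, as it is for clusters.
DiskBalancerVolume copy = DiskBalancerVolume.parseJson(json);
Assert.assertEquals(volume.getUuid(), copy.getUuid());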

View File

@@ -35,7 +35,8 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
.DiskBalancerDataNode;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
import org.apache.hadoop.test.GenericTestUtils;
@@ -51,10 +52,13 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
/**
* Test Disk Balancer.
*/
public class TestDiskBalancer {
@Test
public void TestDiskBalancerNameNodeConnectivity() throws Exception {
public void testDiskBalancerNameNodeConnectivity() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
final int numDatanodes = 2;
@@ -65,13 +69,13 @@ public class TestDiskBalancer {
ClusterConnector nameNodeConnector =
ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);
DiskBalancerCluster DiskBalancerCluster = new DiskBalancerCluster
(nameNodeConnector);
DiskBalancerCluster.readClusterInfo();
assertEquals(DiskBalancerCluster.getNodes().size(), numDatanodes);
DiskBalancerCluster diskBalancerCluster =
new DiskBalancerCluster(nameNodeConnector);
diskBalancerCluster.readClusterInfo();
assertEquals(diskBalancerCluster.getNodes().size(), numDatanodes);
DataNode dnNode = cluster.getDataNodes().get(0);
DiskBalancerDataNode dbDnNode =
DiskBalancerCluster.getNodeByUUID(dnNode.getDatanodeUuid());
diskBalancerCluster.getNodeByUUID(dnNode.getDatanodeUuid());
assertEquals(dnNode.getDatanodeUuid(), dbDnNode.getDataNodeUUID());
assertEquals(dnNode.getDatanodeId().getIpAddr(),
dbDnNode.getDataNodeIP());
@@ -88,24 +92,23 @@ public class TestDiskBalancer {
/**
* This test simulates a real Data node working with DiskBalancer.
*
* <p>
* Here is the overview of this test.
*
* <p>
* 1. Write a bunch of blocks and move them to one disk to create imbalance.
* 2. Rewrite the capacity of the disks in DiskBalancer Model so that
* planner will produce a move plan.
* 3. Execute the move plan and wait until the plan is done.
* 4. Verify the source disk has blocks now.
* 2. Rewrite the capacity of the disks in DiskBalancer Model so that planner
* will produce a move plan. 3. Execute the move plan and wait until the plan
* is done. 4. Verify the source disk has blocks now.
*
* @throws Exception
*/
@Test
public void TestDiskBalancerEndToEnd() throws Exception {
public void testDiskBalancerEndToEnd() throws Exception {
Configuration conf = new HdfsConfiguration();
final int DEFAULT_BLOCK_SIZE = 100;
final int defaultBlockSize = 100;
conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
final int numDatanodes = 1;
final String fileName = "/tmp.txt";
@@ -116,12 +119,12 @@ public class TestDiskBalancer {
// Write a file and restart the cluster
long [] capacities = new long[]{ DEFAULT_BLOCK_SIZE * 2 * fileLen,
DEFAULT_BLOCK_SIZE * 2 * fileLen };
long[] capacities = new long[]{defaultBlockSize * 2 * fileLen,
defaultBlockSize * 2 * fileLen};
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numDatanodes)
.storageCapacities(capacities)
.storageTypes(new StorageType[] {StorageType.DISK, StorageType.DISK})
.storageTypes(new StorageType[]{StorageType.DISK, StorageType.DISK})
.storagesPerDatanode(2)
.build();
FsVolumeImpl source = null;
@@ -144,9 +147,9 @@ public class TestDiskBalancer {
source = (FsVolumeImpl) refs.get(0);
dest = (FsVolumeImpl) refs.get(1);
assertTrue(DiskBalancerTestUtil.getBlockCount(source) > 0);
DiskBalancerTestUtil.moveAllDataToDestVolume(
dnNode.getFSDataset(), source, dest);
assertTrue(DiskBalancerTestUtil.getBlockCount(source) == 0);
DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(),
source, dest);
assertTrue(DiskBalancerTestUtil.getBlockCount(source) == 0);
}
cluster.restartDataNodes();
@@ -164,7 +167,8 @@ public class TestDiskBalancer {
// Rewrite the capacity in the model to show that disks need
// re-balancing.
setVolumeCapacity(diskBalancerCluster, DEFAULT_BLOCK_SIZE * 2 * fileLen, "DISK");
setVolumeCapacity(diskBalancerCluster, defaultBlockSize * 2 * fileLen,
"DISK");
// Pick a node to process.
nodesToProcess.add(diskBalancerCluster.getNodeByUUID(dnNode
.getDatanodeUuid()));
@@ -220,13 +224,12 @@ public class TestDiskBalancer {
}
// Tolerance
long delta = (plan.getVolumeSetPlans().get(0).getBytesToMove()
* 10) / 100;
assertTrue(
(DiskBalancerTestUtil.getBlockCount(source) *
DEFAULT_BLOCK_SIZE + delta) >=
defaultBlockSize + delta) >=
plan.getVolumeSetPlans().get(0).getBytesToMove());
} finally {
@@ -236,13 +239,14 @@ public class TestDiskBalancer {
/**
* Sets all disks' capacity to the size specified.
* @param cluster - DiskBalancerCluster
* @param size - new size of the disk
*
* @param cluster - DiskBalancerCluster
* @param size - new size of the disk
*/
private void setVolumeCapacity(DiskBalancerCluster cluster, long size,
String diskType) {
Preconditions.checkNotNull(cluster);
for(DiskBalancerDataNode node : cluster.getNodes()) {
for (DiskBalancerDataNode node : cluster.getNodes()) {
for (DiskBalancerVolume vol :
node.getVolumeSets().get(diskType).getVolumes()) {
vol.setCapacity(size);

View File

@@ -53,6 +53,9 @@ import static org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.Resu
import static org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.Result.PLAN_UNDER_PROGRESS;
import static org.junit.Assert.assertTrue;
/**
* Test DiskBalancer RPC.
*/
public class TestDiskBalancerRPC {
@Rule
public ExpectedException thrown = ExpectedException.none();
@@ -91,7 +94,7 @@ public class TestDiskBalancerRPC {
RpcTestHelper rpcTestHelper = new RpcTestHelper().invoke();
DataNode dataNode = rpcTestHelper.getDataNode();
String planHash = rpcTestHelper.getPlanHash();
char hashArray[] = planHash.toCharArray();
char[] hashArray = planHash.toCharArray();
hashArray[0]++;
planHash = String.valueOf(hashArray);
int planVersion = rpcTestHelper.getPlanVersion();
@@ -126,7 +129,8 @@ public class TestDiskBalancerRPC {
thrown.expect(DiskBalancerException.class);
thrown.expect(new DiskBalancerResultVerifier(Result.INVALID_PLAN));
dataNode.submitDiskBalancerPlan(planHash, planVersion, "",
false); }
false);
}
@Test
public void testCancelPlan() throws Exception {
@@ -145,7 +149,7 @@ public class TestDiskBalancerRPC {
RpcTestHelper rpcTestHelper = new RpcTestHelper().invoke();
DataNode dataNode = rpcTestHelper.getDataNode();
String planHash = rpcTestHelper.getPlanHash();
char hashArray[] = planHash.toCharArray();
char[] hashArray = planHash.toCharArray();
hashArray[0]++;
planHash = String.valueOf(hashArray);
NodePlan plan = rpcTestHelper.getPlan();
@@ -234,10 +238,10 @@ public class TestDiskBalancerRPC {
@Test
public void testMoveBlockAcrossVolume() throws Exception {
Configuration conf = new HdfsConfiguration();
final int DEFAULT_BLOCK_SIZE = 100;
final int defaultBlockSize = 100;
conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
String fileName = "/tmp.txt";
Path filePath = new Path(fileName);
final int numDatanodes = 1;
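testMoveBlockAcrossVolume reduces to writing a file and relocating one of its replicas; a hedged sketch of the core call, where `block` stands in for a hypothetical ExtendedBlock of the written file and the FsVolumeReferences handling mirrors the end-to-end test above:

try (FsDatasetSpi.FsVolumeReferences refs =
         dataNode.getFSDataset().getFsVolumeReferences()) {
  FsVolumeSpi destination = refs.get(1);
  // Move the replica to the datanode's second volume.
  dataNode.getFSDataset().moveBlockAcrossVolumes(block, destination);
}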

View File

@@ -59,19 +59,22 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
/**
* Tests diskbalancer with a mock mover.
*/
public class TestDiskBalancerWithMockMover {
static final Log LOG = LogFactory.getLog(TestDiskBalancerWithMockMover.class);
@Rule
public ExpectedException thrown = ExpectedException.none();
MiniDFSCluster cluster;
String sourceName;
String destName;
String sourceUUID;
String destUUID;
String nodeID;
DataNode dataNode;
private MiniDFSCluster cluster;
private String sourceName;
private String destName;
private String sourceUUID;
private String destUUID;
private String nodeID;
private DataNode dataNode;
/**
* Checks that we return the right error if diskbalancer is not enabled.
@@ -178,12 +181,12 @@ public class TestDiskBalancerWithMockMover {
@Test
public void testSubmitWithOlderPlan() throws Exception {
final long MILLISECOND_IN_AN_HOUR = 1000 * 60 * 60L;
final long millisecondInAnHour = 1000 * 60 * 60L;
MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
NodePlan plan = mockMoverHelper.getPlan();
DiskBalancer balancer = mockMoverHelper.getBalancer();
plan.setTimeStamp(Time.now() - (32 * MILLISECOND_IN_AN_HOUR));
plan.setTimeStamp(Time.now() - (32 * millisecondInAnHour));
thrown.expect(DiskBalancerException.class);
thrown.expect(new DiskBalancerResultVerifier(DiskBalancerException
.Result.OLD_PLAN_SUBMITTED));
@@ -316,10 +319,10 @@ public class TestDiskBalancerWithMockMover {
@Before
public void setUp() throws Exception {
Configuration conf = new HdfsConfiguration();
final int NUM_STORAGES_PER_DN = 2;
final int numStoragesPerDn = 2;
cluster = new MiniDFSCluster
.Builder(conf).numDataNodes(3)
.storagesPerDatanode(NUM_STORAGES_PER_DN)
.storagesPerDatanode(numStoragesPerDn)
.build();
cluster.waitActive();
dataNode = cluster.getDataNodes().get(0);
@@ -602,8 +605,8 @@ public class TestDiskBalancerWithMockMover {
DiskBalancerDataNode node = balancerCluster.getNodes().get(dnIndex);
node.setDataNodeUUID(nodeID);
GreedyPlanner planner = new GreedyPlanner(10.0f, node);
NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort
());
NodePlan plan = new NodePlan(node.getDataNodeName(),
node.getDataNodePort());
planner.balanceVolumeSet(node, node.getVolumeSets().get("DISK"), plan);
setVolumeNames(plan);
return plan;

View File

@@ -40,6 +40,9 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
/**
* Test Planner.
*/
public class TestPlanner {
static final Logger LOG =
LoggerFactory.getLogger(TestPlanner.class);
@@ -56,8 +59,8 @@ public class TestPlanner {
cluster.setNodesToProcess(cluster.getNodes());
DiskBalancerDataNode node = cluster.getNodes().get(0);
GreedyPlanner planner = new GreedyPlanner(10.0f, node);
NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort
());
NodePlan plan = new NodePlan(node.getDataNodeName(),
node.getDataNodePort());
planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
}
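After balanceVolumeSet runs, the useful output is the list of planned moves; a hedged sketch of inspecting it, with the Step accessors taken from the later hunks in this file:

planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
for (Step step : plan.getVolumeSetPlans()) {
  // Each step names a source volume and how many bytes the planner moves.
  LOG.info("Move {} from {}", step.getSizeString(step.getBytesToMove()),
      step.getSourceVolume().getPath());
}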
@@ -115,8 +118,8 @@ public class TestPlanner {
Assert.assertEquals(1, cluster.getNodes().size());
GreedyPlanner planner = new GreedyPlanner(10.0f, node);
NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort
());
NodePlan plan = new NodePlan(node.getDataNodeName(),
node.getDataNodePort());
planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
// With a single volume we should not have any plans for moves.
@@ -183,8 +186,8 @@ public class TestPlanner {
Assert.assertEquals(1, cluster.getNodes().size());
GreedyPlanner planner = new GreedyPlanner(5.0f, node);
NodePlan plan = new NodePlan(node.getDataNodeUUID(), node.getDataNodePort
());
NodePlan plan = new NodePlan(node.getDataNodeUUID(),
node.getDataNodePort());
planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
// We should have only one planned move from
@@ -219,8 +222,8 @@ public class TestPlanner {
Assert.assertEquals(1, cluster.getNodes().size());
GreedyPlanner planner = new GreedyPlanner(10.0f, node);
NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort
());
NodePlan plan = new NodePlan(node.getDataNodeName(),
node.getDataNodePort());
planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
// since we have same size of data in all disks , we should have
@@ -250,8 +253,8 @@ public class TestPlanner {
Assert.assertEquals(1, cluster.getNodes().size());
GreedyPlanner planner = new GreedyPlanner(10.0f, node);
NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort
());
NodePlan plan = new NodePlan(node.getDataNodeName(),
node.getDataNodePort());
planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
// We should see 2 move plans. One from volume100 to volume0-1
@@ -260,10 +263,12 @@ public class TestPlanner {
assertEquals(2, plan.getVolumeSetPlans().size());
Step step = plan.getVolumeSetPlans().get(0);
assertEquals("volume100", step.getSourceVolume().getPath());
assertTrue(step.getSizeString(step.getBytesToMove()).matches("33.[2|3|4] G"));
assertTrue(step.getSizeString(
step.getBytesToMove()).matches("33.[2|3|4] G"));
step = plan.getVolumeSetPlans().get(1);
assertEquals("volume100", step.getSourceVolume().getPath());
assertTrue(step.getSizeString(step.getBytesToMove()).matches("33.[2|3|4] G"));
assertTrue(step.getSizeString(
step.getBytesToMove()).matches("33.[2|3|4] G"));
}
@Test
@@ -287,8 +292,8 @@ public class TestPlanner {
Assert.assertEquals(1, cluster.getNodes().size());
GreedyPlanner planner = new GreedyPlanner(10.0f, node);
NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort
());
NodePlan plan = new NodePlan(node.getDataNodeName(),
node.getDataNodePort());
planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
//We should see NO moves since the total data on the volume100
@@ -315,11 +320,13 @@ public class TestPlanner {
Step step = newPlan.getVolumeSetPlans().get(0);
assertEquals("volume100", step.getSourceVolume().getPath());
assertTrue(step.getSizeString(step.getBytesToMove()).matches("18.[6|7|8] G"));
assertTrue(step.getSizeString(
step.getBytesToMove()).matches("18.[6|7|8] G"));
step = newPlan.getVolumeSetPlans().get(1);
assertEquals("volume100", step.getSourceVolume().getPath());
assertTrue(step.getSizeString(step.getBytesToMove()).matches("18.[6|7|8] G"));
assertTrue(
step.getSizeString(step.getBytesToMove()).matches("18.[6|7|8] G"));
}
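A closing note on the tolerance regexes above: inside a character class the pipe is a literal, so "[2|3|4]" accepts '2', '3', '4' or '|', and the unescaped dot matches any character. Both quirks are harmless for these assertions; a stricter form is shown for comparison:

// Loose pattern used by the tests ('.' matches anything, '|' is literal):
assert "33.3 G".matches("33.[2|3|4] G");
// Stricter equivalent:
assert "33.3 G".matches("33\\.[234] G");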