HDFS-10520. DiskBalancer: Fix Checkstyle issues in test code. Contributed by Anu Engineer.

Author:    Anu Engineer (2016-06-15 15:28:22 -07:00)
Committer: Arpit Agarwal
Parent:    7e2be5c4a0
Commit:    3225c24e0e
10 changed files with 120 additions and 90 deletions
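The fixes below are mechanical Checkstyle cleanups rather than behavior changes: test fields gain explicit private modifiers, upper-case Test* method names become lower camel case, C-style array declarations become Java-style, over-long lines are wrapped, and missing class-level javadoc is added. As a rough before/after illustration (a hypothetical example, not code from this patch, assuming the usual VisibilityModifier, MethodName, ArrayTypeStyle and LineLength checks):

    // Hypothetical "before" shape, showing the kinds of violations fixed below.
    class BeforeExample {
      int numDatanodes = 3;                   // VisibilityModifier: field should be private
      public void TestSomething() {           // MethodName: should be lower camel case
        char hash[] = "abc".toCharArray();    // ArrayTypeStyle: prefer char[] hash
        hash[0]++;
      }
    }

    // Hypothetical "after" shape matching the style of the changes in this commit.
    class AfterExample {
      private int numDatanodes = 3;
      public void testSomething() {
        char[] hash = "abc".toCharArray();
        hash[0]++;
      }
    }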

DiskBalancerException.java

@@ -22,6 +22,9 @@ import java.io.IOException;
  * Disk Balancer Exceptions.
  */
 public class DiskBalancerException extends IOException {
+  /**
+   * Results returned by the RPC layer of DiskBalancer.
+   */
   public enum Result {
     DISK_BALANCER_NOT_ENABLED,
     INVALID_PLAN_VERSION,

DiskBalancerResultVerifier.java

@@ -20,6 +20,9 @@ package org.apache.hadoop.hdfs.server.diskbalancer;
 import org.hamcrest.Description;
 import org.hamcrest.TypeSafeMatcher;
 
+/**
+ * Helps in verifying test results.
+ */
 public class DiskBalancerResultVerifier
     extends TypeSafeMatcher<DiskBalancerException> {
   private final DiskBalancerException.Result expectedResult;
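For context, this matcher is paired with JUnit's ExpectedException rule in the RPC tests later in this patch; a minimal usage sketch (a hypothetical test fragment inside a class such as TestDiskBalancerRPC below, reusing its dataNode, planHash and planVersion variables) looks roughly like:

    @Rule
    public ExpectedException thrown = ExpectedException.none();

    @Test
    public void testRejectsInvalidPlan() throws Exception {
      // Expect the RPC to fail with the INVALID_PLAN result code.
      thrown.expect(DiskBalancerException.class);
      thrown.expect(new DiskBalancerResultVerifier(
          DiskBalancerException.Result.INVALID_PLAN));
      // Submit a malformed plan (empty plan body), which should trigger the error.
      dataNode.submitDiskBalancerPlan(planHash, planVersion, "", false);
    }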

DiskBalancerTestUtil.java

@@ -42,7 +42,7 @@ public class DiskBalancerTestUtil {
   public static final long TB = GB * 1024L;
   private static int[] diskSizes =
       {1, 2, 3, 4, 5, 6, 7, 8, 9, 100, 200, 300, 400, 500, 600, 700, 800, 900};
-  Random rand;
+  private Random rand;
   private String stringTable =
       "ABCDEDFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0987654321";

TestConnectors.java

@@ -30,11 +30,14 @@ import org.junit.Test;
 
 import java.io.IOException;
 
+/**
+ * Test Class that tests connectors.
+ */
 public class TestConnectors {
   private MiniDFSCluster cluster;
-  final int numDatanodes = 3;
-  final int volumeCount = 2; // default volumes in MiniDFSCluster.
-  Configuration conf;
+  private final int numDatanodes = 3;
+  private final int volumeCount = 2; // default volumes in MiniDFSCluster.
+  private Configuration conf;
 
   @Before
   public void setup() throws IOException {
@@ -51,12 +54,12 @@ public class TestConnectors {
   }
 
   @Test
-  public void TestNameNodeConnector() throws Exception {
+  public void testNameNodeConnector() throws Exception {
     cluster.waitActive();
     ClusterConnector nameNodeConnector =
         ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);
-    DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster
-        (nameNodeConnector);
+    DiskBalancerCluster diskBalancerCluster =
+        new DiskBalancerCluster(nameNodeConnector);
     diskBalancerCluster.readClusterInfo();
     Assert.assertEquals("Expected number of Datanodes not found.",
         numDatanodes, diskBalancerCluster.getNodes().size());
@@ -65,18 +68,18 @@ public class TestConnectors {
   }
 
   @Test
-  public void TestJsonConnector() throws Exception {
+  public void testJsonConnector() throws Exception {
     cluster.waitActive();
     ClusterConnector nameNodeConnector =
         ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);
-    DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster
-        (nameNodeConnector);
+    DiskBalancerCluster diskBalancerCluster =
+        new DiskBalancerCluster(nameNodeConnector);
     diskBalancerCluster.readClusterInfo();
     String diskBalancerJson = diskBalancerCluster.toJson();
-    DiskBalancerCluster serializedCluster = DiskBalancerCluster.parseJson
-        (diskBalancerJson);
+    DiskBalancerCluster serializedCluster =
+        DiskBalancerCluster.parseJson(diskBalancerJson);
     Assert.assertEquals("Parsed cluster is not equal to persisted info.",
-        diskBalancerCluster.getNodes().size(), serializedCluster.getNodes()
-            .size());
+        diskBalancerCluster.getNodes().size(),
+        serializedCluster.getNodes().size());
   }
 }

TestDataModels.java

@@ -30,9 +30,12 @@ import java.util.List;
 import java.util.TreeSet;
 import java.util.UUID;
 
+/**
+ * Tests DiskBalancer Data models.
+ */
 public class TestDataModels {
   @Test
-  public void TestCreateRandomVolume() throws Exception {
+  public void testCreateRandomVolume() throws Exception {
     DiskBalancerTestUtil util = new DiskBalancerTestUtil();
     DiskBalancerVolume vol = util.createRandomVolume(StorageType.DISK);
     Assert.assertNotNull(vol.getUuid());
@@ -46,7 +49,7 @@ public class TestDataModels {
   }
 
   @Test
-  public void TestCreateRandomVolumeSet() throws Exception {
+  public void testCreateRandomVolumeSet() throws Exception {
     DiskBalancerTestUtil util = new DiskBalancerTestUtil();
     DiskBalancerVolumeSet vSet =
         util.createRandomVolumeSet(StorageType.SSD, 10);
@@ -57,7 +60,7 @@ public class TestDataModels {
   }
 
   @Test
-  public void TestCreateRandomDataNode() throws Exception {
+  public void testCreateRandomDataNode() throws Exception {
     DiskBalancerTestUtil util = new DiskBalancerTestUtil();
     DiskBalancerDataNode node = util.createRandomDataNode(
         new StorageType[]{StorageType.DISK, StorageType.RAM_DISK}, 10);
@@ -65,7 +68,7 @@ public class TestDataModels {
   }
 
   @Test
-  public void TestDiskQueues() throws Exception {
+  public void testDiskQueues() throws Exception {
     DiskBalancerTestUtil util = new DiskBalancerTestUtil();
     DiskBalancerDataNode node = util.createRandomDataNode(
         new StorageType[]{StorageType.DISK, StorageType.RAM_DISK}, 3);
@@ -93,7 +96,7 @@ public class TestDataModels {
   }
 
   @Test
-  public void TestNoBalancingNeededEvenDataSpread() throws Exception {
+  public void testNoBalancingNeededEvenDataSpread() throws Exception {
     DiskBalancerTestUtil util = new DiskBalancerTestUtil();
     DiskBalancerDataNode node =
         new DiskBalancerDataNode(UUID.randomUUID().toString());
@@ -119,7 +122,7 @@ public class TestDataModels {
   }
 
   @Test
-  public void TestNoBalancingNeededTransientDisks() throws Exception {
+  public void testNoBalancingNeededTransientDisks() throws Exception {
     DiskBalancerTestUtil util = new DiskBalancerTestUtil();
     DiskBalancerDataNode node =
         new DiskBalancerDataNode(UUID.randomUUID().toString());
@@ -145,7 +148,7 @@ public class TestDataModels {
   }
 
   @Test
-  public void TestNoBalancingNeededFailedDisks() throws Exception {
+  public void testNoBalancingNeededFailedDisks() throws Exception {
     DiskBalancerTestUtil util = new DiskBalancerTestUtil();
     DiskBalancerDataNode node =
         new DiskBalancerDataNode(UUID.randomUUID().toString());
@@ -172,7 +175,7 @@ public class TestDataModels {
   }
 
   @Test
-  public void TestNeedBalancingUnevenDataSpread() throws Exception {
+  public void testNeedBalancingUnevenDataSpread() throws Exception {
     DiskBalancerTestUtil util = new DiskBalancerTestUtil();
     DiskBalancerDataNode node =
         new DiskBalancerDataNode(UUID.randomUUID().toString());
@@ -196,7 +199,7 @@ public class TestDataModels {
   }
 
   @Test
-  public void TestVolumeSerialize() throws Exception {
+  public void testVolumeSerialize() throws Exception {
     DiskBalancerTestUtil util = new DiskBalancerTestUtil();
     DiskBalancerVolume volume = util.createRandomVolume(StorageType.DISK);
     String originalString = volume.toJson();
@@ -207,7 +210,7 @@ public class TestDataModels {
   }
 
   @Test
-  public void TestClusterSerialize() throws Exception {
+  public void testClusterSerialize() throws Exception {
     DiskBalancerTestUtil util = new DiskBalancerTestUtil();
     // Create a Cluster with 3 datanodes, 3 disk types and 3 disks in each type

TestDiskBalancer.java

@@ -35,7 +35,8 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
-import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
+import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
+    .DiskBalancerDataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -51,10 +52,13 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
+/**
+ * Test Disk Balancer.
+ */
 public class TestDiskBalancer {
 
   @Test
-  public void TestDiskBalancerNameNodeConnectivity() throws Exception {
+  public void testDiskBalancerNameNodeConnectivity() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
     final int numDatanodes = 2;
@@ -65,13 +69,13 @@ public class TestDiskBalancer {
     ClusterConnector nameNodeConnector =
         ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);
 
-    DiskBalancerCluster DiskBalancerCluster = new DiskBalancerCluster
-        (nameNodeConnector);
-    DiskBalancerCluster.readClusterInfo();
-    assertEquals(DiskBalancerCluster.getNodes().size(), numDatanodes);
+    DiskBalancerCluster diskBalancerCluster =
+        new DiskBalancerCluster(nameNodeConnector);
+    diskBalancerCluster.readClusterInfo();
+    assertEquals(diskBalancerCluster.getNodes().size(), numDatanodes);
     DataNode dnNode = cluster.getDataNodes().get(0);
     DiskBalancerDataNode dbDnNode =
-        DiskBalancerCluster.getNodeByUUID(dnNode.getDatanodeUuid());
+        diskBalancerCluster.getNodeByUUID(dnNode.getDatanodeUuid());
     assertEquals(dnNode.getDatanodeUuid(), dbDnNode.getDataNodeUUID());
     assertEquals(dnNode.getDatanodeId().getIpAddr(),
         dbDnNode.getDataNodeIP());
@@ -88,24 +92,23 @@ public class TestDiskBalancer {
 
   /**
    * This test simulates a real Data node working with DiskBalancer.
-   *
+   * <p>
    * Here is the overview of this test.
-   *
+   * <p>
   * 1. Write a bunch of blocks and move them to one disk to create imbalance.
-   * 2. Rewrite the capacity of the disks in DiskBalancer Model so that
-   * planner will produce a move plan.
-   * 3. Execute the move plan and wait unitl the plan is done.
-   * 4. Verify the source disk has blocks now.
+   * 2. Rewrite the capacity of the disks in DiskBalancer Model so that planner
+   * will produce a move plan. 3. Execute the move plan and wait unitl the plan
+   * is done. 4. Verify the source disk has blocks now.
    *
    * @throws Exception
    */
   @Test
-  public void TestDiskBalancerEndToEnd() throws Exception {
+  public void testDiskBalancerEndToEnd() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    final int DEFAULT_BLOCK_SIZE = 100;
+    final int defaultBlockSize = 100;
     conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
-    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
     final int numDatanodes = 1;
     final String fileName = "/tmp.txt";
@@ -116,8 +119,8 @@ public class TestDiskBalancer {
 
     // Write a file and restart the cluster
-    long [] capacities = new long[]{ DEFAULT_BLOCK_SIZE * 2 * fileLen,
-        DEFAULT_BLOCK_SIZE * 2 * fileLen };
+    long[] capacities = new long[]{defaultBlockSize * 2 * fileLen,
+        defaultBlockSize * 2 * fileLen};
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(numDatanodes)
         .storageCapacities(capacities)
@@ -144,8 +147,8 @@ public class TestDiskBalancer {
       source = (FsVolumeImpl) refs.get(0);
       dest = (FsVolumeImpl) refs.get(1);
       assertTrue(DiskBalancerTestUtil.getBlockCount(source) > 0);
-      DiskBalancerTestUtil.moveAllDataToDestVolume(
-          dnNode.getFSDataset(), source, dest);
+      DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(),
+          source, dest);
       assertTrue(DiskBalancerTestUtil.getBlockCount(source) == 0);
     }
@@ -164,7 +167,8 @@ public class TestDiskBalancer {
 
     // Rewrite the capacity in the model to show that disks need
     // re-balancing.
-    setVolumeCapacity(diskBalancerCluster, DEFAULT_BLOCK_SIZE * 2 * fileLen, "DISK");
+    setVolumeCapacity(diskBalancerCluster, defaultBlockSize * 2 * fileLen,
+        "DISK");
     // Pick a node to process.
     nodesToProcess.add(diskBalancerCluster.getNodeByUUID(dnNode
         .getDatanodeUuid()));
@@ -220,13 +224,12 @@ public class TestDiskBalancer {
       }
 
       // Tolerance
      long delta = (plan.getVolumeSetPlans().get(0).getBytesToMove()
          * 10) / 100;
      assertTrue(
          (DiskBalancerTestUtil.getBlockCount(source) *
-              DEFAULT_BLOCK_SIZE + delta) >=
+              defaultBlockSize + delta) >=
              plan.getVolumeSetPlans().get(0).getBytesToMove());
     } finally {
@@ -236,6 +239,7 @@ public class TestDiskBalancer {
 
   /**
    * Sets alll Disks capacity to size specified.
+   *
    * @param cluster - DiskBalancerCluster
    * @param size - new size of the disk
    */

TestDiskBalancerRPC.java

@@ -53,6 +53,9 @@ import static org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.Resu
 import static org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.Result.PLAN_UNDER_PROGRESS;
 import static org.junit.Assert.assertTrue;
 
+/**
+ * Test DiskBalancer RPC.
+ */
 public class TestDiskBalancerRPC {
   @Rule
   public ExpectedException thrown = ExpectedException.none();
@@ -91,7 +94,7 @@ public class TestDiskBalancerRPC {
     RpcTestHelper rpcTestHelper = new RpcTestHelper().invoke();
     DataNode dataNode = rpcTestHelper.getDataNode();
     String planHash = rpcTestHelper.getPlanHash();
-    char hashArray[] = planHash.toCharArray();
+    char[] hashArray = planHash.toCharArray();
     hashArray[0]++;
     planHash = String.valueOf(hashArray);
     int planVersion = rpcTestHelper.getPlanVersion();
@@ -126,7 +129,8 @@ public class TestDiskBalancerRPC {
     thrown.expect(DiskBalancerException.class);
     thrown.expect(new DiskBalancerResultVerifier(Result.INVALID_PLAN));
     dataNode.submitDiskBalancerPlan(planHash, planVersion, "",
-        false); }
+        false);
+  }
 
   @Test
   public void testCancelPlan() throws Exception {
@@ -145,7 +149,7 @@ public class TestDiskBalancerRPC {
     RpcTestHelper rpcTestHelper = new RpcTestHelper().invoke();
     DataNode dataNode = rpcTestHelper.getDataNode();
     String planHash = rpcTestHelper.getPlanHash();
-    char hashArray[] = planHash.toCharArray();
+    char[] hashArray = planHash.toCharArray();
     hashArray[0]++;
     planHash = String.valueOf(hashArray);
     NodePlan plan = rpcTestHelper.getPlan();
@@ -234,10 +238,10 @@ public class TestDiskBalancerRPC {
   @Test
   public void testMoveBlockAcrossVolume() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    final int DEFAULT_BLOCK_SIZE = 100;
+    final int defaultBlockSize = 100;
     conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
-    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE);
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
+    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
     String fileName = "/tmp.txt";
     Path filePath = new Path(fileName);
     final int numDatanodes = 1;

TestDiskBalancerWithMockMover.java

@@ -59,19 +59,22 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
+/**
+ * Tests diskbalancer with a mock mover.
+ */
 public class TestDiskBalancerWithMockMover {
   static final Log LOG = LogFactory.getLog(TestDiskBalancerWithMockMover.class);
 
   @Rule
   public ExpectedException thrown = ExpectedException.none();
 
-  MiniDFSCluster cluster;
-  String sourceName;
-  String destName;
-  String sourceUUID;
-  String destUUID;
-  String nodeID;
-  DataNode dataNode;
+  private MiniDFSCluster cluster;
+  private String sourceName;
+  private String destName;
+  private String sourceUUID;
+  private String destUUID;
+  private String nodeID;
+  private DataNode dataNode;
 
   /**
    * Checks that we return the right error if diskbalancer is not enabled.
@@ -178,12 +181,12 @@ public class TestDiskBalancerWithMockMover {
 
   @Test
   public void testSubmitWithOlderPlan() throws Exception {
-    final long MILLISECOND_IN_AN_HOUR = 1000 * 60 * 60L;
+    final long millisecondInAnHour = 1000 * 60 * 60L;
     MockMoverHelper mockMoverHelper = new MockMoverHelper().invoke();
     NodePlan plan = mockMoverHelper.getPlan();
     DiskBalancer balancer = mockMoverHelper.getBalancer();
 
-    plan.setTimeStamp(Time.now() - (32 * MILLISECOND_IN_AN_HOUR));
+    plan.setTimeStamp(Time.now() - (32 * millisecondInAnHour));
     thrown.expect(DiskBalancerException.class);
     thrown.expect(new DiskBalancerResultVerifier(DiskBalancerException
         .Result.OLD_PLAN_SUBMITTED));
@@ -316,10 +319,10 @@ public class TestDiskBalancerWithMockMover {
   @Before
   public void setUp() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    final int NUM_STORAGES_PER_DN = 2;
+    final int numStoragesPerDn = 2;
     cluster = new MiniDFSCluster
         .Builder(conf).numDataNodes(3)
-        .storagesPerDatanode(NUM_STORAGES_PER_DN)
+        .storagesPerDatanode(numStoragesPerDn)
         .build();
     cluster.waitActive();
     dataNode = cluster.getDataNodes().get(0);
@@ -602,8 +605,8 @@ public class TestDiskBalancerWithMockMover {
     DiskBalancerDataNode node = balancerCluster.getNodes().get(dnIndex);
     node.setDataNodeUUID(nodeID);
     GreedyPlanner planner = new GreedyPlanner(10.0f, node);
-    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort
-        ());
+    NodePlan plan = new NodePlan(node.getDataNodeName(),
+        node.getDataNodePort());
     planner.balanceVolumeSet(node, node.getVolumeSets().get("DISK"), plan);
     setVolumeNames(plan);
     return plan;

TestPlanner.java

@@ -40,6 +40,9 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
+/**
+ * Test Planner.
+ */
 public class TestPlanner {
   static final Logger LOG =
       LoggerFactory.getLogger(TestPlanner.class);
@@ -56,8 +59,8 @@ public class TestPlanner {
     cluster.setNodesToProcess(cluster.getNodes());
     DiskBalancerDataNode node = cluster.getNodes().get(0);
     GreedyPlanner planner = new GreedyPlanner(10.0f, node);
-    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort
-        ());
+    NodePlan plan = new NodePlan(node.getDataNodeName(),
+        node.getDataNodePort());
     planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
   }
@@ -115,8 +118,8 @@ public class TestPlanner {
     Assert.assertEquals(1, cluster.getNodes().size());
 
     GreedyPlanner planner = new GreedyPlanner(10.0f, node);
-    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort
-        ());
+    NodePlan plan = new NodePlan(node.getDataNodeName(),
+        node.getDataNodePort());
     planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
 
     // With a single volume we should not have any plans for moves.
@@ -183,8 +186,8 @@ public class TestPlanner {
     Assert.assertEquals(1, cluster.getNodes().size());
 
     GreedyPlanner planner = new GreedyPlanner(5.0f, node);
-    NodePlan plan = new NodePlan(node.getDataNodeUUID(), node.getDataNodePort
-        ());
+    NodePlan plan = new NodePlan(node.getDataNodeUUID(),
+        node.getDataNodePort());
     planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
 
     // We should have only one planned move from
@@ -219,8 +222,8 @@ public class TestPlanner {
     Assert.assertEquals(1, cluster.getNodes().size());
 
     GreedyPlanner planner = new GreedyPlanner(10.0f, node);
-    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort
-        ());
+    NodePlan plan = new NodePlan(node.getDataNodeName(),
+        node.getDataNodePort());
     planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
 
     // since we have same size of data in all disks , we should have
@@ -250,8 +253,8 @@ public class TestPlanner {
     Assert.assertEquals(1, cluster.getNodes().size());
 
     GreedyPlanner planner = new GreedyPlanner(10.0f, node);
-    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort
-        ());
+    NodePlan plan = new NodePlan(node.getDataNodeName(),
+        node.getDataNodePort());
     planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
 
     // We should see 2 move plans. One from volume100 to volume0-1
@@ -260,10 +263,12 @@ public class TestPlanner {
     assertEquals(2, plan.getVolumeSetPlans().size());
     Step step = plan.getVolumeSetPlans().get(0);
     assertEquals("volume100", step.getSourceVolume().getPath());
-    assertTrue(step.getSizeString(step.getBytesToMove()).matches("33.[2|3|4] G"));
+    assertTrue(step.getSizeString(
+        step.getBytesToMove()).matches("33.[2|3|4] G"));
     step = plan.getVolumeSetPlans().get(1);
     assertEquals("volume100", step.getSourceVolume().getPath());
-    assertTrue(step.getSizeString(step.getBytesToMove()).matches("33.[2|3|4] G"));
+    assertTrue(step.getSizeString(
+        step.getBytesToMove()).matches("33.[2|3|4] G"));
   }
 
   @Test
@@ -287,8 +292,8 @@ public class TestPlanner {
     Assert.assertEquals(1, cluster.getNodes().size());
 
     GreedyPlanner planner = new GreedyPlanner(10.0f, node);
-    NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort
-        ());
+    NodePlan plan = new NodePlan(node.getDataNodeName(),
+        node.getDataNodePort());
     planner.balanceVolumeSet(node, node.getVolumeSets().get("SSD"), plan);
 
     //We should see NO moves since the total data on the volume100
@@ -315,11 +320,13 @@ public class TestPlanner {
     Step step = newPlan.getVolumeSetPlans().get(0);
     assertEquals("volume100", step.getSourceVolume().getPath());
-    assertTrue(step.getSizeString(step.getBytesToMove()).matches("18.[6|7|8] G"));
+    assertTrue(step.getSizeString(
+        step.getBytesToMove()).matches("18.[6|7|8] G"));
     step = newPlan.getVolumeSetPlans().get(1);
     assertEquals("volume100", step.getSourceVolume().getPath());
-    assertTrue(step.getSizeString(step.getBytesToMove()).matches("18.[6|7|8] G"));
+    assertTrue(
+        step.getSizeString(step.getBytesToMove()).matches("18.[6|7|8] G"));
   }