HDFS-12146. [SPS]: Fix TestStoragePolicySatisfierWithStripedFile#testSPSWhenFileHasLowRedundancyBlocks. Contributed by Surendra Singh Lilhore.
commit 9e82e5a86e (parent 68af4e199a)
@@ -1025,12 +1025,13 @@ public class TestStoragePolicySatisfier {
       list.add(cluster.stopDataNode(0));
       list.add(cluster.stopDataNode(0));
       cluster.restartNameNodes();
-      cluster.restartDataNode(list.get(0), true);
-      cluster.restartDataNode(list.get(1), true);
+      cluster.restartDataNode(list.get(0), false);
+      cluster.restartDataNode(list.get(1), false);
       cluster.waitActive();
       fs.satisfyStoragePolicy(filePath);
-      Thread.sleep(3000 * 6);
-      cluster.restartDataNode(list.get(2), true);
+      DFSTestUtil.waitExpectedStorageType(filePath.toString(),
+          StorageType.ARCHIVE, 2, 30000, cluster.getFileSystem());
+      cluster.restartDataNode(list.get(2), false);
       DFSTestUtil.waitExpectedStorageType(filePath.toString(),
           StorageType.ARCHIVE, 3, 30000, cluster.getFileSystem());
     } finally {
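The hunk above boils down to two things: the stopped datanodes are restarted with the boolean flag flipped from true to false (the flag the diff shows as the second argument to restartDataNode, which appears to control whether the old port is reused), and the fixed Thread.sleep(3000 * 6) is replaced by polling DFSTestUtil.waitExpectedStorageType, so the test waits only as long as needed, up to the 30-second timeout. A minimal sketch of the new wait sequence, using only calls that appear in the diff; the surrounding MiniDFSCluster/test setup is assumed, and DFSTestUtil and StorageType are already imported by the test class (the unchanged context lines use them):

    // After restarting two of the stopped datanodes and requesting
    // satisfyStoragePolicy, poll until two ARCHIVE replicas are reported
    // (30s timeout) instead of sleeping a fixed 18 seconds.
    fs.satisfyStoragePolicy(filePath);
    DFSTestUtil.waitExpectedStorageType(filePath.toString(),
        StorageType.ARCHIVE, 2, 30000, cluster.getFileSystem());

    // Bring back the third datanode without pinning it to its old port
    // (second argument now false), then wait for the final ARCHIVE replica.
    cluster.restartDataNode(list.get(2), false);
    DFSTestUtil.waitExpectedStorageType(filePath.toString(),
        StorageType.ARCHIVE, 3, 30000, cluster.getFileSystem());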
@@ -308,8 +308,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
    */
   @Test(timeout = 300000)
   public void testSPSWhenFileHasLowRedundancyBlocks() throws Exception {
-    // start 10 datanodes
-    int numOfDatanodes = 10;
+    // start 9 datanodes
+    int numOfDatanodes = 9;
     int storagesPerDatanode = 2;
     long capacity = 20 * defaultStripeBlockSize;
     long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
@@ -338,7 +338,6 @@ public class TestStoragePolicySatisfierWithStripedFile {
         {StorageType.DISK, StorageType.ARCHIVE},
         {StorageType.DISK, StorageType.ARCHIVE},
         {StorageType.DISK, StorageType.ARCHIVE},
-        {StorageType.DISK, StorageType.ARCHIVE},
         {StorageType.DISK, StorageType.ARCHIVE}})
         .storageCapacities(capacities)
         .build();
@@ -366,15 +365,16 @@ public class TestStoragePolicySatisfierWithStripedFile {
       }
       cluster.restartNameNodes();
       // Restart half datanodes
-      for (int i = 0; i < numOfDatanodes / 2; i++) {
-        cluster.restartDataNode(list.get(i), true);
+      for (int i = 0; i < 5; i++) {
+        cluster.restartDataNode(list.get(i), false);
       }
       cluster.waitActive();
       fs.satisfyStoragePolicy(fooFile);
-      Thread.sleep(3000 * 6);
+      DFSTestUtil.waitExpectedStorageType(fooFile.toString(),
+          StorageType.ARCHIVE, 5, 30000, cluster.getFileSystem());
       //Start reaming datanodes
-      for (int i = numOfDatanodes - 1; i > numOfDatanodes / 2; i--) {
-        cluster.restartDataNode(list.get(i), true);
+      for (int i = numOfDatanodes - 1; i >= 5; i--) {
+        cluster.restartDataNode(list.get(i), false);
       }
       // verify storage types and locations.
       waitExpectedStorageType(cluster, fooFile.toString(), fileLen,
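In TestStoragePolicySatisfierWithStripedFile the same sleep-to-poll replacement is applied, the cluster is sized at 9 datanodes instead of 10, and the restart loops get explicit bounds. With the old bounds and 10 datanodes, the first loop restarted indices 0-4 and the second loop (i > numOfDatanodes / 2) restarted 9 down to 6, so index 5 was never brought back; with the new bounds and numOfDatanodes = 9, indices 0-4 and then 8 down to 5 are all restarted. A minimal sketch of the resulting sequence, reusing the identifiers from the diff; the earlier cluster and file setup from the test is assumed:

    // Restart the first five stopped datanodes (port-reuse flag false),
    // then ask SPS to satisfy the policy and poll for five ARCHIVE storages.
    for (int i = 0; i < 5; i++) {
      cluster.restartDataNode(list.get(i), false);
    }
    cluster.waitActive();
    fs.satisfyStoragePolicy(fooFile);
    DFSTestUtil.waitExpectedStorageType(fooFile.toString(),
        StorageType.ARCHIVE, 5, 30000, cluster.getFileSystem());

    // Restart the remaining datanodes: indices 8 down to 5 with
    // numOfDatanodes = 9, so no index is skipped this time.
    for (int i = numOfDatanodes - 1; i >= 5; i--) {
      cluster.restartDataNode(list.get(i), false);
    }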