HDFS-12141: [SPS]: Fix checkstyle warnings. Contributed by Rakesh R.

Authored by Uma Maheswara Rao G on 2017-07-17 10:24:06 -07:00; committed by Uma Maheswara Rao Gangumalla
parent 9e82e5a86e
commit 4bcf61c696
6 changed files with 19 additions and 19 deletions

BlockManager.java

@@ -427,7 +427,7 @@ public class BlockManager implements BlockStatsMXBean {
   private final BlockIdManager blockIdManager;
-  /** For satisfying block storage policies */
+  /** For satisfying block storage policies. */
   private final StoragePolicySatisfier sps;
   private final BlockStorageMovementNeeded storageMovementNeeded =
       new BlockStorageMovementNeeded();
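
The only change in this hunk adds a period to the Javadoc summary, which matches checkstyle's JavadocStyle check: with its default settings, the first sentence of a Javadoc comment must end with a period. A minimal sketch of the pattern, with illustrative field names:

public class Example {
  /** Tracks pending block moves */   // flagged: summary sentence has no period
  private int pendingMovesBad;

  /** Tracks pending block moves. */  // passes JavadocStyle
  private int pendingMovesGood;
}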

StoragePolicySatisfyWorker.java

@@ -329,7 +329,7 @@ public class StoragePolicySatisfyWorker {
   /**
    * Block movement status code.
    */
-  public static enum BlockMovementStatus {
+  public enum BlockMovementStatus {
     /** Success. */
     DN_BLK_STORAGE_MOVEMENT_SUCCESS(0),
     /**
@@ -343,7 +343,7 @@ public class StoragePolicySatisfyWorker {
     private final int code;
-    private BlockMovementStatus(int code) {
+    BlockMovementStatus(int code) {
       this.code = code;
     }
@@ -365,7 +365,7 @@ public class StoragePolicySatisfyWorker {
     private final DatanodeInfo target;
     private final BlockMovementStatus status;
-    public BlockMovementResult(long trackId, long blockId,
+    BlockMovementResult(long trackId, long blockId,
         DatanodeInfo target, BlockMovementStatus status) {
       this.trackId = trackId;
       this.blockId = blockId;
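
All three edits in this file drop modifiers that the language already implies, which is what checkstyle's RedundantModifier check flags: a nested enum is implicitly static, an enum constructor is implicitly private, and the public on BlockMovementResult's constructor was presumably adding nothing for a nested helper class. A compilable sketch of the cleaned-up pattern (names are illustrative):

public class Worker {
  /** Movement status; writing 'static' here would be redundant. */
  public enum Status {
    OK(0), FAILURE(-1);

    private final int code;

    Status(int code) {  // implicitly private; spelling out 'private' is redundant
      this.code = code;
    }
  }
}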

StoragePolicySatisfier.java

@@ -714,10 +714,10 @@ public class StoragePolicySatisfier implements Runnable {
   }
   private static class StorageTypeNodePair {
-    public StorageType storageType = null;
-    public DatanodeDescriptor dn = null;
+    private StorageType storageType = null;
+    private DatanodeDescriptor dn = null;
-    public StorageTypeNodePair(StorageType storageType, DatanodeDescriptor dn) {
+    StorageTypeNodePair(StorageType storageType, DatanodeDescriptor dn) {
      this.storageType = storageType;
      this.dn = dn;
    }
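
This hunk tightens field visibility, in line with checkstyle's VisibilityModifier check, which wants instance fields private rather than public; the redundant public also comes off the constructor of the private nested class. A compilable sketch of the pattern, with illustrative names:

public class Satisfier {
  private static class Pair {
    private String first;   // these fields were literally 'public' before the fix
    private int second;

    Pair(String first, int second) {  // package-private is enough for a nested helper
      this.first = first;
      this.second = second;
    }
  }
}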

DatanodeProtocol.java

@@ -79,9 +79,8 @@ public interface DatanodeProtocol {
   final static int DNA_CACHE = 9; // cache blocks
   final static int DNA_UNCACHE = 10; // uncache blocks
   final static int DNA_ERASURE_CODING_RECONSTRUCTION = 11; // erasure coding reconstruction command
-  final static int DNA_BLOCK_STORAGE_MOVEMENT = 12; // block storage movement command
-  final static int DNA_DROP_SPS_WORK_COMMAND = 13; // block storage movement
-                                                   // command
+  int DNA_BLOCK_STORAGE_MOVEMENT = 12; // block storage movement command
+  int DNA_DROP_SPS_WORK_COMMAND = 13; // drop sps work command
   /**
    * Register Datanode.
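
Every field of a Java interface is implicitly public static final, so spelling out final static on a constant is redundant; this is another RedundantModifier fix. The change also replaces the old wrapped // command comment on DNA_DROP_SPS_WORK_COMMAND with the more accurate // drop sps work command. A minimal sketch:

public interface Actions {
  final static int CACHE = 9;  // legal, but 'final static' is implicit and redundant
  int UNCACHE = 10;            // identical meaning: implicitly public static final
}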

TestMover.java

@@ -126,8 +126,9 @@ public class TestMover {
       nnMap.put(nn, null);
     }
-    final List<NameNodeConnector> nncs = NameNodeConnector.newNameNodeConnectors(
-        nnMap, Mover.class.getSimpleName(), HdfsServerConstants.MOVER_ID_PATH, conf,
+    final List<NameNodeConnector> nncs = NameNodeConnector.
+        newNameNodeConnectors(nnMap, Mover.class.getSimpleName(),
+            HdfsServerConstants.MOVER_ID_PATH, conf,
         NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
     return new Mover(nncs.get(0), conf, new AtomicInteger(0), new HashMap<>());
   }
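
This hunk only re-wraps a long call, presumably to satisfy checkstyle's LineLength check (Hadoop limits source lines to 80 columns). Breaking after the dot on the class name is legal Java and keeps each continuation line within the limit. A self-contained sketch of the same wrapping style, with illustrative names:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class WrapExample {
  // A long static call, broken after the '.' to stay under 80 columns:
  static final List<String> NAMES = Collections.
      unmodifiableList(Arrays.asList("alpha", "beta", "gamma"));
}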

TestStoragePolicySatisfier.java

@@ -496,8 +496,8 @@ public class TestStoragePolicySatisfier {
       namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
       hdfsCluster.triggerHeartbeats();
-      // No block movement will be scheduled as there is no target node available
-      // with the required storage type.
+      // No block movement will be scheduled as there is no target node
+      // available with the required storage type.
       waitForAttemptedItems(1, 30000);
       DFSTestUtil.waitExpectedStorageType(
           file, StorageType.DISK, 3, 30000, dfs);
@@ -1174,14 +1174,14 @@ public class TestStoragePolicySatisfier {
   private void startAdditionalDNs(final Configuration conf,
       int newNodesRequired, int existingNodesNum, StorageType[][] newTypes,
-      int storagesPerDatanode, long capacity, final MiniDFSCluster cluster)
+      int storagesPerDn, long nodeCapacity, final MiniDFSCluster cluster)
       throws IOException {
     long[][] capacities;
     existingNodesNum += newNodesRequired;
-    capacities = new long[newNodesRequired][storagesPerDatanode];
+    capacities = new long[newNodesRequired][storagesPerDn];
     for (int i = 0; i < newNodesRequired; i++) {
-      for (int j = 0; j < storagesPerDatanode; j++) {
-        capacities[i][j] = capacity;
+      for (int j = 0; j < storagesPerDn; j++) {
+        capacities[i][j] = nodeCapacity;
       }
     }
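
The comment re-wrap in the first hunk is another 80-column fix. The parameter renames in the second (storagesPerDatanode to storagesPerDn, capacity to nodeCapacity) read like fixes for checkstyle's HiddenField check, which flags parameters that shadow same-named fields; that is an inference here, since the shadowed fields would sit outside the hunk. A compilable sketch of the pattern, with hypothetical names:

public class ClusterConfig {
  private long capacity;  // class field

  // A parameter named 'capacity' would shadow the field and trip HiddenField;
  // renaming it keeps the assignment unambiguous:
  void addNode(long nodeCapacity) {
    this.capacity += nodeCapacity;
  }
}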