HDFS-12141: [SPS]: Fix checkstyle warnings. Contributed by Rakesh R.

This commit is contained in:
Uma Maheswara Rao G 2017-07-17 10:24:06 -07:00 committed by Uma Maheswara Rao Gangumalla
parent 9e82e5a86e
commit 4bcf61c696
6 changed files with 19 additions and 19 deletions

View File

@@ -427,7 +427,7 @@ public class BlockManager implements BlockStatsMXBean {
private final BlockIdManager blockIdManager; private final BlockIdManager blockIdManager;
/** For satisfying block storage policies */ /** For satisfying block storage policies. */
private final StoragePolicySatisfier sps; private final StoragePolicySatisfier sps;
private final BlockStorageMovementNeeded storageMovementNeeded = private final BlockStorageMovementNeeded storageMovementNeeded =
new BlockStorageMovementNeeded(); new BlockStorageMovementNeeded();

View File

@@ -329,7 +329,7 @@ public class StoragePolicySatisfyWorker {
/** /**
* Block movement status code. * Block movement status code.
*/ */
public static enum BlockMovementStatus { public enum BlockMovementStatus {
/** Success. */ /** Success. */
DN_BLK_STORAGE_MOVEMENT_SUCCESS(0), DN_BLK_STORAGE_MOVEMENT_SUCCESS(0),
/** /**
@@ -343,7 +343,7 @@ public class StoragePolicySatisfyWorker {
private final int code; private final int code;
private BlockMovementStatus(int code) { BlockMovementStatus(int code) {
this.code = code; this.code = code;
} }
@@ -365,7 +365,7 @@ public class StoragePolicySatisfyWorker {
private final DatanodeInfo target; private final DatanodeInfo target;
private final BlockMovementStatus status; private final BlockMovementStatus status;
public BlockMovementResult(long trackId, long blockId, BlockMovementResult(long trackId, long blockId,
DatanodeInfo target, BlockMovementStatus status) { DatanodeInfo target, BlockMovementStatus status) {
this.trackId = trackId; this.trackId = trackId;
this.blockId = blockId; this.blockId = blockId;

View File

@@ -714,10 +714,10 @@ public class StoragePolicySatisfier implements Runnable {
} }
private static class StorageTypeNodePair { private static class StorageTypeNodePair {
public StorageType storageType = null; private StorageType storageType = null;
public DatanodeDescriptor dn = null; private DatanodeDescriptor dn = null;
public StorageTypeNodePair(StorageType storageType, DatanodeDescriptor dn) { StorageTypeNodePair(StorageType storageType, DatanodeDescriptor dn) {
this.storageType = storageType; this.storageType = storageType;
this.dn = dn; this.dn = dn;
} }

View File

@@ -79,9 +79,8 @@ public interface DatanodeProtocol {
final static int DNA_CACHE = 9; // cache blocks final static int DNA_CACHE = 9; // cache blocks
final static int DNA_UNCACHE = 10; // uncache blocks final static int DNA_UNCACHE = 10; // uncache blocks
final static int DNA_ERASURE_CODING_RECONSTRUCTION = 11; // erasure coding reconstruction command final static int DNA_ERASURE_CODING_RECONSTRUCTION = 11; // erasure coding reconstruction command
final static int DNA_BLOCK_STORAGE_MOVEMENT = 12; // block storage movement command int DNA_BLOCK_STORAGE_MOVEMENT = 12; // block storage movement command
final static int DNA_DROP_SPS_WORK_COMMAND = 13; // block storage movement int DNA_DROP_SPS_WORK_COMMAND = 13; // drop sps work command
// command
/** /**
* Register Datanode. * Register Datanode.

View File

@@ -126,9 +126,10 @@ public class TestMover {
nnMap.put(nn, null); nnMap.put(nn, null);
} }
final List<NameNodeConnector> nncs = NameNodeConnector.newNameNodeConnectors( final List<NameNodeConnector> nncs = NameNodeConnector.
nnMap, Mover.class.getSimpleName(), HdfsServerConstants.MOVER_ID_PATH, conf, newNameNodeConnectors(nnMap, Mover.class.getSimpleName(),
NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS); HdfsServerConstants.MOVER_ID_PATH, conf,
NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
return new Mover(nncs.get(0), conf, new AtomicInteger(0), new HashMap<>()); return new Mover(nncs.get(0), conf, new AtomicInteger(0), new HashMap<>());
} }

View File

@@ -496,8 +496,8 @@ public class TestStoragePolicySatisfier {
namesystem.getBlockManager().satisfyStoragePolicy(inode.getId()); namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
hdfsCluster.triggerHeartbeats(); hdfsCluster.triggerHeartbeats();
// No block movement will be scheduled as there is no target node available // No block movement will be scheduled as there is no target node
// with the required storage type. // available with the required storage type.
waitForAttemptedItems(1, 30000); waitForAttemptedItems(1, 30000);
DFSTestUtil.waitExpectedStorageType( DFSTestUtil.waitExpectedStorageType(
file, StorageType.DISK, 3, 30000, dfs); file, StorageType.DISK, 3, 30000, dfs);
@@ -1174,14 +1174,14 @@ public class TestStoragePolicySatisfier {
private void startAdditionalDNs(final Configuration conf, private void startAdditionalDNs(final Configuration conf,
int newNodesRequired, int existingNodesNum, StorageType[][] newTypes, int newNodesRequired, int existingNodesNum, StorageType[][] newTypes,
int storagesPerDatanode, long capacity, final MiniDFSCluster cluster) int storagesPerDn, long nodeCapacity, final MiniDFSCluster cluster)
throws IOException { throws IOException {
long[][] capacities; long[][] capacities;
existingNodesNum += newNodesRequired; existingNodesNum += newNodesRequired;
capacities = new long[newNodesRequired][storagesPerDatanode]; capacities = new long[newNodesRequired][storagesPerDn];
for (int i = 0; i < newNodesRequired; i++) { for (int i = 0; i < newNodesRequired; i++) {
for (int j = 0; j < storagesPerDatanode; j++) { for (int j = 0; j < storagesPerDn; j++) {
capacities[i][j] = capacity; capacities[i][j] = nodeCapacity;
} }
} }