diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 988067cf933..8b7abaa3ad8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -427,7 +427,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   private final BlockIdManager blockIdManager;
 
-  /** For satisfying block storage policies */
+  /** For satisfying block storage policies. */
   private final StoragePolicySatisfier sps;
   private final BlockStorageMovementNeeded storageMovementNeeded =
       new BlockStorageMovementNeeded();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index f4f97dd1374..196cd582004 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -329,7 +329,7 @@ public class StoragePolicySatisfyWorker {
   /**
    * Block movement status code.
    */
-  public static enum BlockMovementStatus {
+  public enum BlockMovementStatus {
     /** Success. */
     DN_BLK_STORAGE_MOVEMENT_SUCCESS(0),
     /**
@@ -343,7 +343,7 @@ public class StoragePolicySatisfyWorker {
 
     private final int code;
 
-    private BlockMovementStatus(int code) {
+    BlockMovementStatus(int code) {
       this.code = code;
     }
 
@@ -365,7 +365,7 @@ public class StoragePolicySatisfyWorker {
     private final DatanodeInfo target;
     private final BlockMovementStatus status;
 
-    public BlockMovementResult(long trackId, long blockId,
+    BlockMovementResult(long trackId, long blockId,
         DatanodeInfo target, BlockMovementStatus status) {
       this.trackId = trackId;
       this.blockId = blockId;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 00b4cd0260f..af3b7f2f670 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -714,10 +714,10 @@ public class StoragePolicySatisfier implements Runnable {
   }
 
   private static class StorageTypeNodePair {
-    public StorageType storageType = null;
-    public DatanodeDescriptor dn = null;
+    private StorageType storageType = null;
+    private DatanodeDescriptor dn = null;
 
-    public StorageTypeNodePair(StorageType storageType, DatanodeDescriptor dn) {
+    StorageTypeNodePair(StorageType storageType, DatanodeDescriptor dn) {
       this.storageType = storageType;
       this.dn = dn;
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
index 892efb3a5d4..5e1f14883f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
@@ -79,9 +79,8 @@ public interface DatanodeProtocol {
   final static int DNA_CACHE = 9;       // cache blocks
   final static int DNA_UNCACHE = 10;    // uncache blocks
   final static int DNA_ERASURE_CODING_RECONSTRUCTION = 11; // erasure coding reconstruction command
-  final static int DNA_BLOCK_STORAGE_MOVEMENT = 12; // block storage movement command
-  final static int DNA_DROP_SPS_WORK_COMMAND = 13; // block storage movement
-                                                    // command
+  int DNA_BLOCK_STORAGE_MOVEMENT = 12; // block storage movement command
+  int DNA_DROP_SPS_WORK_COMMAND = 13; // drop sps work command
 
   /**
    * Register Datanode.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
index c396387e63c..f0f264c38dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java
@@ -126,9 +126,10 @@ public class TestMover {
       nnMap.put(nn, null);
     }
 
-    final List<NameNodeConnector> nncs = NameNodeConnector.newNameNodeConnectors(
-        nnMap, Mover.class.getSimpleName(), HdfsServerConstants.MOVER_ID_PATH, conf,
-        NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
+    final List<NameNodeConnector> nncs = NameNodeConnector.
+        newNameNodeConnectors(nnMap, Mover.class.getSimpleName(),
+        HdfsServerConstants.MOVER_ID_PATH, conf,
+        NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS);
     return new Mover(nncs.get(0), conf, new AtomicInteger(0), new HashMap<>());
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 10ceae7e7e8..7f96003007c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -496,8 +496,8 @@ public class TestStoragePolicySatisfier {
     namesystem.getBlockManager().satisfyStoragePolicy(inode.getId());
     hdfsCluster.triggerHeartbeats();
 
-    // No block movement will be scheduled as there is no target node available
-    // with the required storage type.
+    // No block movement will be scheduled as there is no target node
+    // available with the required storage type.
     waitForAttemptedItems(1, 30000);
     DFSTestUtil.waitExpectedStorageType(
         file, StorageType.DISK, 3, 30000, dfs);
@@ -1174,14 +1174,14 @@ public class TestStoragePolicySatisfier {
 
   private void startAdditionalDNs(final Configuration conf,
       int newNodesRequired, int existingNodesNum, StorageType[][] newTypes,
-      int storagesPerDatanode, long capacity, final MiniDFSCluster cluster)
+      int storagesPerDn, long nodeCapacity, final MiniDFSCluster cluster)
       throws IOException {
     long[][] capacities;
     existingNodesNum += newNodesRequired;
-    capacities = new long[newNodesRequired][storagesPerDatanode];
+    capacities = new long[newNodesRequired][storagesPerDn];
     for (int i = 0; i < newNodesRequired; i++) {
-      for (int j = 0; j < storagesPerDatanode; j++) {
-        capacities[i][j] = capacity;
+      for (int j = 0; j < storagesPerDn; j++) {
+        capacities[i][j] = nodeCapacity;
       }
     }