From 2ffd84273ac490724fe7e7825664bb6d09ef0e99 Mon Sep 17 00:00:00 2001
From: Andrew Wang
Date: Mon, 29 Jun 2015 12:12:34 -0700
Subject: [PATCH] HDFS-8653. Code cleanup for DatanodeManager, DatanodeDescriptor and DatanodeStorageInfo. Contributed by Zhe Zhang.

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt      |  3 ++
 .../CacheReplicationMonitor.java                 |  4 +--
 .../blockmanagement/DatanodeDescriptor.java      | 33 ++++++++++---------
 .../blockmanagement/DatanodeManager.java         | 26 +++++++--------
 .../blockmanagement/DatanodeStorageInfo.java     | 13 ++++++--
 5 files changed, 45 insertions(+), 34 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e55f3404b9b..0c56f2b08fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -682,6 +682,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8623. Refactor NameNode handling of invalid, corrupt, and
     under-recovery blocks. (Zhe Zhang via jing9)
 
+    HDFS-8653. Code cleanup for DatanodeManager, DatanodeDescriptor and
+    DatanodeStorageInfo. (Zhe Zhang via wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
index a0f35039e47..2f81ddfad74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
@@ -452,8 +452,8 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
         file.getFullPathName(), cachedTotal, neededTotal);
   }
 
-  private String findReasonForNotCaching(CachedBlock cblock, 
-    BlockInfo blockInfo) {
+  private String findReasonForNotCaching(CachedBlock cblock,
+      BlockInfo blockInfo) {
     if (blockInfo == null) {
       // Somehow, a cache report with the block arrived, but the block
       // reports from the DataNode haven't (yet?) described such a block.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index dd7b3011df4..99def6b1f5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -64,7 +64,8 @@ public class DatanodeDescriptor extends DatanodeInfo {
 
   // Stores status of decommissioning.
   // If node is not decommissioning, do not use this object for anything.
-  public final DecommissioningStatus decommissioningStatus = new DecommissioningStatus();
+  public final DecommissioningStatus decommissioningStatus =
+      new DecommissioningStatus();
 
   private long curBlockReportId = 0;
 
@@ -115,7 +116,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
         return null;
       }
 
-      List<E> results = new ArrayList<E>();
+      List<E> results = new ArrayList<>();
       for(; !blockq.isEmpty() && numBlocks > 0; numBlocks--) {
         results.add(blockq.poll());
       }
@@ -135,7 +136,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   }
 
   private final Map<String, DatanodeStorageInfo> storageMap =
-      new HashMap<String, DatanodeStorageInfo>();
+      new HashMap<>();
 
   /**
    * A list of CachedBlock objects on this datanode.
@@ -217,12 +218,14 @@ public class DatanodeDescriptor extends DatanodeInfo {
   private long bandwidth;
 
   /** A queue of blocks to be replicated by this datanode */
-  private final BlockQueue<BlockTargetPair> replicateBlocks = new BlockQueue<BlockTargetPair>();
+  private final BlockQueue<BlockTargetPair> replicateBlocks =
+      new BlockQueue<>();
   /** A queue of blocks to be recovered by this datanode */
   private final BlockQueue<BlockInfoContiguousUnderConstruction> recoverBlocks =
-      new BlockQueue<BlockInfoContiguousUnderConstruction>();
+      new BlockQueue<>();
   /** A set of blocks to be invalidated by this datanode */
-  private final LightWeightHashSet<Block> invalidateBlocks = new LightWeightHashSet<Block>();
+  private final LightWeightHashSet<Block> invalidateBlocks =
+      new LightWeightHashSet<>();
 
   /* Variables for maintaining number of blocks scheduled to be written to
    * this storage. This count is approximate and might be slightly bigger
@@ -230,9 +233,9 @@
    * while writing the block).
    */
   private EnumCounters<StorageType> currApproxBlocksScheduled
-      = new EnumCounters<StorageType>(StorageType.class);
+      = new EnumCounters<>(StorageType.class);
   private EnumCounters<StorageType> prevApproxBlocksScheduled
-      = new EnumCounters<StorageType>(StorageType.class);
+      = new EnumCounters<>(StorageType.class);
   private long lastBlocksScheduledRollTime = 0;
   private static final int BLOCKS_SCHEDULED_ROLL_INTERVAL = 600*1000; //10min
   private int volumeFailures = 0;
@@ -276,6 +279,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
       return storageMap.get(storageID);
     }
   }
+
   DatanodeStorageInfo[] getStorageInfos() {
     synchronized (storageMap) {
       final Collection<DatanodeStorageInfo> storages = storageMap.values();
@@ -321,7 +325,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
               Long.toHexString(curBlockReportId));
           iter.remove();
           if (zombies == null) {
-            zombies = new LinkedList<DatanodeStorageInfo>();
+            zombies = new LinkedList<>();
           }
           zombies.add(storageInfo);
         }
@@ -350,10 +354,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
    */
   boolean removeBlock(String storageID, BlockInfo b) {
     DatanodeStorageInfo s = getStorageInfo(storageID);
-    if (s != null) {
-      return s.removeBlock(b);
-    }
-    return false;
+    return s != null && s.removeBlock(b);
   }
 
   public void resetBlocks() {
@@ -449,7 +450,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
           + this.volumeFailures + " to " + volFailures);
       synchronized (storageMap) {
         failedStorageInfos =
-            new HashSet<DatanodeStorageInfo>(storageMap.values());
+            new HashSet<>(storageMap.values());
       }
     }
 
@@ -505,7 +506,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
     HashMap<String, DatanodeStorageInfo> excessStorages;
 
     // Init excessStorages with all known storages.
-    excessStorages = new HashMap<String, DatanodeStorageInfo>(storageMap);
+    excessStorages = new HashMap<>(storageMap);
 
     // Remove storages that the DN reported in the heartbeat.
     for (final StorageReport report : reports) {
@@ -542,7 +543,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
    private final List<Iterator<BlockInfo>> iterators;
 
    private BlockIterator(final DatanodeStorageInfo... storages) {
-      List<Iterator<BlockInfo>> iterators = new ArrayList<Iterator<BlockInfo>>();
+      List<Iterator<BlockInfo>> iterators = new ArrayList<>();
      for (DatanodeStorageInfo e : storages) {
        iterators.add(e.getBlockIterator());
      }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 8143fb44a11..4266004f49c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -85,7 +85,7 @@ public class DatanodeManager {
    * Mapping: StorageID -> DatanodeDescriptor
    */
   private final Map<String, DatanodeDescriptor> datanodeMap
-    = new HashMap<String, DatanodeDescriptor>();
+    = new HashMap<>();
 
   /** Cluster network topology */
   private final NetworkTopology networktopology;
@@ -162,7 +162,7 @@ public class DatanodeManager {
    * Software version -> Number of datanodes with this version
    */
   private HashMap<String, Integer> datanodesSoftwareVersions =
-    new HashMap<String, Integer>(4, 0.75f);
+    new HashMap<>(4, 0.75f);
 
   /**
    * The minimum time between resending caching directives to Datanodes,
@@ -217,7 +217,7 @@ public class DatanodeManager {
     // locations of those hosts in the include list and store the mapping
     // in the cache; so future calls to resolve will be fast.
     if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
-      final ArrayList<String> locations = new ArrayList<String>();
+      final ArrayList<String> locations = new ArrayList<>();
       for (InetSocketAddress addr : hostFileManager.getIncludes()) {
         locations.add(addr.getAddress().getHostAddress());
       }
@@ -370,7 +370,7 @@ public class DatanodeManager {
     // here we should get node but not datanode only .
     Node client = getDatanodeByHost(targethost);
     if (client == null) {
-      List<String> hosts = new ArrayList<String> (1);
+      List<String> hosts = new ArrayList<> (1);
       hosts.add(targethost);
       List<String> resolvedHosts = dnsToSwitchMapping.resolve(hosts);
       if (resolvedHosts != null && !resolvedHosts.isEmpty()) {
@@ -522,7 +522,7 @@ public class DatanodeManager {
   void datanodeDump(final PrintWriter out) {
     synchronized (datanodeMap) {
       Map<String,DatanodeDescriptor> sortedDatanodeMap =
-          new TreeMap<String,DatanodeDescriptor>(datanodeMap);
+          new TreeMap<>(datanodeMap);
       out.println("Metasave: Number of datanodes: " + datanodeMap.size());
       for (DatanodeDescriptor node : sortedDatanodeMap.values()) {
         out.println(node.dumpDatanode());
@@ -660,7 +660,7 @@ public class DatanodeManager {
 
   private void countSoftwareVersions() {
     synchronized(datanodeMap) {
-      HashMap<String, Integer> versionCount = new HashMap<String, Integer>();
+      HashMap<String, Integer> versionCount = new HashMap<>();
       for(DatanodeDescriptor dn: datanodeMap.values()) {
         // Check isAlive too because right after removeDatanode(),
         // isDatanodeDead() is still true
@@ -677,7 +677,7 @@ public class DatanodeManager {
 
   public HashMap<String, Integer> getDatanodesSoftwareVersions() {
     synchronized(datanodeMap) {
-      return new HashMap<String, Integer> (this.datanodesSoftwareVersions);
+      return new HashMap<> (this.datanodesSoftwareVersions);
     }
   }
 
@@ -710,7 +710,7 @@ public class DatanodeManager {
    */
   private String resolveNetworkLocation (DatanodeID node)
       throws UnresolvedTopologyException {
-    List<String> names = new ArrayList<String>(1);
+    List<String> names = new ArrayList<>(1);
     if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
       names.add(node.getIpAddr());
     } else {
@@ -1000,7 +1000,7 @@ public class DatanodeManager {
       // If the network location is invalid, clear the cached mappings
       // so that we have a chance to re-add this DataNode with the
       // correct network location later.
-      List<String> invalidNodeNames = new ArrayList<String>(3);
+      List<String> invalidNodeNames = new ArrayList<>(3);
       // clear cache for nodes in IP or Hostname
       invalidNodeNames.add(nodeReg.getIpAddr());
       invalidNodeNames.add(nodeReg.getHostName());
@@ -1275,7 +1275,7 @@ public class DatanodeManager {
     final HostFileManager.HostSet excludedNodes = hostFileManager.getExcludes();
 
     synchronized(datanodeMap) {
-      nodes = new ArrayList<DatanodeDescriptor>(datanodeMap.size());
+      nodes = new ArrayList<>(datanodeMap.size());
       for (DatanodeDescriptor dn : datanodeMap.values()) {
         final boolean isDead = isDatanodeDead(dn);
         final boolean isDecommissioning = dn.isDecommissionInProgress();
@@ -1351,7 +1351,7 @@ public class DatanodeManager {
       VolumeFailureSummary volumeFailureSummary) throws IOException {
     synchronized (heartbeatManager) {
       synchronized (datanodeMap) {
-        DatanodeDescriptor nodeinfo = null;
+        DatanodeDescriptor nodeinfo;
         try {
           nodeinfo = getDatanode(nodeReg);
         } catch(UnregisteredNodeException e) {
@@ -1389,7 +1389,7 @@ public class DatanodeManager {
             final DatanodeStorageInfo[] storages = b.getExpectedStorageLocations();
             // Skip stale nodes during recovery - not heart beated for some time (30s by default).
             final List<DatanodeStorageInfo> recoveryLocations =
-                new ArrayList<DatanodeStorageInfo>(storages.length);
+                new ArrayList<>(storages.length);
             for (int i = 0; i < storages.length; i++) {
               if (!storages[i].getDatanodeDescriptor().isStale(staleInterval)) {
                 recoveryLocations.add(storages[i]);
               }
@@ -1431,7 +1431,7 @@ public class DatanodeManager {
         return new DatanodeCommand[] { brCommand };
       }
 
-      final List<DatanodeCommand> cmds = new ArrayList<DatanodeCommand>();
+      final List<DatanodeCommand> cmds = new ArrayList<>();
       //check pending replication
       List<BlockTargetPair> pendingList = nodeinfo.getReplicationCommand(
           maxTransfers);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index 65b83e1a247..92841a634cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -37,8 +37,9 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 public class DatanodeStorageInfo {
   public static final DatanodeStorageInfo[] EMPTY_ARRAY = {};
 
-  public static DatanodeInfo[] toDatanodeInfos(DatanodeStorageInfo[] storages) {
-    return toDatanodeInfos(Arrays.asList(storages));
+  public static DatanodeInfo[] toDatanodeInfos(
+      DatanodeStorageInfo[] storages) {
+    return storages == null ? null: toDatanodeInfos(Arrays.asList(storages));
   }
   static DatanodeInfo[] toDatanodeInfos(List<DatanodeStorageInfo> storages) {
     final DatanodeInfo[] datanodes = new DatanodeInfo[storages.size()];
@@ -58,6 +59,9 @@ public class DatanodeStorageInfo {
   }
 
   public static String[] toStorageIDs(DatanodeStorageInfo[] storages) {
+    if (storages == null) {
+      return null;
+    }
     String[] storageIDs = new String[storages.length];
     for(int i = 0; i < storageIDs.length; i++) {
       storageIDs[i] = storages[i].getStorageID();
@@ -66,6 +70,9 @@ public class DatanodeStorageInfo {
   }
 
   public static StorageType[] toStorageTypes(DatanodeStorageInfo[] storages) {
+    if (storages == null) {
+      return null;
+    }
     StorageType[] storageTypes = new StorageType[storages.length];
     for(int i = 0; i < storageTypes.length; i++) {
       storageTypes[i] = storages[i].getStorageType();
@@ -380,6 +387,6 @@ public class DatanodeStorageInfo {
   }
 
   static enum AddBlockResult {
-    ADDED, REPLACED, ALREADY_EXIST;
+    ADDED, REPLACED, ALREADY_EXIST
   }
 }
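
The individual hunks above repeat a small number of cleanup patterns. The following sketch is illustrative only and is not part of the patch; it is a self-contained Java file using hypothetical names (CleanupSketch, Storage) that shows the same patterns in isolation: the Java 7 diamond operator, a null-guarded array converter in the style of toStorageIDs(), a null check collapsed into a single short-circuit return as in DatanodeDescriptor#removeBlock(), and an enum constant list without a redundant trailing semicolon.

// Illustrative sketch only, not part of the patch above. All names here
// (CleanupSketch, Storage) are hypothetical stand-ins for the HDFS classes.
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class CleanupSketch {

  /** Hypothetical stand-in for a per-storage record such as DatanodeStorageInfo. */
  static class Storage {
    private final String id;
    Storage(String id) { this.id = id; }
    String getStorageID() { return id; }
    // Dummy predicate standing in for DatanodeStorageInfo#removeBlock().
    boolean removeBlock(long blockId) { return blockId % 2 == 0; }
  }

  // Diamond operator: the compiler infers <String, Storage> from the
  // declaration, so the initializer no longer repeats the type arguments.
  private final Map<String, Storage> storageMap = new HashMap<>();

  // Null-guard pattern used by toStorageIDs()/toStorageTypes() in the patch:
  // a null input now yields null instead of a NullPointerException.
  static String[] toStorageIDs(Storage[] storages) {
    if (storages == null) {
      return null;
    }
    String[] ids = new String[storages.length];
    for (int i = 0; i < ids.length; i++) {
      ids[i] = storages[i].getStorageID();
    }
    return ids;
  }

  // Collapsed conditional, mirroring DatanodeDescriptor#removeBlock():
  // "if (s != null) { return s.removeBlock(b); } return false;" becomes
  // one short-circuit expression.
  boolean removeBlock(String storageID, long blockId) {
    Storage s = storageMap.get(storageID);
    return s != null && s.removeBlock(blockId);
  }

  // Enum constants need no trailing semicolon when nothing follows them.
  enum AddBlockResult {
    ADDED, REPLACED, ALREADY_EXIST
  }

  public static void main(String[] args) {
    CleanupSketch sketch = new CleanupSketch();
    sketch.storageMap.put("s1", new Storage("s1"));
    List<String> ids = new ArrayList<>();        // diamond again
    for (String id : toStorageIDs(new Storage[] { new Storage("s1") })) {
      ids.add(id);
    }
    System.out.println(ids + " removed=" + sketch.removeBlock("s1", 42L));
  }
}

Running the sketch prints the converted storage ID list and the result of the simplified removeBlock() check; with a null Storage[] argument, toStorageIDs() simply returns null, which is the behavior the patch adds to the converters in DatanodeStorageInfo.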