diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a1d945f9d9b..d2b276187ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -346,6 +346,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8623. Refactor NameNode handling of invalid, corrupt, and
     under-recovery blocks. (Zhe Zhang via jing9)
 
+    HDFS-8653. Code cleanup for DatanodeManager, DatanodeDescriptor and
+    DatanodeStorageInfo. (Zhe Zhang via wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
index a0f35039e47..2f81ddfad74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java
@@ -452,8 +452,8 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
         file.getFullPathName(), cachedTotal, neededTotal);
   }
 
-  private String findReasonForNotCaching(CachedBlock cblock,
-          BlockInfo blockInfo) {
+  private String findReasonForNotCaching(CachedBlock cblock,
+      BlockInfo blockInfo) {
     if (blockInfo == null) {
       // Somehow, a cache report with the block arrived, but the block
       // reports from the DataNode haven't (yet?) described such a block.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 846ca338443..45a56b5730c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -66,7 +66,8 @@ public class DatanodeDescriptor extends DatanodeInfo {
 
   // Stores status of decommissioning.
   // If node is not decommissioning, do not use this object for anything.
-  public final DecommissioningStatus decommissioningStatus = new DecommissioningStatus();
+  public final DecommissioningStatus decommissioningStatus =
+      new DecommissioningStatus();
 
   private long curBlockReportId = 0;
 
@@ -117,7 +118,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
         return null;
       }
 
-      List<E> results = new ArrayList<E>();
+      List<E> results = new ArrayList<>();
       for(; !blockq.isEmpty() && numBlocks > 0; numBlocks--) {
         results.add(blockq.poll());
       }
@@ -137,7 +138,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   }
 
   private final Map<String, DatanodeStorageInfo> storageMap =
-      new HashMap<String, DatanodeStorageInfo>();
+      new HashMap<>();
 
   /**
    * A list of CachedBlock objects on this datanode.
@@ -219,12 +220,14 @@ public class DatanodeDescriptor extends DatanodeInfo {
   private long bandwidth;
 
   /** A queue of blocks to be replicated by this datanode */
-  private final BlockQueue<BlockTargetPair> replicateBlocks = new BlockQueue<BlockTargetPair>();
+  private final BlockQueue<BlockTargetPair> replicateBlocks =
+      new BlockQueue<>();
   /** A queue of blocks to be recovered by this datanode */
   private final BlockQueue<BlockInfoContiguousUnderConstruction> recoverBlocks =
-      new BlockQueue<BlockInfoContiguousUnderConstruction>();
+      new BlockQueue<>();
   /** A set of blocks to be invalidated by this datanode */
-  private final LightWeightHashSet<Block> invalidateBlocks = new LightWeightHashSet<Block>();
+  private final LightWeightHashSet<Block> invalidateBlocks =
+      new LightWeightHashSet<>();
 
   /* Variables for maintaining number of blocks scheduled to be written to
    * this storage. This count is approximate and might be slightly bigger
@@ -232,9 +235,9 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * while writing the block).
    */
   private EnumCounters<StorageType> currApproxBlocksScheduled
-      = new EnumCounters<StorageType>(StorageType.class);
+      = new EnumCounters<>(StorageType.class);
   private EnumCounters<StorageType> prevApproxBlocksScheduled
-      = new EnumCounters<StorageType>(StorageType.class);
+      = new EnumCounters<>(StorageType.class);
   private long lastBlocksScheduledRollTime = 0;
   private static final int BLOCKS_SCHEDULED_ROLL_INTERVAL = 600*1000; //10min
   private int volumeFailures = 0;
@@ -278,6 +281,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
       return storageMap.get(storageID);
     }
   }
+
   DatanodeStorageInfo[] getStorageInfos() {
     synchronized (storageMap) {
       final Collection<DatanodeStorageInfo> storages = storageMap.values();
@@ -323,7 +327,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
             Long.toHexString(curBlockReportId));
         iter.remove();
         if (zombies == null) {
-          zombies = new LinkedList<DatanodeStorageInfo>();
+          zombies = new LinkedList<>();
         }
         zombies.add(storageInfo);
       }
@@ -352,10 +356,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
    */
   boolean removeBlock(String storageID, BlockInfo b) {
     DatanodeStorageInfo s = getStorageInfo(storageID);
-    if (s != null) {
-      return s.removeBlock(b);
-    }
-    return false;
+    return s != null && s.removeBlock(b);
   }
 
   public void resetBlocks() {
@@ -451,7 +452,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
           + this.volumeFailures + " to " + volFailures);
       synchronized (storageMap) {
         failedStorageInfos =
-            new HashSet<DatanodeStorageInfo>(storageMap.values());
+            new HashSet<>(storageMap.values());
       }
     }
 
@@ -507,7 +508,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
     HashMap<String, DatanodeStorageInfo> excessStorages;
 
     // Init excessStorages with all known storages.
-    excessStorages = new HashMap<String, DatanodeStorageInfo>(storageMap);
+    excessStorages = new HashMap<>(storageMap);
 
     // Remove storages that the DN reported in the heartbeat.
     for (final StorageReport report : reports) {
@@ -544,7 +545,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
     private final List<Iterator<BlockInfo>> iterators;
 
     private BlockIterator(final DatanodeStorageInfo... storages) {
-      List<Iterator<BlockInfo>> iterators = new ArrayList<Iterator<BlockInfo>>();
+      List<Iterator<BlockInfo>> iterators = new ArrayList<>();
       for (DatanodeStorageInfo e : storages) {
         iterators.add(e.getBlockIterator());
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index c26d31019e1..90b173a7136 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -85,7 +85,7 @@ public class DatanodeManager {
    * Mapping: StorageID -> DatanodeDescriptor
    */
   private final Map<String, DatanodeDescriptor> datanodeMap
-      = new HashMap<String, DatanodeDescriptor>();
+      = new HashMap<>();
 
   /** Cluster network topology */
   private final NetworkTopology networktopology;
@@ -162,7 +162,7 @@ public class DatanodeManager {
    * Software version -> Number of datanodes with this version
    */
   private HashMap<String, Integer> datanodesSoftwareVersions =
-      new HashMap<String, Integer>(4, 0.75f);
+      new HashMap<>(4, 0.75f);
 
   /**
    * The minimum time between resending caching directives to Datanodes,
@@ -217,7 +217,7 @@ public class DatanodeManager {
     // locations of those hosts in the include list and store the mapping
     // in the cache; so future calls to resolve will be fast.
     if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
-      final ArrayList<String> locations = new ArrayList<String>();
+      final ArrayList<String> locations = new ArrayList<>();
       for (InetSocketAddress addr : hostFileManager.getIncludes()) {
         locations.add(addr.getAddress().getHostAddress());
       }
@@ -370,7 +370,7 @@ public class DatanodeManager {
     // here we should get node but not datanode only .
     Node client = getDatanodeByHost(targethost);
     if (client == null) {
-      List<String> hosts = new ArrayList<String> (1);
+      List<String> hosts = new ArrayList<> (1);
       hosts.add(targethost);
       List<String> resolvedHosts = dnsToSwitchMapping.resolve(hosts);
       if (resolvedHosts != null && !resolvedHosts.isEmpty()) {
@@ -522,7 +522,7 @@ public class DatanodeManager {
   void datanodeDump(final PrintWriter out) {
     synchronized (datanodeMap) {
       Map<String,DatanodeDescriptor> sortedDatanodeMap =
-          new TreeMap<String,DatanodeDescriptor>(datanodeMap);
+          new TreeMap<>(datanodeMap);
       out.println("Metasave: Number of datanodes: " + datanodeMap.size());
       for (DatanodeDescriptor node : sortedDatanodeMap.values()) {
         out.println(node.dumpDatanode());
@@ -660,7 +660,7 @@ public class DatanodeManager {
 
   private void countSoftwareVersions() {
     synchronized(datanodeMap) {
-      HashMap<String, Integer> versionCount = new HashMap<String, Integer>();
+      HashMap<String, Integer> versionCount = new HashMap<>();
       for(DatanodeDescriptor dn: datanodeMap.values()) {
         // Check isAlive too because right after removeDatanode(),
         // isDatanodeDead() is still true
@@ -677,7 +677,7 @@ public class DatanodeManager {
 
   public HashMap<String, Integer> getDatanodesSoftwareVersions() {
     synchronized(datanodeMap) {
-      return new HashMap<String, Integer> (this.datanodesSoftwareVersions);
+      return new HashMap<> (this.datanodesSoftwareVersions);
     }
   }
 
@@ -710,7 +710,7 @@ public class DatanodeManager {
    */
   private String resolveNetworkLocation (DatanodeID node)
       throws UnresolvedTopologyException {
-    List<String> names = new ArrayList<String>(1);
+    List<String> names = new ArrayList<>(1);
     if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
       names.add(node.getIpAddr());
     } else {
@@ -999,7 +999,7 @@ public class DatanodeManager {
       // If the network location is invalid, clear the cached mappings
       // so that we have a chance to re-add this DataNode with the
      // correct network location later.
-      List<String> invalidNodeNames = new ArrayList<String>(3);
+      List<String> invalidNodeNames = new ArrayList<>(3);
       // clear cache for nodes in IP or Hostname
       invalidNodeNames.add(nodeReg.getIpAddr());
       invalidNodeNames.add(nodeReg.getHostName());
@@ -1274,7 +1274,7 @@ public class DatanodeManager {
     final HostFileManager.HostSet excludedNodes = hostFileManager.getExcludes();
 
     synchronized(datanodeMap) {
-      nodes = new ArrayList<DatanodeDescriptor>(datanodeMap.size());
+      nodes = new ArrayList<>(datanodeMap.size());
       for (DatanodeDescriptor dn : datanodeMap.values()) {
         final boolean isDead = isDatanodeDead(dn);
         final boolean isDecommissioning = dn.isDecommissionInProgress();
@@ -1350,7 +1350,7 @@ public class DatanodeManager {
       VolumeFailureSummary volumeFailureSummary) throws IOException {
     synchronized (heartbeatManager) {
       synchronized (datanodeMap) {
-        DatanodeDescriptor nodeinfo = null;
+        DatanodeDescriptor nodeinfo;
         try {
           nodeinfo = getDatanode(nodeReg);
         } catch(UnregisteredNodeException e) {
@@ -1388,7 +1388,7 @@ public class DatanodeManager {
             final DatanodeStorageInfo[] storages = b.getExpectedStorageLocations();
             // Skip stale nodes during recovery - not heart beated for some time (30s by default).
             final List<DatanodeStorageInfo> recoveryLocations =
-                new ArrayList<DatanodeStorageInfo>(storages.length);
+                new ArrayList<>(storages.length);
             for (int i = 0; i < storages.length; i++) {
               if (!storages[i].getDatanodeDescriptor().isStale(staleInterval)) {
                 recoveryLocations.add(storages[i]);
@@ -1430,7 +1430,7 @@ public class DatanodeManager {
           return new DatanodeCommand[] { brCommand };
         }
 
-        final List<DatanodeCommand> cmds = new ArrayList<DatanodeCommand>();
+        final List<DatanodeCommand> cmds = new ArrayList<>();
         //check pending replication
         List<BlockTargetPair> pendingList = nodeinfo.getReplicationCommand(
             maxTransfers);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index 65b83e1a247..92841a634cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -37,8 +37,9 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 public class DatanodeStorageInfo {
   public static final DatanodeStorageInfo[] EMPTY_ARRAY = {};
 
-  public static DatanodeInfo[] toDatanodeInfos(DatanodeStorageInfo[] storages) {
-    return toDatanodeInfos(Arrays.asList(storages));
+  public static DatanodeInfo[] toDatanodeInfos(
+      DatanodeStorageInfo[] storages) {
+    return storages == null ? null: toDatanodeInfos(Arrays.asList(storages));
   }
 
   static DatanodeInfo[] toDatanodeInfos(List<DatanodeStorageInfo> storages) {
     final DatanodeInfo[] datanodes = new DatanodeInfo[storages.size()];
@@ -58,6 +59,9 @@ public class DatanodeStorageInfo {
   }
 
   public static String[] toStorageIDs(DatanodeStorageInfo[] storages) {
+    if (storages == null) {
+      return null;
+    }
     String[] storageIDs = new String[storages.length];
     for(int i = 0; i < storageIDs.length; i++) {
       storageIDs[i] = storages[i].getStorageID();
@@ -66,6 +70,9 @@ public class DatanodeStorageInfo {
   }
 
   public static StorageType[] toStorageTypes(DatanodeStorageInfo[] storages) {
+    if (storages == null) {
+      return null;
+    }
     StorageType[] storageTypes = new StorageType[storages.length];
     for(int i = 0; i < storageTypes.length; i++) {
       storageTypes[i] = storages[i].getStorageType();
@@ -380,6 +387,6 @@ public class DatanodeStorageInfo {
   }
 
   static enum AddBlockResult {
-    ADDED, REPLACED, ALREADY_EXIST;
+    ADDED, REPLACED, ALREADY_EXIST
   }
 }