diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java index f97c2f2c80b..b89998d9895 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java @@ -383,7 +383,7 @@ public class DelegationTokenSecretManager namesystem.logUpdateMasterKey(key); } } finally { - namesystem.readUnlock(); + namesystem.readUnlock("logUpdateMasterKey"); } } catch (InterruptedException ie) { // AbstractDelegationTokenManager may crash if an exception is thrown. @@ -412,7 +412,7 @@ public class DelegationTokenSecretManager namesystem.logExpireDelegationToken(dtId); } } finally { - namesystem.readUnlock(); + namesystem.readUnlock("logExpireToken"); } } catch (InterruptedException ie) { // AbstractDelegationTokenManager may crash if an exception is thrown. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 2b647704e3f..494c2f01c99 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -2017,7 +2017,7 @@ public class BlockManager implements BlockStatsMXBean { blocksToReconstruct = neededReconstruction .chooseLowRedundancyBlocks(blocksToProcess, reset); } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("computeBlockReconstructionWork"); } return computeReconstructionWorkForBlocks(blocksToReconstruct); } @@ -2051,7 +2051,7 @@ public class BlockManager implements BlockStatsMXBean { } } } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("computeReconstructionWorkForBlocks"); } // Step 2: choose target nodes for each reconstruction task @@ -2092,7 +2092,7 @@ public class BlockManager implements BlockStatsMXBean { } } } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("computeReconstructionWorkForBlocks"); } if (blockLog.isDebugEnabled()) { @@ -2577,7 +2577,7 @@ public class BlockManager implements BlockStatsMXBean { } } } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("processPendingReconstructions"); } /* If we know the target datanodes where the replication timedout, * we could invoke decBlocksScheduled() on it. Its ok for now. 
@@ -2826,7 +2826,7 @@ public class BlockManager implements BlockStatsMXBean { storageInfo.receivedBlockReport(); } finally { endTime = Time.monotonicNow(); - namesystem.writeUnlock(); + namesystem.writeUnlock("processReport"); } if(blockLog.isDebugEnabled()) { @@ -2870,7 +2870,7 @@ public class BlockManager implements BlockStatsMXBean { context.getTotalRpcs(), Long.toHexString(context.getReportId())); } } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("removeBRLeaseIfNeeded"); } } @@ -2908,7 +2908,7 @@ public class BlockManager implements BlockStatsMXBean { postponedMisreplicatedBlocks.addAll(rescannedMisreplicatedBlocks); rescannedMisreplicatedBlocks.clear(); long endSize = postponedMisreplicatedBlocks.size(); - namesystem.writeUnlock(); + namesystem.writeUnlock("rescanPostponedMisreplicatedBlocks"); LOG.info("Rescan of postponedMisreplicatedBlocks completed in {}" + " msecs. {} blocks are left. {} blocks were removed.", (Time.monotonicNow() - startTime), endSize, (startSize - endSize)); @@ -3775,7 +3775,7 @@ public class BlockManager implements BlockStatsMXBean { break; } } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("processMisReplicatesAsync"); // Make sure it is out of the write lock for sufficiently long time. 
Thread.sleep(sleepDuration); } @@ -3830,7 +3830,7 @@ public class BlockManager implements BlockStatsMXBean { "Re-scanned block {}, result is {}", blk, r); } } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("processMisReplicatedBlocks"); } } } catch (InterruptedException ex) { @@ -4553,7 +4553,7 @@ public class BlockManager implements BlockStatsMXBean { // testPlacementWithLocalRackNodesDecommissioned, it is not protected by // lock, only when called by DatanodeManager.refreshNodes have writeLock if (namesystem.hasWriteLock()) { - namesystem.writeUnlock(); + namesystem.writeUnlock("processExtraRedundancyBlocksOnInService"); try { Thread.sleep(1); } catch (InterruptedException e) { @@ -4685,7 +4685,7 @@ public class BlockManager implements BlockStatsMXBean { repl.outOfServiceReplicas(), oldExpectedReplicas); } } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("updateNeededReconstructions"); } } @@ -4742,7 +4742,7 @@ public class BlockManager implements BlockStatsMXBean { return 0; } } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("invalidateWorkForOneNode"); } blockLog.debug("BLOCK* {}: ask {} to delete {}", getClass().getSimpleName(), dn, toInvalidate); @@ -4974,7 +4974,7 @@ public class BlockManager implements BlockStatsMXBean { } } } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("markedDeleteBlockScrubberThread"); } } } @@ -5092,7 +5092,7 @@ public class BlockManager implements BlockStatsMXBean { this.updateState(); this.scheduledReplicationBlocksCount = workFound; } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("computeDatanodeWork"); } workFound += this.computeInvalidateWork(nodesToProcess); return workFound; @@ -5332,7 +5332,7 @@ public class BlockManager implements BlockStatsMXBean { action = queue.poll(); } while (action != null); } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("processQueue"); metrics.addBlockOpsBatched(processed - 1); } } catch 
(InterruptedException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java index de26929c616..4349ba01401 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java @@ -670,7 +670,7 @@ class BlockManagerSafeMode { break; } } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("leaveSafeMode"); } try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java index 4cc404f55c5..1e5f952040d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java @@ -302,7 +302,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable { rescanCachedBlockMap(); blockManager.getDatanodeManager().resetLastCachingDirectiveSentTime(); } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("cacheReplicationMonitorRescan"); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminBackoffMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminBackoffMonitor.java index eb65b3843a9..9b38f2353fb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminBackoffMonitor.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminBackoffMonitor.java @@ -215,7 +215,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase processPendingNodes(); } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("DatanodeAdminMonitorV2Thread"); } // After processing the above, various parts of the check() method will // take and drop the read / write lock as needed. Aside from the @@ -345,12 +345,12 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase // which added the node to the cancelled list. Therefore expired // maintenance nodes do not need to be added to the toRemove list. dnAdmin.stopMaintenance(dn); - namesystem.writeUnlock(); + namesystem.writeUnlock("processMaintenanceNodes"); namesystem.writeLock(); } } } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("processMaintenanceNodes"); } } @@ -409,7 +409,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase } } } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("processCompletedNodes"); } } @@ -531,7 +531,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase // replication if (blocksProcessed >= blocksPerLock) { blocksProcessed = 0; - namesystem.writeUnlock(); + namesystem.writeUnlock("moveBlocksToPending"); namesystem.writeLock(); } blocksProcessed++; @@ -553,7 +553,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase } } } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("moveBlocksToPending"); } LOG.debug("{} blocks are now pending replication", pendingCount); } @@ -637,7 +637,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase try { storage = dn.getStorageInfos(); } finally { - namesystem.readUnlock(); + namesystem.readUnlock("scanDatanodeStorage"); } for (DatanodeStorageInfo s : storage) { @@ -667,7 +667,7 @@ public class 
DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase numBlocksChecked++; } } finally { - namesystem.readUnlock(); + namesystem.readUnlock("scanDatanodeStorage"); } } } @@ -722,7 +722,7 @@ public class DatanodeAdminBackoffMonitor extends DatanodeAdminMonitorBase suspectBlocks.getOutOfServiceBlockCount()); } } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("processPendingReplication"); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java index 21e3eb0322d..c1d62e35c03 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java @@ -158,7 +158,7 @@ public class DatanodeAdminDefaultMonitor extends DatanodeAdminMonitorBase LOG.warn("DatanodeAdminMonitor caught exception when processing node.", e); } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("DatanodeAdminMonitorThread"); } if (numBlocksChecked + numNodesChecked > 0) { LOG.info("Checked {} blocks and {} nodes this tick. {} nodes are now " + @@ -373,7 +373,7 @@ public class DatanodeAdminDefaultMonitor extends DatanodeAdminMonitorBase // lock. // Yielding is required in case of block number is greater than the // configured per-iteration-limit. 
- namesystem.writeUnlock(); + namesystem.writeUnlock("processBlocksInternal"); try { LOG.debug("Yielded lock during decommission/maintenance check"); Thread.sleep(0, 500); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index cb601e94f82..092ef6502a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -852,7 +852,7 @@ public class DatanodeManager { + node + " does not exist"); } } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("removeDatanode"); } } @@ -1296,7 +1296,7 @@ public class DatanodeManager { refreshDatanodes(); countSoftwareVersions(); } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("refreshNodes"); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java index 372cb237ca1..b923ba3a655 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java @@ -503,7 +503,7 @@ class HeartbeatManager implements DatanodeStatistics { try { dm.removeDeadDatanode(dead, !dead.isMaintenance()); } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("removeDeadDatanode"); } } if (failedStorage != null) { @@ -512,7 +512,7 @@ class HeartbeatManager implements DatanodeStatistics { try { blockManager.removeBlocksAssociatedTo(failedStorage); } finally { - namesystem.writeUnlock(); + 
namesystem.writeUnlock("removeBlocksAssociatedTo"); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java index 504df6068ef..7bde21dcb69 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java @@ -222,7 +222,7 @@ public class BackupImage extends FSImage { try { getNamesystem().dir.updateCountForQuota(); } finally { - getNamesystem().writeUnlock(); + getNamesystem().writeUnlock("applyEdits"); } } finally { backupInputStream.clear(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java index d18d448ab25..ab657fb6724 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java @@ -250,7 +250,7 @@ class Checkpointer extends Daemon { sig.mostRecentCheckpointTxId); bnImage.reloadFromImageFile(file, backupNode.getNamesystem()); } finally { - backupNode.namesystem.writeUnlock(); + backupNode.namesystem.writeUnlock("doCheckpointByBackupNode"); } } rollForwardByApplyingLogs(manifest, bnImage, backupNode.getNamesystem()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java index 62a81f4064e..7bf58799716 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java @@ -192,7 +192,7 @@ public class EncryptionZoneManager { try { iip = dir.resolvePath(pc, zone, DirOp.READ); } finally { - dir.getFSNamesystem().readUnlock(); + dir.getFSNamesystem().readUnlock("pauseForTestingAfterNthCheckpoint"); } reencryptionHandler .pauseForTestingAfterNthCheckpoint(iip.getLastINode().getId(), count); @@ -224,7 +224,7 @@ public class EncryptionZoneManager { return getReencryptionStatus().getZoneStatus(inode.getId()); } finally { dir.readUnlock(); - dir.getFSNamesystem().readUnlock(); + dir.getFSNamesystem().readUnlock("getZoneStatus"); } } @@ -285,7 +285,7 @@ public class EncryptionZoneManager { try { reencryptionHandler.stopThreads(); } finally { - dir.getFSNamesystem().writeUnlock(); + dir.getFSNamesystem().writeUnlock("stopReencryptThread"); } if (reencryptHandlerExecutor != null) { reencryptHandlerExecutor.shutdownNow(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java index 2971af18298..2110a408b08 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java @@ -656,7 +656,7 @@ final class FSDirEncryptionZoneOp { Preconditions.checkNotNull(ezKeyName); // Generate EDEK while not holding the fsn lock. 
- fsn.writeUnlock(); + fsn.writeUnlock("getEncryptionKeyInfo"); try { EncryptionFaultInjector.getInstance().startFileBeforeGenerateKey(); return new EncryptionKeyInfo(protocolVersion, suite, ezKeyName, @@ -733,7 +733,7 @@ final class FSDirEncryptionZoneOp { dir.ezManager.checkEncryptionZoneRoot(iip.getLastINode(), zone); return dir.ezManager.getKeyName(iip); } finally { - dir.getFSNamesystem().readUnlock(); + dir.getFSNamesystem().readUnlock("getKeyNameForZone"); } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index ef59b9f75ea..389bd6455c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -1798,6 +1798,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, this.fsLock.readUnlock(); } + @Override public void readUnlock(String opName) { this.fsLock.readUnlock(opName); } @@ -1822,6 +1823,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean, this.fsLock.writeUnlock(); } + @Override public void writeUnlock(String opName) { this.fsLock.writeUnlock(opName); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java index 3b8c3316539..3325222267d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java @@ -243,7 +243,7 @@ public class FsImageValidation { loader.load(fsImageFile, false); } finally { namesystem.getFSDirectory().writeUnlock(); 
- namesystem.writeUnlock(); + namesystem.writeUnlock("loadImage"); } } t.cancel(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index ef0eef8510c..50f3b0bb2e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -2090,7 +2090,7 @@ public class NameNode extends ReconfigurableBase implements @Override public void writeUnlock() { namesystem.unlockRetryCache(); - namesystem.writeUnlock(); + namesystem.writeUnlock("HAState"); } /** Check if an operation of given category is allowed */ @@ -2254,7 +2254,7 @@ public class NameNode extends ReconfigurableBase implements throw new ReconfigurationException(property, newVal, getConf().get( property), e); } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("reconfReplicationParameters"); } } @@ -2291,7 +2291,7 @@ public class NameNode extends ReconfigurableBase implements throw new ReconfigurationException(property, newVal, getConf().get( property), nfe); } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("reconfHeartbeatInterval"); LOG.info("RECONFIGURE* changed heartbeatInterval to " + datanodeManager.getHeartbeatInterval()); } @@ -2315,7 +2315,7 @@ public class NameNode extends ReconfigurableBase implements throw new ReconfigurationException(property, newVal, getConf().get( property), nfe); } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("reconfHeartbeatRecheckInterval"); LOG.info("RECONFIGURE* changed heartbeatRecheckInterval to " + datanodeManager.getHeartbeatRecheckInterval()); } @@ -2434,7 +2434,7 @@ public class NameNode extends ReconfigurableBase implements throw new ReconfigurationException(property, newVal, getConf().get( property), e); } finally 
{ - namesystem.writeUnlock(); + namesystem.writeUnlock("reconfigureSlowNodesParameters"); } } @@ -2454,7 +2454,7 @@ public class NameNode extends ReconfigurableBase implements } catch (NumberFormatException e) { throw new ReconfigurationException(property, newVal, getConf().get(property), e); } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("reconfigureBlockInvalidateLimit"); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java index 1a60879a970..2a7002b5cd2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java @@ -350,7 +350,7 @@ public class ReencryptionHandler implements Runnable { getReencryptionStatus().markZoneStarted(zoneId); resetSubmissionTracker(zoneId); } finally { - dir.getFSNamesystem().readUnlock(); + dir.getFSNamesystem().readUnlock("reEncryptThread"); } try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java index 74730522d86..a0c8e9840f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java @@ -1098,7 +1098,7 @@ public class SecondaryNameNode implements Runnable, try { dstImage.reloadFromImageFile(file, dstNamesystem); } finally { - dstNamesystem.writeUnlock(); + dstNamesystem.writeUnlock("reloadFromImageFile"); } dstNamesystem.imageLoadComplete(); } diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java index ec1169b85a8..25596dce9f5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java @@ -385,7 +385,7 @@ public class EditLogTailer { lastLoadedTxnId = image.getLastAppliedTxId(); return editsLoaded; } finally { - namesystem.writeUnlock(); + namesystem.writeUnlock("doTailEdits"); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDeletionGc.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDeletionGc.java index d57da2204aa..c62455c724f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDeletionGc.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDeletionGc.java @@ -79,7 +79,7 @@ public class SnapshotDeletionGc { LOG.error("Failed to chooseDeletedSnapshot", e); throw e; } finally { - namesystem.readUnlock(); + namesystem.readUnlock("gcDeletedSnapshot"); } if (deleted == null) { LOG.trace("{}: no snapshots are marked as deleted.", name); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/RwLock.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/RwLock.java index deaeaa43247..05c1a06abda 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/RwLock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/RwLock.java @@ -28,6 +28,12 @@ public interface RwLock { /** Release read lock. 
*/ public void readUnlock(); + /** + * Release read lock with operation name. + * @param opName Operation name. + */ + public void readUnlock(String opName); + /** Check if the current thread holds read lock. */ public boolean hasReadLock(); @@ -40,6 +46,12 @@ public interface RwLock { /** Release write lock. */ public void writeUnlock(); + /** + * Release write lock with operation name. + * @param opName Operation name. + */ + public void writeUnlock(String opName); + /** Check if the current thread holds write lock. */ public boolean hasWriteLock(); }