diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 41c41e6ed6b..d4375cdfd73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -153,22 +153,22 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   public StorageReport[] getStorageReports(String bpid)
       throws IOException {
     List<StorageReport> reports;
-    synchronized (statsLock) {
-      List<FsVolumeImpl> curVolumes = volumes.getVolumes();
-      reports = new ArrayList<>(curVolumes.size());
-      for (FsVolumeImpl volume : curVolumes) {
-        try (FsVolumeReference ref = volume.obtainReference()) {
-          StorageReport sr = new StorageReport(volume.toDatanodeStorage(),
-              false,
-              volume.getCapacity(),
-              volume.getDfsUsed(),
-              volume.getAvailable(),
-              volume.getBlockPoolUsed(bpid),
-              volume.getNonDfsUsed());
-          reports.add(sr);
-        } catch (ClosedChannelException e) {
-          continue;
-        }
+    // Volumes are references from a copy-on-write snapshot, so reading
+    // the volume metrics doesn't require an additional lock.
+    List<FsVolumeImpl> curVolumes = volumes.getVolumes();
+    reports = new ArrayList<>(curVolumes.size());
+    for (FsVolumeImpl volume : curVolumes) {
+      try (FsVolumeReference ref = volume.obtainReference()) {
+        StorageReport sr = new StorageReport(volume.toDatanodeStorage(),
+            false,
+            volume.getCapacity(),
+            volume.getDfsUsed(),
+            volume.getAvailable(),
+            volume.getBlockPoolUsed(bpid),
+            volume.getNonDfsUsed());
+        reports.add(sr);
+      } catch (ClosedChannelException e) {
+        continue;
       }
     }
 
@@ -247,9 +247,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
   private final int smallBufferSize;
 
-  // Used for synchronizing access to usage stats
-  private final Object statsLock = new Object();
-
   final LocalFileSystem localFS;
 
   private boolean blockPinningEnabled;
@@ -583,9 +580,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   @Override // FSDatasetMBean
   public long getDfsUsed() throws IOException {
-    synchronized(statsLock) {
-      return volumes.getDfsUsed();
-    }
+    return volumes.getDfsUsed();
   }
 
   /**
@@ -593,9 +588,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   @Override // FSDatasetMBean
   public long getBlockPoolUsed(String bpid) throws IOException {
-    synchronized(statsLock) {
-      return volumes.getBlockPoolUsed(bpid);
-    }
+    return volumes.getBlockPoolUsed(bpid);
   }
 
   /**
@@ -611,9 +604,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   @Override // FSDatasetMBean
   public long getCapacity() throws IOException {
-    synchronized(statsLock) {
-      return volumes.getCapacity();
-    }
+    return volumes.getCapacity();
   }
 
   /**
@@ -621,9 +612,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   @Override // FSDatasetMBean
   public long getRemaining() throws IOException {
-    synchronized(statsLock) {
-      return volumes.getRemaining();
-    }
+    return volumes.getRemaining();
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index b948fb788a4..7224e692c79 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -51,7 +51,6 @@ import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
-import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -342,43 +341,38 @@ public class FsVolumeImpl implements FsVolumeSpi {
   private void decDfsUsedAndNumBlocks(String bpid, long value,
       boolean blockFileDeleted) {
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
-      BlockPoolSlice bp = bpSlices.get(bpid);
-      if (bp != null) {
-        bp.decDfsUsed(value);
-        if (blockFileDeleted) {
-          bp.decrNumBlocks();
-        }
+    // The BlockPoolSlice map is thread-safe, and updates to the space
+    // used or the number of blocks are atomic, so holding the dataset
+    // lock is not required.
+    BlockPoolSlice bp = bpSlices.get(bpid);
+    if (bp != null) {
+      bp.decDfsUsed(value);
+      if (blockFileDeleted) {
+        bp.decrNumBlocks();
      }
    }
  }
 
   void incDfsUsedAndNumBlocks(String bpid, long value) {
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
-      BlockPoolSlice bp = bpSlices.get(bpid);
-      if (bp != null) {
-        bp.incDfsUsed(value);
-        bp.incrNumBlocks();
-      }
+    BlockPoolSlice bp = bpSlices.get(bpid);
+    if (bp != null) {
+      bp.incDfsUsed(value);
+      bp.incrNumBlocks();
     }
   }
 
   void incDfsUsed(String bpid, long value) {
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
-      BlockPoolSlice bp = bpSlices.get(bpid);
-      if (bp != null) {
-        bp.incDfsUsed(value);
-      }
+    BlockPoolSlice bp = bpSlices.get(bpid);
+    if (bp != null) {
+      bp.incDfsUsed(value);
     }
   }
 
   @VisibleForTesting
   public long getDfsUsed() throws IOException {
     long dfsUsed = 0;
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
-      for(BlockPoolSlice s : bpSlices.values()) {
-        dfsUsed += s.getDfsUsed();
-      }
+    for(BlockPoolSlice s : bpSlices.values()) {
+      dfsUsed += s.getDfsUsed();
     }
     return dfsUsed;
   }
 
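Context on why the first `FsDatasetImpl` hunk is safe: the "copy-on-write snapshot" the new comment refers to is the volume list returned by `FsVolumeList.getVolumes()`, which is backed by a `CopyOnWriteArrayList`. Below is a minimal sketch of that pattern; `VolumeList` and `Volume` are illustrative stand-ins for `FsVolumeList` and `FsVolumeImpl`, not the real HDFS classes.

```java
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

// Sketch only: every mutation of a CopyOnWriteArrayList copies the backing
// array, so an iterator keeps reading the array that was current when it
// started and never observes a concurrent add or remove.
class Volume {
  private final long capacity;

  Volume(long capacity) {
    this.capacity = capacity;
  }

  long getCapacity() {
    return capacity;
  }
}

public class VolumeList {
  private final List<Volume> volumes = new CopyOnWriteArrayList<>();

  void addVolume(Volume v) {
    volumes.add(v);      // swaps in a fresh backing array
  }

  void removeVolume(Volume v) {
    volumes.remove(v);   // swaps in a fresh backing array
  }

  // No lock needed: concurrent mutations only affect future iterators.
  long getCapacity() {
    long capacity = 0;
    for (Volume v : volumes) {
      capacity += v.getCapacity();
    }
    return capacity;
  }

  public static void main(String[] args) {
    VolumeList list = new VolumeList();
    list.addVolume(new Volume(100L));
    list.addVolume(new Volume(250L));
    System.out.println(list.getCapacity()); // 350
  }
}
```

The snapshot alone doesn't pin a volume's lifetime, which is why the loop still wraps each volume in `obtainReference()`: a volume removed mid-report throws `ClosedChannelException` and is simply skipped instead of contributing stale metrics.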
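The `FsVolumeImpl` hunks rest on the reasoning stated in the new comment: `bpSlices` is a concurrent map, and each `BlockPoolSlice` keeps its usage in atomic counters, so a single increment or decrement needs no dataset-wide lock. A hedged sketch of that shape follows; `UsageTracker` and `Slice` are illustrative names standing in for the dataset and `BlockPoolSlice`, with the counters modeled directly as `AtomicLong`s.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

// Sketch only: a ConcurrentHashMap lookup plus AtomicLong read-modify-write
// operations are each thread-safe on their own, so no coarse lock is taken.
public class UsageTracker {
  static class Slice {
    final AtomicLong dfsUsed = new AtomicLong();
    final AtomicLong numBlocks = new AtomicLong();
  }

  private final Map<String, Slice> bpSlices = new ConcurrentHashMap<>();

  void addBlockPool(String bpid) {
    bpSlices.putIfAbsent(bpid, new Slice());
  }

  void incDfsUsedAndNumBlocks(String bpid, long value) {
    Slice bp = bpSlices.get(bpid);   // thread-safe map read
    if (bp != null) {
      bp.dfsUsed.addAndGet(value);   // atomic update
      bp.numBlocks.incrementAndGet();
    }
  }

  void decDfsUsedAndNumBlocks(String bpid, long value,
      boolean blockFileDeleted) {
    Slice bp = bpSlices.get(bpid);
    if (bp != null) {
      bp.dfsUsed.addAndGet(-value);
      if (blockFileDeleted) {
        bp.numBlocks.decrementAndGet();
      }
    }
  }

  long getDfsUsed() {
    long dfsUsed = 0;
    for (Slice s : bpSlices.values()) {  // weakly consistent, no lock
      dfsUsed += s.dfsUsed.get();
    }
    return dfsUsed;
  }

  public static void main(String[] args) {
    UsageTracker tracker = new UsageTracker();
    tracker.addBlockPool("BP-1");
    tracker.incDfsUsedAndNumBlocks("BP-1", 4096L);
    tracker.decDfsUsedAndNumBlocks("BP-1", 1024L, true);
    System.out.println(tracker.getDfsUsed()); // 3072
  }
}
```

Note the trade-off this accepts: in `decDfsUsedAndNumBlocks` the two updates are individually atomic but not atomic as a pair, so a concurrent reader can momentarily see the space decremented before the block count. For usage statistics that skew is harmless, which is what makes dropping the lock acceptable.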