HDFS-7060. Avoid taking locks when sending heartbeats from the DataNode. Contributed by Jiandan Yang.

Weiwei Yang 2017-11-08 10:22:13 +08:00
parent 51e882d5c9
commit bb8a6eea52
2 changed files with 37 additions and 54 deletions
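
In short, the change removes two locks from the DataNode heartbeat path: the statsLock that FsDatasetImpl held while building storage reports and serving the usage getters, and the dataset lock that FsVolumeImpl acquired around its per-block-pool usage counters. As the new in-line comments below explain, the volume list is a copy-on-write snapshot and the counters are updated atomically, so neither lock is needed for these reads and updates.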

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -153,7 +153,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   public StorageReport[] getStorageReports(String bpid)
       throws IOException {
     List<StorageReport> reports;
-    synchronized (statsLock) {
+    // Volumes are references from a copy-on-write snapshot, so access to
+    // the volume metrics doesn't require an additional lock.
     List<FsVolumeImpl> curVolumes = volumes.getVolumes();
     reports = new ArrayList<>(curVolumes.size());
     for (FsVolumeImpl volume : curVolumes) {
@@ -170,7 +171,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
           continue;
         }
       }
-    }
     return reports.toArray(new StorageReport[reports.size()]);
   }
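
The copy-on-write pattern that comment relies on can be sketched as follows. This is a minimal illustration, not Hadoop's actual FsVolumeList: VolumeSnapshotHolder and its methods are hypothetical names. Writers publish a fresh immutable list atomically, so a reader like getStorageReports() can iterate a stable snapshot without any lock.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;

// Hypothetical copy-on-write volume list: writers replace the whole list
// atomically, so readers can iterate a consistent snapshot without locking.
class VolumeSnapshotHolder<V> {
  private final AtomicReference<List<V>> volumes =
      new AtomicReference<>(Collections.emptyList());

  // Reader side: grab the current immutable snapshot; no lock needed.
  List<V> getVolumes() {
    return volumes.get();
  }

  // Writer side: copy, modify, and publish a new immutable list.
  void addVolume(V v) {
    while (true) {
      List<V> cur = volumes.get();
      List<V> next = new ArrayList<>(cur);
      next.add(v);
      if (volumes.compareAndSet(cur, Collections.unmodifiableList(next))) {
        return;
      }
    }
  }
}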
@@ -247,9 +247,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   private final int smallBufferSize;
 
-  // Used for synchronizing access to usage stats
-  private final Object statsLock = new Object();
-
   final LocalFileSystem localFS;
   private boolean blockPinningEnabled;
@@ -583,20 +580,16 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   @Override // FSDatasetMBean
   public long getDfsUsed() throws IOException {
-    synchronized(statsLock) {
       return volumes.getDfsUsed();
-    }
   }
 
   /**
    * Return the total space used by dfs datanode
    */
   @Override // FSDatasetMBean
   public long getBlockPoolUsed(String bpid) throws IOException {
-    synchronized(statsLock) {
       return volumes.getBlockPoolUsed(bpid);
-    }
   }
 
   /**
    * Return true - if there are still valid volumes on the DataNode.
@@ -611,20 +604,16 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   @Override // FSDatasetMBean
   public long getCapacity() throws IOException {
-    synchronized(statsLock) {
       return volumes.getCapacity();
-    }
   }
 
   /**
    * Return how many bytes can still be stored in the FSDataset
    */
   @Override // FSDatasetMBean
   public long getRemaining() throws IOException {
-    synchronized(statsLock) {
       return volumes.getRemaining();
-    }
   }
 
   /**
    * Return the number of failed volumes in the FSDataset.
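
The getters above follow the same scheme. A minimal sketch, assuming each volume keeps its usage in an AtomicLong; VolumeStats and DatasetStats are hypothetical names, not the Hadoop classes. Reads are single atomic loads, and the aggregate simply sums over the current snapshot.

import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical per-volume counter: writers update it atomically, readers
// take a single atomic load, so no shared statsLock is needed.
class VolumeStats {
  private final AtomicLong dfsUsed = new AtomicLong();

  void addUsed(long delta) {
    dfsUsed.addAndGet(delta);
  }

  long getUsed() {
    return dfsUsed.get();
  }
}

class DatasetStats {
  private final List<VolumeStats> snapshot; // copy-on-write snapshot, as above

  DatasetStats(List<VolumeStats> snapshot) {
    this.snapshot = snapshot;
  }

  // Mirrors getDfsUsed(): sum over the snapshot. Each read is atomic; the
  // total may be slightly stale, which heartbeat reporting tolerates.
  long getDfsUsed() {
    long total = 0;
    for (VolumeStats v : snapshot) {
      total += v.getUsed();
    }
    return total;
  }
}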

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java

@@ -51,7 +51,6 @@ import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
-import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -342,7 +341,9 @@ public class FsVolumeImpl implements FsVolumeSpi {
   private void decDfsUsedAndNumBlocks(String bpid, long value,
       boolean blockFileDeleted) {
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
+    // The BlockPoolSlice map is thread safe, and updates to the space used
+    // or the number of blocks are atomic, so holding the dataset lock is
+    // not required.
     BlockPoolSlice bp = bpSlices.get(bpid);
     if (bp != null) {
       bp.decDfsUsed(value);
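
The claim in that comment can be illustrated with a self-contained sketch; BpSlice and VolumeSketch are hypothetical stand-ins for BlockPoolSlice and FsVolumeImpl, assuming the real counters behave like atomics.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical sketch: a thread-safe map of block-pool slices whose counters
// are updated atomically, so no dataset-wide lock has to be held.
class BpSlice {
  final AtomicLong dfsUsed = new AtomicLong();
  final AtomicLong numBlocks = new AtomicLong();

  void decDfsUsed(long value) {
    dfsUsed.addAndGet(-value);
  }

  void decrNumBlocks() {
    numBlocks.decrementAndGet();
  }
}

class VolumeSketch {
  private final Map<String, BpSlice> bpSlices = new ConcurrentHashMap<>();

  // Mirrors decDfsUsedAndNumBlocks(): a lock-free map lookup followed by
  // atomic counter updates; no lock is held across the operation.
  void decDfsUsedAndNumBlocks(String bpid, long value,
      boolean blockFileDeleted) {
    BpSlice bp = bpSlices.get(bpid);
    if (bp != null) {
      bp.decDfsUsed(value);
      if (blockFileDeleted) {
        bp.decrNumBlocks();
      }
    }
  }
}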
@@ -351,35 +352,28 @@ public class FsVolumeImpl implements FsVolumeSpi {
         }
       }
     }
-  }
 
   void incDfsUsedAndNumBlocks(String bpid, long value) {
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
       BlockPoolSlice bp = bpSlices.get(bpid);
       if (bp != null) {
         bp.incDfsUsed(value);
         bp.incrNumBlocks();
       }
-    }
   }
 
   void incDfsUsed(String bpid, long value) {
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
       BlockPoolSlice bp = bpSlices.get(bpid);
       if (bp != null) {
         bp.incDfsUsed(value);
       }
-    }
   }
 
   @VisibleForTesting
   public long getDfsUsed() throws IOException {
     long dfsUsed = 0;
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
       for(BlockPoolSlice s : bpSlices.values()) {
         dfsUsed += s.getDfsUsed();
       }
-    }
     return dfsUsed;
   }
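
One consequence worth noting: without the dataset lock, an aggregate like getDfsUsed() can interleave with concurrent counter updates, so the returned total may be slightly stale or momentarily inconsistent across block pools. For heartbeat and JMX reporting that appears to be an acceptable trade-off, and it is what lets the DataNode send heartbeats without contending on these locks.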