HDFS-7060. Avoid taking locks when sending heartbeats from the DataNode. Contributed by Jiandan Yang.
commit bb8a6eea52
parent 51e882d5c9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -153,22 +153,22 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   public StorageReport[] getStorageReports(String bpid)
       throws IOException {
     List<StorageReport> reports;
-    synchronized (statsLock) {
-      List<FsVolumeImpl> curVolumes = volumes.getVolumes();
-      reports = new ArrayList<>(curVolumes.size());
-      for (FsVolumeImpl volume : curVolumes) {
-        try (FsVolumeReference ref = volume.obtainReference()) {
-          StorageReport sr = new StorageReport(volume.toDatanodeStorage(),
-              false,
-              volume.getCapacity(),
-              volume.getDfsUsed(),
-              volume.getAvailable(),
-              volume.getBlockPoolUsed(bpid),
-              volume.getNonDfsUsed());
-          reports.add(sr);
-        } catch (ClosedChannelException e) {
-          continue;
-        }
+    // Volumes are references from a copy-on-write snapshot, so access
+    // to the volume metrics doesn't require an additional lock.
+    List<FsVolumeImpl> curVolumes = volumes.getVolumes();
+    reports = new ArrayList<>(curVolumes.size());
+    for (FsVolumeImpl volume : curVolumes) {
+      try (FsVolumeReference ref = volume.obtainReference()) {
+        StorageReport sr = new StorageReport(volume.toDatanodeStorage(),
+            false,
+            volume.getCapacity(),
+            volume.getDfsUsed(),
+            volume.getAvailable(),
+            volume.getBlockPoolUsed(bpid),
+            volume.getNonDfsUsed());
+        reports.add(sr);
+      } catch (ClosedChannelException e) {
+        continue;
       }
     }
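The synchronized block is removable because FsVolumeList.getVolumes() returns a reference to an immutable copy-on-write snapshot: writers publish a whole new list atomically instead of mutating the list readers hold, so the heartbeat thread can iterate it without any lock. A minimal sketch of that pattern, using a hypothetical CopyOnWriteVolumes holder rather than the real FsVolumeList:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;

// Minimal copy-on-write list: readers grab an immutable snapshot and never
// block; writers build a new list and publish it atomically.
class CopyOnWriteVolumes<V> {
  private final AtomicReference<List<V>> volumes =
      new AtomicReference<>(Collections.emptyList());

  // Lock-free read: the returned list never changes after publication,
  // so iterating it (as getStorageReports now does) needs no lock.
  List<V> getVolumes() {
    return volumes.get();
  }

  // Writer copies, mutates the copy, and swaps it in with compare-and-set,
  // retrying if another writer published first.
  void addVolume(V v) {
    while (true) {
      List<V> cur = volumes.get();
      List<V> next = new ArrayList<>(cur);
      next.add(v);
      if (volumes.compareAndSet(cur, Collections.unmodifiableList(next))) {
        return;
      }
    }
  }
}

java.util.concurrent.CopyOnWriteArrayList packages essentially the same idea; the cost is paid on the rare volume add/remove, while reads on the hot heartbeat path stay wait-free.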
@@ -247,9 +247,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
   private final int smallBufferSize;
 
-  // Used for synchronizing access to usage stats
-  private final Object statsLock = new Object();
-
   final LocalFileSystem localFS;
 
   private boolean blockPinningEnabled;
@@ -583,9 +580,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   @Override // FSDatasetMBean
   public long getDfsUsed() throws IOException {
-    synchronized(statsLock) {
-      return volumes.getDfsUsed();
-    }
+    return volumes.getDfsUsed();
   }
 
   /**
@@ -593,9 +588,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   @Override // FSDatasetMBean
   public long getBlockPoolUsed(String bpid) throws IOException {
-    synchronized(statsLock) {
-      return volumes.getBlockPoolUsed(bpid);
-    }
+    return volumes.getBlockPoolUsed(bpid);
   }
 
   /**
@@ -611,9 +604,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   @Override // FSDatasetMBean
   public long getCapacity() throws IOException {
-    synchronized(statsLock) {
-      return volumes.getCapacity();
-    }
+    return volumes.getCapacity();
   }
 
   /**
@@ -621,9 +612,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   @Override // FSDatasetMBean
   public long getRemaining() throws IOException {
-    synchronized(statsLock) {
-      return volumes.getRemaining();
-    }
+    return volumes.getRemaining();
   }
 
   /**
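With statsLock gone, each of the four getters above (getDfsUsed, getBlockPoolUsed, getCapacity, getRemaining) is a plain read that sums per-volume counters maintained atomically; the worst case is a slightly stale total, which heartbeat and JMX reporting tolerate. A rough sketch of the shape of such lock-free aggregation, using illustrative names (VolumeStats, VolumeListSketch) rather than the actual FsVolumeList internals:

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.LongAdder;

// Per-volume usage kept in a LongAdder; readers sum without taking locks.
class VolumeStats {
  final LongAdder dfsUsed = new LongAdder();
}

class VolumeListSketch {
  // CopyOnWriteArrayList gives iterators a stable snapshot, mirroring
  // the copy-on-write volume list described above.
  private final List<VolumeStats> volumes = new CopyOnWriteArrayList<>();

  void addVolume(VolumeStats v) {
    volumes.add(v);
  }

  // Equivalent in spirit to the patched getDfsUsed: a plain loop over
  // atomic counters, with no statsLock on the heartbeat path.
  long getDfsUsed() {
    long total = 0;
    for (VolumeStats v : volumes) {
      total += v.dfsUsed.sum();
    }
    return total;
  }
}

The sum may interleave with concurrent updates, so it is not a point-in-time snapshot across volumes, but the removed lock never promised that either; it only serialized readers against writers.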
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java

@@ -51,7 +51,6 @@ import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
-import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -342,43 +341,38 @@ public class FsVolumeImpl implements FsVolumeSpi {
 
   private void decDfsUsedAndNumBlocks(String bpid, long value,
                                       boolean blockFileDeleted) {
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
-      BlockPoolSlice bp = bpSlices.get(bpid);
-      if (bp != null) {
-        bp.decDfsUsed(value);
-        if (blockFileDeleted) {
-          bp.decrNumBlocks();
-        }
+    // The BlockPoolSlice map is thread-safe, and updates to the space
+    // used or the number of blocks are atomic operations, so they don't
+    // require holding the dataset lock.
+    BlockPoolSlice bp = bpSlices.get(bpid);
+    if (bp != null) {
+      bp.decDfsUsed(value);
+      if (blockFileDeleted) {
+        bp.decrNumBlocks();
       }
     }
   }
 
   void incDfsUsedAndNumBlocks(String bpid, long value) {
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
-      BlockPoolSlice bp = bpSlices.get(bpid);
-      if (bp != null) {
-        bp.incDfsUsed(value);
-        bp.incrNumBlocks();
-      }
+    BlockPoolSlice bp = bpSlices.get(bpid);
+    if (bp != null) {
+      bp.incDfsUsed(value);
+      bp.incrNumBlocks();
     }
   }
 
   void incDfsUsed(String bpid, long value) {
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
-      BlockPoolSlice bp = bpSlices.get(bpid);
-      if (bp != null) {
-        bp.incDfsUsed(value);
-      }
+    BlockPoolSlice bp = bpSlices.get(bpid);
+    if (bp != null) {
+      bp.incDfsUsed(value);
     }
   }
 
   @VisibleForTesting
   public long getDfsUsed() throws IOException {
     long dfsUsed = 0;
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
-      for(BlockPoolSlice s : bpSlices.values()) {
-        dfsUsed += s.getDfsUsed();
-      }
+    for(BlockPoolSlice s : bpSlices.values()) {
+      dfsUsed += s.getDfsUsed();
     }
     return dfsUsed;
   }
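The new comment describes a standard pattern: a thread-safe map handles the bpid lookup, and each counter change is a single atomic operation, so the coarse dataset lock added contention without adding safety on this path. A hedged sketch with hypothetical names (SliceSketch, VolumeSketch; the real BlockPoolSlice keeps its counters in comparable atomic structures):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

// Illustrative stand-in for BlockPoolSlice: every update is one atomic op.
class SliceSketch {
  private final AtomicLong dfsUsed = new AtomicLong();
  private final AtomicLong numBlocks = new AtomicLong();

  void incDfsUsed(long delta)  { dfsUsed.addAndGet(delta); }
  void decDfsUsed(long delta)  { dfsUsed.addAndGet(-delta); }
  void incrNumBlocks()         { numBlocks.incrementAndGet(); }
  void decrNumBlocks()         { numBlocks.decrementAndGet(); }
  long getDfsUsed()            { return dfsUsed.get(); }
}

class VolumeSketch {
  // ConcurrentHashMap makes the bpid -> slice lookup safe without a lock.
  private final Map<String, SliceSketch> bpSlices = new ConcurrentHashMap<>();

  // Mirrors the patched incDfsUsed: a thread-safe lookup followed by an
  // atomic increment; no dataset-wide lock on the heartbeat path.
  void incDfsUsed(String bpid, long value) {
    SliceSketch bp = bpSlices.get(bpid);
    if (bp != null) {
      bp.incDfsUsed(value);
    }
  }
}

The trade-off is that a concurrent reader may observe the counters between two related updates (dfsUsed changed, numBlocks not yet), which is acceptable for usage statistics and is the price paid for keeping heartbeats from blocking behind the dataset lock.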