HDFS-15046. Backport HDFS-7060 to branch-2.10. Contributed by Lisheng Sun.
commit 1d0e2ba335
parent 9f7820e729
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -159,24 +159,20 @@ public DatanodeStorage getStorage(final String storageUuid) {
   public StorageReport[] getStorageReports(String bpid)
       throws IOException {
     List<StorageReport> reports;
-    synchronized (statsLock) {
-      List<FsVolumeImpl> curVolumes = volumes.getVolumes();
-      reports = new ArrayList<>(curVolumes.size());
-      for (FsVolumeImpl volume : curVolumes) {
-        try (FsVolumeReference ref = volume.obtainReference()) {
-          StorageReport sr = new StorageReport(volume.toDatanodeStorage(),
-              false,
-              volume.getCapacity(),
-              volume.getDfsUsed(),
-              volume.getAvailable(),
-              volume.getBlockPoolUsed(bpid),
-              volume.getNonDfsUsed());
-          reports.add(sr);
-        } catch (ClosedChannelException e) {
-          continue;
-        }
-      }
-    }
+    // Volumes are the references from a copy-on-write snapshot, so the
+    // access on the volume metrics doesn't require an additional lock.
+    List<FsVolumeImpl> curVolumes = volumes.getVolumes();
+    reports = new ArrayList<>(curVolumes.size());
+    for (FsVolumeImpl volume : curVolumes) {
+      try (FsVolumeReference ref = volume.obtainReference()) {
+        StorageReport sr = new StorageReport(volume.toDatanodeStorage(), false,
+            volume.getCapacity(), volume.getDfsUsed(), volume.getAvailable(),
+            volume.getBlockPoolUsed(bpid), volume.getNonDfsUsed());
+        reports.add(sr);
+      } catch (ClosedChannelException e) {
+        continue;
+      }
+    }
 
     return reports.toArray(new StorageReport[reports.size()]);
   }
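For context on the new comment: getVolumes() hands back an immutable snapshot that writers replace wholesale rather than mutate in place, which is what makes the lock-free iteration above safe. A minimal sketch of that copy-on-write pattern, with illustrative names (VolumeList, addVolume) rather than the actual FsVolumeList API:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Minimal copy-on-write list sketch. Readers never block; writers
// build a new list and publish it with a single volatile write.
class VolumeList<V> {
  private volatile List<V> volumes = Collections.emptyList();

  // Lock-free read: the snapshot a reader gets can never change under it.
  List<V> getVolumes() {
    return volumes;
  }

  // Writers are serialized among themselves, but never block readers.
  synchronized void addVolume(V v) {
    List<V> copy = new ArrayList<>(volumes);
    copy.add(v);
    volumes = Collections.unmodifiableList(copy);
  }
}

Iterating such a snapshot while another thread adds or removes a volume is safe; the reader simply sees the membership as of the moment it called getVolumes().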
@@ -292,9 +288,6 @@ public LengthInputStream getMetaDataInputStream(ExtendedBlock b)
 
   private final int smallBufferSize;
 
-  // Used for synchronizing access to usage stats
-  private final Object statsLock = new Object();
-
   final LocalFileSystem localFS;
 
   private boolean blockPinningEnabled;
@@ -643,20 +636,16 @@ private StorageType getStorageTypeFromLocations(
    */
   @Override // FSDatasetMBean
   public long getDfsUsed() throws IOException {
-    synchronized(statsLock) {
-      return volumes.getDfsUsed();
-    }
+    return volumes.getDfsUsed();
   }
 
   /**
    * Return the total space used by dfs datanode
    */
   @Override // FSDatasetMBean
   public long getBlockPoolUsed(String bpid) throws IOException {
-    synchronized(statsLock) {
-      return volumes.getBlockPoolUsed(bpid);
-    }
+    return volumes.getBlockPoolUsed(bpid);
   }
 
   /**
    * Return true - if there are still valid volumes on the DataNode.
@@ -677,20 +666,16 @@ public boolean hasEnoughResource() {
    */
   @Override // FSDatasetMBean
   public long getCapacity() {
-    synchronized(statsLock) {
-      return volumes.getCapacity();
-    }
+    return volumes.getCapacity();
   }
 
   /**
    * Return how many bytes can still be stored in the FSDataset
    */
   @Override // FSDatasetMBean
   public long getRemaining() throws IOException {
-    synchronized(statsLock) {
-      return volumes.getRemaining();
-    }
+    return volumes.getRemaining();
   }
 
   /**
    * Return the number of failed volumes in the FSDataset.
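These FSDatasetMBean getters sit on the DataNode's reporting path, so under the old code a heartbeat could stall behind any thread holding statsLock. Dropping the lock is reasonable when each volume maintains its usage figures with atomic primitives, so a sum over volumes needs no mutual exclusion. A sketch of that idea, assuming AtomicLong-backed counters (Volume and UsageAggregator are hypothetical names, not HDFS classes):

import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.atomic.AtomicLong;

class UsageAggregator {
  // Each volume owns an atomically-updated usage counter.
  static class Volume {
    final AtomicLong dfsUsed = new AtomicLong();
  }

  private final List<Volume> volumes = new CopyOnWriteArrayList<>();

  // Lock-free aggregation, analogous to volumes.getDfsUsed() above.
  // The sum may be momentarily stale, which is acceptable for
  // monitoring and heartbeat reporting.
  long getDfsUsed() {
    long total = 0;
    for (Volume v : volumes) {
      total += v.dfsUsed.get();
    }
    return total;
  }
}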
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java

@@ -49,7 +49,6 @@
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
-import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -323,7 +322,9 @@ void onMetaFileDeletion(String bpid, long value) {
 
   private void decDfsUsedAndNumBlocks(String bpid, long value,
       boolean blockFileDeleted) {
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
-      BlockPoolSlice bp = bpSlices.get(bpid);
-      if (bp != null) {
-        bp.decDfsUsed(value);
+    // BlockPoolSlice map is thread safe, and update the space used or
+    // number of blocks are atomic operations, so it doesn't require to
+    // hold the dataset lock.
+    BlockPoolSlice bp = bpSlices.get(bpid);
+    if (bp != null) {
+      bp.decDfsUsed(value);
@@ -332,35 +333,28 @@ private void decDfsUsedAndNumBlocks(String bpid, long value,
-        }
-      }
-    }
+      }
+    }
   }
 
   void incDfsUsedAndNumBlocks(String bpid, long value) {
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
-      BlockPoolSlice bp = bpSlices.get(bpid);
-      if (bp != null) {
-        bp.incDfsUsed(value);
-        bp.incrNumBlocks();
-      }
-    }
+    BlockPoolSlice bp = bpSlices.get(bpid);
+    if (bp != null) {
+      bp.incDfsUsed(value);
+      bp.incrNumBlocks();
+    }
   }
 
   void incDfsUsed(String bpid, long value) {
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
-      BlockPoolSlice bp = bpSlices.get(bpid);
-      if (bp != null) {
-        bp.incDfsUsed(value);
-      }
-    }
+    BlockPoolSlice bp = bpSlices.get(bpid);
+    if (bp != null) {
+      bp.incDfsUsed(value);
+    }
   }
 
   @VisibleForTesting
   public long getDfsUsed() throws IOException {
     long dfsUsed = 0;
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
-      for(BlockPoolSlice s : bpSlices.values()) {
-        dfsUsed += s.getDfsUsed();
-      }
-    }
+    for(BlockPoolSlice s : bpSlices.values()) {
+      dfsUsed += s.getDfsUsed();
+    }
     return dfsUsed;
   }
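The pattern on the FsVolumeImpl side is the same: bpSlices behaves as a thread-safe map and the per-slice counters update atomically, so the get-then-increment sequence needs no dataset-wide lock. A sketch under those assumptions (Slice and VolumeUsage are illustrative names; HDFS's BlockPoolSlice carries far more state than this):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

class VolumeUsage {
  static class Slice {
    final AtomicLong dfsUsed = new AtomicLong();
    final AtomicLong numBlocks = new AtomicLong();
  }

  // ConcurrentHashMap gives safe lookups while slices are added/removed.
  private final ConcurrentMap<String, Slice> bpSlices =
      new ConcurrentHashMap<>();

  // Mirrors the unlocked incDfsUsedAndNumBlocks above: a lookup on a
  // thread-safe map followed by independent atomic increments.
  void incDfsUsedAndNumBlocks(String bpid, long value) {
    Slice bp = bpSlices.get(bpid);
    if (bp != null) {
      bp.dfsUsed.addAndGet(value);
      bp.numBlocks.incrementAndGet();
    }
  }
}

The two increments are not atomic as a pair, but each counter stays internally consistent, which is the trade-off the new comment in the patch accepts.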