HDFS-15415. Reduce locking in Datanode DirectoryScanner. Contributed by Stephen O'Donnell.
Signed-off-by: Wei-Chiu Chuang <weichiu@apache.org>
parent 947b0a154a
commit 71d006130e
@@ -42,7 +42,6 @@
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -406,8 +405,6 @@ private void scan() {
     clear();
     Map<String, ScanInfo[]> diskReport = getDiskReport();

-    // Hold FSDataset lock to prevent further changes to the block map
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
     for (Entry<String, ScanInfo[]> entry : diskReport.entrySet()) {
       String bpid = entry.getKey();
       ScanInfo[] blockpoolReport = entry.getValue();
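For context, the pattern removed above is Hadoop's AutoCloseableLock used in
try-with-resources. A minimal sketch of what scan() no longer does; the class
and field names here are hypothetical stand-ins, not the DataNode source:

import org.apache.hadoop.util.AutoCloseableLock;

public class DatasetLockSketch {
  // Stand-in for the FsDataset lock; acquire() returns the lock itself so
  // that close() at the end of the try block releases it.
  private final AutoCloseableLock datasetLock = new AutoCloseableLock();

  void scan() {
    // Before this commit, the whole disk-report vs. block-map comparison
    // ran while holding the dataset lock:
    try (AutoCloseableLock lock = datasetLock.acquire()) {
      // ... compare each block pool's on-disk report with memory ...
    }
    // After the change, the comparison loop runs without holding the lock.
  }
}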
@@ -463,8 +460,8 @@ private void scan() {
           d++;

           if (d < blockpoolReport.length) {
-            // There may be multiple on-disk records for the same block, don't increment
-            // the memory record pointer if so.
+            // There may be multiple on-disk records for the same block,
+            // don't increment the memory record pointer if so.
             ScanInfo nextInfo = blockpoolReport[d];
             if (nextInfo.getBlockId() != info.getBlockId()) {
               ++m;
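The duplicate-record rule in the reworded comment above can be seen in
isolation. A hedged, standalone sketch with hypothetical data (not HDFS
code): the memory pointer m advances only when the next on-disk record
carries a different block id, so every duplicate on-disk replica is checked
against the same in-memory record.

public class DuplicateDiskRecordSketch {
  public static void main(String[] args) {
    long[] memIds  = {101, 102, 103};       // in-memory block ids, sorted
    long[] diskIds = {101, 102, 102, 103};  // on-disk ids; 102 duplicated

    int d = 0;
    int m = 0;
    while (m < memIds.length && d < diskIds.length) {
      System.out.println("compare mem=" + memIds[m] + " disk=" + diskIds[d]);
      long current = diskIds[d];
      d++;
      if (d < diskIds.length && diskIds[d] == current) {
        continue;  // duplicate on-disk record: keep m where it is
      }
      ++m;
    }
  }
}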
@@ -487,7 +484,6 @@ private void scan() {
         }
         LOG.info(statsRecord.toString());
       } //end for
-    } //end synchronized
   }

  /**