HDFS-10682. Replace FsDatasetImpl object lock with a separate lock object. (Contributed by Chen Liang)
commit ad0ac6cced
parent bb6d866207
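Every change in this commit follows the same before/after pattern: direct synchronization on the dataset object is replaced with a dedicated lock obtained from a new acquireDatasetLock() method and held through try-with-resources. A minimal sketch of the pattern (the dataset variable and the comments are illustrative, not from the patch):

// Before: lock the dataset object's own monitor.
synchronized (dataset) {
  // ... read or mutate dataset state ...
}

// After: acquire a separate AutoCloseableLock; try-with-resources calls
// close() on exit, so the lock is released even if an exception is thrown.
try (AutoCloseableLock lock = dataset.acquireDatasetLock()) {
  // ... read or mutate dataset state ...
}

Decoupling the lock from the object's monitor lets the locking policy evolve later (for example, toward a read/write lock) without touching every call site again.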
BlockSender.java

@@ -46,6 +46,7 @@ import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.ReadaheadPool.ReadaheadRequest;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.net.SocketOutputStream;
+import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.htrace.core.Sampler;
 import org.apache.htrace.core.TraceScope;
@@ -242,7 +243,7 @@ class BlockSender implements java.io.Closeable {
       }
 
       final long replicaVisibleLength;
-      synchronized(datanode.data) {
+      try(AutoCloseableLock lock = datanode.data.acquireDatasetLock()) {
         replica = getReplica(block, datanode);
         replicaVisibleLength = replica.getVisibleLength();
       }
DataNode.java

@@ -201,6 +201,7 @@ import org.apache.hadoop.tracing.TraceAdminProtocolPB;
 import org.apache.hadoop.tracing.TraceAdminProtocolServerSideTranslatorPB;
 import org.apache.hadoop.tracing.TraceUtils;
 import org.apache.hadoop.tracing.TracerConfigurationManager;
+import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -2831,7 +2832,7 @@ public class DataNode extends ReconfigurableBase
     final BlockConstructionStage stage;
 
     //get replica information
-    synchronized(data) {
+    try(AutoCloseableLock lock = data.acquireDatasetLock()) {
       Block storedBlock = data.getStoredBlock(b.getBlockPoolId(),
           b.getBlockId());
       if (null == storedBlock) {
DirectoryScanner.java

@@ -44,12 +44,14 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.StopWatch;
 import org.apache.hadoop.util.Time;
@@ -583,7 +585,7 @@ public class DirectoryScanner implements Runnable {
     Map<String, ScanInfo[]> diskReport = getDiskReport();
 
     // Hold FSDataset lock to prevent further changes to the block map
-    synchronized(dataset) {
+    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
      for (Entry<String, ScanInfo[]> entry : diskReport.entrySet()) {
        String bpid = entry.getKey();
        ScanInfo[] blockpoolReport = entry.getValue();
FsDatasetSpi.java

@@ -60,6 +60,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
+import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.util.ReflectionUtils;
 
 /**
@@ -641,4 +642,9 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
    * Confirm whether the block is deleting
    */
   boolean isDeletingBlock(String bpid, long blockId);
+
+  /**
+   * Acquire the lock of the dataset.
+   */
+  AutoCloseableLock acquireDatasetLock();
 }
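The new interface method only works in try-with-resources if AutoCloseableLock both takes the lock and implements AutoCloseable. A minimal sketch of the shape org.apache.hadoop.util.AutoCloseableLock presumably has, inferred from how this diff uses it (the real class may differ):

import java.util.concurrent.locks.ReentrantLock;

// Assumed shape, inferred from usage in this patch: acquire() blocks until
// the lock is held and returns this; close() releases it on try-block exit.
public class AutoCloseableLock implements AutoCloseable {
  private final ReentrantLock lock = new ReentrantLock();

  public AutoCloseableLock acquire() {
    lock.lock();
    return this;
  }

  @Override
  public void close() {
    lock.unlock();
  }
}

Note that close() declares no checked exception; that is why none of the rewritten call sites below need a catch clause.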
(File diff suppressed because it is too large; presumably the FsDatasetImpl.java change named in the commit title.)
FsVolumeImpl.java

@@ -56,6 +56,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.util.CloseableReferenceCount;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.StringUtils;
@@ -304,7 +305,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
 
   private void decDfsUsedAndNumBlocks(String bpid, long value,
                                       boolean blockFileDeleted) {
-    synchronized(dataset) {
+    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
       BlockPoolSlice bp = bpSlices.get(bpid);
       if (bp != null) {
         bp.decDfsUsed(value);
@@ -316,7 +317,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
   }
 
   void incDfsUsedAndNumBlocks(String bpid, long value) {
-    synchronized (dataset) {
+    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
       BlockPoolSlice bp = bpSlices.get(bpid);
       if (bp != null) {
         bp.incDfsUsed(value);
@@ -326,7 +327,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
   }
 
   void incDfsUsed(String bpid, long value) {
-    synchronized(dataset) {
+    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
       BlockPoolSlice bp = bpSlices.get(bpid);
       if (bp != null) {
         bp.incDfsUsed(value);
@@ -337,7 +338,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
   @VisibleForTesting
   public long getDfsUsed() throws IOException {
     long dfsUsed = 0;
-    synchronized(dataset) {
+    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
       for(BlockPoolSlice s : bpSlices.values()) {
         dfsUsed += s.getDfsUsed();
       }
SimulatedFSDataset.java

@@ -62,6 +62,7 @@ import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.MetricsCollector;
 import org.apache.hadoop.metrics2.util.MBeans;
+import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.util.DataChecksum;
 
 /**
@@ -113,6 +114,8 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
       "dfs.datanode.simulateddatastorage.state";
   private static final DatanodeStorage.State DEFAULT_STATE =
       DatanodeStorage.State.NORMAL;
+
+  private final AutoCloseableLock datasetLock;
 
   static final byte[] nullCrcFileData;
   static {
@@ -550,6 +553,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
         conf.getLong(CONFIG_PROPERTY_CAPACITY, DEFAULT_CAPACITY),
         conf.getEnum(CONFIG_PROPERTY_STATE, DEFAULT_STATE));
     this.volume = new SimulatedVolume(this.storage);
+    this.datasetLock = new AutoCloseableLock();
   }
 
   public synchronized void injectBlocks(String bpid,
@@ -1365,5 +1369,10 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   public boolean isDeletingBlock(String bpid, long blockId) {
     throw new UnsupportedOperationException();
   }
+
+  @Override
+  public AutoCloseableLock acquireDatasetLock() {
+    return datasetLock.acquire();
+  }
 }
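Taken together, the three SimulatedFSDataset hunks above show the full wiring an FsDatasetSpi implementation needs: a lock field, its construction, and the accessor. A condensed sketch (the class skeleton is illustrative; only the lock-related lines come from the patch):

import org.apache.hadoop.util.AutoCloseableLock;

class SketchDataset { // stands in for an FsDatasetSpi implementation
  // Hold a dedicated lock object instead of synchronizing on `this`.
  private final AutoCloseableLock datasetLock;

  SketchDataset() {
    // Created once, at construction time.
    this.datasetLock = new AutoCloseableLock();
  }

  // Handed out through the new FsDatasetSpi method.
  public AutoCloseableLock acquireDatasetLock() {
    return datasetLock.acquire();
  }
}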
TestBlockRecovery.java

@@ -85,6 +85,7 @@ import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
@@ -693,7 +694,7 @@ public class TestBlockRecovery {
     final RecoveringBlock recoveringBlock = new RecoveringBlock(
         block.getBlock(), locations, block.getBlock()
             .getGenerationStamp() + 1);
-    synchronized (dataNode.data) {
+    try(AutoCloseableLock lock = dataNode.data.acquireDatasetLock()) {
       Thread.sleep(2000);
       dataNode.initReplicaRecovery(recoveringBlock);
     }
TestDirectoryScanner.java

@@ -63,6 +63,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.LazyPersistTestCase;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.util.Time;
 import org.junit.Before;
 import org.junit.Test;
@@ -113,7 +114,7 @@ public class TestDirectoryScanner {
 
   /** Truncate a block file */
   private long truncateBlockFile() throws IOException {
-    synchronized (fds) {
+    try(AutoCloseableLock lock = fds.acquireDatasetLock()) {
       for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
         File f = b.getBlockFile();
         File mf = b.getMetaFile();
@@ -138,7 +139,7 @@ public class TestDirectoryScanner {
 
   /** Delete a block file */
   private long deleteBlockFile() {
-    synchronized(fds) {
+    try(AutoCloseableLock lock = fds.acquireDatasetLock()) {
       for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
         File f = b.getBlockFile();
         File mf = b.getMetaFile();
@@ -154,7 +155,7 @@ public class TestDirectoryScanner {
 
   /** Delete block meta file */
   private long deleteMetaFile() {
-    synchronized(fds) {
+    try(AutoCloseableLock lock = fds.acquireDatasetLock()) {
       for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
         File file = b.getMetaFile();
         // Delete a metadata file
@@ -173,7 +174,7 @@ public class TestDirectoryScanner {
    * @throws IOException
    */
   private void duplicateBlock(long blockId) throws IOException {
-    synchronized (fds) {
+    try(AutoCloseableLock lock = fds.acquireDatasetLock()) {
       ReplicaInfo b = FsDatasetTestUtil.fetchReplicaInfo(fds, bpid, blockId);
       try (FsDatasetSpi.FsVolumeReferences volumes =
           fds.getFsVolumeReferences()) {
ExternalDatasetImpl.java

@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.metrics2.MetricsCollector;
+import org.apache.hadoop.util.AutoCloseableLock;
 
 public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
 
@@ -448,4 +449,9 @@ public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
   public boolean isDeletingBlock(String bpid, long blockId) {
     return false;
   }
+
+  @Override
+  public AutoCloseableLock acquireDatasetLock() {
+    return null;
+  }
 }
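ExternalDatasetImpl is a compile-time stub, so its acquireDatasetLock() simply returns null. That is legal in try-with-resources: Java only calls close() on non-null resources, so the try body still runs. A standalone demonstration of that behavior (not part of the patch):

public class NullResourceDemo {
  public static void main(String[] args) throws Exception {
    // A null resource is permitted; its close() is simply skipped.
    try (AutoCloseable r = null) {
      System.out.println("body runs; nothing is closed");
    }
  }
}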