HDFS-10682. Replace FsDatasetImpl object lock with a separate lock object. (Chen Liang)

Arpit Agarwal 2016-08-08 12:02:53 -07:00
parent 625585950a
commit 8c0638471f
10 changed files with 535 additions and 446 deletions
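The pattern applied throughout the hunks below replaces intrinsic monitor locking (synchronized(dataset)) with a dedicated lock object held via try-with-resources. As a rough sketch of the idea only (an illustrative approximation with hypothetical names, not the actual org.apache.hadoop.util.AutoCloseableLock source): acquire() takes an internal ReentrantLock and returns an AutoCloseable whose close() releases it, so the lock is dropped when the try block exits, even if the body throws.

import java.util.concurrent.locks.ReentrantLock;

// Illustrative sketch only (class name hypothetical): a ReentrantLock wrapper
// whose acquire() returns an AutoCloseable, so the lock can be held with
// try-with-resources instead of a synchronized block.
public class DatasetLockSketch implements AutoCloseable {
  private final ReentrantLock lock = new ReentrantLock();

  // Blocks until the lock is held, then returns this object so that
  // try-with-resources releases it via close().
  public DatasetLockSketch acquire() {
    lock.lock();
    return this;
  }

  @Override
  public void close() {
    lock.unlock(); // runs when the try block exits, normally or via exception
  }
}

A caller then wraps dataset access as try (DatasetLockSketch lock = datasetLock.acquire()) { ... }, which is the shape of every replacement in this commit.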

View File

@@ -115,6 +115,7 @@
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.client.BlockReportOptions;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.net.DomainPeerServer;
@@ -2877,7 +2878,7 @@ void transferReplicaForPipelineRecovery(final ExtendedBlock b,
final BlockConstructionStage stage;
//get replica information
-synchronized(data) {
+try(AutoCloseableLock lock = data.acquireDatasetLock()) {
Block storedBlock = data.getStoredBlock(b.getBlockPoolId(),
b.getBlockId());
if (null == storedBlock) {

View File

@@ -44,6 +44,7 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -583,7 +584,7 @@ private void scan() {
Map<String, ScanInfo[]> diskReport = getDiskReport();
// Hold FSDataset lock to prevent further changes to the block map
-synchronized(dataset) {
+try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
for (Entry<String, ScanInfo[]> entry : diskReport.entrySet()) {
String bpid = entry.getKey();
ScanInfo[] blockpoolReport = entry.getValue();

View File

@@ -22,6 +22,7 @@
import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus
@@ -454,7 +455,7 @@ private Map<String, FsVolumeSpi> getStorageIDToVolumeMap()
Map<String, FsVolumeSpi> pathMap = new HashMap<>();
FsDatasetSpi.FsVolumeReferences references;
try {
-synchronized (this.dataset) {
+try(AutoCloseableLock lock = this.dataset.acquireDatasetLock()) {
references = this.dataset.getFsVolumeReferences();
for (int ndx = 0; ndx < references.size(); ndx++) {
FsVolumeSpi vol = references.get(ndx);

View File

@@ -35,6 +35,7 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -639,4 +640,9 @@ ReplicaInfo moveBlockAcrossStorage(final ExtendedBlock block,
*/
ReplicaInfo moveBlockAcrossVolumes(final ExtendedBlock block,
FsVolumeSpi destination) throws IOException;
+/**
+ * Acquire the lock of the data set.
+ */
+AutoCloseableLock acquireDatasetLock();
}
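With acquireDatasetLock() on FsDatasetSpi, callers can hold the dataset lock without synchronizing on the dataset object itself. A hedged caller sketch follows (the helper name, bpid and blockId parameters are hypothetical; getStoredBlock is the existing FsDatasetSpi method used in the DataNode hunk above):

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.util.AutoCloseableLock;

// Hypothetical helper, not part of the patch: looks up a stored block while
// holding the dataset lock through the new interface method.
class DatasetLockUsageSketch {
  static Block findStoredBlock(FsDatasetSpi<?> dataset, String bpid, long blockId)
      throws IOException {
    try (AutoCloseableLock lock = dataset.acquireDatasetLock()) {
      // The lock is released when the try block exits, even if getStoredBlock
      // throws, matching the exception-safety of the synchronized blocks it replaces.
      return dataset.getStoredBlock(bpid, blockId);
    }
  }
}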

View File

@@ -45,6 +45,7 @@
import org.apache.hadoop.fs.DF;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -304,7 +305,7 @@ void onMetaFileDeletion(String bpid, long value) {
private void decDfsUsedAndNumBlocks(String bpid, long value,
boolean blockFileDeleted) {
-synchronized(dataset) {
+try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
BlockPoolSlice bp = bpSlices.get(bpid);
if (bp != null) {
bp.decDfsUsed(value);
@@ -316,7 +317,7 @@ private void decDfsUsedAndNumBlocks(String bpid, long value,
}
void incDfsUsedAndNumBlocks(String bpid, long value) {
-synchronized (dataset) {
+try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
BlockPoolSlice bp = bpSlices.get(bpid);
if (bp != null) {
bp.incDfsUsed(value);
@@ -326,7 +327,7 @@ void incDfsUsedAndNumBlocks(String bpid, long value) {
}
void incDfsUsed(String bpid, long value) {
-synchronized(dataset) {
+try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
BlockPoolSlice bp = bpSlices.get(bpid);
if (bp != null) {
bp.incDfsUsed(value);
@@ -337,7 +338,7 @@ void incDfsUsed(String bpid, long value) {
@VisibleForTesting
public long getDfsUsed() throws IOException {
long dfsUsed = 0;
-synchronized(dataset) {
+try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
for(BlockPoolSlice s : bpSlices.values()) {
dfsUsed += s.getDfsUsed();
}

View File

@@ -39,6 +39,7 @@
import org.apache.commons.lang.ArrayUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -115,6 +116,9 @@ public static byte simulatedByte(Block b, long offsetInBlk) {
DatanodeStorage.State.NORMAL;
static final byte[] nullCrcFileData;
+private final AutoCloseableLock datasetLock;
static {
DataChecksum checksum = DataChecksum.newDataChecksum(
DataChecksum.Type.NULL, 16*1024 );
@@ -550,6 +554,7 @@ public SimulatedFSDataset(DataNode datanode, DataStorage storage, Configuration
conf.getLong(CONFIG_PROPERTY_CAPACITY, DEFAULT_CAPACITY),
conf.getEnum(CONFIG_PROPERTY_STATE, DEFAULT_STATE));
this.volume = new SimulatedVolume(this.storage);
+this.datasetLock = new AutoCloseableLock();
}
public synchronized void injectBlocks(String bpid,
@@ -1366,5 +1371,9 @@ public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block,
return null;
}
+@Override
+public AutoCloseableLock acquireDatasetLock() {
+return datasetLock.acquire();
+}
}

View File

@@ -66,6 +66,7 @@
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
@@ -725,7 +726,7 @@ public void run() {
final RecoveringBlock recoveringBlock = new RecoveringBlock(
block.getBlock(), locations, block.getBlock()
.getGenerationStamp() + 1);
-synchronized (dataNode.data) {
+try(AutoCloseableLock lock = dataNode.data.acquireDatasetLock()) {
Thread.sleep(2000);
dataNode.initReplicaRecovery(recoveringBlock);
}

View File

@@ -52,6 +52,7 @@
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -113,7 +114,7 @@ private List<LocatedBlock> createFile(String fileNamePrefix,
/** Truncate a block file */
private long truncateBlockFile() throws IOException {
-synchronized (fds) {
+try(AutoCloseableLock lock = fds.acquireDatasetLock()) {
for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
File f = b.getBlockFile();
File mf = b.getMetaFile();
@@ -138,7 +139,7 @@ private long truncateBlockFile() throws IOException {
/** Delete a block file */
private long deleteBlockFile() {
-synchronized(fds) {
+try(AutoCloseableLock lock = fds.acquireDatasetLock()) {
for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
File f = b.getBlockFile();
File mf = b.getMetaFile();
@@ -154,7 +155,7 @@ private long deleteBlockFile() {
/** Delete block meta file */
private long deleteMetaFile() {
-synchronized(fds) {
+try(AutoCloseableLock lock = fds.acquireDatasetLock()) {
for (ReplicaInfo b : FsDatasetTestUtil.getReplicas(fds, bpid)) {
File file = b.getMetaFile();
// Delete a metadata file
@@ -173,7 +174,7 @@ private void duplicateBlock(long blockId) throws IOException {
* @throws IOException
*/
private void duplicateBlock(long blockId) throws IOException {
-synchronized (fds) {
+try(AutoCloseableLock lock = fds.acquireDatasetLock()) {
ReplicaInfo b = FsDatasetTestUtil.fetchReplicaInfo(fds, bpid, blockId);
try (FsDatasetSpi.FsVolumeReferences volumes =
fds.getFsVolumeReferences()) {

View File

@@ -23,6 +23,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
@@ -450,4 +451,8 @@ public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block,
return null;
}
+@Override
+public AutoCloseableLock acquireDatasetLock() {
+return null;
+}
}